ordinal/0000755000176200001440000000000014533336352011711 5ustar liggesusersordinal/NAMESPACE0000644000176200001440000001012414334204246013121 0ustar liggesusersuseDynLib("ordinal", .registration = TRUE) importFrom(graphics, plot, par, abline, lines, points, contour) importFrom(grDevices, dev.interactive, devAskNewPage) importFrom(utils, "combn", "packageDescription", "as.roman") importFrom(ucminf, ucminf) importFrom(numDeriv, grad, hessian) importFrom("stats", ".checkMFClasses", ".getXlevels", "AIC", "add.scope", "approx", "as.formula", "binomial", "coef", "confint", "dcauchy", "dlogis", "dnorm", "drop.scope", "drop.terms", "extractAIC", "fitted", "formula", "glm.fit", "is.empty.model", "logLik", "model.frame", "model.matrix", "model.offset", "model.response", "model.weights", "na.pass", "napredict", "naprint", "nlminb", "optim", "pcauchy", "pchisq", "pgamma", "plogis", "pnorm", "printCoefmat", "profile", "qchisq", "qlogis", "qnorm", "runif", "setNames", "spline", "terms", "update.formula", "vcov", "nobs", "delete.response", "lm.fit", "resid", "reformulate") ## importFrom(stats, ## nobs) import(methods) ## import(stats) ## importFrom(methods, ## as, ## checkAtAssignment, ## loadMethod) import(Matrix) importFrom(nlme, ranef, # also exported VarCorr) # also exported ## importFrom(numDeriv, ## hessian, ## grad) importFrom(MASS, ginv, addterm, dropterm) ## importFrom(stats, ## coef, ## confint, ## nobs, ## logLik, ## profile, ## vcov, ## extractAIC, ## anova, ## fitted## , ## ## terms ## ## update ## ) # Functions: export(clm) export(clm.fit) export(clmm) export(clm.control) export(clmm.control) export(slice) export(convergence) export(drop.coef) export(nominal_test) export(scale_test) export(condVar) export(ranef) export(VarCorr) export(gnorm, glogis, gcauchy, pgumbel, dgumbel, ggumbel, qgumbel, rgumbel, plgamma, dlgamma, glgamma ## , ## pAO, dAO, gAO, ) ## Methods: S3method(clm.fit, default) S3method(clm.fit, factor) S3method(print, clm) S3method(vcov, clm) S3method(summary, clm) S3method(print, summary.clm) S3method(convergence, clm) S3method(print, convergence.clm) S3method(slice, clm) S3method(plot, slice.clm) S3method(anova, clm) S3method(print, anova.clm) S3method(predict, clm) S3method(coef, clm) S3method(nobs, clm) S3method(coef, summary.clm) S3method(scale_test, clm) S3method(nominal_test, clm) S3method(profile, clm) S3method(confint, clm) S3method(confint, profile.clm) S3method(plot, profile.clm) S3method(logLik, clm) S3method(extractAIC, clm) S3method(model.matrix, clm) S3method(model.frame, clm) S3method(terms, clm) S3method(print, clmm) S3method(vcov, clmm) S3method(summary, clmm) S3method(print, summary.clmm) S3method(logLik, clmm) S3method(extractAIC, clmm) S3method(anova, clmm) S3method(nobs, clmm) ## S3method(profile, clmm) ## S3method(confint, profile.clmm) ## S3method(plot, profile.clmm) ## S3method(update, clmm) ## S3method(fixef, clmm) S3method(ranef, clmm) S3method(condVar, clmm) S3method(VarCorr, clmm) S3method(model.matrix, clmm) ################################################################## ### clm2 stuff: ## Functions: export(clm2) export(clmm2) export(clm2.control) export(clmm2.control) ## Methods: S3method(print, clm2) S3method(vcov, clm2) S3method(summary, clm2) S3method(print, summary.clm2) S3method(anova, clm2) S3method(predict, clm2) S3method(profile, clm2) S3method(confint, clm2) S3method(confint, profile.clm2) S3method(plot, profile.clm2) S3method(logLik, clm2) S3method(extractAIC, clm2) S3method(update, clm2) S3method(dropterm, clm2) S3method(addterm, 
clm2) S3method(print, clmm2) S3method(vcov, clmm2) S3method(summary, clmm2) S3method(print, summary.clmm2) S3method(anova, clmm2) S3method(profile, clmm2) S3method(confint, profile.clmm2) S3method(plot, profile.clmm2) S3method(update, clmm2) ordinal/LICENCE.note0000644000176200001440000000155313277531311013642 0ustar liggesusersCopyrights ========== All files are copyright (C) 2011 R. H. B. Christensen with all rights assigned to R. H. B. Christensen Licence ======= This is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 or 3 of the License (at your option). This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Files share/licenses/GPL-2 and share/licenses/GPL-3 in the R (source or binary) distribution are copies of versions 2 and 3 of the 'GNU General Public License'. These can also be viewed at http://www.r-project.org/licenses/ Rune.Haubo@gmail.com ordinal/data/0000755000176200001440000000000012174033664012621 5ustar liggesusersordinal/data/income.rda0000644000176200001440000000047412174033664014570 0ustar liggesusers[binary R data payload omitted] ordinal/data/soup.rda0000644000176200001440000002170612174033664014305 0ustar liggesusers[binary R data payload omitted]
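The two .rda files above are binary R serializations of the package datasets; their contents are documented in the man/ pages below. As a minimal sketch of how they would be inspected from an installed copy of the package:
\preformatted{
data(income, package = "ordinal")  ## income distribution data
head(income)
data(soup, package = "ordinal")    ## A-not A discrimination study data
str(soup)
}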
ordinal/man/0000755000176200001440000000000014533322576012467 5ustar liggesusersordinal/man/predict.Rd0000644000176200001440000001136013633002525014376 0ustar liggesusers\name{predict.clm} \alias{predict.clm} \title{Predict Method for CLM fits} \description{ Obtains predictions from a cumulative link model. } \usage{ \method{predict}{clm}(object, newdata, se.fit = FALSE, interval = FALSE, level = 0.95, type = c("prob", "class", "cum.prob", "linear.predictor"), na.action = na.pass, ...) } \arguments{ \item{object}{a fitted object of class inheriting from \code{clm}.} \item{newdata}{optionally, a data frame in which to look for variables with which to predict. Note that all predictor variables should be present having the same names as the variables used to fit the model. If the response variable is present in \code{newdata} predictions are obtained for the levels of the response as given by \code{newdata}. If the response variable is omitted from \code{newdata} predictions are obtained for all levels of the response variable for each of the rows of \code{newdata}. } \item{se.fit}{should standard errors of the predictions be provided? Not applicable and ignored when \code{type = "class"}. } \item{interval}{should confidence intervals for the predictions be provided? Not applicable and ignored when \code{type = "class"}. } \item{level}{the confidence level. } \item{type}{the type of predictions. \code{"prob"} gives probabilities, \code{"class"} gives predicted response class membership defined as highest probability prediction, \code{"cum.prob"} gives cumulative probabilities (see details) and \code{"linear.predictor"} gives predictions on the scale of the linear predictor including the boundary categories. } \item{na.action}{function determining what should be done with missing values in \code{newdata}. The default is to predict \code{NA}. } \item{\dots}{further arguments passed to or from other methods. } } \details{ If \code{newdata} is omitted and \code{type = "prob"} a vector of fitted probabilities is returned, identical to the result from \code{fitted}. If \code{newdata} is supplied and the response variable is omitted, then predictions, standard errors and intervals are matrices rather than vectors with the same number of rows as \code{newdata} and with one column for each response class. If \code{type = "class"} predictions are always a vector. If \code{newdata} is omitted, the way missing values in the original fit are handled is determined by the \code{na.action} argument of that fit. If \code{na.action = na.omit} omitted cases will not appear in the residuals, whereas if \code{na.action = na.exclude} they will appear (in predictions, standard errors or interval limits), with residual value \code{NA}. See also \code{\link{napredict}}. If \code{type = "cum.prob"} or \code{type = "linear.predictor"} there will be two sets of predictions, standard errors and intervals; one for j and one for j-1 (in the usual notation) where j = 1, ..., J index the response classes. If \code{newdata} is supplied and the response variable is omitted, then \code{predict.clm} returns much the same thing as \code{predict.polr} (matrices of predictions). Similarly, if \code{type = "class"}.
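For instance, as a minimal sketch based on the wine data also used in the examples below, omitting the response from \code{newdata} yields one row of class probabilities per case:
\preformatted{
fm1 <- clm(rating ~ contact + temp, data=wine)
nd <- expand.grid(temp = c("cold", "warm"), contact = c("no", "yes"))
## a 4 x 5 matrix: one row per case in nd, one column per response class:
predict(fm1, newdata=nd, type="prob")$fit
}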
If the fit is rank-deficient, some of the columns of the design matrix will have been dropped. Prediction from such a fit only makes sense if newdata is contained in the same subspace as the original data. That cannot be checked accurately, so a warning is issued (cf. \code{\link{predict.lm}}). If a flexible link function is used (\code{Aranda-Ordaz} or \code{log-gamma}) standard errors and confidence intervals of predictions do not take the uncertainty in the link-parameter into account. } \value{ A list containing the following components \item{fit}{predictions or fitted values if \code{newdata} is not supplied. } \item{se.fit}{if \code{se.fit=TRUE} standard errors of the predictions otherwise \code{NULL}. } \item{upr, lwr}{if \code{interval=TRUE} lower and upper confidence limits.} } \author{Rune Haubo B Christensen} \seealso{ \code{\link[ordinal]{clm}}, \code{\link[ordinal]{clmm}}. } \examples{ ## simple model: fm1 <- clm(rating ~ contact + temp, data=wine) summary(fm1) ## Fitted values with standard errors and confidence intervals: predict(fm1, se.fit=TRUE, interval=TRUE) # type="prob" ## class predictions for the observations: predict(fm1, type="class") newData <- expand.grid(temp = c("cold", "warm"), contact = c("no", "yes")) ## Predicted probabilities in all five response categories for each of ## the four cases in newData: predict(fm1, newdata=newData, type="prob") ## now include standard errors and intervals: predict(fm1, newdata=newData, se.fit=TRUE, interval=TRUE, type="prob") } \keyword{models} ordinal/man/confint.clmmOld.Rd0000644000176200001440000001075012176227250016001 0ustar liggesusers \name{profile.clmm2} \alias{profile.clmm2} \alias{confint.clmm2} \alias{confint.profile.clmm2} \alias{plot.profile.clmm2} \title{ Confidence intervals and profile likelihoods for the standard deviation for the random term in cumulative link mixed models } \description{ Computes confidence intervals from the profiled likelihood for the standard deviation for the random term in a fitted cumulative link mixed model, or plots the associated profile likelihood function. } \usage{ \method{confint}{profile.clmm2}(object, parm = seq_along(Pnames), level = 0.95, \dots) \method{profile}{clmm2}(fitted, alpha = 0.01, range, nSteps = 20, trace = 1, \dots) \method{plot}{profile.clmm2}(x, parm = seq_along(Pnames), level = c(0.95, 0.99), Log = FALSE, relative = TRUE, fig = TRUE, n = 1e3, ..., ylim = NULL) } \arguments{ \item{object}{ a fitted \code{profile.clmm2} object. } \item{fitted}{ a fitted \code{\link{clmm2}} object. } \item{x}{a \code{profile.clmm2} object. } \item{parm}{ For \code{confint.profile.clmm2}: a specification of which parameters are to be given confidence intervals, either a vector of numbers or a vector of names. If missing, all parameters are considered. Currently only \code{"stDev"} or \code{1} are supported. For \code{plot.profile.clmm2}: a specification of which parameters the profile likelihood is to be plotted for, either a vector of numbers or a vector of names. If missing, all parameters are considered. Currently only \code{"stDev"} or \code{1} are supported. } \item{level}{ the confidence level required. Observe that the model has to be profiled in the appropriate region; otherwise the limits are \code{NA}. } \item{trace}{ logical. Should profiling be traced? Defaults to \code{TRUE} due to the time-consuming nature of the computation. } \item{alpha}{Determines the range of profiling.
By default the likelihood is profiled approximately in the 99\% confidence interval region as determined by the Wald approximation. This is usually sufficient for 95\% profile likelihood confidence limits. } \item{range}{if range is specified, this overrules the range computation based on \code{alpha}. \code{range} should be all positive and \code{stDev} is profiled in \code{range(range)}. } \item{nSteps}{the number of points at which to profile the likelihood function. This determines the resolution and accuracy of the profile likelihood function; higher values give a higher resolution, but also longer computation times. } \item{Log}{should the profile likelihood be plotted on the log-scale? } \item{relative}{should the relative or the absolute likelihood be plotted? } \item{fig}{should the profile likelihood be plotted? } \item{n}{the number of points used in the spline interpolation of the profile likelihood for plotting. } \item{ylim}{overrules default y-limits on the plot of the profile likelihood. } \item{\dots}{ additional argument(s), e.g. graphical parameters for the \code{plot} method. } } \details{ A \code{confint.clmm2} method deliberately does not exist due to the time-consuming nature of the computations. The user is required to compute the profile object first and then call \code{confint} on the profile object to obtain profile likelihood confidence intervals. In \code{plot.profile.clmm2}: at least one of the \code{Log} and \code{relative} arguments has to be \code{TRUE}. } \value{ \code{confint}: A matrix with columns giving lower and upper confidence limits. These will be labelled as (1-level)/2 and 1 - (1-level)/2 in \% (by default 2.5\% and 97.5\%). \code{plot.profile.clmm2} invisibly returns the profile object. } \author{Rune Haubo B Christensen} \seealso{ \code{\link{profile}} and \code{\link{confint}} } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) if(require(lme4)) { ## access cbpp data cbpp2 <- rbind(cbpp[,-(2:3)], cbpp[,-(2:3)]) cbpp2 <- within(cbpp2, { incidence <- as.factor(rep(0:1, each=nrow(cbpp))) freq <- with(cbpp, c(incidence, size - incidence)) }) ## Fit with Laplace approximation: fm1 <- clmm2(incidence ~ period, random = herd, weights = freq, data = cbpp2, Hess = 1) pr.fm1 <- profile(fm1) confint(pr.fm1) par(mfrow = c(2,2)) plot(pr.fm1) plot(pr.fm1, Log=TRUE, relative = TRUE) plot(pr.fm1, Log=TRUE, relative = FALSE) } } \keyword{models} ordinal/man/predictOld.Rd0000644000176200001440000000573313633002525015042 0ustar liggesusers\name{predict.clm2} \alias{predict.clm2} \alias{predict.clmm2} \title{Predict Method for CLM fits} \description{ Obtains predictions from a cumulative link (mixed) model. } \usage{ \method{predict}{clm2}(object, newdata, ...) %% \method{predict}{clmm}(object, newdata, ...) } \arguments{ \item{object}{a fitted object of class inheriting from \code{clm2} including \code{clmm2} objects.} \item{newdata}{optionally, a data frame in which to look for variables with which to predict. Observe that the response variable should also be present.} \item{\dots}{further arguments passed to or from other methods.} } \details{ This method does not duplicate the behavior of \code{predict.polr} in package \code{MASS}, which produces a matrix instead of a vector of predictions. The behavior of \code{predict.polr} can be mimicked as shown in the examples. If \code{newdata} is not supplied, the fitted values are obtained. For \code{clmm2} fits this means predictions that are controlled for the observed value of the random effects.
If the predictions for a random effect of zero, i.e. an average 'subject', are wanted, the same data used to fit the model should be supplied in the \code{newdata} argument. For \code{clm2} fits those two sets of predictions are identical. } \value{ A vector of predicted probabilities. } \author{Rune Haubo B Christensen} \seealso{ \code{\link[ordinal]{clm2}}, \code{\link[ordinal]{clmm2}}. } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) ## More manageable data set for less voluminous printing: (tab26 <- with(soup, table("Product" = PROD, "Response" = SURENESS))) dimnames(tab26)[[2]] <- c("Sure", "Not Sure", "Guess", "Guess", "Not Sure", "Sure") dat26 <- expand.grid(sureness = as.factor(1:6), prod = c("Ref", "Test")) dat26$wghts <- c(t(tab26)) dat26 m1 <- clm2(sureness ~ prod, scale = ~prod, data = dat26, weights = wghts, link = "logistic") predict(m1) mN1 <- clm2(sureness ~ 1, nominal = ~prod, data = dat26, weights = wghts) predict(mN1) predict(update(m1, scale = ~.-prod)) ################################# ## Mimicking the behavior of predict.polr: if(require(MASS)) { ## Fit model from polr example: fm1 <- clm2(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) predict(fm1) set.seed(123) nlev <- 3 y <- gl(nlev, 5) x <- as.numeric(y) + rnorm(15) fm.clm <- clm2(y ~ x) fm.polr <- polr(y ~ x) ## The equivalent of predict.polr(object, type = "probs"): (pmat.polr <- predict(fm.polr, type = "probs")) ndat <- expand.grid(y = gl(nlev,1), x = x) (pmat.clm <- matrix(predict(fm.clm, newdata = ndat), ncol=nlev, byrow = TRUE)) all.equal(c(pmat.clm), c(pmat.polr), tol = 1e-5) # TRUE ## The equivalent of predict.polr(object, type = "class"): (class.polr <- predict(fm.polr)) (class.clm <- factor(apply(pmat.clm, 1, which.max))) all.equal(class.clm, class.polr) ## TRUE } } \keyword{internal} ordinal/man/addtermOld.Rd0000644000176200001440000000653413633002525015032 0ustar liggesusers\name{addterm.clm2} \alias{addterm.clm2} \alias{dropterm.clm2} \title{ Try all one-term additions to and deletions from a model } \description{ Try fitting all models that differ from the current model by adding or deleting a single term from those supplied while maintaining marginality. } \usage{ \method{addterm}{clm2}(object, scope, scale = 0, test = c("none", "Chisq"), k = 2, sorted = FALSE, trace = FALSE, which = c("location", "scale"), \dots) \method{dropterm}{clm2}(object, scope, scale = 0, test = c("none", "Chisq"), k = 2, sorted = FALSE, trace = FALSE, which = c("location", "scale"), \dots) } \arguments{ \item{object}{ A \code{\link{clm2}} object. } \item{scope}{ for \code{addterm}: a formula specifying a maximal model which should include the current one. All additional terms in the maximal model with all marginal terms in the original model are tried. For \code{dropterm}: a formula giving terms which might be dropped. By default, the model formula. Only terms that can be dropped and maintain marginality are actually tried. } \item{scale}{ used in the definition of the AIC statistic for selecting the models. Specifying \code{scale} asserts that the dispersion is known. } \item{test}{ should the results include a test statistic relative to the original model? The Chisq test is a likelihood-ratio test. } \item{k}{ the multiple of the number of degrees of freedom used for the penalty. Only \code{k=2} gives the genuine AIC: \code{k = log(n)} is sometimes referred to as BIC or SBC. } \item{sorted}{ should the results be sorted on the value of AIC?
} \item{trace}{ if \code{TRUE} additional information may be given on the fits as they are tried. } \item{which}{should additions or deletions occur in location or scale models? } \item{\dots}{ arguments passed to or from other methods. }} \value{ A table of class \code{"anova"} containing columns for the change in degrees of freedom, AIC and the likelihood ratio statistic. If \code{test = "Chisq"} a column also contains the p-value from the Chisq test. } \details{ The definition of AIC is only up to an additive constant because the likelihood function is only defined up to an additive constant. } \author{Rune Haubo B Christensen} \seealso{ \code{\link[ordinal]{clm2}}, \code{\link[=anova.clm2]{anova}}, \code{\link{addterm.default}} and \code{\link{dropterm.default}} } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) if(require(MASS)) { ## dropterm, addterm, housing mB1 <- clm2(SURENESS ~ PROD + GENDER + SOUPTYPE, scale = ~ COLD, data = soup, link = "probit", Hess = FALSE) dropterm(mB1, test = "Chi") # or dropterm(mB1, which = "location", test = "Chi") dropterm(mB1, which = "scale", test = "Chi") addterm(mB1, scope = ~.^2, test = "Chi", which = "location") addterm(mB1, scope = ~ . + GENDER + SOUPTYPE, test = "Chi", which = "scale") addterm(mB1, scope = ~ . + AGEGROUP + SOUPFREQ, test = "Chi", which = "location") ## Fit model from polr example: fm1 <- clm2(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) addterm(fm1, ~ Infl + Type + Cont, test= "Chisq", which = "scale") dropterm(fm1, test = "Chisq") } } \keyword{internal} ordinal/man/ranef.Rd0000644000176200001440000000533214334205504014042 0ustar liggesusers\name{condVar} \alias{ranef} \alias{condVar} \alias{ranef.clmm} \alias{condVar.clmm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Extract conditional modes and conditional variances from clmm objects } \description{ The ranef function extracts the conditional modes of the random effects from a clmm object. That is, the modes of the distributions for the random effects given the observed data and estimated model parameters. In Bayesian language they are posterior modes. The conditional variances are computed from the second order derivatives of the conditional distribution of the random effects. Note that these variances are computed at a fixed value of the model parameters and thus do not take the uncertainty of the latter into account. } \usage{ condVar(object, ...) \method{ranef}{clmm}(object, condVar=FALSE, ...) \method{condVar}{clmm}(object, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{a \code{\link{clmm}} object. } \item{condVar}{ an optional logical argument indicating whether conditional variances should be added as attributes to the conditional modes. } \item{\dots}{ currently not used by the \code{clmm} methods. } } \details{ The \code{ranef} method returns a list of \code{data.frame}s; one for each distinct grouping factor. Each \code{data.frame} has as many rows as there are levels for that grouping factor and as many columns as there are random effects for each level. For example a model can contain a random intercept (one column) or a random intercept and a random slope (two columns) for the same grouping factor. If conditional variances are requested, they are returned in the same structure as the conditional modes (random effect estimates/predictions).
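As a minimal sketch, using the wine data from the examples below:
\preformatted{
fm1 <- clmm(rating ~ contact + temp + (1|judge), data=wine)
re <- ranef(fm1, condVar=TRUE)
## a list with one data.frame per grouping factor; here only 'judge':
head(re$judge)
}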
} \value{ The \code{ranef} method returns a list of \code{data.frame}s with the random effects predictions/estimates computed as conditional modes. If \code{condVar = TRUE} a \code{data.frame} with the conditional variances is stored as an attribute on each \code{data.frame} with conditional modes. The \code{condVar} method returns a list of \code{data.frame}s with the conditional variances. It is a convenience function that simply computes the conditional modes and variances, then extracts and returns only the latter. } \author{ Rune Haubo B Christensen } \examples{ fm1 <- clmm(rating ~ contact + temp + (1|judge), data=wine) ## Extract random effect estimates/conditional modes: re <- ranef(fm1, condVar=TRUE) ## Get conditional variances: attr(re$judge, "condVar") ## Alternatively: condVar(fm1) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} ordinal/man/ordinal-package.Rd0000644000176200001440000001102613633002525015764 0ustar liggesusers\name{ordinal-package} \alias{ordinal-package} \alias{ordinal} \docType{package} \title{ Regression Models for Ordinal Data via Cumulative Link (Mixed) Models } \description{ This package facilitates analysis of ordinal (ordered categorical) data via cumulative link models (CLMs) and cumulative link mixed models (CLMMs). Robust and efficient computational methods give speedy and accurate estimation. A wide range of methods for model fits aids the data analysis. } \details{ \tabular{ll}{ Package: \tab ordinal\cr Type: \tab Package\cr License: \tab GPL (>= 2)\cr LazyLoad: \tab yes\cr } This package implements cumulative link models and cumulative link models with normally distributed random effects, denoted cumulative link mixed (effects) models. Cumulative link models are also known as ordered regression models, proportional odds models, proportional hazards models for grouped survival times and ordered logit/probit/... models. Cumulative link models are fitted with \code{\link{clm}} and the main features are: \itemize{ \item{A range of standard link functions are available.} \item{In addition to the standard location (additive) effects, scale (multiplicative) effects are also allowed.} \item{Nominal effects are allowed for any subset of the predictors --- these effects are also known as partial proportional odds effects when using the logit link.} \item{Restrictions can be imposed on the thresholds/cut-points, e.g., symmetry or equidistance.} \item{A (modified) Newton-Raphson algorithm provides the maximum likelihood estimates of the parameters.
The estimation scheme is robust, fast and accurate.} \item{Rank-deficient designs are identified and unidentified coefficients exposed in \code{print} and \code{summary} methods as with \code{\link{glm}}.} \item{A suite of standard methods is available including \code{anova}, \code{add}/\code{drop}-methods, \code{step}, \code{profile}, \code{confint}.} \item{A \code{slice} method facilitates illustration of the likelihood function and a \code{convergence} method summarizes the accuracy of the model estimation.} \item{The \code{predict} method can predict probabilities, response class-predictions and cumulative probabilities, and it provides standard errors and confidence intervals for the predictions.} } Cumulative link mixed models are fitted with \code{\link{clmm}} and the main features are: \itemize{ \item{Any number of random effect terms can be included.} \item{The syntax for the model formula resembles that of \code{\link[lme4]{lmer}} from the \code{lme4} package.} \item{Nested random effects, crossed random effects and partially nested/crossed random effects are allowed.} \item{Estimation is via maximum likelihood using the Laplace approximation or adaptive Gauss-Hermite quadrature (one random effect).} \item{Vector-valued and correlated random effects such as random slopes (random coefficient models) are fitted with the Laplace approximation.} \item{Estimation employs sparse matrix methods from the \code{\link{Matrix}} package. } \item{During model fitting a Newton-Raphson algorithm updates the conditional modes of the random effects a large number of times. The likelihood function is optimized with a general-purpose optimizer.} } A major update of the package in August 2011 introduced new and improved implementations of \code{\link{clm}} and \code{\link{clmm}}. The old implementations are available with \code{\link{clm2}} and \code{\link{clmm2}}. At the time of writing there is functionality in \code{clm2} and \code{clmm2} not yet available in \code{clm} and \code{clmm}. This includes flexible link functions (log-gamma and Aranda-Ordaz links) and a profile method for random effect variance parameters in CLMMs. The new implementations are expected to take over the old implementations at some point, hence the latter will eventually be \code{\link[=.Deprecated]{deprecated}} and \code{\link[=.Defunct]{defunct}}. } \author{ Rune Haubo B Christensen Maintainer: Rune Haubo B Christensen } %% \references{ %% ~~ Literature or other references for background information ~~ %% } \keyword{ package } %% \seealso{ %% ~~ Optional links to other man pages, e.g. ~~ %% %% ~~ \code{\link[:-package]{}} ~~ %% } \examples{ ## A simple cumulative link model: fm1 <- clm(rating ~ contact + temp, data=wine) summary(fm1) ## A simple cumulative link mixed model: fmm1 <- clmm(rating ~ contact + temp + (1|judge), data=wine) summary(fmm1) } ordinal/man/slice.clm.Rd0000644000176200001440000000701412176227250014623 0ustar liggesusers\name{slice} \alias{slice} \alias{slice.clm} \alias{plot.slice.clm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Slice the likelihood of a clm } \description{ Slice likelihood and plot the slice. This is useful for illustrating the likelihood surface around the MLE (maximum likelihood estimate) and provides graphics to substantiate (non-)convergence of a model fit. Also, the closeness of a quadratic approximation to the log-likelihood function can be inspected for relevant parameters. A slice is considerably less computationally demanding than a profile.
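As a rough sketch of the intended workflow (cf. the examples below):
\preformatted{
fm1 <- clm(rating ~ contact + temp, data = wine)
sl1 <- slice(fm1)  ## cheap log-likelihood slices in all parameters
plot(sl1)          ## inspect closeness to the quadratic approximation
}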
} \usage{ slice(object, ...) \method{slice}{clm}(object, parm = seq_along(par), lambda = 3, grid = 100, quad.approx = TRUE, ...) \method{plot}{slice.clm}(x, parm = seq_along(x), type = c("quadratic", "linear"), plot.mle = TRUE, ask = prod(par("mfcol")) < length(parm) && dev.interactive(), ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{object}{for the \code{clm} method an object of class \code{"clm"}, i.e., the result of a call to \code{clm}. } \item{x}{ a \code{slice.clm} object, i.e., the result of \code{slice(clm.object)}. } \item{parm}{ for \code{slice.clm} a numeric or character vector indexing parameters, for \code{plot.slice.clm} only a numeric vector is accepted. By default all parameters are selected. } \item{lambda}{ the number of curvature units on each side of the MLE the slice should cover. } \item{grid}{ the number of values at which to compute the log-likelihood for each parameter. } \item{quad.approx}{ compute and include the quadratic approximation to the log-likelihood function? } \item{type}{ \code{"quadratic"} plots the log-likelihood function which is approximately quadratic, and \code{"linear"} plots the signed square root of the log-likelihood function which is approximately linear. } \item{plot.mle}{ include a vertical line at the MLE (maximum likelihood estimate) when \code{type = "quadratic"}? Ignored for \code{type = "linear"}. } \item{ask}{ logical; if \code{TRUE}, the user is asked before each plot, see \code{\link{par}}\code{(ask=.)}. } \item{\dots}{ further arguments to \code{plot.default} for the plot method. Not used in the slice method. } } %% \details{ bla %% %% ~~ If necessary, more details than the description above ~~ %% } \value{ The \code{slice} method returns a list of \code{data.frame}s with one \code{data.frame} for each parameter slice. Each \code{data.frame} contains in the first column the values of the parameter and in the second column the values of the (positive) log-likelihood \code{"logLik"}. A third column is present if \code{quad.approx = TRUE} and contains the corresponding quadratic approximation to the log-likelihood. The original model fit is included as the attribute \code{"original.fit"}. The \code{plot} method produces a plot of the likelihood slice for each parameter. } \author{ Rune Haubo B Christensen } \examples{ ## fit model: fm1 <- clm(rating ~ contact + temp, data = wine) ## slice the likelihood: sl1 <- slice(fm1) ## three different ways to plot the slices: par(mfrow = c(2,3)) plot(sl1) plot(sl1, type = "quadratic", plot.mle = FALSE) plot(sl1, type = "linear") ## Verify convergence to the optimum: sl2 <- slice(fm1, lambda = 1e-5, quad.approx = FALSE) plot(sl2) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} ordinal/man/clm.fit.Rd0000644000176200001440000000756713633002525014316 0ustar liggesusers\name{clm.fit} \alias{clm.fit} \alias{clm.fit.default} \alias{clm.fit.factor} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Fit Cumulative Link Models %% ~~function to do ... ~~ } \description{ A direct fitter of cumulative link models. } \usage{ clm.fit(y, ...) \method{clm.fit}{default}(y, ...) \method{clm.fit}{factor}(y, X, S, N, weights = rep(1, nrow(X)), offset = rep(0, nrow(X)), S.offset = rep(0, nrow(X)), control = list(), start, doFit=TRUE, link = c("logit", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), threshold = c("flexible", "symmetric", "symmetric2", "equidistant"), ...) 
} %- maybe also 'usage' for other objects documented here. \arguments{ \item{y}{for the default method a list of model components. For the factor method the response variable; a factor, preferably an ordered factor. } \item{X, S, N}{optional design matrices for the regression parameters, scale parameters and nominal parameters respectively. } \item{weights}{optional case weights. } \item{offset}{an optional offset. } \item{S.offset}{an optional offset for the scale part of the model. } \item{control}{a list of control parameters, optionally a call to \code{\link{clm.control}}. } \item{start}{an optional list of starting values of the form \code{c(alpha, beta, zeta)} for the thresholds and nominal effects (\code{alpha}), regression parameters (\code{beta}) and scale parameters (\code{zeta}). } \item{doFit}{logical for whether the model should be fit or the model environment should be returned. } \item{link}{the link function. } \item{threshold}{the threshold structure, see further at \code{\link{clm}}. } \item{\dots}{currently not used.} } \details{ This function does almost the same thing that \code{\link{clm}} does: it fits a cumulative link model. The main differences are that \code{clm.fit} does not set up design matrices from formulae and only does minimal post-processing after parameter estimation. Compared to \code{\link{clm}}, \code{clm.fit} does little to warn the user of any problems with data or model. However, \code{clm.fit} will attempt to identify column rank-deficient designs. Any unidentified parameters are indicated in the \code{aliased} component of the fit. \code{clm.fit.factor} is not able to check if all thresholds are increasing when nominal effects are specified since it needs access to the terms object for the nominal model. If the terms object for the nominal model (\code{nom.terms}) is included in \code{y}, the default method is able to check if all thresholds are increasing. %% In contrast to \code{\link{clm}}, \code{clm.fit} allows non-positive %% weights. } \value{ A list with the following components: \code{aliased, alpha, coefficients, cond.H, convergence, df.residual, edf, fitted.values, gradient, Hessian, logLik, maxGradient, message, n, niter, nobs, tJac, vcov} and optionally \code{beta, zeta} These components are documented in \code{\link{clm}}. } %% \references{ bla %% %% ~put references to the literature/web site here ~ %% } \author{ Rune Haubo B Christensen } %% \note{ bla %% %% ~~further notes~~ %% } %% %% %% ~Make other sections like Warning with \section{Warning }{....} ~ %% \seealso{ \code{\link{clm}} } \examples{ ## A simple example: fm1 <- clm(rating ~ contact + temp, data=wine) summary(fm1) ## get the model frame containing y and X: mf1 <- update(fm1, method="design") names(mf1) res <- clm.fit(mf1$y, mf1$X) ## invoking the factor method stopifnot(all.equal(coef(res), coef(fm1))) names(res) ## Fitting with the default method: mf1$control$method <- "Newton" res2 <- clm.fit(mf1) stopifnot(all.equal(coef(res2), coef(fm1))) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} ordinal/man/gumbel.Rd0000644000176200001440000000614013633002525014217 0ustar liggesusers\name{gumbel} \alias{dgumbel} \alias{pgumbel} \alias{qgumbel} \alias{rgumbel} \alias{ggumbel} \title{ The Gumbel Distribution %% ~~function to do ... ~~ } \description{ Density, distribution function, quantile function, random generation, and gradient of density of the extreme value (maximum and minimum) distributions.
The Gumbel distribution is also known as the extreme value maximum distribution, the double-exponential distribution and the log-Weibull distribution. %% ~~ A concise (1-5 lines) description of what the function does. ~~ } \usage{ dgumbel(x, location = 0, scale = 1, log = FALSE, max = TRUE) pgumbel(q, location = 0, scale = 1, lower.tail = TRUE, max = TRUE) qgumbel(p, location = 0, scale = 1, lower.tail = TRUE, max = TRUE) rgumbel(n, location = 0, scale = 1, max = TRUE) ggumbel(x, max = TRUE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x,q}{ numeric vector of quantiles. } \item{p}{ vector of probabilities. } \item{n}{ number of observations. } \item{location}{ numeric scalar. } \item{scale}{ numeric scalar. } \item{lower.tail}{ logical; if \code{TRUE} (default), probabilities are \eqn{P[X \leq x]}{P[X <= x]} otherwise, \eqn{P[X > x]}. } \item{log}{ logical; if \code{TRUE}, probabilities p are given as log(p). } \item{max}{ distribution for extreme maxima (default) or minima? The default corresponds to the standard right-skew Gumbel distribution. } } \details{ \code{dgumbel}, \code{pgumbel} and \code{ggumbel} are implemented in C for speed and care is taken that 'correct' results are provided for values of \code{NA}, \code{NaN}, \code{Inf}, \code{-Inf} or just extremely small or large. The distribution functions, densities and gradients are used in the Newton-Raphson algorithms in fitting cumulative link models with \code{\link{clm}} and cumulative link mixed models with \code{\link{clmm}}. } \value{ \code{pgumbel} gives the distribution function, \code{dgumbel} gives the density, \code{ggumbel} gives the gradient of the density, \code{qgumbel} is the quantile function, and \code{rgumbel} generates random deviates. } \references{ \url{https://en.wikipedia.org/wiki/Gumbel_distribution} } \seealso{ Gradients of densities are also implemented for the normal, logistic, cauchy, cf. \code{\link[=gnorm]{gfun}} and the log-gamma distribution, cf. \code{\link{lgamma}}. } \author{ Rune Haubo B Christensen } \examples{ ## Illustrating the symmetry of the distribution functions: pgumbel(5) == 1 - pgumbel(-5, max=FALSE) ## TRUE dgumbel(5) == dgumbel(-5, max=FALSE) ## TRUE ggumbel(5) == -ggumbel(-5, max=FALSE) ## TRUE ## More examples: x <- -5:5 (pp <- pgumbel(x)) qgumbel(pp) dgumbel(x) ggumbel(x) (ppp <- pgumbel(x, max=FALSE)) ## Observe that probabilities close to 0 are more accurately determined than ## probabilities close to 1: qgumbel(ppp, max=FALSE) dgumbel(x, max=FALSE) ggumbel(x, max=FALSE) ## random deviates: set.seed(1) (r1 <- rgumbel(10)) set.seed(1) r2 <- -rgumbel(10, max = FALSE) all(r1 == r2) ## TRUE } \keyword{distribution} ordinal/man/soup.Rd0000755000176200001440000000470011616740137013744 0ustar liggesusers\name{soup} \alias{soup} \title{ Discrimination study of packet soup } \description{ The \code{soup} data frame has 1847 rows and 13 variables. 185 respondents participated in an A-not A discrimination test with sureness. Before experimentation the respondents were familiarized with the reference product and during experimentation, the respondents were asked to rate samples on an ordered scale with six categories given by combinations of (reference, not reference) and (sure, not sure, guess) from 'reference, sure' = 1 to 'not reference, sure' = 6. %given by the levels of the \code{SURENESS} variable. } \usage{ soup } \format{ \describe{ \item{\code{RESP}}{ factor with 185 levels: the respondents in the study.
} \item{\code{PROD}}{ factor with 2 levels: index reference and test products. } \item{\code{PRODID}}{ factor with 6 levels: index reference and the five test product variants. } \item{\code{SURENESS}}{ ordered factor with 6 levels: the respondents' ratings of soup samples. } \item{\code{DAY}}{ factor with two levels: experimentation was split over two days. } \item{\code{SOUPTYPE}}{ factor with three levels: the type of soup regularly consumed by the respondent. } \item{\code{SOUPFREQ}}{ factor with 3 levels: the frequency with which the respondent consumes soup. } \item{\code{COLD}}{ factor with two levels: does the respondent have a cold? } \item{\code{EASY}}{ factor with ten levels: How easy did the respondent find the discrimination test? 1 = difficult, 10 = easy. } \item{\code{GENDER}}{ factor with two levels: gender of the respondent. } \item{\code{AGEGROUP}}{ factor with four levels: the age of the respondent. } \item{\code{LOCATION}}{ factor with three levels: three different locations where experimentation took place. } %% \item{\code{SEQ}}{ %% integer vector: the sequence at which experimentation took %% place. Numbering restarted at the second day of experimentation. %% } }} \source{ Data are produced by Unilever Research. Permission to publish the data is granted. } \references{ Christensen, R. H. B., Cleaver, G. and Brockhoff, P. B. (2011) Statistical and Thurstonian models for the A-not A protocol with and without sureness. \emph{Food Quality and Preference, 22}, pp. 542-549. } \keyword{datasets} ordinal/man/income.Rd0000644000176200001440000000315712176227250014230 0ustar liggesusers\name{income} \alias{income} \title{ Income distribution (percentages) in the Northeast US } \description{ Income distribution (percentages) in the Northeast US in 1960 and 1970, adapted from McCullagh (1980). } \usage{ income } \format{ \describe{ \item{\code{year}}{ year. } \item{\code{pct}}{ percentage of population in income class per year. } \item{\code{income}}{ income groups. The unit is thousands of constant (1973) US dollars. } } } \source{ Data are adapted from McCullagh (1980). } \references{ McCullagh, P. (1980) Regression Models for Ordinal Data. \emph{Journal of the Royal Statistical Society. Series B (Methodological)}, Vol. 42, No. 2., pp. 109-142. } \examples{ print(income) ## Convenient table: (tab <- xtabs(pct ~ year + income, income)) ## small rounding error in 1970: rowSums(tab) ## compare link functions via the log-likelihood: links <- c("logit", "probit", "cloglog", "loglog", "cauchit") sapply(links, function(link) { clm(income ~ year, data=income, weights=pct, link=link)$logLik }) ## a heavy tailed (cauchy) or left skew (cloglog) latent distribution ## is fitting best. ## The data are defined as: income.levels <- c(0, 3, 5, 7, 10, 12, 15) income <- paste(income.levels, c(rep("-", 6), "+"), c(income.levels[-1], ""), sep = "") income <- data.frame(year=factor(rep(c("1960", "1970"), each = 7)), pct = c(6.5, 8.2, 11.3, 23.5, 15.6, 12.7, 22.2, 4.3, 6, 7.7, 13.2, 10.5, 16.3, 42.1), income=factor(rep(income, 2), ordered=TRUE, levels=income)) } \keyword{datasets} ordinal/man/lgamma.Rd0000755000176200001440000000673113654501703014220 0ustar liggesusers\name{lgamma} \alias{plgamma} \alias{dlgamma} \alias{glgamma} %- Also NEED an '\alias' for EACH other topic documented here. \title{ The log-gamma distribution %% ~~function to do ... ~~ } \description{ Density, distribution function and gradient of density for the log-gamma distribution.
These are implemented in C for speed and care is taken that the correct results are provided for values of \code{NA}, \code{NaN}, \code{Inf}, \code{-Inf} or just extremely small or large values. The log-gamma is a flexible location-scale distribution on the real line with an extra parameter, \eqn{\lambda}. For \eqn{\lambda = 0} the distribution equals the normal or Gaussian distribution, and for \eqn{\lambda} equal to 1 and -1, the Gumbel minimum and maximum distributions are obtained. %% ~~ A concise (1-5 lines) description of what the function does. ~~ } \usage{ plgamma(q, lambda, lower.tail = TRUE) dlgamma(x, lambda, log = FALSE) glgamma(x, lambda) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x,q}{ numeric vector of quantiles. } \item{lambda}{ numerical scalar } %% \item{location}{ %% numeric scalar. %% } %% \item{scale}{ %% numeric scalar. %% } \item{lower.tail}{ logical; if \code{TRUE} (default), probabilities are \eqn{P[X \leq x]}{P[X <= x]} otherwise, \eqn{P[X > x]}. } \item{log}{ logical; if \code{TRUE}, probabilities p are given as log(p). } } \details{ If \eqn{\lambda < 0} the distribution is right skew, if \eqn{\lambda = 0} the distribution is symmetric (and equals the normal distribution), and if \eqn{\lambda > 0} the distribution is left skew. % % The log-gamma distribution function is defined as \ldots pending. % % The density and gradient of the density are defined as\ldots pending. These distribution functions, densities and gradients are used in the Newton-Raphson algorithms in fitting cumulative link models with \code{\link{clm2}} and cumulative link mixed models with \code{\link{clmm2}} using the log-gamma link. } \value{ \code{plgamma} gives the distribution function, \code{dlgamma} gives the density and \code{glgamma} gives the gradient of the density. } \references{ Genter, F. C. and Farewell, V. T. (1985) Goodness-of-link testing in ordinal regression models. \emph{The Canadian Journal of Statistics}, 13(1), 37-44. } \seealso{ Gradients of densities are also implemented for the normal, logistic, cauchy, cf. \code{\link[=gnorm]{gfun}} and the Gumbel distribution, cf. \code{\link[=dgumbel]{gumbel}}. } \author{ Rune Haubo B Christensen } \examples{ ## Illustrating the link to other distribution functions: x <- -5:5 plgamma(x, lambda = 0) == pnorm(x) all.equal(plgamma(x, lambda = -1), pgumbel(x)) ## TRUE, but: plgamma(x, lambda = -1) == pgumbel(x) plgamma(x, lambda = 1) == pgumbel(x, max = FALSE) dlgamma(x, lambda = 0) == dnorm(x) dlgamma(x, lambda = -1) == dgumbel(x) dlgamma(x, lambda = 1) == dgumbel(x, max = FALSE) glgamma(x, lambda = 0) == gnorm(x) all.equal(glgamma(x, lambda = -1), ggumbel(x)) ## TRUE, but: glgamma(x, lambda = -1) == ggumbel(x) all.equal(glgamma(x, lambda = 1), ggumbel(x, max = FALSE)) ## TRUE, but: glgamma(x, lambda = 1) == ggumbel(x, max = FALSE) ## There is a loss of accuracy, but the difference is very small: glgamma(x, lambda = 1) - ggumbel(x, max = FALSE) ## More examples: x <- -5:5 plgamma(x, lambda = .5) dlgamma(x, lambda = .5) glgamma(x, lambda = .5) } \keyword{distribution} ordinal/man/VarCorr.Rd0000644000176200001440000000315714334205467014340 0ustar liggesusers\name{VarCorr} \alias{VarCorr} \alias{VarCorr.clmm} %- Also NEED an '\alias' for EACH other topic documented here. 
\title{ Extract variance and correlation parameters } \description{ The VarCorr function extracts the variance and (if present) correlation parameters for random effect terms in a cumulative link mixed model (CLMM) fitted with \code{clmm}. } \usage{ \method{VarCorr}{clmm}(x, ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{a \code{\link{clmm}} object. } \item{\dots}{ currently not used by the \code{clmm} method. } } \details{ The \code{VarCorr} method returns a list of matrices; one for each random effects term in the model. Each matrix holds the variances of the random effects for that term in the diagonal and the correlation parameters (if any) in the off-diagonal entries, with the corresponding standard deviations provided as attributes. } \value{ A list of matrices with variances in the diagonal and correlation parameters in the off-diagonal --- one matrix for each random effects term in the model. Standard deviations are provided as attributes to the matrices. } \author{ Rune Haubo B Christensen } \examples{ fm1 <- clmm(rating ~ contact + temp + (1|judge), data=wine) VarCorr(fm1) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} ordinal/man/clmm.controlOld.Rd0000755000176200001440000000322311617247306016024 0ustar liggesusers\name{clmm2.control} \alias{clmm2.control} \title{Set control parameters for cumulative link mixed models} \description{ Set control parameters for cumulative link mixed models } \usage{ clmm2.control(method = c("ucminf", "nlminb", "model.frame"), ..., trace = 0, maxIter = 50, gradTol = 1e-4, maxLineIter = 50, innerCtrl = c("warnOnly", "noWarn", "giveError")) } \arguments{ \item{method}{ the optimizer used to maximize the marginal likelihood function. } \item{\dots}{control arguments passed on to the chosen optimizer; see \code{\link[ucminf]{ucminf}}, \code{\link{optim}}, and \code{\link{nlminb}} for details. } \item{trace}{numerical, if > 0 information is printed about and during the outer optimization process, if < 0 information is also printed about the inner optimization process. Defaults to \code{0}. } \item{maxIter}{the maximum number of Newton updates of the inner optimization. Defaults to \code{50}. } \item{gradTol}{the maximum absolute gradient of the inner optimization. } \item{maxLineIter}{the maximum number of step halvings allowed if a Newton(-Raphson) step overshoots during the inner optimization. } \item{innerCtrl}{the use of warnings/errors if the inner optimization fails to converge. } } \details{ When the default optimizer, \code{ucminf}, is used, the default values of that optimizer's control options are changed to \code{grtol = 1e-5} and \code{grad = "central"}. } \value{ a list of control parameters. } \author{Rune Haubo B Christensen} \seealso{ \code{\link{clmm2}} } \keyword{models} ordinal/man/gfun.Rd0000755000176200001440000000375411617032222013713 0ustar liggesusers\name{gfun} \alias{gnorm} \alias{glogis} \alias{gcauchy} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Gradients of common densities %% ~~function to do ... ~~ } \description{ Gradients of common density functions in their standard forms, i.e., with zero location (mean) and unit scale.
These are implemented in C for speed and care is taken that the correct results are provided for the argument being \code{NA}, \code{NaN}, \code{Inf}, \code{-Inf} or just extremely small or large. %% ~~ A concise (1-5 lines) description of what the function does. ~~ } \usage{ gnorm(x) glogis(x) gcauchy(x) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{x}{ numeric vector of quantiles. } } \details{ The gradients are given by: \itemize{ \item{gnorm: If \eqn{f(x)} is the normal density with mean 0 and spread 1, then the gradient is \deqn{f'(x) = -x f(x)} } \item{glogis: If \eqn{f(x)} is the logistic density with mean 0 and scale 1, then the gradient is \deqn{f'(x) = 2 \exp(-x)^2 (1 + \exp(-x))^{-3} - \exp(-x)(1+\exp(-x))^{-2}} } \item{gcauchy: If \eqn{f(x) = [\pi(1 + x^2)]^{-1}}{f(x) = 1 / [pi (1 + x^2)]} is the cauchy density with mean 0 and scale 1, then the gradient is \deqn{f'(x) = -2x [\pi(1 + x^2)^2]^{-1}}{f'(x) = -2x / [pi (1 + x^2)^2]} } } These gradients are used in the Newton-Raphson algorithms in fitting cumulative link models with \code{\link{clm}} and cumulative link mixed models with \code{\link{clmm}}. } \value{ a numeric vector of gradients. } \seealso{ Gradients of densities are also implemented for the extreme value distribution (\code{\link[=dgumbel]{gumbel}}) and the log-gamma distribution (\code{\link[=lgamma]{log-gamma}}). } \author{ Rune Haubo B Christensen } \examples{ x <- -5:5 gnorm(x) glogis(x) gcauchy(x) } \keyword{distribution} ordinal/man/clmOld.Rd0000644000176200001440000003167412176227250014175 0ustar liggesusers\name{clm2} \alias{clm2} \title{Cumulative link models} \description{ A new improved implementation of CLMs is available in \code{\link{clm}}. Fits cumulative link models with an additive model for the location and a multiplicative model for the scale. The function allows for structured thresholds. A popular special case of a CLM is the proportional odds model. In addition to the standard link functions, two flexible link functions, "Aranda-Ordaz" and "log-gamma", are available, where an extra link function parameter provides additional flexibility. A subset of the predictors can be allowed to have nominal rather than ordinal effects. This has been termed "partial proportional odds" when the link is the logistic. } \usage{ clm2(location, scale, nominal, data, weights, start, subset, na.action, contrasts, Hess = TRUE, model, link = c("logistic", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), lambda, doFit = TRUE, control, threshold = c("flexible", "symmetric", "equidistant"), ...) } \arguments{ \item{location}{ a formula expression as for regression models, of the form \code{response ~ predictors}. The response should be a factor (preferably an ordered factor), which will be interpreted as an ordinal response with levels ordered as in the factor. The model must have an intercept: attempts to remove one will lead to a warning and will be ignored. An offset may be used. See the documentation of \code{\link{formula}} for other details. } \item{scale}{ an optional formula expression as for the location part, of the form \code{ ~ predictors}, i.e. with an empty left hand side. An offset may be used. See the documentation of \code{\link{formula}} for other details. } \item{nominal}{ an optional formula of the form \code{ ~ predictors}, i.e. with an empty left hand side. The effects of the predictors in this formula are assumed to be nominal.
} \item{data}{ an optional data frame in which to interpret the variables occurring in the formulas. } \item{weights}{ optional case weights in fitting. Defaults to 1. } \item{start}{ initial values for the parameters in the format \code{c(alpha, beta, log(zeta), lambda)}. } \item{subset}{ expression saying which subset of the rows of the data should be used in the fit. All observations are included by default. } \item{na.action}{ a function to filter missing data. Applies to terms in all three formulae. } \item{contrasts}{ a list of contrasts to be used for some or all of the factors appearing as variables in the model formula. } \item{Hess}{ logical for whether the Hessian (the observed information matrix) should be computed. Use \code{Hess = TRUE} if you intend to call \code{summary} or \code{vcov} on the fit and \code{Hess = FALSE} in all other instances to save computing time. The argument is ignored if \code{method = "Newton"} where the Hessian is always computed and returned. Defaults to \code{TRUE}. } \item{model}{ logical for whether the model frames should be part of the returned object. } \item{link}{link function, i.e. the type of location-scale distribution assumed for the latent distribution. The \code{Aranda-Ordaz} and \code{log-gamma} links add additional flexibility with a link function parameter, \code{lambda}. The \code{Aranda-Ordaz} link (Aranda-Ordaz, 1983) equals the logistic link when \code{lambda = 1} and approaches the \code{loglog} link when \code{lambda} approaches zero. The \code{log-gamma} link (Genter and Farewell, 1985) equals the \code{loglog} link when \code{lambda = 1}, the \code{probit} link when \code{lambda = 0} and the \code{cloglog} link when \code{lambda = -1}. } \item{lambda}{numerical scalar: the link function parameter. Used in combination with link \code{Aranda-Ordaz} or \code{log-gamma} and otherwise ignored. If lambda is specified, the model is estimated with lambda fixed at this value and otherwise lambda is estimated by ML. For \code{Aranda-Ordaz} lambda has to be positive; \code{> 1e-5} for numerical reasons. } \item{doFit}{logical for whether the model should be fit or the model environment should be returned. } \item{control}{a call to \code{\link{clm2.control}}. } \item{threshold}{specifies a potential structure for the thresholds (cut-points). \code{"flexible"} provides the standard unstructured thresholds, \code{"symmetric"} restricts the distance between the thresholds to be symmetric around the central one or two thresholds for odd or even numbers of thresholds respectively, and \code{"equidistant"} restricts the distance between consecutive thresholds to the same value. } \item{\dots}{ additional arguments are passed on to \code{\link{clm2.control}} and possibly further on to the optimizer, which can lead to surprising error or warning messages when mistyping arguments etc. } } \details{ There are methods for the standard model-fitting functions, including \code{\link{summary}}, \code{\link{vcov}}, \code{\link[ordinal]{predict}}, \code{\link[=anova.clm2]{anova}}, \code{\link{logLik}}, \code{\link[=profile.clm2]{profile}}, \code{\link[=profile.clm2]{plot.profile}}, \code{\link[=confint.clm2]{confint}}, \code{\link[=update.clm2]{update}}, \code{\link[=addterm.clm2]{dropterm}}, \code{\link[=addterm.clm2]{addterm}}, and an \code{extractAIC} method.
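For instance, a minimal sketch of such a fit and a few of these methods, using the soup data from this package:
\preformatted{
fm1 <- clm2(SURENESS ~ PROD, data = soup)                ## proportional odds model
fm2 <- clm2(SURENESS ~ PROD, scale = ~ PROD, data = soup) ## adds a scale effect
summary(fm2)
anova(fm1, fm2)  ## likelihood ratio test of the scale effect
}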
The design of the implementation is inspired by an idea proposed by Douglas Bates in the talk "Exploiting sparsity in model matrices" presented at the DSC conference in Copenhagen, July 14 2009. Basically an environment is set up with all the information needed to optimize the likelihood function. Extractor functions are then used to get the value of the likelihood at current or given parameter values and to extract current values of the parameters. All computations are performed inside the environment and relevant variables are updated during the fitting process. After optimizer termination relevant variables are extracted from the environment and the remaining ones are discarded. Some aspects of \code{clm2} and of the associated methods, for instance how starting values are obtained, are inspired by \code{\link[MASS]{polr}} from package \code{MASS}. } \value{ If \code{doFit = FALSE} the result is an environment representing the model ready to be optimized. If \code{doFit = TRUE} the result is an object of class \code{"clm2"} with the following components: \item{beta}{the parameter estimates of the location part. } \item{zeta}{the parameter estimates of the scale part on the log scale; the scale parameter estimates on the original scale are given by \code{exp(zeta)}. } \item{Alpha}{vector or matrix of the threshold parameters. } \item{Theta}{vector or matrix of the thresholds. } \item{xi}{vector of threshold parameters, which, given a threshold function (e.g. \code{"equidistant"}) and possible nominal effects, define the class boundaries, \code{Theta}. } \item{lambda}{the value of lambda if lambda is supplied or estimated, otherwise missing. } \item{coefficients}{the coefficients of the intercepts (\code{theta}), the location (\code{beta}), the scale (\code{zeta}), and the link function parameter (\code{lambda}). } \item{df.residual}{the number of residual degrees of freedom, calculated using the weights. } \item{fitted.values}{vector of fitted values for each observation. An observation here is each of the scalar elements of the multinomial table and not a multinomial vector. } \item{convergence}{\code{TRUE} if the gradient-based convergence criterion is met and \code{FALSE} otherwise. } \item{gradient}{vector of gradients for all the parameters at termination of the optimizer. } \item{optRes}{list with results from the optimizer. The contents of the list depend on the choice of optimizer. } \item{logLik}{the log likelihood of the model at optimizer termination. } \item{Hessian}{if the model was fitted with \code{Hess = TRUE}, this is the Hessian matrix of the parameters at the optimum. } \item{scale}{\code{model.frame} for the scale model. } \item{location}{\code{model.frame} for the location model. } \item{nominal}{\code{model.frame} for the nominal model. } \item{edf}{the (effective) number of degrees of freedom used by the model. } \item{start}{the starting values. } \item{convTol}{convergence tolerance for the maximum absolute gradient of the parameters at termination of the optimizer. } \item{method}{character, the optimizer. } \item{y}{the response variable. } \item{lev}{the names of the levels of the response variable. } \item{nobs}{the (effective) number of observations, calculated as the sum of the weights. } \item{threshold}{character, the threshold function used in the model. } \item{estimLambda}{\code{1} if lambda is estimated in one of the flexible link functions and \code{0} otherwise. } \item{link}{character, the link function used in the model. } \item{call}{the matched call.
} \item{contrasts}{contrasts applied to terms in location and scale models. } \item{na.action}{the function used to filter missing data. } } \author{Rune Haubo B Christensen} \references{ Agresti, A. (2002) \emph{Categorical Data Analysis.} Second edition. Wiley. Aranda-Ordaz, F. J. (1983) An Extension of the Proportional-Hazards Model for Grouped Data. \emph{Biometrics}, 39, 109-117. Genter, F. C. and Farewell, V. T. (1985) Goodness-of-link testing in ordinal regression models. \emph{The Canadian Journal of Statistics}, 13(1), 37-44. Christensen, R. H. B., Cleaver, G. and Brockhoff, P. B. (2011) Statistical and Thurstonian models for the A-not A protocol with and without sureness. \emph{Food Quality and Preference, 22}, pp. 542-549. } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) ## A tabular data set: (tab26 <- with(soup, table("Product" = PROD, "Response" = SURENESS))) dimnames(tab26)[[2]] <- c("Sure", "Not Sure", "Guess", "Guess", "Not Sure", "Sure") dat26 <- expand.grid(sureness = as.factor(1:6), prod = c("Ref", "Test")) dat26$wghts <- c(t(tab26)) m1 <- clm2(sureness ~ prod, scale = ~prod, data = dat26, weights = wghts, link = "logistic") ## print, summary, vcov, logLik, AIC: m1 summary(m1) vcov(m1) logLik(m1) AIC(m1) coef(m1) coef(summary(m1)) ## link functions: m2 <- update(m1, link = "probit") m3 <- update(m1, link = "cloglog") m4 <- update(m1, link = "loglog") m5 <- update(m1, link = "cauchit", start = coef(m1)) m6 <- update(m1, link = "Aranda-Ordaz", lambda = 1) m7 <- update(m1, link = "Aranda-Ordaz") m8 <- update(m1, link = "log-gamma", lambda = 1) m9 <- update(m1, link = "log-gamma") ## nominal effects: mN1 <- clm2(sureness ~ 1, nominal = ~ prod, data = dat26, weights = wghts, link = "logistic") anova(m1, mN1) ## optimizer / method: update(m1, scale = ~ 1, method = "Newton") update(m1, scale = ~ 1, method = "nlminb") update(m1, scale = ~ 1, method = "optim") \dontshow{ update(m1, scale = ~ 1, method = "model.frame") update(m1, location = ~.-prod, scale = ~ 1, nominal = ~ prod, method = "model.frame") } ## threshold functions mT1 <- update(m1, threshold = "symmetric") mT2 <- update(m1, threshold = "equidistant") anova(m1, mT1, mT2) ## Extend example from polr in package MASS: ## Fit model from polr example: if(require(MASS)) { fm1 <- clm2(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) fm1 summary(fm1) ## With probit link: summary(update(fm1, link = "probit")) ## Allow scale to depend on Cont-variable summary(fm2 <- update(fm1, scale =~ Cont)) anova(fm1, fm2) ## which seems to improve the fit } ################################# ## It is possible to fit multinomial models (i.e. 
with nominal ## effects) as the following example shows: if(require(nnet)) { (hous1.mu <- multinom(Sat ~ 1, weights = Freq, data = housing)) (hous1.clm <- clm2(Sat ~ 1, weights = Freq, data = housing)) ## It is the same likelihood: all.equal(logLik(hous1.mu), logLik(hous1.clm)) ## and the same fitted values: fitHous.mu <- t(fitted(hous1.mu))[t(col(fitted(hous1.mu)) == unclass(housing$Sat))] all.equal(fitted(hous1.clm), fitHous.mu) ## The coefficients of multinom can be retrieved from the clm2-object ## by: Pi <- diff(c(0, plogis(hous1.clm$xi), 1)) log(Pi[2:3]/Pi[1]) ## A larger model with explanatory variables: (hous.mu <- multinom(Sat ~ Infl + Type + Cont, weights = Freq, data = housing)) (hous.clm <- clm2(Sat ~ 1, nominal = ~ Infl + Type + Cont, weights = Freq, data = housing)) ## Almost the same likelihood: all.equal(logLik(hous.mu), logLik(hous.clm)) ## And almost the same fitted values: fitHous.mu <- t(fitted(hous.mu))[t(col(fitted(hous.mu)) == unclass(housing$Sat))] all.equal(fitted(hous.clm), fitHous.mu) all.equal(round(fitted(hous.clm), 5), round(fitHous.mu, 5)) } } \keyword{models} ordinal/man/updateOld.Rd0000644000176200001440000000316513633002525014671 0ustar liggesusers\name{update.clm2} \alias{update.clm2} \alias{update.clmm2} \title{Update method for cumulative link models} \description{ Update method for cumulative link models fitted with \code{clm2}. This makes it possible to use e.g. \code{update(obj, location = ~ . - var1, scale = ~ . + var2)} } \usage{ \method{update}{clm2}(object, formula., location, scale, nominal,..., evaluate = TRUE) \method{update}{clmm2}(object, formula., location, scale, nominal,..., evaluate = TRUE) } \arguments{ \item{object}{a \code{\link{clm2}} object. } \item{formula.}{not used---unfortunately this argument is part of the default method. } \item{location}{an optional new formula for the location; see \code{\link{update.formula}} for details. } \item{scale}{an optional new formula for the scale; see \code{\link{update.formula}} for details. } \item{nominal}{an optional new formula for nominal effects; see \code{\link{update.formula}} for details. } \item{\dots}{additional arguments to the call, or arguments with changed values. } \item{evaluate}{if \code{TRUE}, evaluate the new call; otherwise return the call. } } \value{ If \code{evaluate = TRUE} the fitted object is returned, otherwise the updated call. } \author{Rune Haubo B Christensen} \examples{ options(contrasts = c("contr.treatment", "contr.poly")) m1 <- clm2(SURENESS ~ PROD, scale = ~PROD, data = soup, link = "logistic") m2 <- update(m1, link = "probit") m3 <- update(m1, link = "cloglog") m4 <- update(m1, link = "loglog") anova(m1, update(m1, scale = ~.-PROD)) mT1 <- update(m1, threshold = "symmetric") } \keyword{internal} ordinal/man/clm.anova.Rd0000644000176200001440000000366413633002525014632 0ustar liggesusers\name{anova.clm} %%\alias{anova} \alias{anova.clm} \title{ANODE Tables and Likelihood ratio test of cumulative link models} \description{ Type I, II, and III analysis of deviance (ANODE) tables for cumulative link models and comparison of cumulative link models with likelihood ratio tests. Models may differ by terms in location, scale and nominal formulae, in link, and in threshold function. } \usage{ \method{anova}{clm}(object, ..., type = c("I", "II", "III", "1", "2", "3")) } \arguments{ \item{object}{a \code{\link{clm}} object. } \item{\dots}{optionally one or more additional \code{\link{clm}} objects.
} \item{type}{the type of hypothesis test if \code{anova} is called with a single model; ignored if more than one model is passed to the method. } } \details{ The ANODE table returned when \code{anova} is called with a single model applies only to terms in \code{formula}, that is, terms in \code{nominal} and \code{scale} are ignored. } \value{ An analysis of deviance table based on Wald chi-square tests if called with a single model and a comparison of models with likelihood ratio tests if called with more than one model. } \author{Rune Haubo B Christensen} \seealso{ \code{\link[ordinal]{clm}} } \examples{ ## Analysis of deviance tables with Wald chi-square tests: fm <- clm(rating ~ temp * contact, scale=~contact, data=wine) anova(fm, type="I") anova(fm, type="II") anova(fm, type="III") options(contrasts = c("contr.treatment", "contr.poly")) m1 <- clm2(SURENESS ~ PROD, scale = ~PROD, data = soup, link = "logistic") ## anova anova(m1, update(m1, scale = ~.-PROD)) mN1 <- clm2(SURENESS ~ 1, nominal = ~PROD, data = soup, link = "logistic") anova(m1, mN1) anova(m1, update(m1, scale = ~.-PROD), mN1) ## Fit model from polr example: if(require(MASS)) { fm1 <- clm2(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) anova(fm1, update(fm1, scale =~ Cont)) } } \keyword{models} ordinal/man/clm.control.Rd0000644000176200001440000000527013633002525015201 0ustar liggesusers\name{clm.control} \alias{clm.control} %- Also NEED an '\alias' for EACH other topic documented here. \title{Set control parameters for cumulative link models} \description{ Set control parameters for cumulative link models } \usage{ clm.control(method = c("Newton", "model.frame", "design", "ucminf", "nlminb", "optim"), sign.location = c("negative", "positive"), sign.nominal = c("positive", "negative"), ..., trace = 0L, maxIter = 100L, gradTol = 1e-06, maxLineIter = 15L, relTol = 1e-6, tol = sqrt(.Machine$double.eps), maxModIter = 5L, convergence = c("warn", "silent", "stop", "message")) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{method}{\code{"Newton"} fits the model by maximum likelihood and \code{"model.frame"} causes \code{\link{clm}} to return the \code{model.frame}, \code{"design"} causes \code{\link{clm}} to return a list of design matrices etc. that can be used with \code{\link{clm.fit}}. \code{ucminf}, \code{nlminb} and \code{optim} refer to general purpose optimizers. } \item{sign.location}{change sign of the location part of the model. } \item{sign.nominal}{change sign of the nominal part of the model. } \item{trace}{numerical, if \code{> 0} information is printed about and during the optimization process. Defaults to \code{0}. } \item{maxIter}{the maximum number of Newton-Raphson iterations. Defaults to \code{100}. } \item{gradTol}{the maximum absolute gradient; defaults to \code{1e-6}. } \item{maxLineIter}{the maximum number of step halvings allowed if a Newton(-Raphson) step overshoots. Defaults to \code{15}. } \item{relTol}{relative convergence tolerance: relative change in the parameter estimates between Newton iterations. Defaults to \code{1e-6}. } \item{tol}{numerical tolerance on eigenvalues to determine negative-definiteness of Hessian. If the Hessian of a model fit is negative definite, the fitting algorithm did not converge. If the Hessian is singular, the fitting algorithm did converge albeit not to a \emph{unique} optimum, so one or more parameters are not uniquely determined even though the log-likelihood value is.
} \item{maxModIter}{the maximum allowable number of consecutive iterations where the Newton step needs to be modified to be a descent direction. Defaults to \code{5}. } \item{convergence}{action to take if the fitting algorithm did not converge. } \item{\dots}{control arguments passed on to \code{\link{ucminf}}, \code{\link{nlminb}} or \code{\link{optim}}. } } \value{ a list of control parameters. } \author{Rune Haubo B Christensen} \seealso{ \code{\link{clm}} } \keyword{models} ordinal/man/clm.controlOld.Rd0000755000176200001440000000332511616455300015644 0ustar liggesusers\name{clm2.control} \alias{clm2.control} \title{Set control parameters for cumulative link models} \description{ Set control parameters for cumulative link models } \usage{ clm2.control(method = c("ucminf", "Newton", "nlminb", "optim", "model.frame"), ..., convTol = 1e-4, trace = 0, maxIter = 100, gradTol = 1e-5, maxLineIter = 10) } \arguments{ \item{method}{ the optimizer used to maximize the likelihood function. \code{"Newton"} only works for models without \code{scale}, structured thresholds and flexible link functions, but is considerably faster than the other optimizers when applicable. \code{model.frame} simply returns a list with the location, scale and nominal model frames. \code{"optim"} uses the \code{"BFGS"} method. } \item{\dots}{control arguments passed on to the chosen optimizer; see \code{\link[ucminf]{ucminf}}, \code{\link{optim}}, and \code{\link{nlminb}} for details. } \item{convTol}{convergence criterion on the size of the maximum absolute gradient. } \item{trace}{numerical, if > 0 information is printed about and during the optimization process. Defaults to \code{0}. } \item{maxIter}{the maximum number of Newton-Raphson iterations. Defaults to \code{100}. } \item{gradTol}{the maximum absolute gradient. This is the termination criterion and defaults to \code{1e-5}. } \item{maxLineIter}{the maximum number of step halvings allowed if a Newton(-Raphson) step overshoots. Defaults to \code{10}. } } \value{ a list of control parameters. } \author{Rune Haubo B Christensen} \seealso{ \code{\link{clm2}} } \keyword{models} ordinal/man/clmm.Rd0000644000176200001440000001572113277541507013705 0ustar liggesusers\name{clmm} \alias{clmm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Cumulative Link Mixed Models } \description{ Fits Cumulative Link Mixed Models with one or more random effects via the Laplace approximation or quadrature methods } \usage{ clmm(formula, data, weights, start, subset, na.action, contrasts, Hess = TRUE, model = TRUE, link = c("logit", "probit", "cloglog", "loglog", "cauchit"), doFit = TRUE, control = list(), nAGQ = 1L, threshold = c("flexible", "symmetric", "symmetric2", "equidistant"), ...) %% also document getNLA(rho, par) here and include examples } %- maybe also 'usage' for other objects documented here. \arguments{ \item{formula}{ a two-sided linear formula object describing the fixed-effects part of the model, with the response on the left of a ~ operator and the terms, separated by + operators, on the right. The vertical bar character "|" separates an expression for a model matrix and a grouping factor. } \item{data}{ an optional data frame in which to interpret the variables occurring in the formula. } \item{weights}{ optional case weights in fitting. Defaults to 1.
} \item{start}{ optional initial values for the parameters in the format \code{c(alpha, beta, tau)}, where \code{alpha} are the threshold parameters, \code{beta} are the fixed regression parameters and \code{tau} are variance parameters for the random effects on the log scale. } \item{subset}{ expression saying which subset of the rows of the data should be used in the fit. All observations are included by default. } \item{na.action}{ a function to filter missing data. } \item{contrasts}{ a list of contrasts to be used for some or all of the factors appearing as variables in the model formula. } \item{Hess}{ logical for whether the Hessian (the inverse of the observed information matrix) should be computed. Use \code{Hess = TRUE} if you intend to call \code{summary} or \code{vcov} on the fit and \code{Hess = FALSE} in all other instances to save computing time. } \item{model}{ logical for whether the model frames should be part of the returned object. } \item{link}{ link function, i.e. the type of location-scale distribution assumed for the latent distribution. The default \code{"logit"} link gives the proportional odds mixed model. } \item{doFit}{ logical for whether the model should be fit or the model environment should be returned. } \item{control}{ a call to \code{\link{clmm.control}}. } \item{nAGQ}{ integer; the number of quadrature points to use in the adaptive Gauss-Hermite quadrature approximation to the likelihood function. The default (\code{1}) gives the Laplace approximation. Higher values generally provide higher precision at the expense of longer computation times, and values between 5 and 10 generally provide accurate maximum likelihood estimates. Negative values give the non-adaptive Gauss-Hermite quadrature approximation, which is generally faster but less accurate than the adaptive version. See the references for further details. Quadrature methods are only available with a single random effects term; the Laplace approximation is always available. } \item{threshold}{ specifies a potential structure for the thresholds (cut-points). \code{"flexible"} provides the standard unstructured thresholds, \code{"symmetric"} restricts the distance between the thresholds to be symmetric around the central one or two thresholds for odd or even numbers of thresholds, respectively, \code{"symmetric2"} restricts the latent mean in the reference group to zero; this means that the central threshold is zero (for an even number of response levels) or that the two central thresholds are equal apart from their sign (for an odd number of response levels), and \code{"equidistant"} restricts the distance between consecutive thresholds to be of the same size. } \item{\dots}{ additional arguments are passed on to \code{\link{clm.control}}. } } \details{ This is a new (as of August 2011) improved implementation of CLMMs. The old implementation is available in \code{\link{clmm2}}. Some features are not yet available in \code{clmm}; for instance scale effects, nominal effects and flexible link functions are currently only available in \code{clmm2}. \code{clmm} is expected to take over \code{clmm2} at some point. There are standard print, summary and anova methods implemented for \code{"clmm"} objects.
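As a minimal sketch of the quadrature options (assuming the \code{wine} data shipped with the package; note that \code{nAGQ != 1} requires a single random-effects term): \preformatted{
## Laplace approximation (the default, nAGQ = 1):
fm.lap <- clmm(rating ~ temp + contact + (1|judge), data = wine)
## Adaptive Gauss-Hermite quadrature with 7 points:
fm.agq <- clmm(rating ~ temp + contact + (1|judge), data = wine,
               nAGQ = 7)
## The two log-likelihoods are expected to agree closely here:
logLik(fm.lap)
logLik(fm.agq)
}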
} \value{ a list containing \item{alpha}{threshold parameters.} \item{beta}{fixed effect regression parameters.} \item{stDev}{standard deviation of the random effect terms.} \item{tau}{\code{log(stDev)} - the scale at which the log-likelihood function is optimized.} \item{coefficients}{the estimated model parameters = \code{c(alpha, beta, tau)}.} \item{control}{List of control parameters as generated by \code{\link{clm.control}}. } \item{Hessian}{Hessian of the model coefficients.} \item{edf}{the estimated degrees of freedom used by the model = \code{length(coefficients)}.} \item{nobs}{\code{sum(weights)}.} \item{n}{length(y).} \item{fitted.values}{fitted values evaluated with the random effects at their conditional modes.} \item{df.residual}{residual degrees of freedom; \code{length(y) - sum(weights)}} \item{tJac}{Jacobian of the threshold function corresponding to the mapping from standard flexible thresholds to those used in the model.} \item{terms}{the terms object for the fixed effects.} \item{contrasts}{contrasts applied to the fixed model terms.} \item{na.action}{the function used to filter missing data.} \item{call}{the matched call.} \item{logLik}{value of the log-likelihood function for the model at the optimum.} \item{Niter}{number of Newton iterations in the inner loop update of the conditional modes of the random effects.} \item{optRes}{list of results from the optimizer.} \item{ranef}{list of the conditional modes of the random effects.} \item{condVar}{list of the conditional variance of the random effects at their conditional modes.} } %% \references{ bla %% %% ~put references to the literature/web site here ~ %% } \author{ Rune Haubo B Christensen } \examples{ ## Cumulative link model with one random term: fmm1 <- clmm(rating ~ temp + contact + (1|judge), data = wine) summary(fmm1) \dontrun{ ## May take a couple of seconds to run this. ## Cumulative link mixed model with two random terms: mm1 <- clmm(SURENESS ~ PROD + (1|RESP) + (1|RESP:PROD), data = soup, link = "probit", threshold = "equidistant") mm1 summary(mm1) ## test random effect: mm2 <- clmm(SURENESS ~ PROD + (1|RESP), data = soup, link = "probit", threshold = "equidistant") anova(mm1, mm2) } } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} ordinal/man/nominal.test.Rd0000644000176200001440000000521412175703724015371 0ustar liggesusers\name{nominal_test} \alias{nominal_test} \alias{scale_test} \alias{nominal_test.clm} \alias{scale_test.clm} \title{ Likelihood ratio tests of model terms in scale and nominal formulae } \description{ Add all model terms to scale and nominal formulae and perform likelihood ratio tests. These tests can be viewed as goodness-of-fit tests. With the logit link, \code{nominal_test} provides likelihood ratio tests of the proportional odds assumption. The \code{scale_test} tests can be given a similar interpretation. } \usage{ nominal_test(object, ...) \method{nominal_test}{clm}(object, scope, trace=FALSE, ...) scale_test(object, ...) \method{scale_test}{clm}(object, scope, trace=FALSE, ...) } \arguments{ \item{object}{for the \code{clm} method an object of class \code{"clm"}, i.e., the result of a call to \code{clm}. } \item{scope}{ a formula or character vector specifying the terms to add to scale or nominal. In \code{nominal_test} terms in scope already in \code{nominal} are ignored. In \code{scale_test} terms in scope already in \code{scale} are ignored. 
In \code{nominal_test} the default is to add all terms from \code{formula} (location part) and \code{scale} that are not also in \code{nominal}. In \code{scale_test} the default is to add all terms from \code{formula} (location part) that are not also in \code{scale}. } \item{trace}{ if \code{TRUE} additional information may be given on the fits as they are tried. } \item{\dots}{ arguments passed to or from other methods. } } \value{ A table of class \code{"anova"} containing columns for the change in degrees of freedom, AIC, the likelihood ratio statistic and a p-value based on the asymptotic chi-square distribution of the likelihood ratio statistic under the null hypothesis. } \details{ The definition of AIC is only up to an additive constant because the likelihood function is only defined up to an additive constant. } \author{Rune Haubo B Christensen} \examples{ ## Fit cumulative link model: fm <- clm(rating ~ temp + contact, data=wine) summary(fm) ## test partial proportional odds assumption for temp and contact: nominal_test(fm) ## no evidence of non-proportional odds. ## test if there are signs of scale effects: scale_test(fm) ## no evidence of scale effects. ## tests of scale and nominal effects for the housing data from MASS: if(require(MASS)) { fm1 <- clm(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) scale_test(fm1) nominal_test(fm1) ## Evidence of multiplicative/scale effect of 'Cont'. This is a breach ## of the proportional odds assumption. } } \keyword{models} ordinal/man/convergence.clm.Rd0000644000176200001440000000474012205357257016031 0ustar liggesusers\name{convergence} \alias{convergence} \alias{convergence.clm} \alias{print.convergence.clm} \title{Check convergence of cumulative link models} \description{ Check the accuracy of the parameter estimates of cumulative link models. The number of correct decimals and number of significant digits is given for the maximum likelihood estimates of the parameters in a cumulative link model fitted with \code{\link{clm}}. } \usage{ convergence(object, ...) \method{convergence}{clm}(object, digits = max(3, getOption("digits") - 3), tol = sqrt(.Machine$double.eps), ...) } \arguments{ \item{object}{for the \code{clm} method an object of class \code{"clm"}, i.e., the result of a call to \code{clm}. } \item{digits}{the number of digits in the printed table. } \item{tol}{numerical tolerance to judge if the Hessian is positive definite from its smallest eigenvalue. } \item{...}{arguments to and from methods. Not used by the \code{clm} method. } } \value{ Convergence information. In particular a table where the \code{Error} column gives the numerical error in the parameter estimates. These numbers express how far the parameter estimates in the fitted model are from the true maximum likelihood estimates for this model. The \code{Cor.Dec} gives the number of correct decimals with which the parameters are determined and the \code{Sig.Dig} gives the number of significant digits with which the parameters are determined. The number denoted \code{logLik.error} is the error in the value of log-likelihood in the fitted model at the parameter values of that fit. An accurate determination of the log-likelihood is essential for accurate likelihood ratio tests in model comparison. } \details{ The number of correct decimals is defined as... The number of significant digits is defined as ... The number of correct decimals and the number of significant digits are determined from the numerical errors in the parameter estimates.
The numerical errors are determined from the Method Independent Error Theorem (Elden et al, 2004) and are based on the Newton step evaluated at convergence. } \references{ Elden, L., Wittmeyer-Koch, L. and Nielsen, H. B. (2004) \emph{Introduction to Numerical Computation --- analysis and Matlab illustrations.} Studentliteratur. } %% \seealso{ %% } \examples{ ## Simple model: fm1 <- clm(rating ~ contact + temp, data=wine) summary(fm1) convergence(fm1) } \author{Rune Haubo B Christensen} \keyword{models} ordinal/man/confintOld.Rd0000644000176200001440000001370313633002525015046 0ustar liggesusers\name{confint.clm2} \alias{confint.clm2} \alias{confint.profile.clm2} \alias{profile.clm2} \alias{plot.profile.clm2} \title{ Confidence intervals and profile likelihoods for parameters in cumulative link models } \description{ Computes confidence intervals from the profiled likelihood for one or more parameters in a fitted cumulative link model, or plots the profile likelihood function. } \usage{ \method{confint}{clm2}(object, parm, level = 0.95, whichL = seq_len(p), whichS = seq_len(k), lambda = TRUE, trace = 0, \dots) \method{confint}{profile.clm2}(object, parm = seq_along(Pnames), level = 0.95, \dots) \method{profile}{clm2}(fitted, whichL = seq_len(p), whichS = seq_len(k), lambda = TRUE, alpha = 0.01, maxSteps = 50, delta = LrootMax/10, trace = 0, stepWarn = 8, \dots) \method{plot}{profile.clm2}(x, parm = seq_along(Pnames), level = c(0.95, 0.99), Log = FALSE, relative = TRUE, fig = TRUE, n = 1e3, ..., ylim = NULL) } \arguments{ \item{object}{ a fitted \code{\link{clm2}} object or a \code{profile.clm2} object. } \item{fitted}{ a fitted \code{\link{clm2}} object. } \item{x}{a \code{profile.clm2} object. } \item{parm}{not used in \code{confint.clm2}. For \code{confint.profile.clm2}: a specification of which parameters are to be given confidence intervals, either a vector of numbers or a vector of names. If missing, all parameters are considered. For \code{plot.profile.clm2}: a specification of which parameters the profile likelihood is to be plotted for, either a vector of numbers or a vector of names. If missing, all parameters are considered. } \item{level}{ the confidence level required. } \item{whichL}{ a specification of which \emph{location} parameters are to be given confidence intervals, either a vector of numbers or a vector of names. If missing, all location parameters are considered. } \item{whichS}{ a specification of which \emph{scale} parameters are to be given confidence intervals, either a vector of numbers or a vector of names. If missing, all scale parameters are considered. } \item{lambda}{ logical. Should profile or confidence intervals be computed for the link function parameter? Only used when one of the flexible link functions is used; see the \code{link}-argument in \code{\link{clm2}}. } \item{trace}{ logical. Should profiling be traced? } \item{alpha}{Determines the range of profiling. By default the likelihood is profiled in the 99\% confidence interval region as determined by the profile likelihood. } \item{maxSteps}{the maximum number of profiling steps in each direction (up and down) for each parameter. } \item{delta}{the length of profiling steps. To some extent this parameter determines the degree of accuracy of the profile likelihood in that smaller values, i.e. smaller steps, give a higher accuracy. Note however that a spline interpolation is used when constructing confidence intervals so fairly long steps can provide high accuracy.
} \item{stepWarn}{a warning is issued if the number of steps in each direction (up or down) for a parameter is less than \code{stepWarn} (defaults to 8 steps) because this indicates an unreliable profile. } \item{Log}{should the profile likelihood be plotted on the log-scale? } \item{relative}{should the relative or the absolute likelihood be plotted? } \item{fig}{should the profile likelihood be plotted? } \item{n}{the number of points used in the spline interpolation of the profile likelihood. } \item{ylim}{overrules default y-limits on the plot of the profile likelihood. } \item{\dots}{ additional argument(s) for methods including \code{range} (for the hidden function \code{profileLambda}) that sets the range of values of \code{lambda} at which the likelihood should be profiled for this parameter. } } \value{ \code{confint}: A matrix (or vector) with columns giving lower and upper confidence limits for each parameter. These will be labelled as (1-level)/2 and 1 - (1-level)/2 in \% (by default 2.5\% and 97.5\%). The parameter names are preceded with \code{"loc."} or \code{"sca."} to indicate whether the confidence interval applies to a location or a scale parameter. \code{plot.profile.clm2} invisibly returns the profile object. } \details{ These \code{confint} methods call the appropriate profile method, then find the confidence intervals by interpolation of the profile traces. If the profile object is already available, this should be used as the main argument rather than the fitted model object itself. In \code{plot.profile.clm2}: at least one of the \code{Log} and \code{relative} arguments has to be \code{TRUE}. } \author{Rune Haubo B Christensen} \seealso{ \code{\link{profile}} and \code{\link{confint}} } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) ## More manageable data set: (tab26 <- with(soup, table("Product" = PROD, "Response" = SURENESS))) dimnames(tab26)[[2]] <- c("Sure", "Not Sure", "Guess", "Guess", "Not Sure", "Sure") dat26 <- expand.grid(sureness = as.factor(1:6), prod = c("Ref", "Test")) dat26$wghts <- c(t(tab26)) m1 <- clm2(sureness ~ prod, scale = ~prod, data = dat26, weights = wghts, link = "logistic") ## profile pr1 <- profile(m1) par(mfrow = c(2, 2)) plot(pr1) m9 <- update(m1, link = "log-gamma") pr9 <- profile(m9, whichL = numeric(0), whichS = numeric(0)) par(mfrow = c(1, 1)) plot(pr9) plot(pr9, Log=TRUE, relative = TRUE) plot(pr9, Log=TRUE, relative = TRUE, ylim = c(-4, 0)) plot(pr9, Log=TRUE, relative = FALSE) ## confint confint(pr9) confint(pr1) ## Extend example from polr in package MASS: ## Fit model from polr example: if(require(MASS)) { fm1 <- clm2(Sat ~ Infl + Type + Cont, scale = ~ Cont, weights = Freq, data = housing) pr1 <- profile(fm1) confint(pr1) par(mfrow=c(2,2)) plot(pr1) } } \keyword{internal} ordinal/man/wine.Rd0000644000176200001440000000443012176227250013713 0ustar liggesusers\name{wine} \alias{wine} \title{ Bitterness of wine } \description{ The \code{wine} data set is adopted from Randall (1989) and stems from a factorial experiment on factors determining the bitterness of wine. Two treatment factors (temperature and contact) each have two levels. Temperature and contact between juice and skins can be controlled when crushing grapes during wine production. Nine judges each assessed wine from two bottles from each of the four treatment conditions, hence there are 72 observations in all. } \usage{ wine } \format{ \describe{ \item{\code{response}}{ scorings of wine bitterness on a 0---100 continuous scale.
} \item{\code{rating}}{ ordered factor with 5 levels; a grouped version of \code{response}. } \item{\code{temp}}{ temperature: factor with two levels. } \item{\code{contact}}{ factor with two levels (\code{"no"} and \code{"yes"}). } \item{\code{bottle}}{ factor with eight levels. } \item{\code{judge}}{ factor with nine levels. } }} \source{ Data are adopted from Randall (1989). } \references{ Randall, J. (1989). The analysis of sensory data by generalised linear model. \emph{Biometrical Journal 7}, pp. 781--793. Tutz, G. and W. Hennevogl (1996). Random effects in ordinal regression models. \emph{Computational Statistics & Data Analysis 22}, pp. 537--557. } \examples{ head(wine) str(wine) ## Variables 'rating' and 'response' are related in the following way: (intervals <- seq(0,100, by = 20)) all(wine$rating == findInterval(wine$response, intervals)) ## ok ## A few illustrative tabulations: ## Table matching Table 5 in Randall (1989): temp.contact.bottle <- with(wine, temp:contact:bottle)[drop=TRUE] xtabs(response ~ temp.contact.bottle + judge, data = wine) ## Table matching Table 6 in Randall (1989): with(wine, { tcb <- temp:contact:bottle tcb <- tcb[drop=TRUE] table(tcb, rating) }) ## or simply: with(wine, table(bottle, rating)) ## Table matching Table 1 in Tutz & Hennevogl (1996): tab <- xtabs(as.numeric(rating) ~ judge + temp.contact.bottle, data = wine) colnames(tab) <- paste(rep(c("c","w"), each = 4), rep(c("n", "n", "y", "y"), 2), 1:8, sep=".") tab ## A simple model: m1 <- clm(rating ~ temp * contact, data = wine) summary(m1) } \keyword{datasets} ordinal/man/clm.Rd0000644000176200001440000002536613633002525013530 0ustar liggesusers\name{clm} \alias{clm} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Cumulative Link Models %% ~~function to do ... ~~ } \description{ Fits cumulative link models (CLMs) such as the proportional odds model. The model allows for various link functions and structured thresholds that restrict the thresholds or cut-points to be e.g., equidistant or symmetrically arranged around the central threshold(s). Nominal effects (partial proportional odds with the logit link) are also allowed. A modified Newton algorithm is used to optimize the likelihood function. %% ~~ A concise (1-5 lines) description of what the function does. ~~ } \usage{ clm(formula, scale, nominal, data, weights, start, subset, doFit = TRUE, na.action, contrasts, model = TRUE, control=list(), link = c("logit", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), threshold = c("flexible", "symmetric", "symmetric2", "equidistant"), ...) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{formula}{ a formula expression as for regression models, of the form \code{response ~ predictors}. The response should be a factor (preferably an ordered factor), which will be interpreted as an ordinal response with levels ordered as in the factor. The model must have an intercept: attempts to remove one will lead to a warning and will be ignored. An offset may be used. See the documentation of \code{\link{formula}} for other details. } \item{scale}{ an optional formula expression, of the form \code{ ~ predictors}, i.e. with an empty left hand side. An offset may be used. Variables included here will have multiplicative effects and can be interpreted as effects on the scale (or dispersion) of a latent distribution. } \item{nominal}{ an optional formula of the form \code{ ~ predictors}, i.e. with an empty left hand side.
The effects of the predictors in this formula are assumed to be nominal rather than ordinal - this corresponds to the so-called partial proportional odds (with the logit link). } \item{data}{ an optional data frame in which to interpret the variables occurring in the formulas. } \item{weights}{ optional case weights in fitting. Defaults to 1. Negative weights are not allowed. } \item{start}{ initial values for the parameters in the format \code{c(alpha, beta, zeta)}, where \code{alpha} are the threshold parameters (adjusted for potential nominal effects), \code{beta} are the regression parameters and \code{zeta} are the scale parameters. } \item{subset}{ expression saying which subset of the rows of the data should be used in the fit. All observations are included by default. } \item{doFit}{ logical for whether the model should be fitted or the model environment should be returned. } \item{na.action}{ a function to filter missing data. Applies to terms in all three formulae. } \item{contrasts}{ a list of contrasts to be used for some or all of the factors appearing as variables in the model formula. } \item{model}{ logical for whether the model frame should be part of the returned object. } \item{control}{ a list of control parameters passed on to \code{\link{clm.control}}. } \item{link}{ link function, i.e., the type of location-scale distribution assumed for the latent distribution. The default \code{"logit"} link gives the proportional odds model. } \item{threshold}{ specifies a potential structure for the thresholds (cut-points). \code{"flexible"} provides the standard unstructured thresholds, \code{"symmetric"} restricts the distance between the thresholds to be symmetric around the central one or two thresholds for odd or even numbers of thresholds, respectively, \code{"symmetric2"} restricts the latent mean in the reference group to zero; this means that the central threshold is zero (for an even number of response levels) or that the two central thresholds are equal apart from their sign (for an odd number of response levels), and \code{"equidistant"} restricts the distance between consecutive thresholds to be of the same size. } \item{\dots}{ additional arguments are passed on to \code{\link{clm.control}}. } } \details{ This is a new (as of August 2011) improved implementation of CLMs. The old implementation is available in \code{\link{clm2}}, but will probably be removed at some point. There are methods for the standard model-fitting functions, including \code{\link{summary}}, \code{\link{anova}}, \code{\link{model.frame}}, \code{\link{model.matrix}}, \code{\link{drop1}}, \code{\link{dropterm}}, \code{\link{step}}, \code{\link{stepAIC}}, \code{\link{extractAIC}}, \code{\link{AIC}}, \code{\link{coef}}, \code{\link{nobs}}, \code{\link{profile}}, \code{\link{confint}}, \code{\link{vcov}} and \code{\link[=slice.clm]{slice}}. %% \code{slice}. } \value{ If \code{doFit = FALSE} the result is an environment representing the model ready to be optimized. If \code{doFit = TRUE} the result is an object of class \code{"clm"} with the components listed below. Note that some components are only present if \code{scale} and \code{nominal} are used. \item{aliased}{list of length 3 or less with components \code{alpha}, \code{beta} and \code{zeta} each being logical vectors containing alias information for the parameters of the same names. } \item{alpha}{a vector of threshold parameters.
} \item{alpha.mat}{(where relevant) a table (\code{data.frame}) of threshold parameters where each row corresponds to an effect in the \code{nominal} formula. } \item{beta}{(where relevant) a vector of regression parameters. } \item{call}{the matched call. } \item{coefficients}{a vector of coefficients of the form \code{c(alpha, beta, zeta)}. } \item{cond.H}{condition number of the Hessian matrix at the optimum (i.e. the ratio of the largest to the smallest eigenvalue). } \item{contrasts}{(where relevant) the contrasts used for the \code{formula} part of the model. } \item{control}{list of control parameters as generated by \code{\link{clm.control}}. } \item{convergence}{convergence code where 0 indicates successful convergence and negative values indicate convergence failure; 1 indicates successful convergence to a non-unique optimum. } \item{edf}{the estimated degrees of freedom, i.e., the number of parameters in the model fit. } \item{fitted.values}{the fitted probabilities. } \item{gradient}{a vector of gradients for the coefficients at the estimated optimum. } \item{Hessian}{the Hessian matrix for the parameters at the estimated optimum. } \item{info}{a table of basic model information for printing. } \item{link}{character, the link function used. } \item{logLik}{the value of the log-likelihood at the estimated optimum. } \item{maxGradient}{the maximum absolute gradient, i.e., \code{max(abs(gradient))}. } \item{model}{if requested (the default), the \code{\link{model.frame}} containing variables from \code{formula}, \code{scale} and \code{nominal} parts. } \item{n}{the number of observations counted as \code{nrow(X)}, where \code{X} is the design matrix. } \item{na.action}{(where relevant) information returned by \code{\link{model.frame}} on the special handling of \code{NA}s. } \item{nobs}{the number of observations counted as \code{sum(weights)}. } \item{nom.contrasts}{(where relevant) the contrasts used for the \code{nominal} part of the model. } \item{nom.terms}{(where relevant) the terms object for the \code{nominal} part. } \item{nom.xlevels}{(where relevant) a record of the levels of the factors used in fitting for the \code{nominal} part. } \item{start}{the parameter values at which the optimization has started. An attribute \code{start.iter} gives the number of iterations to obtain starting values for models where \code{scale} is specified or where the \code{cauchit} link is chosen. } \item{S.contrasts}{(where relevant) the contrasts used for the \code{scale} part of the model. } \item{S.terms}{(where relevant) the terms object for the \code{scale} part. } \item{S.xlevels}{(where relevant) a record of the levels of the factors used in fitting for the \code{scale} part. } \item{terms}{the terms object for the \code{formula} part. } \item{Theta}{(where relevant) a table (\code{data.frame}) of thresholds for all combinations of levels of factors in the \code{nominal} formula. } \item{threshold}{character, the threshold structure used. } \item{tJac}{the transpose of the Jacobian for the threshold structure. } \item{xlevels}{(where relevant) a record of the levels of the factors used in fitting for the \code{formula} part. } \item{y.levels}{the levels of the response variable after removing levels for which all weights are zero. } \item{zeta}{(where relevant) a vector of scale regression parameters.
} } \author{ Rune Haubo B Christensen } \examples{ fm1 <- clm(rating ~ temp * contact, data = wine) fm1 ## print method summary(fm1) fm2 <- update(fm1, ~.-temp:contact) anova(fm1, fm2) drop1(fm1, test = "Chi") add1(fm1, ~.+judge, test = "Chi") fm2 <- step(fm1) summary(fm2) coef(fm1) vcov(fm1) AIC(fm1) extractAIC(fm1) logLik(fm1) fitted(fm1) confint(fm1) ## type = "profile" confint(fm1, type = "Wald") pr1 <- profile(fm1) confint(pr1) ## plotting the profiles: par(mfrow = c(2, 2)) plot(pr1, root = TRUE) ## check for linearity par(mfrow = c(2, 2)) plot(pr1) par(mfrow = c(2, 2)) plot(pr1, approx = TRUE) par(mfrow = c(2, 2)) plot(pr1, Log = TRUE) par(mfrow = c(2, 2)) plot(pr1, Log = TRUE, relative = FALSE) ## other link functions: fm4.lgt <- update(fm1, link = "logit") ## default fm4.prt <- update(fm1, link = "probit") fm4.ll <- update(fm1, link = "loglog") fm4.cll <- update(fm1, link = "cloglog") fm4.cct <- update(fm1, link = "cauchit") anova(fm4.lgt, fm4.prt, fm4.ll, fm4.cll, fm4.cct) ## structured thresholds: fm5 <- update(fm1, threshold = "symmetric") fm6 <- update(fm1, threshold = "equidistant") anova(fm1, fm5, fm6) ## the slice methods: slice.fm1 <- slice(fm1) par(mfrow = c(3, 3)) plot(slice.fm1) ## see more at '?slice.clm' ## Another example: fm.soup <- clm(SURENESS ~ PRODID, data = soup) summary(fm.soup) if(require(MASS)) { ## dropterm, addterm, stepAIC, housing fm1 <- clm(rating ~ temp * contact, data = wine) dropterm(fm1, test = "Chi") addterm(fm1, ~.+judge, test = "Chi") fm3 <- stepAIC(fm1) summary(fm3) ## Example from MASS::polr: fm1 <- clm(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) summary(fm1) } } \keyword{models} ordinal/man/dropCoef.Rd0000644000176200001440000000275012175705440014516 0ustar liggesusers\name{drop.coef} \alias{drop.coef} %- Also NEED an '\alias' for EACH other topic documented here. \title{ Ensure Full Rank Design Matrix } \description{ Coefficients (columns) are dropped from a design matrix to ensure that it has full rank. } \usage{ drop.coef(X, silent = FALSE) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{X}{ a design matrix, e.g., the result of \code{\link{model.matrix}} possibly of less than full column rank, i.e., with redundant parameters. Works for \code{ncol(X) >= 0} and \code{nrow(X) >= 0}. } \item{silent}{ logical: should the message be suppressed when \code{X} is column rank deficient? } } \details{ Redundant columns of the design matrix are identified with the LINPACK implementation of the \code{\link{qr}} decomposition and removed. The returned design matrix will have \code{qr(X)$rank} columns. } \value{ The design matrix \code{X} without redundant columns. } \author{ Rune Haubo B Christensen } \seealso{ \code{\link{qr}} and \code{\link{lm}} } \examples{ X <- model.matrix( ~ PRODID * DAY, data = soup) ncol(X) newX <- drop.coef(X) ncol(newX) ## Essentially this is being computed: qr.X <- qr(X, tol = 1e-7, LAPACK = FALSE) newX <- X[, qr.X$pivot[1:qr.X$rank], drop = FALSE] ## is newX of full column rank? ncol(newX) == qr(newX)$rank ## the number of columns being dropped: ncol(X) - ncol(newX) } % Add one or more standard keywords, see file 'KEYWORDS' in the % R documentation directory. \keyword{models} ordinal/man/clmm.control.Rd0000644000176200001440000000401013277541507015361 0ustar liggesusers\name{clmm.control} \alias{clmm.control} %- Also NEED an '\alias' for EACH other topic documented here.
\title{ Set control parameters for cumulative link mixed models } \description{ Set control parameters for cumulative link mixed models } \usage{ clmm.control(method = c("nlminb", "ucminf", "model.frame"), ..., trace = 0, maxIter = 50, gradTol = 1e-4, maxLineIter = 50, useMatrix = FALSE, innerCtrl = c("warnOnly", "noWarn", "giveError"), checkRanef = c("warn", "error", "message")) } %- maybe also 'usage' for other objects documented here. \arguments{ \item{method}{ the optimizer used to maximize the marginal likelihood function. } \item{\dots}{control arguments passed on to the optimizer; see \code{\link[ucminf]{ucminf}} for details. } \item{trace}{numerical, if > 0 information is printed about and during the outer optimization process, if < 0 information is also printed about the inner optimization process. Defaults to \code{0}. } \item{maxIter}{the maximum number of Newton updates of the inner optimization. Defaults to \code{50}. } \item{gradTol}{the maximum absolute gradient of the inner optimization. } \item{maxLineIter}{the maximum number of step halvings allowed if a Newton(-Raphson) step overshoots during the inner optimization. } \item{useMatrix}{if \code{TRUE}, a general implementation of the Laplace approximation using the Matrix package is used, while if \code{FALSE} (default), a C implementation of the Laplace approximation valid only for models with a single random effects term is used when possible. \code{TRUE} is not valid for models fitted with quadrature methods. } \item{innerCtrl}{the use of warnings/errors if the inner optimization fails to converge. } \item{checkRanef}{the use of message/warning/error if there are more random effects than observations. } } \value{ a list of control parameters. } \author{ Rune Haubo B Christensen } \seealso{ \code{\link{clmm}} } \keyword{models} ordinal/man/confint.clm.Rd0000644000176200001440000001302213633002525015163 0ustar liggesusers\name{confint} \alias{confint.clm} \alias{confint.profile.clm} \alias{profile.clm} \alias{plot.profile.clm} \title{ Confidence intervals and profile likelihoods for parameters in cumulative link models } \description{ Computes confidence intervals from the profiled likelihood for one or more parameters in a cumulative link model, or plots the profile likelihood. } \usage{ \method{confint}{clm}(object, parm, level = 0.95, type = c("profile", "Wald"), trace = FALSE, ...) \method{confint}{profile.clm}(object, parm = seq_len(nprofiles), level = 0.95, ...) \method{profile}{clm}(fitted, which.beta = seq_len(nbeta), which.zeta = seq_len(nzeta), alpha = 0.001, max.steps = 50, nsteps = 8, trace = FALSE, step.warn = 5, control = list(), ...) \method{plot}{profile.clm}(x, which.par = seq_len(nprofiles), level = c(0.95, 0.99), Log = FALSE, relative = TRUE, root = FALSE, fig = TRUE, approx = root, n = 1e3, ask = prod(par("mfcol")) < length(which.par) && dev.interactive(), ..., ylim = NULL) } \arguments{ \item{object, fitted, x}{ a fitted \code{\link{clm}} object or a \code{profile.clm} object. } \item{parm, which.par, which.beta, which.zeta}{ a numeric or character vector indicating which regression coefficients should be profiled. By default all coefficients are profiled. Ignored for \code{confint.clm} where all parameters are considered. } \item{level}{ the confidence level. For the \code{plot} method a vector of levels for which horizontal lines should be drawn. } \item{type}{ the type of confidence interval.
} \item{trace}{ if \code{trace} is \code{TRUE} or positive, information about progress is printed. } \item{Log}{ should the profile likelihood be plotted on the log-scale? } \item{relative}{ should the relative or the absolute likelihood be plotted? } \item{root}{ should the (approximately linear) likelihood root statistic be plotted? } \item{approx}{ should the Gaussian or quadratic approximation to the (log) likelihood be included? } \item{fig}{ should the profile likelihood be plotted? } \item{ask}{ logical; if \code{TRUE}, the user is asked before each plot, see \code{\link{par}}\code{(ask=.)}. } \item{n}{ the number of points used in the spline interpolation of the profile likelihood. } \item{ylim}{overrules default y-limits on the plot of the profile likelihood. } \item{alpha}{ the likelihood is profiled in the 100*(1-alpha)\% confidence region as determined by the profile likelihood. } \item{control}{ a list of control parameters for \code{\link{clm}}. Possibly use \code{\link{clm.control}} to set these. } %%\item{lambda}{ %% logical. Should profile or confidence intervals be computed for the %% link function parameter? Only used when one of the flexible link %% functions are used; see the \code{link}-argument in %% \code{\link{clm}}. %%} \item{max.steps}{ the maximum number of profiling steps in each direction for each parameter. } \item{nsteps}{ the (approximate) number of steps to take in each direction of the profile for each parameter. The step length is determined accordingly assuming a quadratic approximation to the log-likelihood function. The actual number of steps will often be close to \code{nsteps}, but will deviate when the log-likelihood function is irregular. } \item{step.warn}{ a warning is issued if the number of steps in each direction (up or down) for a parameter is less than \code{step.warn}. If few steps are taken, the profile will be unreliable and derived confidence intervals will be inaccurate. } \item{\dots}{ additional arguments to be passed on to methods. } } \value{ \code{confint}: A matrix with columns giving lower and upper confidence limits for each parameter. These will be labelled as (1-level)/2 and 1 - (1-level)/2 in \% (by default 2.5\% and 97.5\%). \code{plot.profile.clm} invisibly returns the profile object, i.e., a list of \code{\link{data.frame}}s with an \code{lroot} component for the likelihood root statistic and a matrix \code{par.vals} with values of the parameters. } \details{ These \code{confint} methods call the appropriate profile method, then find the confidence intervals by interpolation of the profile traces. If the profile object is already available, this should be used as the main argument rather than the fitted model object itself.
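The interpolation idea can be sketched from the documented structure of the profile object (a rough sketch only, assuming a fitted model \code{fm1} as in the examples below; the internal computations in \code{confint} may differ in detail): \preformatted{
pr1 <- profile(fm1)
tr <- pr1[[1]]  ## data.frame with 'lroot' and 'par.vals' for one parameter
## The likelihood root statistic is approximately linear in the parameter,
## so interpolating it at +/- qnorm(0.975) approximates the 95\% limits:
approx(x = tr$lroot, y = tr$par.vals[, 1],
       xout = qnorm(c(0.025, 0.975)))$y
}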
} \author{Rune Haubo B Christensen} \seealso{ \code{\link{profile}} and \code{\link{confint}} } \examples{ ## Accurate profile likelihood confidence intervals compared to the ## conventional Wald intervals: fm1 <- clm(rating ~ temp * contact, data = wine) confint(fm1) ## type = "profile" confint(fm1, type = "Wald") pr1 <- profile(fm1) confint(pr1) ## plotting the profiles: par(mfrow = c(2, 2)) plot(pr1, root = TRUE) ## check for linearity par(mfrow = c(2, 2)) plot(pr1) par(mfrow = c(2, 2)) plot(pr1, approx = TRUE) par(mfrow = c(2, 2)) plot(pr1, Log = TRUE) par(mfrow = c(2, 2)) plot(pr1, Log = TRUE, relative = FALSE) ## Not likely to be useful but allowed for completeness: par(mfrow = c(2, 2)) plot(pr1, Log = FALSE, relative = FALSE) ## Example from polr in package MASS: ## Fit model from polr example: if(require(MASS)) { fm1 <- clm(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) pr1 <- profile(fm1) confint(pr1) par(mfrow=c(2,2)) plot(pr1) } } \keyword{models} ordinal/man/clmmOld.Rd0000644000176200001440000002266412176227250014346 0ustar liggesusers\name{clmm2} \alias{clmm2} \title{Cumulative link mixed models} \description{ Fits cumulative link mixed models, i.e. cumulative link models with random effects via the Laplace approximation or the standard and the adaptive Gauss-Hermite quadrature approximation. The functionality in \code{\link{clm2}} is also implemented here. Currently only a single random term is allowed in the location-part of the model. A new implementation is available in \code{\link{clmm}} that allows for more than one random effect. } \usage{ clmm2(location, scale, nominal, random, data, weights, start, subset, na.action, contrasts, Hess = FALSE, model = TRUE, sdFixed, link = c("logistic", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), lambda, doFit = TRUE, control, nAGQ = 1, threshold = c("flexible", "symmetric", "equidistant"), ...) } \arguments{ \item{location}{ as in \code{\link{clm2}}. } \item{scale}{ as in \code{\link{clm2}}. } \item{nominal}{ as in \code{\link{clm2}}. } \item{random}{ a factor for the random effects in the location-part of the model. } \item{data}{ as in \code{\link{clm2}}. } \item{weights}{ as in \code{\link{clm2}}. } \item{start}{ initial values for the parameters in the format \code{c(alpha, beta, log(zeta), lambda, log(stDev))} where \code{stDev} is the standard deviation of the random effects. } \item{subset}{ as in \code{\link{clm2}}. } \item{na.action}{ as in \code{\link{clm2}}. } \item{contrasts}{ as in \code{\link{clm2}}. } \item{Hess}{ logical for whether the Hessian (the inverse of the observed information matrix) should be computed. Use \code{Hess = TRUE} if you intend to call \code{summary} or \code{vcov} on the fit and \code{Hess = FALSE} in all other instances to save computing time. } \item{model}{ as in \code{\link{clm2}}. } \item{sdFixed}{ If \code{sdFixed} is specified (a positive scalar), a model is fitted where the standard deviation for the random term is fixed at the value of \code{sdFixed}. If \code{sdFixed} is left unspecified, the standard deviation of the random term is estimated from data. } \item{link}{ as in \code{\link{clm2}}. } \item{lambda}{ as in \code{\link{clm2}}. } \item{doFit}{ as in \code{\link{clm2}} although it can also be one of \code{c("no", "R", "C")}, where \code{"R"} uses the R implementation for fitting, \code{"C"} (default) uses the C implementation for fitting and \code{"no"} behaves as \code{FALSE} and returns the environment.
} \item{control}{ a call to \code{\link{clmm2.control}}. } \item{threshold}{ as in \code{\link{clm2}}. } \item{nAGQ}{ the number of quadrature points to be used in the adaptive Gauss-Hermite quadrature approximation to the marginal likelihood. Defaults to \code{1} which leads to the Laplace approximation. An odd number of quadrature points is encouraged and 3, 5 or 7 are usually enough to achieve high precision. Negative values give the standard, i.e. non-adaptive Gauss-Hermite quadrature. } \item{\dots}{ additional arguments are passed on to \code{\link{clm2.control}} and possibly further on to the optimizer, which can lead to surprising error or warning messages when mistyping arguments etc. } } \details{ There are methods for the standard model-fitting functions, including \code{\link{summary}}, \code{\link{vcov}}, \code{\link[=profile.clmm2]{profile}}, \code{\link[=profile.clmm2]{plot.profile}}, \code{\link[=confint.profile.clmm2]{confint}}, \code{\link[=anova.clm2]{anova}}, \code{\link{logLik}}, \code{\link[=predict.clmm2]{predict}} and an \code{extractAIC} method. A Newton scheme is used to obtain the conditional modes of the random effects for Laplace and AGQ approximations, and a non-linear optimization is performed over the fixed parameter set to get the maximum likelihood estimates. The Newton scheme uses the observed Hessian rather than the expected as is done in e.g. \code{\link[lme4]{glmer}}, so results from the Laplace approximation for binomial fits should in general be more precise - particularly for other links than the \code{"logistic"}. Core parts of the function are implemented in C-code for speed. The function calls \code{\link{clm2}} to set up an environment and to get starting values. } \value{ If \code{doFit = FALSE} the result is an environment representing the model ready to be optimized. If \code{doFit = TRUE} the result is an object of class \code{"clmm2"} with the following components: \item{stDev}{ the standard deviation of the random effects. } \item{Niter}{ the total number of iterations in the Newton updates of the conditional modes of the random effects. } \item{grFac}{ the grouping factor defining the random effects. } \item{nAGQ}{ the number of quadrature points used in the adaptive Gauss-Hermite quadrature approximation to the marginal likelihood. } \item{ranef}{ the conditional modes of the random effects, sometimes referred to as "random effect estimates". } \item{condVar}{ the conditional variances of the random effects at their conditional modes. } \item{beta}{the parameter estimates of the location part. } \item{zeta}{the parameter estimates of the scale part on the log scale; the scale parameter estimates on the original scale are given by \code{exp(zeta)}. } \item{Alpha}{vector or matrix of the threshold parameters. } \item{Theta}{vector or matrix of the thresholds. } \item{xi}{vector of threshold parameters, which, given a threshold function (e.g. \code{"equidistant"}), and possible nominal effects define the class boundaries, \code{Theta}. } \item{lambda}{the value of lambda if lambda is supplied or estimated, otherwise missing. } \item{coefficients}{the coefficients of the intercepts (\code{theta}), the location (\code{beta}), the scale (\code{zeta}), and the link function parameter (\code{lambda}). } \item{df.residual}{the number of residual degrees of freedom, calculated using the weights. } \item{fitted.values}{vector of fitted values conditional on the values of the random effects.
Use \code{\link[=predict.clm2]{predict}} to get the fitted values for a random effect of zero. An observation here is taken to be each of the scalar elements of the multinomial table and not a multinomial vector. } \item{convergence}{\code{TRUE} if the optimizer terminates without error and \code{FALSE} otherwise. } \item{gradient}{vector of gradients for the unit-variance random effects at their conditional modes. } \item{optRes}{list with results from the optimizer. The contents of the list depend on the choice of optimizer. } \item{logLik}{the log likelihood of the model at optimizer termination. } \item{Hessian}{if the model was fitted with \code{Hess = TRUE}, this is the Hessian matrix of the parameters at the optimum. } \item{scale}{\code{model.frame} for the scale model. } \item{location}{\code{model.frame} for the location model. } \item{nominal}{\code{model.frame} for the nominal model. } \item{edf}{the (effective) number of degrees of freedom used by the model. } \item{start}{the starting values. } \item{method}{character, the optimizer. } \item{y}{the response variable. } \item{lev}{the names of the levels of the response variable. } \item{nobs}{the (effective) number of observations, calculated as the sum of the weights. } \item{threshold}{character, the threshold function used in the model. } \item{estimLambda}{\code{1} if lambda is estimated in one of the flexible link functions and \code{0} otherwise. } \item{link}{character, the link function used in the model. } \item{call}{the matched call. } \item{contrasts}{contrasts applied to terms in location and scale models. } \item{na.action}{the function used to filter missing data. } } \author{Rune Haubo B Christensen} \references{ Agresti, A. (2002) \emph{Categorical Data Analysis.} Second edition. Wiley. } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) ## More manageable data set: dat <- subset(soup, as.numeric(as.character(RESP)) <= 24) dat$RESP <- dat$RESP[drop=TRUE] m1 <- clmm2(SURENESS ~ PROD, random = RESP, data = dat, link="probit", Hess = TRUE, method="ucminf", threshold = "symmetric") m1 summary(m1) logLik(m1) vcov(m1) extractAIC(m1) anova(m1, update(m1, location = SURENESS ~ 1, Hess = FALSE)) anova(m1, update(m1, random = NULL)) ## Use adaptive Gauss-Hermite quadrature rather than the Laplace ## approximation: update(m1, Hess = FALSE, nAGQ = 3) ## Use standard Gauss-Hermite quadrature: update(m1, Hess = FALSE, nAGQ = -7) ################################################################## ## Binomial example with the cbpp data from the lme4-package: if(require(lme4)) { cbpp2 <- rbind(cbpp[,-(2:3)], cbpp[,-(2:3)]) cbpp2 <- within(cbpp2, { incidence <- as.factor(rep(0:1, each=nrow(cbpp))) freq <- with(cbpp, c(incidence, size - incidence)) }) ## Fit with Laplace approximation: fm1 <- clmm2(incidence ~ period, random = herd, weights = freq, data = cbpp2, Hess = 1) summary(fm1) ## Fit with the adaptive Gauss-Hermite quadrature approximation: fm2 <- clmm2(incidence ~ period, random = herd, weights = freq, data = cbpp2, Hess = 1, nAGQ = 7) summary(fm2) } } \keyword{models} ordinal/man/anovaOld.Rd0000644000176200001440000000432513633002525014512 0ustar liggesusers\name{anova.clm2} %%\alias{anova} \alias{anova.clm2} \alias{anova.clmm2} \title{Likelihood ratio test of cumulative link models} \description{ Comparison of cumulative link models in likelihood ratio tests. The models may differ by terms in location, scale and nominal formulae, in link, threshold function and random effect structure.
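The likelihood ratio statistic is the difference in \code{-2logLik} between two nested fits; it is compared to a chi-squared distribution with degrees of freedom equal to the difference in the number of estimated parameters (the \code{DF} column of the result).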
} \usage{ \method{anova}{clm2}(object, ..., test = c("Chisq", "none")) \method{anova}{clmm2}(object, ..., test = c("Chisq", "none")) } \arguments{ \item{object}{a \code{\link{clm2}} object. } \item{\dots}{one or more additional \code{\link{clm2}} objects. } \item{test}{if \code{test = "none"} the p-value for the likelihood ratio test is suppressed. } } \value{ The method returns an object of class \code{Anova} (for printing) and \code{data.frame} with the following elements \item{Model}{character description of the cumulative link models being compared. Location, scale and nominal formulae are separated by "|"s in this order. } \item{Resid.df}{the residual degrees of freedom } \item{-2logLik}{twice the negative log likelihood (proportional to the deviance)} \item{Test}{indication of which models are being compared. } \item{DF}{the difference in the degrees of freedom in the models being compared, i.e. the degrees of freedom for the chi-squared test. } \item{LR stat.}{the likelihood ratio statistic. } \item{Pr(Chi)}{the p-value from the likelihood ratio test. Absent if \code{test = "none"}. } } \author{Rune Haubo B Christensen} \seealso{ \code{\link[ordinal]{clm2}}, \code{\link[=addterm.clm2]{addterm}}, \code{\link[ordinal:addtermOld]{dropterm}} and \code{\link[=anova]{anova.default}} } \examples{ options(contrasts = c("contr.treatment", "contr.poly")) m1 <- clm2(SURENESS ~ PROD, scale = ~PROD, data = soup, link = "logistic") ## anova anova(m1, update(m1, scale = ~.-PROD)) mN1 <- clm2(SURENESS ~ 1, nominal = ~PROD, data = soup, link = "logistic") anova(m1, mN1) anova(m1, update(m1, scale = ~.-PROD), mN1) ## Fit model from polr example: if(require(MASS)) { fm1 <- clm2(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) anova(fm1, update(fm1, scale =~ Cont)) } } \keyword{internal} ordinal/DESCRIPTION0000644000176200001440000000304614533336352013422 0ustar liggesusersPackage: ordinal Type: Package Title: Regression Models for Ordinal Data Version: 2023.12-4 Date: 2023-12-04 Authors@R: person(given="Rune Haubo Bojesen", family="Christensen", email="rune.haubo@gmail.com", role=c("aut", "cre")) LazyData: true ByteCompile: yes Depends: R (>= 2.13.0), stats, methods Imports: ucminf, MASS, Matrix, numDeriv, nlme Suggests: lme4, nnet, xtable, testthat (>= 0.8), tools Description: Implementation of cumulative link (mixed) models also known as ordered regression models, proportional odds models, proportional hazards models for grouped survival times and ordered logit/probit/... models. Estimation is via maximum likelihood and mixed models are fitted with the Laplace approximation and adaptive Gauss-Hermite quadrature. Multiple random effect terms are allowed and they may be nested, crossed or partially nested/crossed. Restrictions of symmetry and equidistance can be imposed on the thresholds (cut-points/intercepts). Standard model methods are available (summary, anova, drop-methods, step, confint, predict etc.) in addition to profile methods and slice methods for visualizing the likelihood function and checking convergence. 
License: GPL (>= 2) NeedsCompilation: yes URL: https://github.com/runehaubo/ordinal BugReports: https://github.com/runehaubo/ordinal/issues Packaged: 2023-12-04 10:10:06 UTC; rhbc Author: Rune Haubo Bojesen Christensen [aut, cre] Maintainer: Rune Haubo Bojesen Christensen Repository: CRAN Date/Publication: 2023-12-04 11:50:02 UTC ordinal/build/0000755000176200001440000000000014533322572013007 5ustar liggesusersordinal/build/vignette.rds0000644000176200001440000000042114533322572015343 0ustar liggesusersON0 ұI@~]&MY4/_q;h%&qg͔$HC/ GLUe\lk.oJNx}[b^ z/+}0\(G cR3E Hdi=eϋ?>q;O[,wvJw"fpLLVpY"~Z0[K;,G `Ě: fOMr\p)ge>T/C'?ordinal/tests/0000755000176200001440000000000014533322576013056 5ustar liggesusersordinal/tests/testCLM.R0000644000176200001440000002021013633002525014474 0ustar liggesuserslibrary(ordinal) options(contrasts = c("contr.treatment", "contr.poly")) ## library(devtools) ## r2path <- "/Users/rhbc/Documents/Rpackages/ordinal/pkg/ordinal" ## clean_dll(pkg = r2path) ## load_all(r2path) ## More manageable data set: data(soup, package="ordinal") (tab26 <- with(soup, table("Product" = PROD, "Response" = SURENESS))) dimnames(tab26)[[2]] <- c("Sure", "Not Sure", "Guess", "Guess", "Not Sure", "Sure") dat26 <- expand.grid(sureness = as.factor(1:6), prod = c("Ref", "Test")) dat26$wghts <- c(t(tab26)) m1 <- clm(sureness ~ prod, scale = ~prod, data = dat26, weights = wghts, link = "logit") ## print, summary, vcov, logLik, AIC: m1 summary(m1) vcov(m1) logLik(m1) ll.m1 <- structure(-2687.74456343981, df = 7L, nobs = 1847, class = "logLik") stopifnot(all.equal(logLik(m1), ll.m1)) AIC(m1) coef(m1) cm1 <- c(-1.49125702755587, -0.45218462707814, -0.107208315524318, 0.163365282774162, 0.88291347877514, 1.29587762626394, 0.147986162902775) stopifnot(all.equal(as.vector(coef(m1)), cm1)) coef(summary(m1)) csm1 <- structure(c(-1.49125702755587, -0.45218462707814, -0.107208315524318, 0.163365282774162, 0.88291347877514, 1.29587762626394, 0.147986162902775, 0.0921506468161812, 0.0718240681909781, 0.069954084652323, 0.0702546879687391, 0.0795708692869622, 0.119032405993894, 0.065104213008022, -16.1828167145758, -6.2957256316336, -1.53255261729392, 2.32532927691394, 11.0959385851501, 10.8867632762999, 2.27306584421104, 6.66732036748908e-59, 3.05965144996025e-10, 0.125386123756898, 0.0200543599621069, 1.31274723412040e-28, 1.33293711602276e-27, 0.0230222123418036), .Dim = c(7L, 4L), .Dimnames = list( c("1|2", "2|3", "3|4", "4|5", "5|6", "prodTest", "prodTest" ), c("Estimate", "Std. 
Error", "z value", "Pr(>|z|)"))) stopifnot(all.equal(coef(summary(m1)), csm1)) ## link functions: m2 <- update(m1, link = "probit") m3 <- update(m1, link = "cloglog") m4 <- update(m1, link = "loglog") m5 <- update(m1, link = "cauchit", start = coef(m1)) ## m6 <- update(m1, link = "Aranda-Ordaz", lambda = 1) ## m7 <- update(m1, link = "Aranda-Ordaz") ## m8 <- update(m1, link = "log-gamma", lambda = 1) ## m9 <- update(m1, link = "log-gamma") ## nominal effects: mN1 <- clm(sureness ~ 1, nominal = ~ prod, data = dat26, weights = wghts) anova(m1, mN1) ## optimizer / method: update(m1, scale = ~ 1, method = "Newton") update(m1, scale = ~ 1, method = "ucminf") update(m1, scale = ~ 1, method = "nlminb") update(m1, scale = ~ 1, method = "optim") update(m1, scale = ~ 1, method = "model.frame") update(m1, ~.-prod, scale = ~ 1, nominal = ~ prod, method = "model.frame") ## threshold functions mT1 <- update(m1, threshold = "symmetric") mT2 <- update(m1, threshold = "equidistant") anova(m1, mT1, mT2) ## Extend example from polr in package MASS: ## Fit model from polr example: if(require(MASS)) { fm1 <- clm(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) fm1 summary(fm1) ## With probit link: summary(update(fm1, link = "probit")) ## Allow scale to depend on Cont-variable summary(fm2 <- update(fm1, scale =~ Cont)) summary(fm3 <- update(fm1, location =~.-Cont, nominal =~ Cont)) summary(fm4 <- update(fm2, location =~.-Cont, nominal =~ Cont)) anova(fm1, fm2, fm3, fm4) ## which seems to improve the fit } ################################# ## Better handling of ill-defined variance-covariance matrix of the ## parameters in summary methods for clm and clmm objects: dat26.2 <- data.frame(sureness = as.factor(1:12), prod = rep(c("One", "Two", "Three"),each=4)) fm1 <- clm(sureness ~ prod, ~prod, data = dat26.2) fm1 summary(fm1) summary(fm1, corr = 1) ## fm1$Hessian ## sl1 <- slice(fm1, 13) ## fitted(fm1) ## convergence(fm1) ## eigen(fm1$Hessian)$values ## sqrt(diag(solve(fm1$Hessian))) ## sqrt(diag(ginv(fm1$Hessian))) ################################# ## Missing values: ## Bug-report from Jonathan Williams ## , 18 March 2010 12:42 data(soup, package = "ordinal") soup$SURENESS[10] <- NA c1a <- clm(ordered(SURENESS)~PROD, data=soup); summary(c1a) c2a <- clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup) summary(c2a) c3a <- clm(ordered(SURENESS)~1, scale = ~PROD, data=soup) summary(c3a) data(soup, package = "ordinal") soup$PROD[1] <- NA c1a <- clm(ordered(SURENESS)~PROD, data=soup) summary(c1a) c2a <- clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup) summary(c2a) c3a <- clm(ordered(SURENESS)~1, scale = ~PROD, data=soup) summary(c3a) soup$SURENESS[10] <- NA c1a <- clm(ordered(SURENESS)~PROD, data=soup) summary(c1a) c2a <- clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup) summary(c2a) c3a <- clm(ordered(SURENESS)~1, scale = ~PROD, data=soup) summary(c3a) ## na.actions: c4a <- clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup, na.action=na.omit) summary(c4a) tC1 <- try(clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup, na.action=na.fail), silent = TRUE) stopifnot(inherits(tC1, "try-error")) c4a <- clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup, na.action=na.exclude) summary(c4a) tC2 <- try(clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup, na.action=na.pass), silent = TRUE) stopifnot(inherits(tC2, "try-error")) ## Subset: data(soup, package="ordinal") c4a <- clm(ordered(SURENESS)~PROD, scale = ~PROD, data=soup, subset = 1:100) c4a <- clm(ordered(SURENESS)~1, scale = ~PROD, data=soup, 
subset = 1:100) c4a <- clm(ordered(SURENESS)~PROD, data=soup, subset = 1:100) c4a <- clm(ordered(SURENESS)~1, data=soup, subset = 1:100) ## Offset: data(soup, package = "ordinal") set.seed(290980) offs <- runif(nrow(soup)) c4a <- clm(ordered(SURENESS)~PROD + offset(offs), scale = ~PROD, data=soup, subset = 1:100) summary(c4a) c4a <- clm(ordered(SURENESS)~PROD + offset(offs), scale = ~PROD + offset(offs), data=soup, subset = 1:100) summary(c4a) off2 <- offs c4a <- clm(ordered(SURENESS)~PROD + offset(offs), scale = ~PROD + offset(off2), data=soup, subset = 1:100) summary(c4a) c4a <- clm(ordered(SURENESS)~PROD, scale = ~PROD + offset(offs), data=soup, subset = 1:100) summary(c4a) ## data as matrix: dat26M <- as.matrix(dat26) m1 <- clm(sureness ~ prod, scale = ~prod, data = dat26, weights = wghts, link = "logit") summary(m1) ## data in enclosing environment: attach(soup) m1 <- clm(SURENESS ~ PROD, scale = ~PROD) summary(m1) detach(soup) ################################################################## ### Parameter estimates were not correct with large scale effects due ### to end cut-points being \pm 100. This is not enough for ### a location-scale model, but seems to be for location-only models. ### Bug report from Ioannis Kosmidis : ### A 2x3 contingency table that will give a large estimated value of ### zeta x <- rep(0:1, each = 3) response <- factor(rep(c(1, 2, 3), times = 2)) freq <- c(1, 11, 1, 13, 1, 14) totals <- rep(tapply(freq, x, sum), each = 3) Dat <- data.frame(response, x, freq) ### Fitting a cumulative link model with dispersion effects modClm <- clm(response ~ x, scale = ~ x, weights = freq, data = Dat, control = clm.control(grtol = 1e-10, convTol = 1e-10)) summary(modClm) ### The maximized log-likelihood for this saturated model should be sum(freq*log(freq/totals)) # > sum(freq*log(freq/totals)) # [1] -29.97808 ### but apparently clm fails to maximize the log-likelihood modClm$logLik # > modClm$logLik # [1] -30.44452 stopifnot(isTRUE(all.equal(sum(freq*log(freq/totals)), modClm$logLik))) ### The estimates reported by clm are coef(modClm) coef.res <- structure(c(-2.48490664104217, 2.48490665578163, 2.48490659188594, 3.54758796387530), .Names = c("1|2", "2|3", "x", "x")) stopifnot(isTRUE(all.equal(coef.res, coef(modClm)))) # > modClm$coefficients # 1|2 2|3 x x # -2.297718 2.297038 1.239023 2.834285 ### while they should be (from my own software) # 1|2 2|3 x disp.x #-2.484907 2.484907 2.484907 3.547588 convergence(modClm) ################################################################## ordinal/tests/confint.R0000755000176200001440000000344011617035245014637 0ustar liggesusers################################# ## test profile and confint methods: library(ordinal) data(wine) fm1 <- clm(rating ~ contact + temp, data = wine) summary(fm1) ## profile.clm and confint.clm: pr1 <- profile(fm1) confint(pr1) pr1 <- profile(fm1, which.beta = 1:2) confint(pr1) pr1 <- profile(fm1, which.beta = 2:1) confint(pr1) pr1 <- profile(fm1, which.beta = 1) confint(pr1) pr1 <- profile(fm1, which.beta = 2) confint(pr1) pr1 <- try(profile(fm1, which.beta = 0), silent = TRUE) ## error pr1 <- try(profile(fm1, which.beta = "no.par"), silent = TRUE) ## error pr1 <- try(profile(fm1, which.beta = -1), silent = TRUE) ## error pr1 <- profile(fm1, which.beta = "tempwarm") confint(pr1) pr1 <- profile(fm1, alpha = 0.1) confint(pr1) ## should give NA in this case?
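## Hedged addition (not part of the original tests): with alpha = 0.1 the
## profile covers only the 90% confidence region, so a lower level such as
## 0.80 lies comfortably inside the profiled region; Wald intervals at the
## same level give a rough cross-check when the log-likelihood is
## near-quadratic:
confint(pr1, level = 0.80)
confint(fm1, type = "Wald", level = 0.80)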
pr1 <- profile(fm1, max.steps = 9) pr1 <- profile(fm1, step.warn = 7) pr1 <- profile(fm1, nsteps = 6) pr1 <- profile(fm1, trace = 1) pr1 <- profile(fm1, control = list(gradTol = .1)) confint(pr1) ## not at all unreliable... ## single regression coef setting: fm2 <- clm(rating ~ contact, data = wine) summary(fm2) pr2 <- profile(fm2) confint(pr2) ## confint.clm: confint(fm1) confint(fm1, 2) confint(fm1, 1) confint(fm1, "tempwarm") confint(fm1, type = "profile") confint(fm1, type = "Wald") confint(fm1, 2, type = "Wald") confint(fm1, level = 0.5) confint(fm1, level = 1 - 1e-6) confint(fm1, level = 1 - 1e-10) ## extreme, but it works confint(fm1, trace = 1) ## plot.profile: pr1 <- profile(fm1, which.beta=1:2, alpha = 1e-3) par(mfrow = c(1,2)) plot(pr1) plot(pr1, 1) plot(pr1, "contactyes") plot(pr1, level = .97) plot(pr1, Log = TRUE) plot(pr1, relative = FALSE) plot(pr1, root = TRUE) plot(pr1, approx = TRUE) plot(pr1, n=10) plot(pr1, ylim = c(0,2)) plot(pr1, las = 1) plot(pr2) ordinal/tests/test-all.R0000644000176200001440000000011112447563614014721 0ustar liggesusers if(require(testthat) && require(ordinal)) { test_check("ordinal") } ordinal/tests/nominal.test.R0000644000176200001440000000413412175707057015617 0ustar liggesuserslibrary(ordinal) if(require(MASS)) { fm1 <- clm(Sat ~ Infl + Type + Cont, data=housing, weights=Freq) scale_test(fm1) nominal_test(fm1) fm2 <- update(fm1, scale=~Cont) scale_test(fm2) nominal_test(fm2) fm3 <- update(fm1, nominal=~ Cont) fm3$Theta anova(fm2, fm3) fm3$alpha.mat summary(fm3) } ################################# ### Testing nominal_test and scale_test: fm1 <- clm(rating ~ temp * contact, data=wine) ## names(fm1) fm2 <- clm(rating ~ temp * contact, data=wine, nominal=~contact) (an <- anova(fm1, fm2)) (nm <- nominal_test(fm1)) stopifnot(isTRUE(all.equal(an[2, 6], nm["contact", 5]))) fm2 <- clm(rating ~ temp * contact, data=wine, scale=~contact) (an <- anova(fm1, fm2)) (sc <- scale_test(fm1)) stopifnot(isTRUE(all.equal(an[2, 6], sc["contact", "Pr(>Chi)"]))) fm1 <- clm(rating ~ temp + contact, nominal=~temp + contact, data=wine) fm1 try(nominal_test(fm1), silent=TRUE)[1] ## gives error OK scale_test(fm1) fm1 <- clm(rating ~ temp + contact, scale=~temp + contact, data=wine) fm1 try(scale_test(fm1), silent=TRUE)[1] ## gives error OK nominal_test(fm1) ## Using weights: set.seed(123454321) wt <- runif(nrow(wine)) fm1 <- clm(rating ~ temp * contact, data=wine, weights=wt) nominal_test(fm1) scale_test(fm1) ## No nominal test for judge since that model is not identifiable: fm1 <- clm(rating ~ judge + temp + contact, data=wine) nominal_test(fm1) scale_test(fm1) fm1 <- clm(rating ~ judge + temp, nominal=~contact, data=wine) nominal_test(fm1) summary(fm1) ## A continuous variable: set.seed(123454321) x <- rnorm(nrow(wine), sd=1) fm <- clm(rating ~ temp, nominal=~contact * x, data=wine) nominal_test(fm) scale_test(fm) fm <- clm(rating ~ temp + x, nominal=~contact, data=wine) nominal_test(fm) scale_test(fm) ## poly: fm <- clm(rating ~ temp + poly(x, 2), nominal=~contact, data=wine) nominal_test(fm) scale_test(fm) ## another combination: fm1 <- clm(SURENESS ~ PRODID + DAY + SOUPTYPE + SOUPFREQ, scale=~PROD, nominal=~ DAY*GENDER, data=soup) fm1 nominal_test(fm1) scale_test(fm1) ################################# ordinal/tests/test.clm.profile.R0000644000176200001440000000264512431442003016361 0ustar liggesuserslibrary(ordinal) ## Testing that the profile remains the same - that the model object ## is not 'distorted' by update(object/fitted, doFit=FALSE) set.seed(1234) wts <-
runif(nrow(wine), 0, 2) fm3 <- clm(rating ~ temp + contact, data=wine, weights=wts) pr <- profile(fm3) set.seed(1234) fm3 <- clm(rating ~ temp + contact, data=wine, weights=runif(nrow(wine), 0, 2)) pr3 <- profile(fm3) ## > set.seed(1234) ## > fm3 <- clm(rating ~ temp + contact, data=wine, ## + weights=runif(nrow(wine), 0, 2)) ## > pr3 <- profile(fm3) ## Warning messages: ## 1: In profile.clm.beta(fitted, which.beta, alpha, max.steps, nsteps, : ## profile may be unreliable for tempwarm because only 1 ## steps were taken down ## 2: In profile.clm.beta(fitted, which.beta, alpha, max.steps, nsteps, : ## profile may be unreliable for tempwarm because only 1 ## steps were taken up ## 3: In profile.clm.beta(fitted, which.beta, alpha, max.steps, nsteps, : ## profile may be unreliable for contactyes because only 1 ## steps were taken down ## 4: In profile.clm.beta(fitted, which.beta, alpha, max.steps, nsteps, : ## profile may be unreliable for contactyes because only 1 ## steps were taken up ## stopifnot(isTRUE(all.equal(pr, pr3, check.attributes=FALSE))) stopifnot( isTRUE(all.equal(pr$tempwarm[, "lroot"], pr3$tempwarm[, "lroot"])), isTRUE(all.equal(pr$contactyes[, "lroot"], pr3$contactyes[, "lroot"]))) ordinal/tests/testthat/0000755000176200001440000000000014533322576014716 5ustar liggesusersordinal/tests/testthat/test-clm-predict.R0000644000176200001440000000065612450062502020212 0ustar liggesuserscontext("Test that clm.predict gives warnings if prevars is absent") fm1 <- clm(rating ~ temp + contact, data=wine) newData <- expand.grid(temp=levels(wine$temp), contact=levels(wine$contact)) expect_false(givesWarnings( predict(fm1, newdata=newData) )) attr(fm1$terms, "predvars") <- NULL expect_warning( predict(fm1, newdata=newData) , "terms object does not have a predvars attribute") ordinal/tests/testthat/test-contrasts.R0000644000176200001440000000504313633002525020023 0ustar liggesuserscontext("Contrast specification") test_that("clm gives contrast warnings when it should", { ## No warnings: ## Different combinations of terms in various formulae. Note that the ## contrasts apply to e.g.
'contact' in both 'formula' and 'scale': contr <- c(temp="contr.sum", contact="contr.sum") expect_false(givesWarnings( fm1 <- clm(rating ~ temp + contact, scale=~contact, data=wine) ## OK )) # expect_false(givesWarnings( # fm1 <- clm(rating ~ temp + contact, scale=~contact, data=wine, # contrasts=contr) ## OK # )) # expect_false(givesWarnings( # fm1 <- clm(rating ~ temp, scale=~contact, data=wine, # contrasts=contr) ## OK # )) # expect_false(givesWarnings( # fm1 <- clm(rating ~ temp, nominal=~contact, data=wine, # contrasts=contr) ## OK # )) # expect_false(givesWarnings( # fm1 <- clm(rating~1, scale=~temp, nominal=~contact, data=wine, # contrasts=contr) ## OK # )) ## These should give warnings: ## A warning is given if a variable is not present in any of the ## formulae: expect_warning( fm <- clm(rating ~ temp, contrasts=c(contact="contr.sum"), data=wine) , "variable 'contact' is absent: its contrasts will be ignored") expect_warning( fm <- clm(rating ~ temp, contrasts=contr, data=wine) , "variable 'contact' is absent: its contrasts will be ignored") expect_warning( fm <- clm(rating ~ 1, scale=~contact, contrasts=c(temp="contr.sum"), data=wine) , "variable 'temp' is absent: its contrasts will be ignored") expect_warning( fm <- clm(rating ~ 1, scale=~contact, contrasts=list(temp="contr.sum"), data=wine) , "variable 'temp' is absent: its contrasts will be ignored") }) test_that("checkContrasts gives warnings when it should", { ## No warnings: fm0 <- clm(rating ~ temp + contact, scale=~contact, data=wine) expect_false( givesWarnings(checkContrasts(fm0$S.terms, fm0$S.contrasts)) ) expect_false( givesWarnings(checkContrasts(fm0$terms, fm0$contrasts)) ) expect_false( givesWarnings(checkContrasts(fm0$terms, fm0$S.contrasts)) ) expect_false( givesWarnings(checkContrasts(fm0$terms, fm0$S.contrasts)) ) ## Warning: expect_warning( checkContrasts(fm0$S.terms, fm0$contrasts) , "variable 'temp' is absent: its contrasts will be ignored") }) ordinal/tests/testthat/test-clm.R0000644000176200001440000000537212447563614016574 0ustar liggesusers context("Appropriate error and warning messages from clm()") test_that("formula is specified in clm", { expect_error(clm(nominal=~contact, data=wine), "Model needs a formula") expect_error(clm(scale=~contact, data=wine), "Model needs a formula") expect_error(clm(), "Model needs a formula") }) test_that("response is not in scale or nominal", { ## No response in formula: expect_error( fm <- clm(~ temp + contact, data=wine) , "'formula' needs a response") ## response in scale: expect_error( fm <- clm(rating ~ temp, scale=rating ~ contact, data=wine) , "response not allowed in 'scale'") expect_error( fm <- clm(rating ~ temp, nominal=rating ~ contact, data=wine) , "response not allowed in 'nominal'") wine2 <- wine wine2$rate <- as.numeric(as.character(wine2$rating)) expect_error( fm <- clm(rate ~ temp + contact, data=wine2) , "response in 'formula' needs to be a factor") }) test_that("offset is allowed in formula, but not in scale and nominal", { wine2 <- wine set.seed(1) wine2$off <- runif(nrow(wine)) ## offset in formula is fine: expect_is( clm(rating ~ temp + contact + offset(off), data=wine2) , "clm") expect_is( clm(rating ~ offset(off), nominal=~contact, data=wine2) , "clm") ## no other terms in formula. ## offset in scale is also fine: expect_is( clm(rating ~ temp, scale=~contact + offset(off), data=wine2) , "clm") expect_is( clm(rating ~ contact + temp, scale=~offset(off), data=wine2) , "clm") ## no other terms in scale.
## offset as argument is not allowed: expect_error( clm(rating ~ temp + contact, offset=off, data=wine2) , "offset argument not allowed: specify 'offset' in formula or scale arguments instead") ## offset in nominal is not allowed: expect_error( clm(rating ~ temp, nominal=~contact + offset(off), data=wine2) , "offset not allowed in 'nominal'") expect_error( clm(rating ~ temp, nominal=~1 + offset(off), data=wine2) , "offset not allowed in 'nominal'") }) test_that("Intercept is needed and assumed", { expect_is( fm <- clm(rating ~ 1, data=wine) , "clm") expect_warning( fm <- clm(rating ~ -1 + temp, data=wine) , "an intercept is needed and assumed in 'formula'") expect_warning( fm <- clm(rating ~ 0 + temp, data=wine) , "an intercept is needed and assumed in 'formula'") expect_warning( fm <- clm(rating ~ 0, data=wine) , "an intercept is needed and assumed in 'formula'") ## and similar with scale (+nominal) }) ## test_that("", { ## ## }) ordinal/tests/testthat/test-clm-formula.R0000644000176200001440000001557613724062771020245 0ustar liggesuserscontext("Appropriate evaluation of formulae in clm()") ## These fail and give appropriate error messages: test_that("standard formulae are interpreted correctly/give right error messages", { expect_error( fm1 <- clm(rating ~ contact, scale=temp, data=wine) , "object 'temp' not found") expect_error( fm1 <- clm(rating ~ contact, scale=~Temp, data=wine) , "object 'Temp' not found") expect_error( fm1 <- clm(rating ~ contact, scale="temp", data=wine) , "unable to interpret 'formula', 'scale' or 'nominal'") sca <- "temp" expect_error( fm1 <- clm(rating ~ contact, scale=sca, data=wine) , "unable to interpret 'formula', 'scale' or 'nominal'") ## sca <- as.formula(sca) ## sca <- as.formula(temp) ## sca <- with(wine, as.formula(temp)) ## These all work as intended with no warnings or errors: fm1 <- clm(rating ~ contact, scale="~temp", data=wine) fm2 <- clm(rating ~ contact, scale=~temp, data=wine) sca <- "~temp" fm3 <- clm(rating ~ contact, scale=sca, data=wine) sca <- as.formula("~temp") fm4 <- clm(rating ~ contact, scale=sca, data=wine) fm5 <- clm(rating ~ contact, scale=as.formula(~temp), data=wine) fm6 <- clm(rating ~ contact, scale=as.formula("~temp"), data=wine) ## Test that they are all clm objects: for(txt in paste0("fm", 1:6)) expect_is(eval(parse(text=txt)), "clm") ################################# ## can evaluate if 'formula' is a character: f <- "rating ~ contact + temp" expect_is(clm(f, data=wine), "clm") expect_is(clm(as.formula(f), data=wine), "clm") ################################# }) test_that("variables are found in the right environments", { ## finding variables in the environment of the formula: makeform <- function() { f1 <- as.formula(rating ~ temp + contact) rating <- wine$rating temp <- wine$temp contact <- wine$contact f1 } ## 'makeform' makes a formula object in the environment of the ## function makeform: f1 <- makeform() f1 # print expect_is(f1, "formula") ## If we give the data, we can evaluate the model: expect_is(fm1 <- clm(f1, data=wine), "clm") ## We can also evaluate the model because the data are available in ## the environment associated with the formula: expect_is(fm1 <- clm(f1), "clm") ## For instance, the 'rating' variable is not found in the Global ## environment; we have to evaluate the 'name' of 'rating' in the ## appropriate environment: (try(rating, silent=TRUE)) expect_error( rating , "'rating' not found") expect_is( eval(as.name("rating"), envir=environment(f1)) , "factor") ## If instead we generate the formula in the
Global environment where ## the variables are not found, we cannot evaluate the model: f2 <- as.formula(rating ~ temp + contact) expect_error( fm2 <- clm(f2) ) ## Setting the appropriate environment of the formula restores the ## ability to evaluate the model: environment(f2) <- environment(f1) expect_is( fm2 <- clm(f2) , "clm") ################################# ## Use of formula-objects in location, scale and nominal: ## Bug-report from Lluís Marco Almagro ## 5 May 2010 17:58 f <- formula(rating ~ temp) fs <- formula( ~ contact) expect_is( m2 <- clm(f, scale = fs, data = wine) , "clm") }) test_that("data indexing works in formulae", { ################################# ## Other ways to construct formulas: set.seed(12345) y <- factor(sample(1:4,20,replace=TRUE)) x <- rnorm(20) data <- data.frame(y=y,x=x) rm(x, y) expect_is( fit <- clm(data$y ~ data$x) , "clm") expect_is( fit <- clm(data[,1] ~ data[,2]) , "clm") ## This previously failed, but now works: expect_is( fit <- clm(data$y ~ data$x, ~data$x) , "clm") }) test_that("clm may be invoked within functions", { ################################# ## Evaluation within other functions: ## date: January 18th 2012. ## ## The problem was raised by Stefan Herzog (stefan.herzog@unibas.ch) ## January 12th 2012 in trying to make clm work with glmulti. fun.clm <- function(formula, data) ### This only works because clm via eclm.model.frame is careful to ### evaluate the 'formula' in the parent environment such that it is not the ### character "formula" that is attempted evaluated. clm(formula, data = data) fun2.clm <- function(formula, data, weights, subset) { ### This should be the safe way to ensure evaluation of clm in the ### right environment. mc <- match.call() mc[[1]] <- as.name("clm") eval.parent(mc) } expect_is( fun.clm(rating ~ temp + contact, data=wine) ## works , "clm") expect_is( fun2.clm(rating ~ temp + contact, data=wine) ## works , "clm") form1 <- "rating ~ temp + contact" expect_is( fun.clm(form1, data=wine) ## works , "clm") expect_is( fun2.clm(form1, data=wine) ## works , "clm") form2 <- formula(rating ~ temp + contact) expect_is( fm1 <- fun.clm(form2, data=wine) ## works , "clm") expect_is( fm2 <- fun2.clm(form2, data=wine) ## works , "clm") ## Notice that clm is not able to get the name of the data (wine) ## correct when using fun.clm: expect_true(deparse(fm1$call$data) == "data") expect_true(deparse(fm2$call$data) == "wine") }) test_that("no line breaking in long formulae", { ################################# ## Evaluation of long formulas: no line breaking in getFullForm: rhs <- paste(names(soup)[c(3, 5:12)], collapse=" + ") Location <- as.formula(paste("SURENESS ~ ", rhs, sep=" ")) Scale <- as.formula("~ PROD") expect_is( fm5 <- clm(Location, scale=Scale, data=soup) , "clm") }) test_that("'.'-notation works in formula", { ################################# ## Check that "."-notation works in formula: ## December 25th 2014, RHBC data(wine) wine2 <- wine[c("rating", "contact", "temp")] ## str(wine2) fm0 <- clm(rating ~ ., data=wine2) fm1 <- clm(rating ~ contact + temp, data=wine2) keep <- c("coefficients", "logLik", "info") fun <- function(x, y) stopifnot(isTRUE(all.equal(x, y))) mapply(fun, fm0[keep], fm1[keep]) fun <- function(x, y) {expect_equal(x, y); invisible()} mapply(fun, fm0[keep], fm1[keep]) ################################# }) test_that("long formulae work in clmm", { # Long formulae also work: wine2 <- wine names(wine2) <- lapply(names(wine), paste0, "_quite_long") expect_warning( mm <- clmm(rating_quite_long ~
temp_quite_long + contact_quite_long + (1|judge_quite_long), data = wine2) , regexp = NA) }) ordinal/tests/testthat/test-clm-profile.R0000644000176200001440000000046512450322423020217 0ustar liggesuserscontext("Testing error message from profile.clm") expect_warning( fm2 <- clm(rating ~ contact, scale=~contact, nominal=~contact, data=wine) , "\\(1\\) Hessian is numerically singular") expect_error(profile(fm2) , "Cannot get profile when vcov\\(fitted\\) contains NAs") ordinal/tests/testthat/test-clmm-checkRanef.R0000644000176200001440000000214713277541507021002 0ustar liggesuserscontext("Testing error-warning-message from clmm via checkRanef") ## Make example with more random effects than observations: wine$fake <- factor(c(1:65, 1:65)[1:nrow(wine)]) wine$fakeToo <- factor(1:nrow(wine)) ## Check warning, error and 'message' messages: expect_warning( fmm2 <- clmm(rating ~ temp + contact + (1|judge) + (1|fake), data=wine) , "no. random effects") expect_warning( fmm2 <- clmm(rating ~ temp + contact + (1|judge) + (1|fake), data=wine, checkRanef="warn") , "no. random effects") expect_error( fmm2 <- clmm(rating ~ temp + contact + (1|judge) + (1|fake), data=wine, checkRanef="error") , "no. random effects") expect_message( fmm2 <- clmm(rating ~ temp + contact + (1|judge) + (1|fake), data=wine, checkRanef="message") , "no. random effects") expect_error( fmm2 <- clmm(rating ~ temp + contact + (1|fakeToo), data=wine, checkRanef="error") , "no. random effects") expect_error( fmm2 <- clmm(rating ~ temp + contact + (1|judge) + (1|fakeToo), data=wine, checkRanef="error") , "no. random effects") ordinal/tests/testthat/test-utils.R0000644000176200001440000000105312450062502017137 0ustar liggesusers context("testing namedList") a <- 1 b <- 2 c <- 3 d <- list(e=2, f=factor(letters[rep(1:2, 2)])) g <- matrix(runif(9), 3) h <- NULL test_that("namedList returns a named list", { res <- namedList(a, b, c) expect_equal(names(res), c("a", "b", "c")) expect_equivalent(res, list(a, b, c)) res <- namedList(a, b, c, d, g) expect_equal(names(res), c("a", "b", "c", "d", "g")) expect_equivalent(res, list(a, b, c, d, g)) res <- namedList(a, h) expect_equal(names(res), c("a", "h")) expect_equivalent(res, list(a, h)) }) ordinal/tests/testthat/test-misc.R0000644000176200001440000000024112457750665016755 0ustar liggesuserscontext("Test of general functionality") test_that("citation reports year", { txt <- citation("ordinal") expect_true(as.logical(grep("year", txt))) }) ordinal/tests/clmm.R0000755000176200001440000000230014334175405014123 0ustar liggesuserslibrary(ordinal) data(wine) ################################# ## Estimation with a single simple RE term: ## Laplace: fmm1 <- clmm(rating ~ contact + temp + (1|judge), data=wine) summary(fmm1) ## GHQ: fmm.ghq <- clmm(rating ~ contact + temp + (1|judge), data=wine, nAGQ=-10) summary(fmm.ghq) ## AGQ: fmm.agq <- clmm(rating ~ contact + temp + (1|judge), data=wine, nAGQ=10) summary(fmm.agq) ## tests: ## Notice warning about Laplace with multiple REs when nAGQ != 1: fmm1 <- try(clmm(rating ~ contact + temp + (1|judge) + (1|bottle), data=wine, nAGQ=10)) stopifnot(inherits(fmm1, "try-error")) ################################# ## Estimation with several RE terms: data(soup, package="ordinal") fmm <- clmm(SURENESS ~ PROD + (1|RESP) + (1|PROD:RESP), data=soup, threshold="equidistant") summary(fmm) ################################# ## Estimation with implicit intercept: fm1 <- clmm(rating ~ 1 + (1|judge), data = wine) fm2 <- clmm(rating ~ (1|judge), data = wine) fm3 <- clmm(rating ~ 0 + 
(1|judge), data = wine) stopifnot(isTRUE(all.equal(coef(fm1), coef(fm2), tolerance=1e-5)), isTRUE(all.equal(coef(fm1), coef(fm3), tolerance=1e-5))) ordinal/tests/testAnova.clm2.R0000644000176200001440000000272412176227351016002 0ustar liggesuserslibrary(ordinal) options(contrasts = c("contr.treatment", "contr.poly")) ## More manageable data set: (tab26 <- with(soup, table("Product" = PROD, "Response" = SURENESS))) dimnames(tab26)[[2]] <- c("Sure", "Not Sure", "Guess", "Guess", "Not Sure", "Sure") dat26 <- expand.grid(sureness = as.factor(1:6), prod = c("Ref", "Test")) dat26$wghts <- c(t(tab26)) m1 <- clm(sureness ~ prod, scale = ~prod, data = dat26, weights = wghts, link = "logit") ## anova m2 <- update(m1, scale = ~1) anova(m1, m2) mN1 <- clm(sureness ~ 1, nominal = ~prod, data = dat26, link = "logit") anova(m1, mN1) anova(m1, m2, mN1) ## dropterm if(require(MASS)) { dropterm(m1, test = "Chi") mB1 <- clm(SURENESS ~ PROD + GENDER + SOUPTYPE, scale = ~ COLD, data = soup, link = "probit") dropterm(mB1, test = "Chi") # or ## addterm addterm(mB1, scope = ~.^2, test = "Chi") ## addterm(mB1, scope = ~ . + AGEGROUP + SOUPFREQ, ## test = "Chi", which = "location") ## addterm(mB1, scope = ~ . + GENDER + SOUPTYPE, ## test = "Chi", which = "scale") ## Fit model from polr example: ## data(housing, package = "MASS") fm1 <- clm(Sat ~ Infl + Type + Cont, weights = Freq, data = housing) ## addterm(fm1, ~ Infl + Type + Cont, test= "Chisq", which = "scale") dropterm(fm1, test = "Chisq") fm2 <- update(fm1, scale =~ Cont) fm3 <- update(fm1, formula =~.-Cont, nominal =~ Cont) anova(fm1, fm2, fm3) } ordinal/tests/test.clm.Theta.R0000644000176200001440000001201012447075623015770 0ustar liggesuserslibrary(ordinal) ################################# ## 1 categorical variable in nominal: fm <- clm(rating ~ temp, nominal=~contact, data=wine) fm$Theta fm$alpha.mat ## Threshold effects: fm <- clm(rating ~ temp, nominal=~contact, data=wine, threshold="symmetric") fm$Theta fm$alpha.mat fm <- clm(rating ~ temp, nominal=~contact, data=wine, threshold="equidistant") fm$Theta fm$alpha.mat ## Singular fit is still ok (with a warning, though) fm <- clm(rating ~ contact, nominal=~temp, data=wine) fm$alpha.mat fm$Theta ################################# ## 1 continuous variable: set.seed(123) x <- rnorm(nrow(wine), sd=1) fm <- clm(rating ~ temp, nominal=~ x, data=wine) fm$alpha.mat fm$Theta fm <- clm(rating ~ temp, nominal=~ poly(x, 2), data=wine) fm$alpha.mat fm$Theta ################################# ## 1 categorical + 1 continuous variable: set.seed(123) x <- rnorm(nrow(wine), sd=1) fm <- clm(rating ~ temp, nominal=~contact + x, data=wine) fm$alpha.mat fm$Theta fm <- clm(rating ~ temp, nominal=~contact + x, data=wine, threshold="symmetric") fm$alpha.mat fm$Theta ################################# ### NOTE: To get the by-threshold nominal effects of continuous terms ## use: with(fm, t(apply(alpha.mat, 1, function(th) tJac %*% th))) ################################# ## Interactions: fm <- clm(rating ~ temp, nominal=~contact:x, data=wine) fm$alpha.mat fm$Theta fm <- clm(rating ~ temp, nominal=~contact+x+contact:x, data=wine) fm$alpha.mat fm$Theta fm <- clm(rating ~ temp, nominal=~contact*x, data=wine) fm$alpha.mat fm$Theta ## polynomial terms: fm <- clm(rating ~ temp, nominal=~contact + poly(x, 2), data=wine) fm$alpha.mat fm$Theta ## logical variables: (treated like numeric variables) wine$Con <- as.character(wine$contact) == "yes" fm <- clm(rating ~ temp, nominal=~Con, data=wine) fm$Theta fm$alpha.mat wine$Con.num <- 1 * wine$Con 
fm <- clm(rating ~ temp, nominal=~Con.num, data=wine) fm$Theta fm$alpha.mat ################################# ## Two continuous variables: set.seed(321) y <- rnorm(nrow(wine), sd=1) fm1 <- clm(rating ~ temp, nominal=~y + x, data=wine) fm1$alpha.mat fm1$Theta ## summary(fm1) ################################# ## 1 categorical + 2 continuous variables: fm1 <- clm(rating ~ temp, nominal=~y + contact + x, data=wine) fm1$alpha.mat fm1$Theta fm1 <- clm(rating ~ temp, nominal=~contact + x + contact:x + y, data=wine) summary(fm1) fm1$Theta fm1$alpha.mat fm1 <- clm(rating ~ temp, nominal=~contact*x + y, data=wine) fm1$Theta fm1$alpha.mat t(fm1$alpha.mat) fm1 ################################# ## ordered factors (behave like numerical variables): data(soup, package="ordinal") fm2 <- clm(SURENESS ~ 1, nominal=~PRODID + DAY, data=soup) fm2$Theta fm2$alpha.mat prodid <- factor(soup$PRODID, ordered=TRUE) fm2 <- clm(SURENESS ~ 1, nominal=~prodid + DAY, data=soup) fm2$alpha.mat fm2$Theta fm2 <- clm(SURENESS ~ 1, nominal=~prodid, data=soup) fm2$alpha.mat fm2$Theta ################################# ## Aliased Coefficients: ## ## Example where the interaction in the nominal effects is aliased (by ## design). Here the two Theta matrices coincide. The alpha.mat ## matrices are similar except one has an extra row with NAs: soup2 <- soup levels(soup2$DAY) levels(soup2$GENDER) xx <- with(soup2, DAY == "2" & GENDER == "Female") ## Model with additive nominal effects: fm8 <- clm(SURENESS ~ PRODID, nominal= ~ DAY + GENDER, data=soup2, subset=!xx) fm8$alpha.mat fm8$Theta ## Model with non-additive, but aliased nominal effects: fm9 <- clm(SURENESS ~ PRODID, nominal= ~ DAY * GENDER, data=soup2, subset=!xx) fm9$alpha.mat fm9$Theta stopEqual <- function(x, y, ca=FALSE) stopifnot(isTRUE(all.equal(x, y, check.attributes=ca))) stopEqual(fm8$alpha.mat, fm9$alpha.mat[1:3, ]) stopEqual(fm8$Theta, fm9$Theta) stopEqual(logLik(fm8), logLik(fm9)) ################################# ## Weights: set.seed(12345) wts <- runif(nrow(soup)) fm2 <- clm(SURENESS ~ 1, nominal=~SOUPTYPE + DAY, data=soup, weights=wts) fm2$Theta ## Offset (correctly gives an error) fm2 <- try(clm(SURENESS ~ 1, nominal=~SOUPTYPE + DAY + offset(wts), data=soup), silent=TRUE) stopifnot(inherits(fm2, "try-error")) ################################# ### Other (misc) examples: fm2 <- clm(SURENESS ~ 1, nominal=~SOUPTYPE + DAY, data=soup) fm2$Theta fm2 fm2 <- clm(SURENESS ~ 1, nominal=~SOUPTYPE * DAY, data=soup) fm2$Theta fm2 fm2$alpha.mat fm2 <- clm(SURENESS ~ 1, nominal=~SOUPTYPE * DAY, data=soup, threshold="symmetric") fm2$Theta fm2$alpha.mat ################################# ### Check correctness of Theta matrix when intercept is removed in ### nominal formula: ### December 25th 2014, RHBC fm1 <- clm(rating ~ temp, nominal=~contact-1, data=wine) fm2 <- clm(rating ~ temp, nominal=~contact, data=wine) stopifnot(isTRUE(all.equal(fm1$Theta, fm2$Theta))) stopifnot(isTRUE(all.equal(fm1$logLik, fm2$logLik))) wine2 <- wine wine2$contact <- relevel(wine2$contact, "yes") fm3 <- clm(rating ~ temp, nominal=~contact, data=wine2) stopifnot(isTRUE(all.equal(coef(fm1, na.rm=TRUE), coef(fm3)))) ################################# ordinal/tests/test.clm.convergence.R0000644000176200001440000000343312431426564017227 0ustar liggesuserslibrary(ordinal) ## Testing that errors in chol() are caught soon enough: cy <- with(wine, which(temp == "cold" & contact == "yes")) wine2 <- subset(wine, subset=(!1:nrow(wine) %in% cy)) wine2[c(9, 15, 46), "rating"] <- NA fm1 <- clm(rating ~ temp,
scale=~contact, nominal=~contact, data=wine2) fm1 <- try(clm(rating ~ temp, scale=~contact, nominal=~contact, data=wine2, control=list(gradTol=1e-12)), silent=TRUE) fm2 <- try(clm(rating ~ temp, scale=~contact, nominal=~contact, data=wine2, control=list(gradTol=1e-15)), silent=TRUE) ## These gave errors in version 2014.11-12. stopifnot(!inherits(fm1, "try-error")) stopifnot(!inherits(fm2, "try-error")) summary(fm1) summary(fm2) ## Error in convergence.clm() due to bad evaluation of model ## environment with update(object, doFit=FALSE): wine3 <- wine set.seed(1234) wts <- runif(nrow(wine3), 0, 2) fm3 <- clm(rating ~ temp + contact, data=wine3, weights=wts) c0 <- convergence(fm3) set.seed(1234) fm3 <- clm(rating ~ temp + contact, data=wine3, weights=runif(nrow(wine3), 0, 2)) c1 <- convergence(fm3) c0$info$logLik.Error c1$info$logLik.Error all.equal(c0$info$logLik.Error, c1$info$logLik.Error) ## In version 2014.11-14: ## > wine3 <- wine ## > set.seed(1234) ## > wts <- runif(nrow(wine3), 0, 2) ## > fm3 <- clm(rating ~ temp + contact, data=wine3, ## + weights=wts) ## > c0 <- convergence(fm3) ## > set.seed(1234) ## > fm3 <- clm(rating ~ temp + contact, data=wine3, ## + weights=runif(nrow(wine3), 0, 2)) ## > c1 <- convergence(fm3) ## > c0$info$logLik.Error ## [1] "<1e-10" ## > c1$info$logLik.Error ## [1] "4.80e+00" ## > all.equal(c0$info$logLik.Error, c1$info$logLik.Error) ## [1] "1 string mismatch" stopifnot(c0$info$logLik.Error == c1$info$logLik.Error) ordinal/tests/test.clm.model.matrix.R0000644000176200001440000001104213633002525017317 0ustar liggesuserslibrary(ordinal) ## source("test.clm.model.matrix.R") ## library(devtools) ## r2path <- "/Users/rhbc/Documents/Rpackages/ordinal/pkg/ordinal" ## clean_dll(pkg = r2path) ## load_all(r2path) ## Check that get_clmDesign works in standard setting: fm1 <- clm(rating ~ temp, scale=~contact, nominal=~contact, data=wine) contr <- c(fm1$contrasts, fm1$S.contrasts, fm1$nom.contrasts) XX <- ordinal:::get_clmDesign(fm1$model, terms(fm1, "all"), contrasts=contr) XX2 <- update(fm1, method="design") (keep <- intersect(names(XX), names(XX2))) (test <- mapply(function(x, y) isTRUE(all.equal(x, y)), XX[keep], XX2[keep])) stopifnot(all(test)) ## Check that get_clmDesign works with singular fit and NAs: cy <- with(wine, which(temp == "cold" & contact == "yes")) wine2 <- subset(wine, subset=(!1:nrow(wine) %in% cy)) wine2[c(9, 15, 46), "rating"] <- NA fm1 <- clm(rating ~ temp, scale=~contact, nominal=~contact, data=wine2) contr <- c(fm1$contrasts, fm1$S.contrasts, fm1$nom.contrasts) XX <- ordinal:::get_clmDesign(fm1$model, terms(fm1, "all"), contrasts=contr) XX2 <- update(fm1, method="design") (keep <- intersect(names(XX), names(XX2))) (test <- mapply(function(x, y) isTRUE(all.equal(x, y)), XX[keep], XX2[keep])) stopifnot(all(test)) ## In this situation update and get_clmRho give the same results: wine2 <- wine fm1 <- clm(rating ~ temp + contact, data=wine2) ## OK rho1 <- ordinal:::get_clmRho.clm(fm1) l1 <- as.list(rho1) l2 <- as.list(update(fm1, doFit=FALSE)) (test <- mapply(function(x, y) isTRUE(all.equal(x, y)), l1, l2[names(l1)])) stopifnot(all(test)) ## If we modify the data (or other subset, weights, formulae, etc.) ## used in the model call, the results from update no longer correspond ## to the elements of the fitted model object. 
get_clmRho gets it ## right on the other hand: wine2[10:13, "rating"] <- NA l3 <- as.list(ordinal:::get_clmRho.clm(fm1)) l4 <- as.list(update(fm1, doFit=FALSE)) (test <- mapply(function(x, y) isTRUE(all.equal(x, y)), l1, l3)) stopifnot(all(test)) ## same (test <- mapply(function(x, y) isTRUE(all.equal(x, y)), l3, l4[names(l3)])) stopifnot(sum(!test) == 8) ## not all the same anymore! ## In conclusion l1, l2, and l3 are identical. l4 is different. ################################# ## Test that checkContrasts gives appropriate warnings: contr <- c(temp="contr.sum", contact="contr.sum") fm1 <- clm(rating ~ temp + contact, scale=~contact, data=wine) ## OK fm1 <- clm(rating ~ temp + contact, scale=~contact, data=wine, contrasts=contr) ## OK fm1 <- clm(rating ~ temp, scale=~contact, data=wine, contrasts=contr) ## OK ## These should give warnings: fm1 <- clm(rating ~ temp, contrasts=c(contact="contr.sum"), data=wine) fm1 <- clm(rating ~ temp, contrasts=contr, data=wine) fm1 <- clm(rating ~ 1, scale=~contact, contrasts=c(temp="contr.sum"), data=wine) fm1 <- clm(rating ~ 1, scale=~contact, contrasts=list(temp="contr.sum"), data=wine) fm0 <- clm(rating ~ temp + contact, scale=~contact, data=wine) ordinal:::checkContrasts(fm0$S.terms, fm0$contrasts) ordinal:::checkContrasts(fm0$S.terms, fm0$S.contrasts) ordinal:::checkContrasts(fm0$terms, fm0$contrasts) ordinal:::checkContrasts(fm0$terms, fm0$S.contrasts) ################################# ## Check that clm and model.matrix respect contrast settings: options("contrasts" = c("contr.treatment", "contr.poly")) fm0 <- clm(rating ~ temp + contact, data=wine) options("contrasts" = c("contr.sum", "contr.poly")) fm1 <- clm(rating ~ temp + contact, data=wine) stopifnot(all(model.matrix(fm0)$X[, 2] %in% c(0, 1))) stopifnot(all(model.matrix(fm1)$X[, 2] %in% c(1, -1))) ################################# ## Check that model.matrix results do not depend on global contrast ## setting: options("contrasts" = c("contr.sum", "contr.poly")) fm0 <- clm(rating ~ temp + contact, scale=~contact, data=wine) MM <- model.matrix(fm0) options("contrasts" = c("contr.treatment", "contr.poly")) MM2 <- model.matrix(fm0) for(x in MM) print(head(x)) for(x in MM2) print(head(x)) stopifnot(all(mapply(all.equal, MM, MM2))) ################################# ## This gave a warning before getContrasts was implemented: fm0 <- clm(rating ~ temp + contact, scale=~contact, data=wine) MM <- model.matrix(fm0) ## > fm0 <- clm(rating ~ temp + contact, scale=~contact, data=wine) ## > MM <- model.matrix(fm0) ## Warning message: ## In model.matrix.default(res$S.terms, data = fullmf, contrasts.arg = getContrasts(res$S.terms, : ## variable 'temp' is absent, its contrast will be ignored for(x in MM) print(head(x)) ordinal/tests/ranef.loading.R0000644000176200001440000000032114334204201015673 0ustar liggesusers# check that ranef and VarCorr work even after loading ordinal: library(lme4) fm1 <- lmer(Reaction ~ Days + (Days | Subject), data=sleepstudy) ranef(fm1) VarCorr(fm1) library(ordinal) ranef(fm1) VarCorr(fm1) ordinal/tests/clmm.formula.R0000755000176200001440000001231111752454332015572 0ustar liggesuserslibrary(ordinal) data(wine) ################################# ## Appropriate evaluation of formulas: ## These all work as intended with no warnings or errors: fm1 <- clmm(rating ~ contact + (1|judge), data=wine) fm1 fm1 <- clmm("rating ~ contact + (1|judge)", data=wine) fm1 fm1 <- clmm(as.formula("rating ~ contact + (1|judge)"), data=wine) fm1 fm1 <- clmm(as.formula(rating ~ contact + (1|judge)),
data=wine) fm1 ################################# ### finding variables in the environment of the formula: makeform <- function() { f1 <- as.formula(rating ~ temp + contact + (1|judge)) rating <- wine$rating temp <- wine$temp contact <- wine$contact judge <- wine$judge f1 } ## 'makeform' makes a formula object in the environment of the ## function makeform: f1 <- makeform() f1 # print class(f1) ## If we give the data, we can evaluate the model: fm1 <- clmm(f1, data=wine) ## We can also evaluate the model because the data are available in ## the environment associated with the formula: fm1 <- clmm(f1) ## For instance, the 'rating' variable is not found in the Global ## environment; we have to evaluate the 'name' of 'rating' in the ## appropriate environment: (try(rating, silent=TRUE)) eval(as.name("rating"), envir=environment(f1)) ## If instead we generate the formula in the Global environment where ## the variables are not found, we cannot evaluate the model: f2 <- as.formula(rating ~ temp + contact + (1|judge)) (try(fm2 <- clmm(f2), silent=TRUE)) environment(f2) <- environment(f1) fm2 <- clmm(f2) ################################# ## Use of formula-objects f <- formula(rating ~ temp + contact + (1|judge)) m2 <- clmm(f, data = wine) summary(m2) ################################# ## Other ways to construct formulas: set.seed(12345) y <- factor(sample(1:4,20,replace=TRUE)) x <- rnorm(20) b <- gl(5, 4, labels=letters[1:5]) data <- data.frame(y=y, x=x, b=b) rm(x, y, b) clmm(y ~ x + (1|b), data=data) fit <- clmm(data$y ~ data$x + (1|data$b)) fit fit <- clmm(data[, 1] ~ data[, 2] + (1|data[, 3])) fit ################################# ## Evaluation within other functions: ## date: January 18th 2012. ## ## The problem was raised by Stefan Herzog (stefan.herzog@unibas.ch) ## January 12th 2012 in trying to make clmm work with glmulti. fun.clmm <- function(formula, data) ### This only works because clmm via eclmm.model.frame is careful to ### evaluate the 'formula' in the parent environment such that it is not the ### character "formula" that is attempted evaluated. clmm(formula, data = data) fun2.clmm <- function(formula, data, weights, subset) { ### This should be the safe way to ensure evaluation of clmm in the ### right environment. mc <- match.call() mc[[1]] <- as.name("clmm") eval.parent(mc) } fun.clmm(rating ~ temp + contact + (1|judge), data=wine) ## works fun2.clmm(rating ~ temp + contact + (1|judge), data=wine) ## works form1 <- "rating ~ temp + contact + (1|judge)" fun.clmm(form1, data=wine) ## works fun2.clmm(form1, data=wine) ## works form2 <- formula(rating ~ temp + contact + (1|judge)) fun.clmm(form2, data=wine) ## works fun2.clmm(form2, data=wine) ## works ## Notice that clmm is not able to get the name of the data (wine) ## correct when using fun.clmm. ################################# ## ## Example 2: using clmm function ## # ## ## Now I want to consider judge as a random effect to account for ## ## grouping structure of data ## mod2 <- clmm(rating ~ temp + contact + (1|judge), data=wine) ## ## ##Again, I started by using my own code to run all potential models: ## ## put names of all your variables in this vector: ## vl2 <- c("temp", "contact") ## ## generate list of possible combinations of variables: ## combos2 <- NULL ## for(i in 1:length(vl2)) { ## combos2 <- c(combos2, combn(vl2, i, simplify = F)) ## } ## ## create formulae and run models one by one, saving them as model1, ## ## model2 etc...
## for (i in 1:length(combos2)) { ## vs2 <- paste(combos2[[i]], collapse=" + ") ## f2 <- formula(paste("rating ~ ", vs2, "+(1|judge)", sep="")) ## print(f2) ## assign(paste("model", i, sep=""), clmm(f2, data=wine)) ## } ## summary(model1) # etc ## summary(model2) # etc ## summary(model3) # etc ## ## models <- vector("list", length(combos2)) ## for(i in 1:length(combos2)) { ## vs2 <- paste(combos2[[i]], collapse=" + ") ## f2 <- formula(paste("rating ~ ", vs2, "+(1|judge)", sep="")) ## print(f2) ## models[[i]] <- clmm(f2, data=wine) ## ## assign(paste("model", i, sep=""), clmm(f2, data=wine)) ## } ## ## ## Coefficients, AIC and BIC: ## lapply(models, function(m) coef(summary(m))) ## lapply(models, AIC) ## lapply(models, BIC) ## ## ## library(MuMIn) ## ## dd2 <- dredge(mod2) ## does not work ## ## ?dredge ## ## traceback() ## ## mod2$formula ## ## terms(as.formula(formula(mod2))) ## ## ## ## library(lme4) ## ## fmm1 <- lmer(response ~ temp + contact + (1|judge), data=wine) ## ## fmm1 ## ## terms(as.formula(lme4:::formula(fmm1))) ## ## terms(as.formula(formula(fmm1))) ordinal/tests/anova.R0000755000176200001440000000111213277532341014300 0ustar liggesuserslibrary(ordinal) data(wine) fm1 <- clm(rating ~ temp, data=wine) fmm1 <- clmm(rating ~ temp + (1|judge), data=wine) ## These now give identical printed results: ## Previously the printed model names were messed up when anova.clmm ## were called. anova(fm1, fmm1) anova(fmm1, fm1) ## Testing if 'test' and 'type' arguments are ignored properly: fm1 <- clm(rating ~ temp + contact, data=wine) fm2 <- clm(rating ~ temp, data=wine) anova(fm1, fm2, test="Chi") anova(fm1, fm2, type="Chi") anova(fm1, fm2) ## calling anova.clmm anova(fmm1, fm1, test="Chi") anova(fmm1, fm1, type="Chi") ordinal/tests/test.sign.R0000644000176200001440000000610313633002525015104 0ustar liggesusers# test.sign.R # Test the use of sign.location and sign.nominal in clm.control(): library(ordinal) fm1 <- clm(rating ~ temp + contact, data=wine) fm2 <- clm(rating ~ temp + contact, data=wine, sign.location="positive") # dput(names(fm1)) keep <- c("aliased", "alpha", "cond.H", "contrasts", "convergence", "df.residual", "edf", "fitted.values", "formula", "formulas", "gradient", "info", "link", "logLik", "maxGradient", "message", "model", "n", "niter", "nobs", "start", "terms", "Theta", "threshold", "tJac", "xlevels", "y", "y.levels") check <- mapply(function(x, y) isTRUE(all.equal(x, y)), fm1[keep], fm2[keep]) stopifnot(all(check)) stopifnot(isTRUE(all.equal( fm1$beta, - fm2$beta ))) fm1 <- clm(rating ~ temp, nominal=~ contact, data=wine) fm2 <- clm(rating ~ temp, nominal=~ contact, data=wine, sign.nominal="negative") keep <- c("aliased", "beta", "cond.H", "contrasts", "convergence", "df.residual", "edf", "fitted.values", "formula", "formulas", "gradient", "info", "link", "logLik", "maxGradient", "message", "model", "n", "niter", "nobs", "start", "terms", "Theta", "threshold", "tJac", "xlevels", "y", "y.levels") # check <- mapply(function(x, y) isTRUE(all.equal(x, y)), fm1, fm2) check <- mapply(function(x, y) isTRUE(all.equal(x, y)), fm1[keep], fm2[keep]) stopifnot(all(check)) stopifnot(isTRUE(all.equal( fm1$alpha[5:8], -fm2$alpha[5:8] ))) fm1 <- clm(rating ~ temp, nominal=~ contact, data=wine) fm2 <- clm(rating ~ temp, nominal=~ contact, data=wine, sign.nominal="negative", sign.location="positive") keep <- c("aliased", "cond.H", "contrasts", "convergence", "df.residual", "edf", "fitted.values", "formula", "formulas", "gradient", "info", "link", "logLik", "maxGradient", "message", 
"model", "n", "niter", "nobs", "start", "terms", "Theta", "threshold", "tJac", "xlevels", "y", "y.levels") # check <- mapply(function(x, y) isTRUE(all.equal(x, y)), fm1, fm2) check <- mapply(function(x, y) isTRUE(all.equal(x, y)), fm1[keep], fm2[keep]) stopifnot(all(check)) stopifnot( isTRUE(all.equal(fm1$alpha[5:8], -fm2$alpha[5:8])), isTRUE(all.equal(fm1$beta, -fm2$beta)) ) # Check predict method: newData <- with(wine, expand.grid(temp=levels(temp), contact=levels(contact))) (p1 <- predict(fm1, newdata=newData)) (p2 <- predict(fm2, newdata=newData)) stopifnot(isTRUE(all.equal(p1, p2))) stopifnot(isTRUE( all.equal(predict(fm1, newdata=wine, se=TRUE, interval=TRUE), predict(fm2, newdata=wine, se=TRUE, interval=TRUE)) )) # Check profile and confint methods: confint.default(fm1) confint.default(fm2) stopifnot( isTRUE(all.equal(confint(fm1), -confint(fm2)[, 2:1, drop=FALSE], check.attributes=FALSE)) ) fm1 <- clm(rating ~ temp + contact, data=wine) fm2 <- clm(rating ~ temp + contact, data=wine, sign.location="positive") pr1 <- profile(fm1) pr2 <- profile(fm2) stopifnot( isTRUE(all.equal(confint(fm1), - confint(fm2)[, 2:1], check.attributes=FALSE)) ) ordinal/tests/clm.formula.R0000644000176200001440000001126412447062614015420 0ustar liggesuserslibrary(ordinal) ## library(devtools) ## r2path <- "/Users/rhbc/Documents/Rpackages/ordinal/pkg/ordinal" ## clean_dll(pkg = r2path) ## load_all(r2path) ################################# ## Appropriate evaluation of formulas: ## These fail and give appropriate error messages: ## fm1 <- clm(rating ~ contact, scale=temp, data=wine) ## fm1 <- clm(rating ~ contact, scale=~Temp, data=wine) ## fm1 <- clm(rating ~ contact, scale="temp", data=wine) ## sca <- "temp" ## fm1 <- clm(rating ~ contact, scale=sca, data=wine) ## sca <- as.formula(sca) ## sca <- as.formula(temp) ## sca <- with(wine, as.formula(temp)) ## These all work as intended with no warnings or errors: fm1 <- clm(rating ~ contact, scale="~temp", data=wine) fm1 <- clm(rating ~ contact, scale=~temp, data=wine) sca <- "~temp" fm1 <- clm(rating ~ contact, scale=sca, data=wine) sca <- as.formula("~temp") fm1 <- clm(rating ~ contact, scale=sca, data=wine) fm1 <- clm(rating ~ contact, scale=as.formula(~temp), data=wine) fm1 <- clm(rating ~ contact, scale=as.formula("~temp"), data=wine) ################################# ## can evaluate if 'formula' is a character: f <- "rating ~ contact + temp" clm(f, data=wine) clm(as.formula(f), data=wine) ################################# ### finding variables in the environment of the formula: makeform <- function() { f1 <- as.formula(rating ~ temp + contact) rating <- wine$rating temp <- wine$temp contact <- wine$contact f1 } ## 'makeform' makes are formula object in the environment of the ## function makeform: f1 <- makeform() f1 # print class(f1) ## If we give the data, we can evaluate the model: fm1 <- clm(f1, data=wine) ## We can also evaluate the model because the data are available in ## the environment associated with the formula: fm1 <- clm(f1) ## For instance, the 'rating' variable is not found in the Global ## environment; we have to evaluate the 'name' of 'rating' in the ## appropriate environment: (try(rating, silent=TRUE)) eval(as.name("rating"), envir=environment(f1)) ## If instead we generate the formula in the Global environment where ## the variables are not found, we cannot evaluate the model: f2 <- as.formula(rating ~ temp + contact) (try(fm2 <- clm(f2), silent=TRUE)) environment(f2) <- environment(f1) fm2 <- clm(f2) ################################# 
## Use of formula-objects in location, scale and nominal: ## Bug-report from Lluís Marco Almagro ## 5 May 2010 17:58 f <- formula(rating ~ temp) fs <- formula( ~ contact) m2 <- clm(f, scale = fs, data = wine) summary(m2) ################################# ## Other ways to construct formulas: set.seed(12345) y <- factor(sample(1:4,20,replace=TRUE)) x <- rnorm(20) data <- data.frame(y=y,x=x) rm(x, y) fit <- clm(data$y ~ data$x) fit fit <- clm(data[,1] ~ data[,2]) fit ## This previously failed, but now works: fit <- clm(data$y ~ data$x, ~data$x) fit ################################# ## Evaluation within other functions: ## date: January 18th 2012. ## ## The problem was raised by Stefan Herzog (stefan.herzog@unibas.ch) ## January 12th 2012 in trying to make clm work with glmulti. fun.clm <- function(formula, data) ### This only works because clm via eclm.model.frame is careful to ### evaluate the 'formula' in the parent environment such that it is ### not the character "formula" that gets evaluated. clm(formula, data = data) fun2.clm <- function(formula, data, weights, subset) { ### This should be the safe way to ensure evaluation of clm in the ### right environment. mc <- match.call() mc[[1]] <- as.name("clm") eval.parent(mc) } fun.clm(rating ~ temp + contact, data=wine) ## works fun2.clm(rating ~ temp + contact, data=wine) ## works form1 <- "rating ~ temp + contact" fun.clm(form1, data=wine) ## works fun2.clm(form1, data=wine) ## works form2 <- formula(rating ~ temp + contact) fun.clm(form2, data=wine) ## works fun2.clm(form2, data=wine) ## works ## Notice that clm is not able to get the name of the data (wine) ## correct when using fun.clm. ################################# ## Evaluation of long formulas: no line breaking in getFullForm: data(soup, package="ordinal") rhs <- paste(names(soup)[c(3, 5:12)], collapse=" + ") Location <- as.formula(paste("SURENESS ~ ", rhs, sep=" ")) Scale <- as.formula("~ PROD") fm5 <- clm(Location, scale=Scale, data=soup) summary(fm5) ################################# ## Check that "."-notation works in formula: ## December 25th 2014, RHBC data(wine) wine2 <- wine[c("rating", "contact", "temp")] str(wine2) fm0 <- clm(rating ~ ., data=wine2) fm1 <- clm(rating ~ contact + temp, data=wine2) keep <- c("coefficients", "logLik", "info") fun <- function(x, y) stopifnot(isTRUE(all.equal(x, y))) mapply(fun, fm0[keep], fm1[keep]) ################################# ordinal/tests/test.clm.flex.link.R0000644000176200001440000000523013654764615016630 0ustar liggesusers# test.clm.flex.link.R library(ordinal) fm <- clm(rating ~ contact + temp, data=wine, link="log-gamma") fm summary(fm) vcov(fm) logLik(fm) extractAIC(fm) fm2 <- update(fm, link="probit") anova(fm, fm2) head(model.matrix(fm)$X) head(model.frame(fm)) coef(fm) coef(summary(fm)) nobs(fm) terms(fm) # profile(fm) # not implemented confint(fm) predict(fm, se=TRUE, interval = TRUE) predict(fm, type="class") newData <- expand.grid(temp = c("cold", "warm"), contact = c("no", "yes")) ## Predicted probabilities in all five response categories for each of ## the four cases in newData: predict(fm, newdata=newData, type="prob") predict(fm, newdata=newData, type="class") predict(fm, newdata=newData, type="prob", se.fit = TRUE, interval = TRUE) ## Aranda-Ordaz link: fm <- clm(rating ~ contact + temp, data=wine, link="Aranda-Ordaz") fm summary(fm) vcov(fm) logLik(fm) extractAIC(fm) fm2 <- update(fm, link="logit") anova(fm, fm2) head(model.matrix(fm)$X) head(model.frame(fm)) coef(fm) coef(summary(fm)) nobs(fm) terms(fm) #
profile(fm) # not implemented confint(fm) predict(fm, se=TRUE, interval = TRUE) predict(fm, type="class") newData <- expand.grid(temp = c("cold", "warm"), contact = c("no", "yes")) ## Predicted probabilities in all five response categories for each of ## the four cases in newData: predict(fm, newdata=newData, type="prob") predict(fm, newdata=newData, type="class") predict(fm, newdata=newData, type="prob", se.fit = TRUE, interval = TRUE) ######################################################################## ### Models with scale + flex link (or cauchit link) ######################################################################## fm <- clm(SURENESS ~ PRODID, scale=~PROD, data = soup, link="Aranda-Ordaz") summary(fm) fm <- clm(SURENESS ~ PRODID, scale=~PROD, data = soup, link="log-gamma") summary(fm) fm <- clm(SURENESS ~ PRODID, scale=~PROD, data = soup, link="cauchit") summary(fm) ######################################################################## ### clm.fit ######################################################################## ## Example with log-gamma: fm1 <- clm(rating ~ contact + temp, data=wine, link="log-gamma") summary(fm1) ## get the model frame containing y and X: mf1 <- update(fm1, method="design") names(mf1) res <- clm.fit(mf1$y, mf1$X, link="log-gamma") ## invoking the factor method coef(res) stopifnot(all.equal(coef(res), coef(fm1))) ## Example with Aranda-Ordaz: fm1 <- clm(rating ~ contact + temp, data=wine, link="Aranda-Ordaz") mf1 <- update(fm1, method="design") res <- clm.fit(mf1$y, mf1$X, link="Aranda") ## invoking the factor method stopifnot(all.equal(coef(res), coef(fm1))) ordinal/tests/clmm.methods.R0000644000176200001440000000204514335121500015554 0ustar liggesuserslibrary(ordinal) data(wine) ################################# ## model.matrix method for clmm-objects: fmm1 <- clmm(rating ~ contact + temp + (1|judge), data=wine) mm <- model.matrix(fmm1) stopifnot(inherits(mm, "matrix"), dim(mm) == c(72, 3)) ################################# ## anova.clmm works even if formula does not have an environment: fmm1 <- clmm(rating ~ temp * contact + (1|judge), data = wine) fmm2 <- clmm(rating ~ temp + contact + (1|judge), data = wine) environment(fmm1$formula) <- NULL environment(fmm2$formula) <- NULL anova(fmm1, fmm2) ################################# ## Test that ranef, condVar and VarCorr work as they are supposed to whether or ## not nlme and lme4 are loaded: fm <- clmm(rating ~ temp + contact + (1|judge), data = wine) fm ranef(fm) VarCorr(fm) condVar(fm) summary(fm) library(nlme) ranef(fm) VarCorr(fm) condVar(fm) library(lme4) ranef(fm) VarCorr(fm) condVar(fm) fm1 <- lmer(Reaction ~ Days + (Days | Subject), data=sleepstudy) ranef(fm1) VarCorr(fm1) ranef(fm) VarCorr(fm) condVar(fm) summary(fm) ordinal/tests/test0weights.R0000644000176200001440000000431512450062502015620 0ustar liggesuserslibrary(ordinal) options(contrasts = c("contr.treatment", "contr.poly")) ## library(devtools) ## r2path <- "/Users/rhbc/Documents/Rpackages/ordinal/pkg/ordinal" ## clean_dll(pkg = r2path) ## load_all(r2path) ## one zero weight: data(wine, package="ordinal") wts <- rep(1, nrow(wine)) wine$rating wts[1] <- 0 fm1 <- clm(rating ~ contact + temp, data=wine, weights=wts) fm1 fm1$n ## 72 fm1$nobs ## 71 confint(fm1) plot(profile(fm1)) plot(slice(fm1), 5) convergence(fm1) drop1(fm1, test="Chi") add1(fm1, scope=~.^2, test="Chi") ## clm_anova(fm1) pred <- predict(fm1, newdata=wine) ## OK step.fm1 <- step(fm1, trace=0) fitted(fm1) dim(model.matrix(fm1)$X) dim(model.matrix(fm1, "B")$B1) mf <- 
update(fm1, method="model.frame") str(mf) wts <- mf$wts dim(model.matrix(fm1)$X[wts > 0, , drop=FALSE]) fm1b <- clm(rating ~ temp, scale=~contact, data=wine, weights=wts) summary(fm1b) pr <- profile(fm1b) confint(pr) plot(pr, 1) fm1c <- clm(rating ~ temp, nominal=~contact, data=wine, weights=wts) summary(fm1c) pr <- profile(fm1c) confint(pr) plot(pr, 1) ## nominal.test(fm1) ## scale.test(fm1) ## zero out an entire response category: wts2 <- 1 * with(wine, rating != "2") fm2 <- clm(rating ~ contact + temp, data=wine, weights=wts2) fm2 fm2$n ## 72 fm2$nobs ## 50 ## Dimension of X and B1, B2 differ: dim(model.matrix(fm2)$X) dim(model.matrix(fm2, "B")$B1) ## Cannot directly evaluate predictions on the original data: try(predict(fm2, newdata=wine), silent=TRUE)[1] confint(fm2) profile(fm2) plot(slice(fm2), 5) step.fm2 <- step(fm2, trace=0) fitted(fm2) ## Scale and nominal effects: fm2b <- clm(rating ~ temp, scale=~contact, data=wine, weights=wts2) summary(fm2b) pr <- profile(fm2b) confint(pr) plot(pr, 1) fm2c <- clm(rating ~ temp, nominal=~contact, data=wine, weights=wts2) summary(fm2c) pr <- profile(fm2c) confint(pr) plot(pr, 1) pred <- predict(fm2c, newdata=wine[!names(wine) %in% "rating"]) pred <- predict(fm2b, newdata=wine[!names(wine) %in% "rating"]) ## nominal.test(fm2) ## scale.test(fm2) ## Different data sets (error): try(anova(fm1, fm2), silent=TRUE)[1] ## OK ## Test clm.fit: wts2 <- 1 * with(wine, rating != "2") mf2 <- update(fm2, method="design") fm3 <- with(mf2, clm.fit(y, X, weights=wts)) ################################# ordinal/tests/test.general.R0000644000176200001440000000010512446243451015563 0ustar liggesusers txt <- citation("ordinal") stopifnot(as.logical(grep("year", txt))) ordinal/tests/test.clm.single.anova.R0000644000176200001440000000342213633002525017303 0ustar liggesusers# test.clm.single.anova.R library(ordinal) # WRE says "using if(requireNamespace("pkgname")) is preferred, if possible." # even in tests: assertError <- function(expr, ...) if(requireNamespace("tools")) tools::assertError(expr, ...) else invisible() assertWarning <- function(expr, ...) if(requireNamespace("tools")) tools::assertWarning(expr, ...) 
else invisible() fm <- clm(rating ~ temp * contact, scale=~contact, data=wine) anova(fm, type="I") anova(fm, type="II") anova(fm, type="III") anova(fm, type=1) anova(fm, type=2) anova(fm, type=3) anova(fm, type="1") anova(fm, type="2") anova(fm, type="3") anova(fm, type="marginal") # Nominal effects: fm <- clm(rating ~ temp, nominal=~contact, data=wine) anova(fm) # Flexible links: fm1 <- clm(rating ~ temp + contact, link="log-gamma", data=wine) anova(fm1, type=1) anova(fm1, type=2) anova(fm1, type=3) # Equivalence of tests irrespective of contrasts: fm1 <- clm(SURENESS ~ PRODID * SOUPFREQ, data=soup) # summary(fm1) (an1 <- anova(fm1, type=3)) fm2 <- clm(SURENESS ~ PRODID * SOUPFREQ, data=soup, contrasts = list(SOUPFREQ = "contr.sum", PRODID = "contr.SAS")) # summary(fm2) anova(fm1, fm2) (an2 <- anova(fm2, type=3)) stopifnot( isTRUE(all.equal(an1, an2, check.attributes=FALSE)) ) # Aliased coefficients: fm1 <- clm(SURENESS ~ PRODID * DAY, data=soup) anova(fm1, type=1) anova(fm1, type=2) anova(fm1, type=3) # Aliased term (due to nominal effects): fm <- clm(rating ~ temp * contact, nominal=~contact, data=wine) anova(fm, type=1) anova(fm, type=2) anova(fm, type=3) # model with all NA in vcov(object): fm <- clm(rating ~ temp * contact, nominal=~contact, scale=~contact, data=wine) assertError(anova(fm, type=1)) # error assertError(anova(fm, type=2)) # error assertError(anova(fm, type=3)) # error all(is.na(vcov(fm))) ordinal/tests/test.clm.predict.R0000644000176200001440000001376612447061401016365 0ustar liggesuserslibrary(ordinal) ## source("test.clm.predict.R") ## library(devtools) ## r2path <- "/Users/rhbc/Documents/Rpackages/ordinal/pkg/ordinal" ## clean_dll(pkg = r2path) ## load_all(r2path) cy <- with(wine, which(temp == "cold" & contact == "yes")) options("contrasts" = c("contr.treatment", "contr.poly")) getOption("contrasts") ## Example model wine1.clm <- clm(rating ~ temp*contact, subset = -cy, data = wine) summary(wine1.clm) names(wine1.clm) wine.clm <- clm(rating~temp*contact, data=wine) summary(wine.clm) names(wine.clm) ## Make sure the same elements are present with a rank deficient model ## fit: stopifnot(all(names(wine1.clm) == names(wine.clm))) ## With treatment contrasts: options("contrasts" = c("contr.treatment", "contr.poly")) wine.clm <- clm(rating~temp*contact, data=wine) coef(summary(wine.clm)) head(model.matrix(wine.clm)$X) wine.clm$contrasts head(pred1 <- predict(wine.clm)$fit) ## With sum contrasts: options("contrasts" = c("contr.sum", "contr.poly")) wine.clm <- clm(rating~temp*contact, data=wine) coef(summary(wine.clm)) head(model.matrix(wine.clm)$X) wine.clm$contrasts head(pred2 <- predict(wine.clm)$fit) ## Mixture of sum and treatment contrasts: options("contrasts" = c("contr.treatment", "contr.poly")) wine.clm <- clm(rating~temp*contact, data=wine, contrasts=list(temp="contr.sum")) coef(summary(wine.clm)) head(model.matrix(wine.clm)$X) wine.clm$contrasts head(pred3 <- predict(wine.clm)$fit) stopifnot(isTRUE(all.equal(pred1, pred2))) stopifnot(isTRUE(all.equal(pred1, pred3))) ################################# ### Now for a rank deficient fit: ################################# cy <- with(wine, which(temp == "cold" & contact == "yes")) options("contrasts" = c("contr.treatment", "contr.poly")) wine1.clm <- clm(rating ~ temp*contact, subset = -cy, data = wine) coef(summary(wine1.clm)) attributes(model.matrix(wine1.clm)$X)$contrasts wine1.clm$contrasts head(pred4 <- predict(wine1.clm)$fit) options("contrasts" = c("contr.sum", "contr.poly")) wine1.clm <- clm(rating ~ 
temp*contact, subset = -cy, data = wine) attributes(model.matrix(wine1.clm)$X)$contrasts options("contrasts" = c("contr.treatment", "contr.poly")) attributes(model.matrix(wine1.clm)$X)$contrasts ## Notice that the contrasts change in the attributes of the fit!!! coef(summary(wine1.clm)) wine1.clm$contrasts head(pred5 <- predict(wine1.clm)$fit) head(cbind(pred4, pred5)) stopifnot(isTRUE(all.equal(pred4, pred5))) options("contrasts" = c("contr.treatment", "contr.poly")) wine1.clm <- clm(rating ~ temp*contact, subset = -cy, data = wine, contrasts=list(temp="contr.sum")) coef(summary(wine1.clm)) head(model.matrix(wine1.clm)$X) attributes(model.matrix(wine1.clm)$X)$contrasts wine1.clm$contrasts head(pred6 <- predict(wine1.clm)$fit) head(cbind(pred4, pred5, pred6)) stopifnot(isTRUE(all.equal(pred4, pred6))) ################################################################## ## Compare equality of fitted values for models with different contrasts: options("contrasts" = c("contr.treatment", "contr.poly")) fm1 <- clm(rating ~ temp + contact, data=wine) fitted(fm1) options("contrasts" = c("contr.sum", "contr.poly")) fm2 <- clm(rating ~ temp + contact, data=wine) fitted(fm2) options("contrasts" = c("contr.treatment", "contr.poly")) fm3 <- clm(rating ~ temp + contact, data=wine, contrasts=list(contact="contr.sum")) fitted(fm3) stopifnot(isTRUE(all.equal(fitted(fm1), fitted(fm2)))) stopifnot(isTRUE(all.equal(fitted(fm1), fitted(fm3)))) ################################################################## ## Compare equality of fitted values for models with different ## contrasts in face of aliased coefficients: options("contrasts" = c("contr.treatment", "contr.poly")) cy <- with(wine, which(temp == "cold" & contact == "yes")) Wine <- subset(wine, subset=!(temp == "cold" & contact == "yes")) fm1 <- clm(rating ~ temp + contact, data=Wine) options("contrasts" = c("contr.sum", "contr.poly")) fm2 <- clm(rating ~ temp + contact, data=Wine) options("contrasts" = c("contr.treatment", "contr.poly")) fm3 <- clm(rating ~ temp + contact, data=Wine, contrasts=list(contact="contr.sum")) stopifnot(isTRUE(all.equal(fitted(fm1), fitted(fm2)))) stopifnot(isTRUE(all.equal(fitted(fm1), fitted(fm3)))) stopifnot(isTRUE(all.equal(predict(fm1)$fit, predict(fm2)$fit))) stopifnot(isTRUE(all.equal(predict(fm1)$fit, predict(fm3)$fit))) ################################# ## Does this also happen if the wine data has changed? options("contrasts" = c("contr.treatment", "contr.poly")) Wine <- subset(wine, subset=!(temp == "cold" & contact == "yes")) fm1 <- clm(rating ~ temp + contact, data=Wine) fit1 <- fitted(fm1) pred1 <- predict(fm1)$fit Wine <- wine pred2 <- predict(fm1)$fit stopifnot(isTRUE(all.equal(fit1, pred1))) stopifnot(isTRUE(all.equal(fit1, pred2))) ## What if weights, say, is an expression? ## Notice that updating the model object changes it: set.seed(123) fm1 <- clm(rating ~ temp + contact, data=wine, weights=runif(nrow(wine), .5, 1.5)) fm2 <- update(fm1) stopifnot(isTRUE(all.equal(fitted(fm1), predict(fm1)$fit))) stopifnot(!isTRUE(all.equal(fitted(fm1), fitted(fm2)))) ################################# ## Test equality of fits and predictions of models with: ## 'x + I(x^2)' and 'poly(x, 2)': ## December 25th 2014, RHBC. 
data(wine) set.seed(1) x <- rnorm(nrow(wine), sd=2) + as.numeric(wine$rating) range(x) ## Comparison of 'x + I(x^2)' and 'poly(x, 2)': fm3 <- clm(rating ~ temp + x + I(x^2), data=wine) fm4 <- clm(rating ~ temp + poly(x, 2), data=wine) ## Same model fits, but different parameterizations: stopifnot( !isTRUE(all.equal(coef(fm3), coef(fm4), check.names=FALSE)) ) stopifnot(isTRUE(all.equal(logLik(fm3), logLik(fm4)))) newData <- expand.grid(temp = levels(wine$temp), x=seq(-1, 7, 3)) predict(fm3, newdata=newData)$fit predict(fm4, newdata=newData)$fit stopifnot(isTRUE(all.equal(fitted(fm3), fitted(fm4)))) stopifnot(isTRUE( all.equal(predict(fm3, newdata=newData)$fit, predict(fm4, newdata=newData)$fit))) ################################# ordinal/tests/clm.fit.R0000644000176200001440000000223613331062435014526 0ustar liggesuserslibrary(ordinal) data(wine) ## clm.fit with nominal and scale effects: ## get simple model: fm1 <- clm(rating ~ temp, scale=~temp, nominal=~ contact, data=wine, method="design") str(fm1, give.attr=FALSE) fm1$control$method <- "Newton" res <- clm.fit(fm1) names(res) res$Theta ## construct some weights and offsets: set.seed(1) off1 <- runif(length(fm1$y)) set.seed(1) off2 <- rnorm(length(fm1$y)) set.seed(1) wet <- runif(length(fm1$y)) ## Fit various models: fit <- clm.fit(fm1$y, fm1$X, fm1$S, fm1$NOM, weights=wet) Coef <- c(-0.905224120279548, 1.31043498891987, 3.34235590523008, 4.52389661722693, -3.03954652971192, -1.56922389038976, -1.75662549320839, -1.16845464236365, 2.52988580848393, -0.0261457032829033) stopifnot(all.equal(coef(fit), Coef, check.attributes=FALSE, tol=1e-6)) str(fit) fit <- clm.fit(fm1$y, fm1$X, fm1$S, fm1$NOM, offset=off1) str(fit) fit <- clm.fit(fm1$y, fm1$X, fm1$S, fm1$NOM, offset=off1, S.offset=off2) str(fit) fit <- clm.fit(fm1$y, fm1$X, fm1$S) str(fit) fit <- clm.fit(fm1$y, fm1$X) str(fit) fit <- clm.fit(fm1$y) coef(fit) str(fit) ## Remember: compare with corresponding .Rout file ordinal/tests/test.makeThresholds.R0000644000176200001440000000115113633002525017121 0ustar liggesusers# test.makeThresholds.R library(ordinal) # Previous bug which is now fixed: res <- ordinal:::makeThresholds(letters[1:3], "symmetric") stopifnot(length(res$alpha.names) == res$nalpha) # length(res$alpha.names) used to be 4 # Real data example: wine <- within(wine, { rating_comb3b <- rating levels(rating_comb3b) <- c("1-2", "1-2", "3", "4-5", "4-5") }) wine$rating_comb3b[1] <- "4-5" # Need to remove the zero here to avoid inf MLE ftable(rating_comb3b ~ temp + contact, data=wine) fm.comb3_c <- clm(rating_comb3b ~ contact, #scale=~contact, threshold = "symmetric", data=wine) # no error ordinal/tests/clmm.control.R0000755000176200001440000000227111761151324015604 0ustar liggesuserslibrary(ordinal) data(wine) ### 3 options for specifying control arguments: ## 1) control is a simple list, e.g. list(trace=-1) ## 2) control is a call to clmm.control ## 3) control is an empty list; list() ## all in combination with extra control arguments.
ordinal:::getCtrlArgs(clmm.control(), list(maxIter=200)) ordinal:::getCtrlArgs(list(), list(maxIter=200)) ordinal:::getCtrlArgs(list(), list(trace=-1)) ordinal:::getCtrlArgs(list(), list(trace=1)) ordinal:::getCtrlArgs(list(), list()) ordinal:::getCtrlArgs(list(maxIter=2), list()) ordinal:::getCtrlArgs(clmm.control(), list()) ordinal:::getCtrlArgs(clmm.control(maxIter=100), list(maxIter=200)) ordinal:::getCtrlArgs(clmm.control(maxIter=100), list(maxIter=200)) ordinal:::getCtrlArgs(clmm.control(), list(trace=1)) ordinal:::getCtrlArgs(clmm.control(), list(trace=-1)) ordinal:::getCtrlArgs(clmm.control(trace=1), list()) ordinal:::getCtrlArgs(clmm.control(trace=-1), list()) ordinal:::getCtrlArgs(clmm.control(trace=0), list()) ## Don't specify trace twice - surprising behavior might occur: ordinal:::getCtrlArgs(clmm.control(trace=1), list(trace=-1)) ordinal:::getCtrlArgs(clmm.control(trace=-1), list(trace=1)) ordinal/src/0000755000176200001440000000000014533322576012503 5ustar liggesusersordinal/src/links.c0000755000176200001440000001624514334154604013774 0ustar liggesusers///////////////////////////////////////////////////////////////////////////// // Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen // // This file is part of the ordinal package for R (*ordinal*) // // *ordinal* is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // *ordinal* is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // A copy of the GNU General Public License is available at // <https://www.r-project.org/Licenses/> and/or // <https://www.gnu.org/licenses/>. ///////////////////////////////////////////////////////////////////////////// #include "links.h" /* This file implements scalar distribution, density and gradient functions */ /*-------------------------------------------------------*/ /* Scalar cumulative distribution functions (CDFs) */ /*-------------------------------------------------------*/ double d_pgumbel(double q, double loc, double scale, int lower_tail) // Consider implementing 'int give_log' to follow the convention from // pnorm etc. { if(ISNAN(q)) // true for NA and NaN return NA_REAL; if(q == R_PosInf) q = 1.; else if(q == R_NegInf) q = 0.; else { q = (q - loc) / scale; q = exp( -exp( -q)); } return !lower_tail ? 1 - q : q; } double d_pgumbel2(double q, double loc, double scale, int lower_tail) // this is (partly) redundant since d_pgumbel2(q) = 1 - d_pgumbel(-q) { if(ISNAN(q)) // true for NA and NaN return NA_REAL; if(q == R_PosInf) q = 1; else if(q == R_NegInf) q = 0; else { q = (-q - loc) / scale; q = exp(-exp(-q)); } return !lower_tail ? q : 1 - q; } double d_pAO(double q, double lambda, int lower_tail) { if(ISNAN(q) || ISNAN(lambda)) // true for NA and NaN return NA_REAL; if(q == R_PosInf) q = 1; else if(q == R_NegInf) q = 0; else { if(lambda < 1.0e-6) error("'lambda' has to be positive. lambda = %e was supplied\n", lambda); q = 1 - R_pow(lambda * exp(q) + 1, -1/lambda); } return !lower_tail ?
1 - q : q; } double d_plgamma(double eta, double lambda, int lower_tail) { double v; if(ISNAN(eta) || ISNAN(lambda)) // true for NA and NaN return NA_REAL; if(eta == R_PosInf) v = 1; else if(eta == R_NegInf) v = 0; else { v = R_pow_di(lambda, -2) * exp(lambda * eta); if(lambda < 1.0e-6) v = 1 - pgamma(v, R_pow_di(lambda, -2), /*scale = */ 1, 1 /*lower_tail*/, 0 /*give_log*/); if(lambda > -1.0e-6) v = pgamma(v, R_pow_di(lambda, -2), /*scale = */ 1, 1 /*lower_tail*/, 0 /*give_log*/); if(lambda >= -1.0e-6 && lambda <= 1.0e-6) // pnorm(x, mu, sigma, lower_tail, give_log); v = pnorm(eta, 0., 1., 1, 0); } return lower_tail ? v : 1 - v; } /*-------------------------------------------------------*/ /* Scalar probability density functions (PDFs) */ /*-------------------------------------------------------*/ double d_dgumbel(double x, double loc, double scale, int give_log) { if(ISNAN(x)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) // if(x == INFINITE || x == -INFINITE) // seems to work as well. return 0; // this special case needs to be handled separately x = (x - loc) / scale; x = -exp(-x) - x - log(scale); return give_log ? x : exp(x); } double d_dgumbel2(double x, double loc, double scale, int give_log) { if(ISNAN(x)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) return 0; x = (-x - loc) / scale; x = -exp(-x) - x - log(scale); return give_log ? x : exp(x); } double d_dAO(double eta, double lambda, int give_log) { if(ISNAN(eta) || ISNAN(lambda)) // true for NA and NaN return NA_REAL; if(eta == R_PosInf || eta == R_NegInf) return 0; if(lambda < 1.0e-6) error("'lambda' has to be positive. lambda = %e was supplied\n", lambda); eta -= (1 + 1 / lambda) * log(lambda * exp(eta) + 1); return give_log ? eta : exp(eta); } double d_dlgamma(double x, double lambda, int give_log) { if(ISNAN(x) || ISNAN(lambda)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) return 0; if(lambda < 1.0e-5 && lambda > -1.0e-5) // lambda close to zero return dnorm(x, 0. , 1., give_log); double q_2 = R_pow_di(lambda, -2); x *= lambda; x = log(fabs(lambda)) + q_2 * log(q_2) - lgammafn(q_2) + q_2 * (x - exp(x)); return !give_log ? exp(x) : x; } /*-------------------------------------------------------*/ /* Scalar gradients of probability density functions */ /*-------------------------------------------------------*/ double d_glogis(double x) { // Gradient of dlogis(x) wrt. x if(ISNAN(x)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) // if(x == INFINITE || x == -INFINITE) // seems to work as well. return 0; // this special case needs to be handled separately /* Store the sign of x, compute the gradient for the absolute value and restore the sign. This is needed to avoid exp(LARGE) to blow up and the function to return NaN. */ int sign = x > 0; //could use fsign() instead... x = exp(-fabs(x)); x = 2 * x * x * R_pow_di(1 + x, -3) - x * R_pow_di(1 + x, -2); return sign ? 
x : -x; } double d_gnorm(double x) { if(ISNAN(x)) // true for NA and NaN return NA_REAL; if(x == INFINITY || x == -INFINITY) return 0; else return -x * dnorm(x, 0., 1., 0); } double d_gcauchy(double x) { if(ISNAN(x)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) return 0; return x = -2 * x / M_PI * R_pow_di(1 + x * x, -2); } double d_ggumbel(double x) { if(ISNAN(x)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) return 0; x = exp(-x); if(x == INFINITY) return 0; double eq = exp(-x); return -eq * x + eq * x * x; } double d_ggumbel2(double x) // redundant function... { return -d_ggumbel(-x); } double d_gAO(double eta, double lambda) { if(ISNAN(eta) || ISNAN(lambda)) // true for NA and NaN return NA_REAL; if(eta == R_PosInf || eta == R_NegInf) return 0; double lex = lambda * exp(eta); if(lex == R_PosInf || lex == 0) return 0.; double y = d_dAO(eta, lambda, 0/*give_log*/); return y == 0. ? 0. : y * (1 - (1 + 1/lambda) * lex / (1 + lex)); } double d_glgamma(double x, double lambda) { if(ISNAN(x) || ISNAN(lambda)) // true for NA and NaN return NA_REAL; if(x == R_PosInf || x == R_NegInf) return 0.; if(lambda < 1.0e-5 && lambda > -1.0e-5) // lambda close to zero return -x * dnorm(x, 0., 1., 0/*give_log*/); double z = exp(lambda * x); if(z == R_PosInf || z == 0.) return 0.; double y = d_dlgamma(x, lambda, 0/*give_log*/); return y <= 0. ? 0.0 : y * (1 - exp(lambda * x)) / lambda; // Equivalent to: /* if(y <= 0) return 0.0; else return y * (1 - exp(lambda * x)) / lambda; */ } ordinal/src/init.c0000644000176200001440000001130314334154604013602 0ustar liggesusers///////////////////////////////////////////////////////////////////////////// // Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen // // This file is part of the ordinal package for R (*ordinal*) // // *ordinal* is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // *ordinal* is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // A copy of the GNU General Public License is available at // <https://www.r-project.org/Licenses/> and/or // <https://www.gnu.org/licenses/>.
///////////////////////////////////////////////////////////////////////////// #include <R.h> #include <Rinternals.h> #include <stdlib.h> // for NULL #include <R_ext/Rdynload.h> /* .C calls */ extern void dAO_C(void *, void *, void *, void *); extern void dgumbel_C(void *, void *, void *, void *, void *); extern void dgumbel2_C(void *, void *, void *, void *, void *); extern void dlgamma_C(void *, void *, void *, void *); extern void gAO_C(void *, void *, void *); extern void gcauchy_C(void *, void *); extern void getNAGQ(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void getNGHQ_C(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void ggumbel_C(void *, void *); extern void ggumbel2_C(void *, void *); extern void glgamma_C(void *, void *, void *); extern void glogis_C(void *, void *); extern void gnorm_C(void *, void *); extern void grad_C(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void gradC(void *, void *, void *, void *, void *, void *, void *, void *); extern void grFacSum_C(void *, void *, void *, void *, void *); extern void hess(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void hessC(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void nll(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void NRalg(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void NRalgv3(void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *, void *); extern void pAO_C(void *, void *, void *, void *); extern void pgumbel_C(void *, void *, void *, void *, void *); extern void pgumbel2_C(void *, void *, void *, void *, void *); extern void plgamma_C(void *, void *, void *, void *); /* .Call calls */ extern SEXP get_fitted(SEXP, SEXP, SEXP, SEXP); static const R_CMethodDef CEntries[] = { {"dAO_C", (DL_FUNC) &dAO_C, 4}, {"dgumbel_C", (DL_FUNC) &dgumbel_C, 5}, {"dgumbel2_C", (DL_FUNC) &dgumbel2_C, 5}, {"dlgamma_C", (DL_FUNC) &dlgamma_C, 4}, {"gAO_C", (DL_FUNC) &gAO_C, 3}, {"gcauchy_C", (DL_FUNC) &gcauchy_C, 2}, {"getNAGQ", (DL_FUNC) &getNAGQ, 19}, {"getNGHQ_C", (DL_FUNC) &getNGHQ_C, 17}, {"ggumbel_C", (DL_FUNC) &ggumbel_C, 2}, {"ggumbel2_C", (DL_FUNC) &ggumbel2_C, 2}, {"glgamma_C", (DL_FUNC) &glgamma_C, 3}, {"glogis_C", (DL_FUNC) &glogis_C, 2}, {"gnorm_C", (DL_FUNC) &gnorm_C, 2}, {"grad_C", (DL_FUNC) &grad_C, 16}, {"gradC", (DL_FUNC) &gradC, 8}, {"grFacSum_C", (DL_FUNC) &grFacSum_C, 5}, {"hess", (DL_FUNC) &hess, 13}, {"hessC", (DL_FUNC) &hessC, 11}, {"nll", (DL_FUNC) &nll, 17}, {"NRalg", (DL_FUNC) &NRalg, 29}, {"NRalgv3", (DL_FUNC) &NRalgv3, 24}, {"pAO_C", (DL_FUNC) &pAO_C, 4}, {"pgumbel_C", (DL_FUNC) &pgumbel_C, 5}, {"pgumbel2_C", (DL_FUNC) &pgumbel2_C, 5}, {"plgamma_C", (DL_FUNC) &plgamma_C, 4}, {NULL, NULL, 0} }; static const R_CallMethodDef CallEntries[] = { {"get_fitted", (DL_FUNC) &get_fitted, 4}, {NULL, NULL, 0} }; void R_init_ordinal(DllInfo
*dll) { R_registerRoutines(dll, CEntries, CallEntries, NULL, NULL); R_useDynamicSymbols(dll, FALSE); } ordinal/src/get_fitted.c0000755000176200001440000001150214334154604014761 0ustar liggesusers///////////////////////////////////////////////////////////////////////////// // Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen // // This file is part of the ordinal package for R (*ordinal*) // // *ordinal* is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // *ordinal* is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // A copy of the GNU General Public License is available at // <https://www.r-project.org/Licenses/> and/or // <https://www.gnu.org/licenses/>. ///////////////////////////////////////////////////////////////////////////// #include <R.h> #include <Rinternals.h> #include <Rmath.h> #include "links.h" SEXP get_fitted(SEXP, SEXP, SEXP, SEXP); // ------------------------------------------------------- SEXP get_fitted(SEXP eta1p, SEXP eta2p, SEXP linkp, SEXP lambdap) { /* Compute fitted values (probabilities) from vectors of linear predictors (eta1 and eta2) given the link function (linkp) and an optional lambda parameter. eta1 and eta2 are required to be equal length numeric vectors, linkp a character vector and lambdap a numeric scalar. return: vector of fitted values of same length as eta1 and eta2. */ SEXP ans = PROTECT(duplicate(coerceVector(eta1p, REALSXP))); eta2p = PROTECT(coerceVector(eta2p, REALSXP)); linkp = PROTECT(coerceVector(linkp, STRSXP)); const char *linkc = CHAR(asChar(linkp)); double *eta1 = REAL(ans), *eta2 = REAL(eta2p), lambda = asReal(lambdap); int i, nans = LENGTH(ans); if(LENGTH(eta2p) != nans) { // ".. don't have to UNPROTECT before calling into "error"; it is not a bug to do so, but it is not needed either, error will result in a long jump that will UNPROTECT automatically." Email from Tomas Kalibera 19Apr2018.
; UNPROTECT(3); error("'eta1' and 'eta2' should have the same length"); } if(strcmp(linkc, "probit") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // pnorm(x, mu, sigma, lower_tail, give_log); eta1[i] = pnorm(eta1[i], 0.0, 1.0, 1, 0) - pnorm(eta2[i], 0.0, 1.0, 1, 0); else eta1[i] = pnorm(eta2[i], 0.0, 1.0, 0, 0) - pnorm(eta1[i], 0.0, 1.0, 0, 0); } } else if(strcmp(linkc, "logit") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // plogis(x, mu, sigma, lower_tail, give_log); eta1[i] = plogis(eta1[i], 0.0, 1.0, 1, 0) - plogis(eta2[i], 0.0, 1.0, 1, 0); else eta1[i] = plogis(eta2[i], 0.0, 1.0, 0, 0) - plogis(eta1[i], 0.0, 1.0, 0, 0); } } else if(strcmp(linkc, "loglog") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // d_pgumbel(double q, double loc, double scale, int lower_tail) eta1[i] = d_pgumbel(eta1[i], 0., 1., 1) - d_pgumbel(eta2[i], 0., 1., 1); else eta1[i] = d_pgumbel(eta2[i], 0., 1., 0) - d_pgumbel(eta1[i], 0., 1., 0); } } else if(strcmp(linkc, "cloglog") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // d_pgumbel2(double q, double loc, double scale, int lower_tail) eta1[i] = d_pgumbel2(eta1[i], 0., 1., 1) - d_pgumbel2(eta2[i], 0., 1., 1); else eta1[i] = d_pgumbel2(eta2[i], 0., 1., 0) - d_pgumbel2(eta1[i], 0., 1., 0); } } else if(strcmp(linkc, "cauchit") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // pcauchy(q, loc, scale, lower_tail, give_log) eta1[i] = pcauchy(eta1[i], 0., 1., 1, 0) - pcauchy(eta2[i], 0., 1., 1, 0); else eta1[i] = pcauchy(eta2[i], 0., 1., 0, 0) - pcauchy(eta1[i], 0., 1., 0, 0); } } else if(strcmp(linkc, "Aranda-Ordaz") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // d_pAO(q, lambda, lower_tail) eta1[i] = d_pAO(eta1[i], lambda, 1) - d_pAO(eta2[i], lambda, 1); else eta1[i] = d_pAO(eta2[i], lambda, 0) - d_pAO(eta1[i], lambda, 0); } } else if(strcmp(linkc, "log-gamma") == 0) { for(i = 0; i < nans; i++) { if(eta2[i] <= 0) // d_plgamma(double eta, double lambda, int lower_tail) eta1[i] = d_plgamma(eta1[i], lambda, 1) - d_plgamma(eta2[i], lambda, 1); else eta1[i] = d_plgamma(eta2[i], lambda, 0) - d_plgamma(eta1[i], lambda, 0); } } else { // ".. don't have to UNPROTECT before calling into "error"; it is not a bug to do so, but it is not needed either, error will result in a long jump that will UNPROTECT automatically." Email from Tomas Kalibera 19Apr2018. ; UNPROTECT(3); // unprotecting before exiting with an error error("link not recognized"); } UNPROTECT(3); return ans; } ordinal/src/utilityFuns.c0000755000176200001440000005662414334154562015220 0ustar liggesusers///////////////////////////////////////////////////////////////////////////// // Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen // // This file is part of the ordinal package for R (*ordinal*) // // *ordinal* is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 2 of the License, or // (at your option) any later version. // // *ordinal* is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // A copy of the GNU General Public License is available at // <https://www.r-project.org/Licenses/> and/or // <https://www.gnu.org/licenses/>.
///////////////////////////////////////////////////////////////////////////// #include <R.h> #include <Rmath.h> #include "links.h" double mu = 0, sigma = 1; int give_log = 0, lower_tail = 1; //--------------------------------- double d_pfun(double, double, int); double d_pfun2(double, double, int, int); // with lower_tail arg double d_dfun(double, double, int); double d_gfun(double, double, int); //--- negative log-likelihood: double d_nll(double *, int, int *, double, double *, double *, int, double *, double *, double *, double *, double *, double *, double *, double, int *); //--- Utilities: double mmax(double *, int); double maxAbs(double *, int); void Trace(int, double, double, double, double *, int, int); //--------------------------------- //------------------------------------------------------------------ // CDFs: void pgumbel_C(double *q, int *nq, double *loc, double *scale, int *lower_tail) { // pgumbel() int i; // How can I handle if loc and scale are not of unit length? for(i = 0; i < *nq; i++) q[i] = d_pgumbel(q[i], *loc, *scale, *lower_tail); } void pgumbel2_C(double *q, int *nq, double *loc, double *scale, int *lower_tail) { int i; for(i = 0; i < *nq; i++) q[i] = 1 - d_pgumbel(-q[i], *loc, *scale, *lower_tail); } void pAO_C(double *q, int *nq, double *lambda, int *lower_tail) { int i; for(i = 0; i < *nq; i++) q[i] = d_pAO(q[i], *lambda, *lower_tail); } void plgamma_C(double *q, int *nq, double *lambda, int *lower_tail) { int i; for(i = 0; i < *nq; i++) q[i] = d_plgamma(q[i], *lambda, *lower_tail); } //------------------------------------------------------------------ // PDFs: void dgumbel_C(double *x, int *nx, double *loc, double *scale, int *give_log) { int i; for(i = 0; i < *nx; i++) x[i] = d_dgumbel(x[i], *loc, *scale, *give_log); } void dgumbel2_C(double *x, int *nx, double *loc, double *scale, int *give_log) { int i; for(i = 0; i < *nx; i++) x[i] = d_dgumbel2(x[i], *loc, *scale, *give_log); } void dAO_C(double *x, int *nx, double *lambda, int *give_log) { int i; for(i = 0; i < *nx; i++) x[i] = d_dAO(x[i], *lambda, *give_log); } void dlgamma_C(double *x, int *nx, double *lambda, int *give_log) { int i; for(i = 0; i < *nx; i++) x[i] = d_dlgamma(x[i], *lambda, *give_log); } //------------------------------------------------------------------ // gradients of PDFs: void glogis_C(double *x, int *nx) { int i; for(i = 0; i < *nx; i++) x[i] = d_glogis(x[i]); } void gnorm_C(double *x, int *nx) { // Gradient of dnorm(x) wrt. x int i; for(i = 0; i < *nx; i++) x[i] = d_gnorm(x[i]); } void gcauchy_C(double *x, int *n) { // Gradient of dcauchy(x) wrt.
x int i; for(i = 0; i < *n; i++) x[i] = d_gcauchy(x[i]); } void ggumbel_C(double *x, int *nx) { int i; for(i = 0; i < *nx; i++) x[i] = d_ggumbel(x[i]); } void ggumbel2_C(double *x, int *nx) { int i; for(i = 0; i < *nx; i++) x[i] = -d_ggumbel(-x[i]); // or x[i] = d_ggumbel2(x[i]); } void gAO_C(double *x, int *nx, double *lambda) { int i; for(i = 0; i < *nx; i++) x[i] = d_gAO(x[i], *lambda); } void glgamma_C(double *x, int *nx, double *lambda) { int i; for(i = 0; i < *nx; i++) x[i] = d_glgamma(x[i], *lambda); } //------------------------------------------------------------------ // link utility functions: /* Link functions:: 1: logistic 2: probit 3: cloglog 4: loglog 5: cauchit 6: Aranda-Ordaz 7: log-gamma */ double d_pfun(double x, double lambda, int link) { switch(link) { case 1: // logistic return plogis(x, mu, sigma, lower_tail, give_log); case 2: // probit return pnorm(x, mu, sigma, lower_tail, give_log); case 3: // cloglog return d_pgumbel(x, mu, sigma, lower_tail); case 4: // loglog return d_pgumbel2(x, mu, sigma, lower_tail); case 5: // cauchit return pcauchy(x, mu, sigma, lower_tail, give_log); case 6: // Aranda-Ordaz return d_pAO(x, lambda, lower_tail); case 7: // log-gamma return d_plgamma(x, lambda, lower_tail); default : // all other // if(link == 6) // error("the Aranda-Ordaz link is not available"); // if(link == 7) // error("the log-gamma link is not available"); // else error("link not recognized\n"); return NA_REAL; } } double d_pfun2(double x, double lambda, int link, int lower_tail) // 2nd version of d_pfun with a lower_tail arg { switch(link) { case 1: // logistic return plogis(x, mu, sigma, lower_tail, give_log); case 2: // probit return pnorm(x, mu, sigma, lower_tail, give_log); case 3: // cloglog return d_pgumbel(x, mu, sigma, lower_tail); case 4: // loglog return d_pgumbel2(x, mu, sigma, lower_tail); case 5: // cauchit return pcauchy(x, mu, sigma, lower_tail, give_log); case 6: // Aranda-Ordaz return d_pAO(x, lambda, lower_tail); case 7: // log-gamma return d_plgamma(x, lambda, lower_tail); default : // all other // if(link == 6) // error("the Aranda-Ordaz link is not available"); // if(link == 7) // error("the log-gamma link is not available"); // else error("link not recognized\n"); return NA_REAL; } } void pfun(double *x, int *nx, double *lambda, int *link) { int i; for(i = 0; i < *nx; i++) x[i] = d_pfun(x[i], *lambda, *link); } double d_dfun(double x, double lambda, int link) { switch(link) { case 1: // logistic return dlogis(x, mu, sigma, give_log); case 2: // probit return dnorm(x, mu, sigma, give_log); case 3: // cloglog return d_dgumbel(x, mu, sigma, give_log); case 4: // loglog return d_dgumbel2(x, mu, sigma, give_log); case 5: // cauchit return dcauchy(x, mu, sigma, give_log); case 6: return d_dAO(x, lambda, give_log); case 7: return d_dlgamma(x, lambda, give_log); default : // all other error("link not recognized\n"); return NA_REAL; } } void dfun(double *x, int *nx, double *lambda, int *link) { int i; for(i = 0; i < *nx; i++) x[i] = d_dfun(x[i], *lambda, *link); } double d_gfun(double x, double lambda, int link) { switch(link) { case 1: // logistic return d_glogis(x); case 2: // probit return d_gnorm(x); case 3: // cloglog return d_ggumbel(x); case 4: // loglog return d_ggumbel2(x); case 5: // cauchit return d_gcauchy(x); case 6: return d_gAO(x, lambda); case 7: return d_glgamma(x, lambda); default : // all other error("link not recognized\n"); return NA_REAL; } } void gfun(double *x, int *nx, double *lambda, int *link) { int i; for(i = 0; i < *nx; i++) 
x[i] = d_gfun(x[i], *lambda, *link); } //------------------------------------------------------------------ void getFitted(double *eta1, double *eta2, int *neta) { // adjust for NA and NaN values? int i; for(i = 0; i < *neta; i++) { if(eta2[i] <= 0) // pnorm(x, mu, sigma, lower_tail, give_log); eta1[i] = pnorm(eta1[i], 0.0, 1.0, 1, 0) - pnorm(eta2[i], 0.0, 1.0, 1, 0); else eta1[i] = pnorm(eta2[i], 0.0, 1.0, 0, 0) - pnorm(eta1[i], 0.0, 1.0, 0, 0); } } void getFitted2(double *eta1, double *eta2, int *neta, double *lambda, int *link) // 2nd version now including a link arg { // adjust for NA and NaN values? int i; for(i = 0; i < *neta; i++) { if(eta2[i] <= 0) // d_pfun2(x, lambda, link, lower_tail) eta1[i] = d_pfun2(eta1[i], *lambda, *link, 1) - d_pfun2(eta2[i], *lambda, *link, 1); else eta1[i] = d_pfun2(eta2[i], *lambda, *link, 0) - d_pfun2(eta1[i], *lambda, *link, 0); } } //------------------------------------------------------------------ // Gradients and Hessians for update.b in clmm2(): void grFacSum_C(double *x, int *grFac, int *nx, double *u, int *nu) // compute tapply(x, grFac, sum) + u { int i, j; double z = 0; for(i = 0; i < *nu; i++) { for (j = 0; j < *nx; j++) { if(grFac[j] == i + 1) z = z + x[j]; } u[i] = u[i] + z; z = 0; } } // FIXME: grFacSum_C such that it can be used by gradC and hessC - this // should simplify the code double d_nll(double *u, int nu, int *grFac, double stDev, double *o1, double *o2, int no, double *eta1, double *eta2, double *eta1Fix, double *eta2Fix, double *sigma, double *pr, double *weights, double lambda, int *link) /* Returns: nll Updates: eta1, eta2, pr given the new value of u Leaves unchanged: u, grFac, stDev, o1, o2, eta1Fix, eta2Fix, sigma, weights */ { int i, j; double o, nll = 0.0; for(i = 0; i < no; i++) { o = u[grFac[i] - 1] * stDev; eta1[i] = (eta1Fix[i] + o1[i] - o) / sigma[i]; eta2[i] = (eta2Fix[i] + o2[i] - o) / sigma[i]; /* Accurate evaluation of pr (fitted values) even if eta1 and eta2 are both large: */ if(eta2[i] <= 0) pr[i] = d_pfun2(eta1[i], lambda, *link, 1) - d_pfun2(eta2[i], lambda, *link, 1); else pr[i] = d_pfun2(eta2[i], lambda, *link, 0) - d_pfun2(eta1[i], lambda, *link, 0); if(!R_FINITE(pr[i]) || pr[i] <= 0.) 
{ return INFINITY; } nll -= weights[i] * log(pr[i]); } for(j = 0; j < nu; j++) nll -= dnorm(u[j], 0., 1., 1); return nll; } void nll(double *u, int *nu, int *grFac, double *stDev, double *o1, double *o2, int *no, double *eta1, double *eta2, double *eta1Fix, double *eta2Fix, double *sigma, double *pr, double *weights, double *lambda, int *link, double *nll) { *nll = d_nll(u, *nu, grFac, *stDev, o1, o2, *no, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); } void grad_C(double *stDev, double *p1, double *p2, double *pr, double *weights, double *sigma, double *wtprSig, double *eta1, double *eta2, double *gradValues, double *u, int *grFac, int *nx, int *ngv, double *lambda, int *link) /* Returns: void Updates: gradValues, p1, p2, wtprSig given the new values of eta1, eta2 Leaves unchanged: grFac, stDev, eta1, eta2, pr, sigma, weights, link, nx, ngv Assumes: nx: length of grFac, p1, p2, pr, weights, sigma, wtprSig, eta1, eta2 ngv: length of gradValues */ { int i, j; // double tmp[*nx], z = 0; // update p1, p2, wtprSig: for(i = 0; i < *nx; i++) { p1[i] = d_dfun(eta1[i], *lambda, *link); p2[i] = d_dfun(eta2[i], *lambda, *link); wtprSig[i] = weights[i] / pr[i] / sigma[i]; } // sum for each level of the grouping factor: for(i = 0; i < *ngv; i++) { gradValues[i] = 0; // Could set these to for (j = 0; j < *nx; j++) { if(grFac[j] == i + 1) gradValues[i] += *stDev * wtprSig[j] * (p1[j] - p2[j]); } gradValues[i] += u[i]; } } void gradC(double *stDev, double *p1, double *p2, double *wtprSig, int *grFac, int *nx, double *u, int *nu) { // gradient for update.b int i, j; double z = 0; for(i = 0; i < *nx; i++) { wtprSig[i] = *stDev * wtprSig[i] * (p1[i] - p2[i]); } for(i = 0; i < *nu; i++) { for (j = 0; j < *nx; j++) { if(grFac[j] == i + 1) z += wtprSig[j]; } u[i] += z; z = 0; } } void hess(double *stDev, double *p1, double *p2, double *pr, double *wtprSig, double *eta1, double *eta2, int *link, int *grFac, int *nx, double *hessValues, double *lambda, int *nhv) /* Returns: void Updates: hessValues given the new values of eta1, eta2 Leaves unchanged: grFac, stDev, eta1, eta2, p1, p2, pr, sigma, weights, link, nx, ngv Assumes: nx: length of grFac, p1, p2, pr, weights, sigma, wtprSig, eta1, eta2 nhv: length of hessValues */ { int i, j; // sum for each level of the grouping factor: for(i = 0; i < *nhv; i++) { hessValues[i] = 0; for (j = 0; j < *nx; j++) { if(grFac[j] == i + 1) hessValues[i] += (R_pow_di(p1[j] - p2[j], 2) / pr[j] - (d_gfun(eta1[j], *lambda, *link) - d_gfun(eta2[j], *lambda, *link))) * wtprSig[j]; } hessValues[i] = (hessValues[i] * *stDev * *stDev) + 1; } } void hessC(double *stDev, double *p1, double *p2, double *pr, double *g1, double *g2, double *wtprSig, int *grFac, int *nx, double *z, int *nz) { // hessian for update.b int i, j; double sigma2; sigma2 = R_pow_di(*stDev, 2); for(i = 0; i < *nx; i++) pr[i] = (R_pow_di(p1[i] - p2[i], 2) / pr[i] - (g1[i] - g2[i])) * wtprSig[i]; for(i = 0; i < *nz; i++) { for (j = 0; j < *nx; j++) { if(grFac[j] == i + 1) z[i] = z[i] + pr[j]; } z[i] = z[i] * sigma2 + 1; } } //------------------------------------------------------------------ // Trace function: void Trace(int iter, double stepFactor, double val, double maxGrad, double *par, int npar, int first) { int i; if(first) Rprintf("iter: step factor: Value: max|grad|: Parameters:\n"); Rprintf(" %3d: %1.3e: %.3f: %1.3e: ", iter, stepFactor, val, maxGrad); for(i = 0; i < npar; i++) Rprintf(" %.4f", par[i]); Rprintf("\n"); } //------------------------------------------------------------------ 
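/* A brief illustrative sketch (added summary; NRalg()/NRalgv3() below
   are the authoritative code): with the diagonal Hessian used here,
   the damped Newton update applied to each random effect is

       u[j] <- u[j] - stepFactor * gradValues[j] / hessValues[j]

   where stepFactor is halved (simple step-halving line search) while
   the trial negative log-likelihood exceeds the current one, and is
   doubled again (capped at 1) after each successful step. */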
void NRalg(int *trace, int *maxIter, double *gradTol, int *maxLineIter, int *grFac, double *stDev, double *o1, double *o2, double *eta1Fix, double *eta2Fix, double *eta1, double *eta2, double *sigma, int *link, double *weights, double *u, double *pr, double *funValue, double *gradValues, double *hessValues, int *nx, int *nu, double *maxGrad, int *conv, double *p1, double *p2, double *wtprSig, double *lambda, int *Niter) { /* nx: length(pr) r: length(start) = length(u) updates: u, funValue, gradValues, hessValues, maxGrad, correct vector input: eta1, eta2, pr, funValue (grad is called before d_nll), u = 0, grFac, o1, o2, eta1Fix, eta2Fix, sigma, weights arbitrary input: p1, p2, wtprSig, gradValues, hessValues, needed output: u, funValue, gradValues, hessValues, conv, Niter, */ int lineIter, innerIter = 0, i, j; double stepFactor = 1, funValueTry, step[*nu]; *funValue = d_nll(u, *nu, grFac, *stDev, o1, o2, *nx, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); if(!R_FINITE(*funValue)) { *conv = 0; return ; } grad_C(stDev, p1, p2, pr, weights, sigma, wtprSig, eta1, eta2, gradValues, u, grFac, nx, nu, lambda, link); *maxGrad = maxAbs(gradValues, *nu); *conv = -1; // Convergence flag if(*trace) Trace(0, stepFactor, *funValue, *maxGrad, u, *nu, 1); // Newton-Raphson algorithm: for(i = 0; i < *maxIter; i++) { if(*maxGrad < *gradTol) { *conv = 1; return ; } hess(stDev, p1, p2, pr, wtprSig, eta1, eta2, link, grFac, nx, hessValues, lambda, nu); for(j = 0; j < *nu; j++) { step[j] = gradValues[j] / hessValues[j]; u[j] -= stepFactor * step[j]; } funValueTry = d_nll(u, *nu, grFac, *stDev, o1, o2, *nx, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); lineIter = 0; // simple line search, i.e. step halving: while(funValueTry > *funValue) { stepFactor *= 0.5; for(j = 0; j < *nu; j++) u[j] += stepFactor * step[j]; funValueTry = d_nll(u, *nu, grFac, *stDev, o1, o2, *nx, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); lineIter++; if(*trace) Trace(i+1+innerIter, stepFactor, *funValue, *maxGrad, u, *nu, 0); if(lineIter > *maxLineIter){ *conv = -2; return ; } innerIter++; } *funValue = funValueTry; grad_C(stDev, p1, p2, pr, weights, sigma, wtprSig, eta1, eta2, gradValues, u, grFac, nx, nu, lambda, link); *maxGrad = maxAbs(gradValues, *nu); if(*trace) Trace(i+1+innerIter, stepFactor, *funValue, *maxGrad, u, *nu, 0); stepFactor = fmin2(1., stepFactor * 2.); (*Niter)++; } } void NRalgv3(int *trace, int *maxIter, double *gradTol, int *maxLineIter, int *grFac, double *stDev, double *o1, double *o2, double *eta1Fix, double *eta2Fix, double *sigma, int *link, double *weights, double *u, double *pr, double *funValue, double *gradValues, double *hessValues, int *nx, int *nu, double *maxGrad, int *conv, double *lambda, int *Niter) // Less input and slightly faster than NRalg().
{ /* control arguments from clmm - see ?clmm.control: trace, maxIter, gradTol, maxLineIter all of length 1 length = nx: grFac, o1, o2, eta1Fix, eta2Fix, sigma, weights length = 1: stDev, funValue, nx, nu, maxGrad, conv, lambda, Niter length = nu: gradValues, hessValues, u updates: u, funValue, gradValues, hessValues, maxGrad, conv, Niter, pr, correct vector input: eta1, eta2, pr, u = 0, grFac, o1, o2, eta1Fix, eta2Fix, sigma, weights arbitrary input: gradValues, hessValues, needed output: u, funValue, gradValues, hessValues, conv, Niter, */ int lineIter, innerIter = 0, i, j; double stepFactor = 1, funValueTry, step[*nu]; double eta1[*nx], eta2[*nx], p1[*nx], p2[*nx], wtprSig[*nx]; *funValue = d_nll(u, *nu, grFac, *stDev, o1, o2, *nx, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); if(!R_FINITE(*funValue)) { *conv = 0; return ; } grad_C(stDev, p1, p2, pr, weights, sigma, wtprSig, eta1, eta2, gradValues, u, grFac, nx, nu, lambda, link); *maxGrad = maxAbs(gradValues, *nu); *conv = -1; // Convergence flag if(*trace) Trace(0, stepFactor, *funValue, *maxGrad, u, *nu, 1); // Newton-Raphson algorithm: for(i = 0; i < *maxIter; i++) { if(*maxGrad < *gradTol) { *conv = 1; return ; } hess(stDev, p1, p2, pr, wtprSig, eta1, eta2, link, grFac, nx, hessValues, lambda, nu); for(j = 0; j < *nu; j++) { /* Actually there is no need to store 'step' since 'gradValues' could hold the step values (maintained here for code clarity) */ step[j] = gradValues[j] / hessValues[j]; u[j] -= stepFactor * step[j]; } funValueTry = d_nll(u, *nu, grFac, *stDev, o1, o2, *nx, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); lineIter = 0; // simple line search, i.e. step halving: while(funValueTry > *funValue) { stepFactor *= 0.5; for(j = 0; j < *nu; j++) u[j] += stepFactor * step[j]; funValueTry = d_nll(u, *nu, grFac, *stDev, o1, o2, *nx, eta1, eta2, eta1Fix, eta2Fix, sigma, pr, weights, *lambda, link); lineIter++; if(*trace) Trace(i+1+innerIter, stepFactor, *funValue, *maxGrad, u, *nu, 0); if(lineIter > *maxLineIter){ *conv = -2; return ; } innerIter++; } *funValue = funValueTry; grad_C(stDev, p1, p2, pr, weights, sigma, wtprSig, eta1, eta2, gradValues, u, grFac, nx, nu, lambda, link); *maxGrad = maxAbs(gradValues, *nu); if(*trace) Trace(i+1+innerIter, stepFactor, *funValue, *maxGrad, u, *nu, 0); stepFactor = fmin2(1.0, stepFactor * 2.0); (*Niter)++; } (*Niter)--; } //------------------------------------------------------------------ void getNGHQ_C(double *nll, int *grFac, double *stDev, double *eta1Fix, double *eta2Fix, double *o1, double *o2, double *Sigma, double *weights, int *nx, int *nu, double *ghqns, /* double *ghqws,*/ double *lghqws, int *nGHQ, int *link, double *ns, double *lambda) { int i, j, h; double SS = 0, SS1 = 0, SS2 = 0, eta1tmp, eta2tmp, pr_tmp; for(i = 0; i < *nu; i++) { for(h = 0; h < *nGHQ; h++) { for(j = 0; j < *nx; j++) { if(grFac[j] == i + 1) { eta1tmp = (eta1Fix[j] + o1[j] - ns[h]) / Sigma[j]; eta2tmp = (eta2Fix[j] + o2[j] - ns[h]) / Sigma[j]; /* Accurate evaluation of differences of probabilities even if eta1tmp and eta2tmp are large: */ if(eta2tmp <= 0) pr_tmp = d_pfun2(eta1tmp, *lambda, *link, 1) - d_pfun2(eta2tmp, *lambda, *link, 1); else pr_tmp = d_pfun2(eta2tmp, *lambda, *link, 0) - d_pfun2(eta1tmp, *lambda, *link, 0); // sum up contributions: SS1 += weights[j] * log(pr_tmp); } } // SS2 += exp(SS1) * ghqws[h]; // SS2 += exp(SS1 + log(ghqws[h])); SS2 += exp(SS1 + lghqws[h]); SS1 = 0; } SS += log(SS2); SS2 = 0; } *nll = -SS + *nu * log(M_PI * 2) * 0.5; } void
getNAGQ(double *nll, int *grFac, double *stDev,
        double *eta1Fix, double *eta2Fix, double *o1, double *o2,
        double *Sigma, double *weights, int *nx, int *nu,
        double *ghqns, double *lghqws, /* double *lghqws, */
        double *ghqns2, double *u, double *D, int *nAGQ,
        int *link, double *lambda)
/* nll: negative log-likelihood (return value)

   length = nx: grFac, o1, o2, eta1Fix, eta2Fix, Sigma, weights
   length = 1: stDev, nll, nx, nu, nAGQ, lambda, link
   length = nu: D, u
   length = nAGQ: ghqns, lghqws (log ghqws) / ghqws
*/
{
  int i, j, h;
  double SS1 = 0, SS2 = 0, eta1tmp, eta2tmp, K, ranNew, pr_tmp;
  *nll = 0;

  for(i = 0; i < *nu; i++) {
    K = sqrt(2. / D[i]);
    for(h = 0; h < *nAGQ; h++) {
      for(j = 0; j < *nx; j++) {
        if(grFac[j] == i + 1) {
          ranNew = *stDev * (u[i] + K * ghqns[h]);
          eta1tmp = (eta1Fix[j] + o1[j] - ranNew) / Sigma[j];
          eta2tmp = (eta2Fix[j] + o2[j] - ranNew) / Sigma[j];
          /* Accurate evaluation of differences of probabilities even
             if eta1tmp and eta2tmp are large: */
          if(eta2tmp <= 0)
            pr_tmp = d_pfun2(eta1tmp, *lambda, *link, 1) -
              d_pfun2(eta2tmp, *lambda, *link, 1);
          else
            pr_tmp = d_pfun2(eta2tmp, *lambda, *link, 0) -
              d_pfun2(eta1tmp, *lambda, *link, 0);
          // sum up contributions:
          SS1 += weights[j] * log(pr_tmp);
        }
      }
      // SS2 += exp(SS1) * K * ghqws[h] *
      //   dnorm(u[i] + K * ghqns[h], mu, sigma, give_log);
      // SS2 += exp(SS1 + lghqws[h] + ghqns2[h] - // R_pow_di(ghqns[h], 2) +
      //            0.5 * R_pow_di(u[i] + K * ghqns[h], 2)) * K;
      SS2 += exp(SS1 + lghqws[h] + ghqns2[h] - // R_pow_di(ghqns[h], 2) +
                 0.5 * R_pow_di(u[i] + K * ghqns[h], 2));
      SS1 = 0;
    }
    // *nll -= log(SS2);
    *nll -= log(SS2) + log(K);
    SS2 = 0;
  }
  *nll += *nu * log(M_PI * 2) * 0.5;
}

//------------------------------------------------------------------

double mmax(double *x, int nx)
/* Return the maximum of the elements in x
   nx: length of x ( >= 1)
*/
{
  int i;
  double cmax; // current max

  cmax = x[0];
  if(nx == 1)
    return cmax;
  for(i = 1; i < nx; i++) {
    if(x[i] > cmax)
      cmax = x[i];
  }
  return cmax;
}

double maxAbs(double *x, int nx)
/* Return max(abs(x))
   nx: length of x ( >= 1 )
*/
{
  int i;
  double cmax; // current max

  cmax = fabs(x[0]);
  if(nx == 1)
    return cmax;
  for(i = 1; i < nx; i++) {
    if(fabs(x[i]) > cmax)
      cmax = fabs(x[i]);
  }
  return cmax;
}
ordinal/src/links.h0000755000176200001440000000407414334154604013776 0ustar liggesusers/////////////////////////////////////////////////////////////////////////////
// Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen
//
// This file is part of the ordinal package for R (*ordinal*)
//
// *ordinal* is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 2 of the License, or
// (at your option) any later version.
//
// *ordinal* is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// A copy of the GNU General Public License is available at
// <https://www.r-project.org/Licenses/> and/or
// <http://www.gnu.org/licenses/>.
/////////////////////////////////////////////////////////////////////////////
#ifndef _ORDINAL_LINKS_H_
#define _ORDINAL_LINKS_H_

/* That ifndef, etc. is an idiom to prevent the body of the header
 * being read more than once.
 */

#include <R.h>
#include <Rmath.h>

#ifdef __cplusplus
extern "C" {
#endif

/* That stanza allows the same header file to be used by C and C++
 * programs. There is a matching stanza at the end of this header
 * file.
 */

/* Additional scalar cumulative probability functions */
double d_pgumbel  (double, double, double, int);
double d_pgumbel2 (double, double, double, int);
double d_pAO      (double, double, int);
double d_plgamma  (double, double, int);

/* Additional scalar density functions */
double d_dgumbel  (double, double, double, int);
double d_dgumbel2 (double, double, double, int);
double d_dAO      (double, double, int);
double d_dlgamma  (double, double, int);

/* Scalar density gradients */
double d_glogis   (double);
double d_gnorm    (double);
double d_gcauchy  (double);
double d_ggumbel  (double);
double d_ggumbel2 (double);
double d_gAO      (double, double);
double d_glgamma  (double, double);

#ifdef __cplusplus
}
#endif

#endif
ordinal/vignettes/0000755000176200001440000000000014533322576013724 5ustar liggesusersordinal/vignettes/clm_article.Rnw0000644000176200001440000032243614334176473016702 0ustar liggesusers% \documentclass[article]{article}
% \documentclass[article]{jss}
\documentclass[nojss]{jss}

%% -- Latex packages and custom commands ---------------------------------------

%% recommended packages
\usepackage{thumbpdf,lmodern,amsmath,amssymb,bm,url}
\usepackage{textcomp}
\usepackage[utf8]{inputenc}

%% another package (only for this demo article)
\usepackage{framed}

%% new custom commands
\newcommand{\class}[1]{`\code{#1}'}
\newcommand{\fct}[1]{\code{#1()}}

%% For Sweave-based articles about R packages:
%% need no \usepackage{Sweave}
\SweaveOpts{engine=R, eps=FALSE, keep.source = TRUE, prefix.string=clmjss}
<<echo = FALSE, results = hide>>=
options(prompt = "R> ", continue = "+ ", width = 70,
        useFancyQuotes = FALSE)
library("ordinal")
library("xtable")
@

%%\VignetteIndexEntry{Cumulative Link Models for Ordinal Regression}
%%\VignetteDepends{ordinal, xtable}

%% -- Article metainformation (author, title, ...) -----------------------------

%% - \author{} with primary affiliation
%% - \Plainauthor{} without affiliations
%% - Separate authors by \And or \AND (in \author) or by comma (in \Plainauthor).
%% - \AND starts a new line, \And does not.
\author{Rune Haubo B Christensen\\Technical University of Denmark\\
  \& \\ Christensen Statistics}
\Plainauthor{Rune Haubo B Christensen}

%% - \title{} in title case
%% - \Plaintitle{} without LaTeX markup (if any)
%% - \Shorttitle{} with LaTeX markup (if any), used as running title
\title{Cumulative Link Models for Ordinal Regression with the \proglang{R} Package \pkg{ordinal}}
\Plaintitle{Cumulative Link Models for Ordinal Regression with the R Package ordinal}
\Shorttitle{Cumulative Link Models with the \proglang{R} package \pkg{ordinal}}

%% - \Abstract{} almost as usual
\Abstract{
  This paper introduces the R-package \pkg{ordinal} for the analysis of
  ordinal data using cumulative link models. The model framework
  implemented in \pkg{ordinal} includes partial proportional odds,
  structured thresholds, scale effects and flexible link functions. The
  package also supports cumulative link models with random effects,
  which are covered in a future paper. A speedy and reliable regularized
  Newton estimation scheme using analytical derivatives provides maximum
  likelihood estimation of the model class. The paper describes the
  implementation as well as how to use the functionality of the package
  for analysis of ordinal data, including topics on model
  identifiability and customized modelling. The package implements
  methods for profile likelihood confidence intervals, analysis of
  deviance tables with type I, II and III tests, predictions of various
  kinds as well as methods for checking the convergence of the fitted
  models.
}

%% - \Keywords{} with LaTeX markup, at least one required
%% - \Plainkeywords{} without LaTeX markup (if necessary)
%% - Should be comma-separated and in sentence case.
\Keywords{ordinal, cumulative link models, proportional odds, scale effects, \proglang{R}}
\Plainkeywords{ordinal, cumulative link models, proportional odds, scale effects, R}

%% - \Address{} of at least one author
%% - May contain multiple affiliations for each author
%%   (in extra lines, separated by \emph{and}\\).
%% - May contain multiple authors for the same affiliation
%%   (in the same first line, separated by comma).
\Address{
  Rune Haubo Bojesen Christensen\\
  Section for Statistics and Data Analysis\\
  Department of Applied Mathematics and Computer Science\\
  DTU Compute\\
  Technical University of Denmark\\
  Richard Petersens Plads \\
  Building 324 \\
  DK-2800 Kgs. Lyngby, Denmark\\
  \emph{and}\\
  Christensen Statistics\\
  Bringetoften 7\\
  DK-3500 V\ae rl\o se, Denmark \\
  E-mail: \email{Rune.Haubo@gmail.com}; \email{Rune@ChristensenStatistics.dk}%\\
  % URL: \url{http://christensenstatistics.dk/}
}

\begin{document}

This is a copy of an article that is no longer submitted for publication
in Journal of Statistical Software (\url{https://www.jstatsoft.org/}).

%% -- Introduction -------------------------------------------------------------

%% - In principle "as usual".
%% - But should typically have some discussion of both _software_ and _methods_.
%% - Use \proglang{}, \pkg{}, and \code{} markup throughout the manuscript.
%% - If such markup is in (sub)section titles, a plain text version has to be
%%   added as well.
%% - All software mentioned should be properly \cite-d.
%% - All abbreviations should be introduced.
%% - Unless the expansions of abbreviations are proper names (like "Journal
%%   of Statistical Software" above) they should be in sentence case (like
%%   "generalized linear models" below).

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction} \label{sec:intro}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Ordered categorical data, or simply \emph{ordinal} data, are common in a
multitude of empirical sciences and in particular in scientific
disciplines where humans are used as measurement instruments. Examples
include school grades, ratings of preference in consumer studies, degree
of tumor involvement in MR images and animal fitness in ecology.
Cumulative link models (CLMs) are a powerful model class for such data
since observations are treated correctly as categorical, the ordered
nature is exploited and the flexible regression framework allows for
in-depth analyses.

This paper introduces the \pkg{ordinal} package \citep{ordinal-pkg} for
\proglang{R} \citep{R} for the analysis of ordinal data with cumulative
link models. The paper describes how \pkg{ordinal} supports the fitting
of CLMs with various model structures, model assessment and inferential
options including tests of partial proportional odds, scale effects,
threshold structures and flexible link functions. The implementation,
its flexibility in allowing for customizable models and an effective
fitting algorithm are also described. The \pkg{ordinal} package also
supports cumulative link \emph{mixed} models (CLMMs); CLMs with normally
distributed random effects. The support of this model class will not be
given further treatment here but remains a topic for a future paper.
The name \emph{cumulative link models} is adopted from \citet{agresti02},
but the model class has been referred to by several other names in the
literature, such as \emph{ordered logit models} and \emph{ordered probit
models} \citep{greene10} for the logit and probit link functions. The
cumulative link model with a logit link is widely known as the
\emph{proportional odds model} due to \citet{mccullagh80}, and with a
complementary log-log link the model is sometimes referred to as the
\emph{proportional hazards model} for grouped survival times.

CLMs are one of several types of models specifically developed for
ordinal data. Alternatives to CLMs include continuation ratio models,
adjacent category models, and stereotype models \citep{ananth97}, but
only models in the CLM framework will be considered in this paper.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Software review}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Cumulative link models can be fitted by all the major software packages,
but while some packages support scale effects, partial proportional odds
(also referred to as unequal slopes, partial effects, and nominal
effects), different link functions or structured thresholds, not all of
these model structures are available in any one package or
implementation. The following brief software review is based on the
publicly available documentation at software package websites retrieved
in May 2020.

\proglang{IBM SPSS} \citep{SPSS} implements McCullagh's \pkg{PLUM}
\citep{mccullagh80} procedure, allows for the five standard link
functions (cf. Table~\ref{tab:linkFunctions}) and scale effects.
Estimation is via Fisher scoring, and a test for equal slopes is
available for the location-only model, while it is not possible to
estimate a partial proportional odds model. \proglang{Stata}
\citep{Stata} includes the \code{ologit} and \code{oprobit} procedures
for CLMs with logistic and probit links but without support for scale
effects, partial effects or structured thresholds. The add-on package
\pkg{oglm} \citep{oglm} allows for all five standard link functions and
scale effects. The \pkg{GLLAMM} package \citep{gllamm} also has some
support for CLMs in addition to some support for random effects.
\proglang{SAS} \citep{SAS} implements CLMs with logit links in
\code{proc logistic} and CLMs with the five standard links in
\code{proc genmod}. \proglang{Matlab} \citep{Matlab} fits CLMs with the
\code{mnrfit} function allowing for logit, probit, complementary log-log
and log-log links. \proglang{Python} has a package \pkg{mord}
\citep{mord} for ordinal classification and prediction aimed at machine
learning applications.

In \proglang{R}, several packages on the Comprehensive \proglang{R}
Archive Network (CRAN) implement CLMs. \code{polr} from \pkg{MASS}
\citep{MASS} implements standard CLMs allowing for the five standard
link functions but no further extensions; the \pkg{VGAM} package
\citep{VGAM} includes CLMs via the \code{vglm} function using the
\code{cumulative} link. \code{vglm} allows for several link functions as
well as partial effects. The \code{lrm} and \code{orm} functions from
the \pkg{rms} package \citep{rms} also implement CLMs with the five
standard link functions but without scale effects, partial effects or
structured thresholds. A Bayesian alternative is implemented in the
\pkg{brms} package \citep{brms, brms2}, which includes structured
thresholds in addition to random effects.
In addition, several other \proglang{R} packages include methods for
analyses of ordinal data, including \pkg{oglmx} \citep{oglmx},
\pkg{MCMCpack} \citep{MCMCpack}, \pkg{mvord} \citep{mvord}, \pkg{CUB}
\citep{CUB}, and \pkg{ordinalgmifs} \citep{ordinalgmifs}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection[ordinal package overview]{\pkg{ordinal} package overview}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The \pkg{ordinal} package implements CLMs and CLMMs along with functions
and methods to support these model classes as summarized in
Table~\ref{tab:functions_in_ordinal}. The two key functions in
\pkg{ordinal} are \code{clm} and \code{clmm} which fit CLMs and CLMMs,
respectively; \code{clm2} and \code{clmm2}\footnote{A brief tutorial on
\code{clmm2} is currently available at the package website on CRAN:
\url{https://CRAN.R-project.org/package=ordinal}} provide legacy
implementations primarily retained for backwards compatibility. This
paper introduces \code{clm} and its associated functionality covering
CLMs with location, scale and nominal effects, structured thresholds and
flexible link functions. \code{clm.fit} is the main workhorse behind
\code{clm} and an analogue to \code{lm.fit} for linear models. The
package includes methods for assessment of convergence with
\code{convergence} and \code{slice}, and an auxiliary function,
\code{drop.coef}, for removing linearly dependent columns from a design
matrix. Distributional functions in \pkg{ordinal} provide support for
Gumbel and log-gamma distributions as well as gradients\footnote{gradients
with respect to $x$, the quantile; not the parameters of the
distributions} of normal, logistic and Cauchy probability density
functions which are used in the iterative methods implemented in
\code{clm} and \code{clmm}.

\begin{table}[t!]
\centering
\renewcommand*{\arraystretch}{1.2}
\begin{tabular}{llll}
  \hline
  \rotatebox{0}{Fitting} & \rotatebox{0}{Miscellaneous} &
  \rotatebox{0}{Former impl.} & \rotatebox{0}{Distributions} \\
  \hline
  \code{clm} & \code{convergence} & \code{clm2} & \code{[pdqrg]gumbel}$^{\textsf{c}}$ \\
  \code{clmm}$^{\textsf{c}}$ & \code{slice} & \code{clmm2}$^{\textsf{c}}$ & \code{[pdg]lgamma}$^{\textsf{c}}$ \\
  \code{clm.fit} & \code{drop.coef} & \code{clm2.control} & \code{gnorm}$^{\textsf{c}}$ \\
  \code{clm.control} & & \code{clmm2.control} & \code{glogis}$^{\textsf{c}}$ \\
  \code{clmm.control} & & & \code{gcauchy}$^{\textsf{c}}$ \\
  \hline
\end{tabular} \\
\caption{Key functions in \pkg{ordinal}. Superscript "c" indicates
  (partial or full) implementation in
  \proglang{C}.\label{tab:functions_in_ordinal}}
\end{table}

As summarized in Table~\ref{tab:clm_methods}, \pkg{ordinal} provides the
familiar suite of extractor and print methods for \code{clm} objects
known from \code{lm} and \code{glm}. These methods all behave in ways
similar to those for \code{glm}-objects with the exception of
\code{model.matrix} which returns a list of model matrices and
\code{terms} which can return the \code{terms} object for each of three
formulae. The inference methods facilitate profile likelihood confidence
intervals via \code{profile} and \code{confint}, likelihood ratio tests
for model comparison via \code{anova}, model assessment by tests of
removal of model terms via \code{drop1} and addition of new terms via
\code{add1} or AIC-based model selection via \code{step}.
Calling \code{anova} on a single \code{clm}-object provides an analysis
of deviance table with type I, II or III Wald-based $\chi^2$ tests
following the \proglang{SAS}-definitions of such tests \citep{SAStype}.

In addition to standard use of \code{clm}, the implementation
facilitates extraction of a model environment containing a complete
representation of the model, allowing the user to fit customized models
with, for instance, special structures on the threshold parameters,
restrictions on regression parameters or other case-specific model
requirements. As CLMMs are not covered by this paper, methods for
\code{clmm} objects will not be discussed.

Other packages including \pkg{emmeans} \citep{emmeans}, \pkg{margins}
\citep{margins}, \pkg{ggeffects} \citep{ggeffects}, \pkg{generalhoslem}
\citep{generalhoslem} and \pkg{effects} \citep{effects1, effects2}
extend the \pkg{ordinal} package by providing methods for marginal
means, tests of functions of the coefficients, goodness-of-fit tests and
methods for illustration of fitted models.

\begin{table}[t!]
\centering
\renewcommand*{\arraystretch}{1.2}
\begin{tabular}{llll}
  \hline
  \multicolumn{2}{l}{Extractor and Print} & Inference & Checking \\[3pt]
  \hline
  \code{coef} & \code{print} & \code{anova} & \code{slice} \\
  \code{fitted} & \code{summary} & \code{drop1} & \code{convergence}\\
  \code{logLik} & \code{model.frame} & \code{add1} & \\
  \code{nobs} & \code{model.matrix} & \code{confint} & \\
  \code{vcov} & \code{update} & \code{profile} & \\
  \code{AIC}, \code{BIC} & & \code{predict} & \\
  \code{extractAIC} & & \code{step}, \code{stepAIC} & \\
  \hline
\end{tabular}
\caption{Key methods for \code{clm} objects.\label{tab:clm_methods}}
\end{table}

The \pkg{ordinal} package is therefore unique in providing a
comprehensive framework for cumulative link models exceeding that of
other software packages, with its functionality extended by a series of
additional \proglang{R} packages.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Organization of the paper}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The remainder of the paper is organized as follows. The next section
establishes notation by defining CLMs and associated log-likelihood
functions, then describes the extended class of CLMs that is implemented
in \pkg{ordinal} including details about scale effects, structured
thresholds, partial proportional odds and flexible link functions. The
third section describes how maximum likelihood (ML) estimation of CLMs
is implemented in \pkg{ordinal}. The fourth section describes how CLMs
are fitted and ordinal data are analysed with \pkg{ordinal} including
sections on nominal effects, scale effects, structured thresholds,
flexible link functions, profile likelihoods, assessment of model
convergence, fitted values and predictions. The final parts of section
four are on a more advanced level and include issues around model
identifiability and customizable fitting of models not otherwise covered
by the \pkg{ordinal} API. We end in section~\ref{sec:conclusions} with
Conclusions.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Cumulative link models}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A cumulative link model is a model for ordinal-scale observations, i.e.,
observations that fall in an ordered finite set of categories.
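In \proglang{R} such observations are represented as a \code{factor}
whose levels define the category ordering; a minimal sketch of this
representation (the labels are made up for illustration and are not used
in the analyses below) is:
<<eval = FALSE>>=
y <- factor(c("low", "high", "medium", "low"),
            levels = c("low", "medium", "high"), ordered = TRUE)
@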
Ordinal observations can be represented by a random variable $Y_i$ that
takes a value $j$ if the $i$th ordinal observation falls in the $j$'th
category where $j = 1, \ldots, J$ and $J \geq
2$.\footnote{binomial models ($J = 2$) are also included.}%
%
A basic cumulative link model is
\begin{equation}
  \label{eq:BasicCLM}
  \gamma_{ij} = F(\eta_{ij})~, \quad \eta_{ij} = \theta_j - \bm
  x_i^\top \bm\beta~, \quad i = 1,\ldots,n~, \quad j = 1, \ldots, J-1 ~,
\end{equation}
where
\begin{equation*} %% \label{eq:cum}
  \gamma_{ij} = \Prob (Y_i \leq j) = \pi_{i1} + \ldots + \pi_{ij}
  \quad \mathrm{with} \quad \sum_{j=1}^J \pi_{ij} = 1
\end{equation*}
are cumulative probabilities\footnote{we have suppressed the
  conditioning on the covariate vector, $\bm x_i$, i.e., $\gamma_{ij} =
  \gamma_j(\bm x_i)$ and $P(Y_i \leq j) = P(Y \leq j | \bm x_i)$.},
$\pi_{ij}$ is the probability that the $i$th observation falls in the
$j$th category, $\eta_{ij}$ is the linear predictor, $\bm x_i^\top$ is a
$p$-vector of regression variables for the parameters $\bm\beta$
(without a leading column for an intercept), and $F$ is the inverse link
function.
%
The thresholds (also known as cut-points or intercepts) are ordered:
\begin{equation*}
  -\infty \equiv \theta_0 \leq \theta_1 \leq \ldots \leq \theta_{J-1}
  \leq \theta_J \equiv \infty.
\end{equation*}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{The multinomial distribution and the log-likelihood function}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The ordinal observation $Y_i$ which assumes the value $j$ can be
represented by a multinomially distributed variable $\bm Y_i^* \sim
\mathrm{multinom}(\bm\pi_i, 1)$, where $\bm Y_i^*$ is a $J$-vector with
a $1$ at the $j$'th entry and 0 otherwise, and with probability mass
function
%
\begin{equation}
  \label{eq:multinom_pmf}
  \Prob(\bm Y_i^* = \bm y_i^*) = \prod_j \pi_{ij}^{y_{ij}^*} ~.
\end{equation}
%
The log-likelihood function can therefore be written as
%
\begin{equation*}
  \ell(\bm\theta, \bm\beta; \bm y^*) = \sum_i \sum_j y_{ij}^* \log \pi_{ij}
\end{equation*}
%
or equivalently
%
\begin{align*}
  \ell(\bm\theta, \bm\beta; \bm y) =~& \sum_i \sum_j \mathrm I (y_i = j)
  \log \pi_{ij} \\
  =~& \sum_i \log \tilde\pi_i
\end{align*}
%
where $\tilde\pi_i$ is the entry of the $J$-vector $\bm \pi_i$ (with
elements $\pi_{ij}$) corresponding to the observed category $y_i$, and
$\mathrm I(\cdot)$ is the indicator function. Allowing for
observation-level weights (case weights) $w_i$ finally leads to
%
\begin{equation}
  \label{eq:clm-log-likelihood}
  \ell(\bm\theta, \bm\beta; \bm y) = \sum_i w_i \log \tilde\pi_i ~.
\end{equation}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Likelihood based inference}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Confidence intervals for model parameters are obtained by appealing to
the asymptotic normal distribution of a statistic $s(\cdot)$ for a
scalar parameter of interest $\beta_a$ and defined as
\begin{equation*}
  CI:~\left\{ \beta_a; |s(\beta_a)| < z_{1 - \alpha/2} \right\}
\end{equation*}
where $z_{1 - \alpha/2}$ is the $(1 - \alpha/2)$ quantile of the
standard normal cumulative distribution function. Taking $s(\cdot)$ to
be the Wald statistic $s(\beta_a):~ w(\beta_a) = (\hat\beta_a -
\beta_a)/\hat{\mathrm{se}}(\hat\beta_a)$ leads to the classical
symmetric intervals.
Better confidence intervals can be obtained by choosing instead the
likelihood root statistic \citep[see e.g.,][]{pawitan01, brazzale07}:
\begin{equation*}
  s(\beta_a):~ r(\beta_a) = \mathrm{sign}(\hat\beta_a - \beta_a)
  \sqrt{-2 [ \ell(\hat{\bm\theta}, \hat{\bm\beta}; \bm y) -
    \ell_p(\beta_a; \bm y)]}
\end{equation*}
where
\begin{equation*}
  \ell_p(\beta_a; \bm y) = \max_{\bm\theta, \bm\beta_{-a}}
  \ell(\bm\theta, \bm\beta; \bm y)~,
\end{equation*}
is the profile likelihood for the scalar parameter $\beta_a$ and
$\bm\beta_{-a}$ is the vector of regression parameters without the
$a$'th one.

While the profile likelihood has to be optimized over all parameters
except $\beta_a$, we define a \emph{log-likelihood slice} as
\begin{equation}
  \label{eq:slice}
  \ell_{\mathrm{slice}}(\beta_a; \bm y) = \ell(\beta_a; \hat{\bm\theta},
  \hat{\bm\beta}_{-a}, \bm y)~,
\end{equation}
which is the log-likelihood function evaluated at $\beta_a$ while
keeping the remaining parameters fixed at their ML estimates. A
quadratic approximation to the log-likelihood slice is $(\hat\beta_a -
\beta_a)^2 / 2\tau_a^2$ where the \emph{curvature unit} $\tau_a$ is the
square root of the $a$'th diagonal element of the Hessian of
$-\ell(\hat{\bm\theta}, \hat{\bm\beta}; \bm y)$.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Link functions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A commonly used link function is the logit link which leads to
%
\begin{equation}
  \label{eq:cum_logit_model}
  \mathrm{logit}(\gamma_{ij}) = \log \frac{\Prob (Y_i \leq j)}{1 -
    \Prob(Y_i \leq j)}
\end{equation}
%
The odds ratio (OR) of the event $Y_i \leq j$ at $\bm x_1$ relative to
the same event at $\bm x_2$ is then
%
\begin{equation}
  \label{eq:odds_ratio}
  \mathrm{OR} = \frac{\gamma_j(\bm x_1) / [1 - \gamma_j(\bm x_1)]}
  {\gamma_j(\bm x_2) / [1 - \gamma_j(\bm x_2)]} =
  \frac{\exp(\theta_j - \bm x_1^\top \bm\beta)}
  {\exp(\theta_j - \bm x_2^\top \bm\beta)}
  = \exp[(\bm x_2^\top - \bm x_1^\top)\bm\beta]
\end{equation}
which is independent of $j$. Thus the log cumulative odds ratio is
proportional to the distance between $\bm x_1$ and $\bm x_2$ which
motivated \citet{mccullagh80} to denote the cumulative logit model a
\emph{proportional odds model}. If $x$ represents a treatment variable
with two levels (e.g., placebo and treatment), then $x_2 - x_1 = 1$ and
the odds ratio is $\exp(-\beta_\textup{treatment})$. Similarly the odds
ratio of the event $Y \geq j$ is $\exp(\beta_\textup{treatment})$.

The probit link has its own interpretation through a normal linear model
for a latent variable which is considered in
section~\ref{sec:latent-variable-motivation}.

The complementary log-log (clog-log) link is also sometimes used because
of its interpretation as a proportional hazards model for grouped
survival times:
\begin{equation*}
  -\log\{1 - \gamma_{j}(\bm x_i) \} = \exp( \theta_j - \bm x_i^T \bm\beta )
\end{equation*}
Here $1 - \gamma_{j}(\bm x_i)$ is the probability of survival beyond
category $j$ given $\bm x_i$. The proportional hazards model has the
property that
\begin{equation*}
  \log \{ 1 - \gamma_{j}(\bm x_1) \} = \exp[ (\bm x_2^T - \bm x_1^T)
  \bm\beta ] \log \{ 1 - \gamma_{j}(\bm x_2) \}~.
\end{equation*}
Thus the cumulative hazards at $\bm x_1$ are proportional to those at
$\bm x_2$.

If the log-log link is used on the response categories in the reverse
order, this is equivalent to using the clog-log link on the response in
the original order.
This reverses the sign of $\bm\beta$ as well as the sign and order of
$\{\theta_j\}$ while the likelihood and standard errors remain
unchanged.
%
% Thus, similar to the proportional odds
% model, the ratio of hazard functions beyond category $j$ at $\bm x_1$
% relative to $\bm x_2$ (the hazard ratio, $HR$) is:
% \begin{equation*}
%   HR = \frac{-\log\{1 - \gamma_{j}(\bm x_2) \}}
%   {-\log\{1 - \gamma_{j}(\bm x_1) \}} =
%   \frac{\exp( \theta_j - \bm x_1^T \bm\beta )}
%   {\exp( \theta_j - \bm x_2^T \bm\beta )} =
%   \exp[(\bm x_2 - \bm x_1)\bm\beta]
% \end{equation*}
%
Details of the most common link functions are described in
Table~\ref{tab:linkFunctions}.

\begin{table}[t!]
\begin{center}
%\footnotesize
\begin{tabular}{llll}
  \hline
  Name & logit & probit & log-log \\
  \hline
  Distribution & logistic & normal & Gumbel (max)$^b$ \\
  Shape & symmetric & symmetric & right skew \\
  Link function ($F^{-1}$) & $\log[\gamma / (1 - \gamma)]$ &
  $\Phi^{-1}(\gamma)$ & $-\log[-\log(\gamma)]$ \\
  Inverse link ($F$) & $1 / [1 + \exp(-\eta)]$ & $\Phi(\eta)$ &
  $\exp(-\exp(-\eta))$ \\
  Density ($f = F'$) & $\exp(-\eta) / [1 + \exp(-\eta)]^2$ & $\phi(\eta)$ &
  $\exp(-\eta - \exp(-\eta))$ \\
  \hline
  \hline
  Name & clog-log$^a$ & cauchit \\
  \hline
  Distribution & Gumbel (min)$^b$ & Cauchy$^c$ \\
  Shape & left skew & kurtotic \\
  Link function ($F^{-1}$) & $\log[ -\log(1 - \gamma)]$ &
  $\tan[\pi (\gamma - 0.5)]$ \\
  Inverse link ($F$) & $1 - \exp[-\exp(\eta)]$ &
  $\arctan(\eta)/\pi + 0.5$ \\
  Density ($f = F'$) & $\exp[-\exp(\eta) + \eta]$ &
  $1 / [\pi(1 + \eta^2)]$ \\
  \hline
\end{tabular}
\end{center}
% \footnotesize
%
% $^a$: the \emph{complementary log-log} link \\
% $^b$: the Gumbel distribution is also known as the extreme value
% (type I) distribution for extreme minima or maxima. It is also
% sometimes referred to as the Weibull (or log-Weibull) distribution
% (\url{http://en.wikipedia.org/wiki/Gumbel_distribution}). \\
% $^c$: the Cauchy distribution is a $t$-distribution with one df
\caption{Summary of the five standard link functions.
  $^a$: the \emph{complementary log-log} link;
  $^b$: the Gumbel distribution is also known as the extreme value
  (type I) distribution for extreme minima or maxima. It is also
  sometimes referred to as the Weibull (or log-Weibull) distribution;
  $^c$: the Cauchy distribution is a $t$-distribution with one degree of
  freedom.
  \label{tab:linkFunctions}}
\end{table}

The \pkg{ordinal} package allows for the estimation of an extended class
of cumulative link models in which the basic model~(\ref{eq:BasicCLM})
is extended in a number of ways including structured thresholds, partial
proportional odds, scale effects and flexible link functions. The
following sections will describe these extensions of the basic CLM.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Extensions of cumulative link models}
\label{sec:extensions-of-clms}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A general formulation of the class of models (excluding random effects)
that is implemented in \pkg{ordinal} can be written
%
\begin{equation}
  \gamma_{ij} = F_{\lambda}(\eta_{ij}), \quad \eta_{ij} =
  \frac{g_{\bm\alpha} (\theta_j) - \bm x_i^\top \bm\beta - \bm w_i^\top
    \tilde{\bm\beta}_j}{\exp(\bm z_i^\top \bm\zeta)}
\end{equation}
%
where
\begin{description}
\item[$F_{\lambda}$] is the inverse link function.
It may be parameterized by the scalar parameter $\lambda$ in which case
  we refer to $F_{\lambda}^{-1}$ as a \emph{flexible link function},
%
\item[$g_{\bm\alpha}(\theta_j)$] parameterizes the thresholds
  $\{\theta_j\}$ by the vector $\bm\alpha$ such that $g$ restricts
  $\{\theta_j\}$ to be, for example, symmetric or equidistant. We denote
  these \emph{structured thresholds},
%
\item[$\bm x_i^\top\bm\beta$] are the ordinary regression effects,
%
\item[$\bm w_i^\top \tilde{\bm\beta}_j$] are regression effects which
  are allowed to depend on the response category $j$ and they are
  denoted \emph{partial} or \emph{non-proportional odds}
  \citep{peterson90} when the logit link is applied. To include other
  link functions in the terminology we denote these effects
  \emph{nominal effects} (in text and code) because these effects are
  not integral to the ordinal nature of the data,
%
\item[$\exp(\bm z_i^\top \bm\zeta)$] are \emph{scale effects} since in a
  latent variable view these effects model the scale of the underlying
  location-scale distribution.
\end{description}
With the exception of the structured thresholds, these extensions of the
basic CLM have been considered individually in a number of sources, but
to the author's best knowledge not previously in a unified framework.
%
For example, partial proportional odds have been considered by
\citet{peterson90}, and scale effects by \citet{mccullagh80} and
\citet{cox95}.
%
\citet{agresti02} is a good introduction to cumulative link models in
the context of categorical data analysis and includes discussions of
scale effects.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Latent variable motivation of CLMs}
\label{sec:latent-variable-motivation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

It is natural to motivate the CLM from a linear model for a categorized
version of a latent variable. Assume the following linear model for an
unobserved latent variable:
%
\begin{equation}
  \label{eq:latent}
  S_i = \alpha^* + \bm x_i^\top \bm\beta^* + \varepsilon_i, \quad
  \varepsilon_i \sim N(0, \sigma^{*2})
\end{equation}
%
If $S_i$ falls between two thresholds, $\theta_{j-1}^* < S_i \leq
\theta_j^*$ where
%
\begin{equation}
  \label{eq:thresholds}
  -\infty \equiv \theta_0^* < \theta_1^* < \ldots < \theta^*_{J-1} <
  \theta_{J}^* \equiv \infty
\end{equation}
%
then $Y_i = j$ is observed and the cumulative probabilities are:
%
\begin{equation*}
  \gamma_{ij} = \Prob (Y_i \leq j) = \Prob(S_i \leq \theta_j^*) =
  \Prob \left( Z \leq \frac{\theta_j^* - \alpha^* - \bm x_i^\top
      \bm\beta^*}{\sigma^*} \right) =
  \Phi ( \theta_j - \bm x_i^\top \bm\beta )
\end{equation*}
%
where $Z$ follows a standard normal distribution, $\Phi$ denotes the
standard normal cumulative distribution function, parameters with an
``$^*$'' exist on the latent scale, $\theta_j = (\theta_j^* - \alpha^*)
/ \sigma^*$ and $\bm\beta = \bm\beta^* / \sigma^*$.

Note that $\alpha^*$, $\bm\beta^*$ and $\sigma^*$ would have been
identifiable if the latent variable $S$ were directly observed, but they
are not identifiable with ordinal observations.
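This correspondence can be illustrated by a small simulation; in the
following sketch the covariate, the latent thresholds and the parameter
values are arbitrary choices made for illustration only, and a probit
CLM approximately recovers $\theta_j = (\theta_j^* - \alpha^*)/\sigma^*$
and $\beta = \beta^*/\sigma^*$:
<<eval = FALSE>>=
set.seed(1)
n <- 1e4
x <- runif(n)
S <- 1 + 2 * x + rnorm(n, sd = 0.5)  # alpha* = 1, beta* = 2, sigma* = 0.5
y <- cut(S, breaks = c(-Inf, 1.5, 2, 2.5, Inf), ordered_result = TRUE)
fm <- clm(y ~ x, data = data.frame(x, y), link = "probit")
coef(fm)  # thresholds approx. (1, 2, 3) and beta approx. 4
@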
If we allow a log-linear model for the scale such that
%
\begin{equation*}
  \varepsilon_i \sim N(0, \sigma^{*2}_i), \quad
  \sigma_i^* = \exp(\mu + \bm z_i^\top \bm\zeta) =
  \sigma^* \exp(\bm z_i^\top \bm\zeta)
\end{equation*}
%
where $\bm z_i$ is the $i$'th row of a design matrix $\bm Z$ without a
leading column for an intercept and $\sigma^* = \exp(\mu)$, then
\begin{equation*}
  \gamma_{ij} = \Prob \left( Z \leq \frac{\theta_j^* - \alpha^* - \bm
      x_i^\top \bm\beta^*}{\sigma^*_i} \right) =
  \Phi \left( \frac{\theta_j - \bm x_i^T \bm\beta}{\sigma_i} \right)
\end{equation*}
where $\sigma_i = \sigma_i^* / \sigma^* = \exp(\bm z_i^\top \bm\zeta)$
is the \emph{relative} scale.

The common link functions (probit, logit, log-log, c-log-log and
cauchit) correspond to the inverse cumulative distribution functions of
the normal, logistic, Gumbel(max), Gumbel(min) and Cauchy distributions,
respectively. These distributions are all members of the location-scale
family with common form $F(\mu, \sigma)$, with location $\mu$ and
non-negative scale $\sigma$; for example, the logistic distribution has
mean $\mu$ and standard deviation $\sigma \pi / \sqrt{3}$. Choosing a
link function therefore corresponds to assuming a particular
distribution for the latent variable $S$ in which $\bm x_i^\top \bm\beta$
and $\exp(\bm z_i^\top \bm\zeta)$ models location \emph{differences} and
scale \emph{ratios} respectively of that distribution.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Structured thresholds}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Structured thresholds, $\{ g(\bm\alpha)_j \}$, make it possible to
impose restrictions on the thresholds $\bm\theta = g(\bm\alpha)$. For
instance, restricting the thresholds to be equidistant means that only
the location of, say, the first threshold and the spacing between
adjacent thresholds have to be estimated; thus only two parameters are
used to parameterize the thresholds irrespective of the number of
response categories.

\pkg{ordinal} takes $g(\bm\alpha)$ to be a linear function and operates
with
\begin{equation*}
  g(\bm\alpha) = \mathcal{J}^\top \bm\alpha = \bm \theta
\end{equation*}
where the Jacobian $\mathcal{J}$ defines the mapping from the parameters
$\bm\alpha$ to the thresholds $\bm\theta$. The traditional ordered but
otherwise unrestricted thresholds are denoted \emph{flexible thresholds}
and obtained by taking $\mathcal{J}$ to be an identity matrix.

Assuming $J=6$ ordered categories, the Jacobians for equidistant and
symmetric thresholds (denoted \code{equidistant} and \code{symmetric} in
the \code{clm}-argument \code{threshold}) are
\begin{equation*}
  \mathcal{J}_{\mathrm{equidistant}} =
  \begin{bmatrix}
    1 & 1 & 1 & 1 & 1 \\
    0 & 1 & 2 & 3 & 4 \\
  \end{bmatrix}, \quad
  \mathcal{J}_{\mathrm{symmetric}} =
  \begin{bmatrix}
    1 & 1 & 1 & 1 & 1 \\
    0 & -1 & 0 & 1 & 0 \\
    -1 & 0 & 0 & 0 & 1 \\
  \end{bmatrix}.
\end{equation*}
Another version of symmetric thresholds (denoted \code{symmetric2}) is
sometimes relevant with an unequal number of response categories here
illustrated with $J=5$ together with the \code{symmetric} thresholds:
\begin{equation*}
  \mathcal{J}_{\mathrm{symmetric2}} =
  \begin{bmatrix}
    0 & -1 & 1 & 0 \\
    -1 & 0 & 0 & 1 \\
  \end{bmatrix}, \quad
  \mathcal{J}_{\mathrm{symmetric}} =
  \begin{bmatrix}
    1 & 1 & 0 & 0 \\
    0 & 0 & 1 & 1 \\
    -1 & 0 & 0 & 1 \\
  \end{bmatrix}
\end{equation*}
The nature of $\mathcal{J}$ for a particular model can always be
inspected by printing the \code{tJac} component of the \code{clm} fit.
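As a small illustration, anticipating the wine example of
section~\ref{sec:fitting-clms}, a threshold structure is requested with
the \code{threshold} argument and the corresponding Jacobian inspected
via the \code{tJac} component; a minimal sketch:
<<eval = FALSE>>=
fm.equi <- clm(rating ~ temp + contact, data = wine,
               threshold = "equidistant")
fm.equi$tJac
@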
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Partial proportional odds and nominal effects}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The nominal effects $\bm w_i^\top\tilde{\bm\beta}_j$ can be considered
an extension of the regression part of the model $\bm x_i^\top \bm\beta$
in which the regression effects are allowed to vary with $j$.
%
The nominal effects can also be considered an extension of the
thresholds $\theta_j$ which allows them to depend on the variables $\bm
w_i$: $\tilde{\theta}_{ij}(\bm w_i) = \theta_j - \bm w_i^\top
\tilde{\bm\beta}_j$ is the $j$'th threshold for the $i$'th observation.
The following treatment assumes the latter view. In general let $\bm W$
denote the design matrix for the nominal effects without a leading
column for an intercept; the nominal-effects parameter vector
$\tilde{\bm\beta}_j$ is then $\mathrm{ncol}(\bm W)$ long and
$\tilde{\bm\beta}$ is $\mathrm{ncol}(\bm W) \cdot (J-1)$ long.

If $\bm W$ is the design matrix for the nominal effects containing a
single column for a continuous variable then $\tilde{\beta}_j$ is the
slope parameter corresponding to the $j$'th threshold and $\theta_j$ is
the $j$'th intercept, i.e., the threshold when the covariate is zero.
Looking at $\tilde{\theta}_{ij}(\bm w_i) = \theta_j - \bm w_i^\top
\tilde{\bm\beta}_j$ as a linear model for the thresholds facilitates the
interpretation.

If, on the other hand, $\bm W$ is the design matrix for a categorical
variable (a \code{factor} in \proglang{R}) then the interpretation of
$\tilde{\bm\beta}_j$ depends on the contrast-coding of $\bm W$. If we
assume that the categorical variable has 3 levels, then
$\tilde{\bm\beta}_j$ is a 2-vector. In the default treatment
contrast-coding (\code{"contr.treatment"}) $\theta_j$ is the $j$'th
threshold for the first (base) level of the factor, $\tilde{\beta}_{1j}$
is the difference between the thresholds for the first and second level
and $\tilde{\beta}_{2j}$ is the difference between the thresholds for
the first and third level.

In general we define $\bm\Theta$ as a matrix with $J-1$ columns and with
1 row for each combination of the levels of factors in $\bm W$. This
matrix is available in the \code{Theta} component of the model fit.

Note that variables in $\bm X$ cannot also be part of $\bm W$ if the
model is to remain identifiable. \pkg{ordinal} detects this and
automatically removes the offending variables from $\bm X$.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Flexible link functions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The \pkg{ordinal} package allows for two kinds of flexible link
functions due to \citet{aranda-ordaz83} and \citet{genter85}.

The link function proposed by \citet{aranda-ordaz83} reads
%
\begin{equation*}
  F^{-1}_\lambda (\gamma_{ij}) = \log \left\{ \frac{(1 -
      \gamma_{ij})^{-\lambda} - 1}{\lambda} \right\}~,
\end{equation*}
which depends on the auxiliary parameter $\lambda \in ]0, \infty[$. When
$\lambda = 1$, the logistic link function arises, and when $\lambda
\rightarrow 0$,
\begin{equation*}
  \{ (1 - \gamma_{ij})^{-\lambda} - 1 \} / \lambda \rightarrow
  \log (1 - \gamma_{ij})^{-1}~,
\end{equation*}
so the complementary log-log link arises.
The inverse link function and its derivative are given by
\begin{align*}
  F(\eta) =&~ 1 - (\lambda \exp(\eta) + 1)^{-\lambda^{-1}} \\
  f(\eta) =&~ \exp(\eta) (\lambda \exp(\eta) + 1)^{-\lambda^{-1} - 1}
\end{align*}
The density implied by the inverse link function is left-skewed if $0 <
\lambda < 1$, symmetric if $\lambda = 1$ and right-skewed if $\lambda >
1$, so the link function can be used to assess the evidence about
possible skewness of the latent distribution.

The log-gamma link function proposed by \citet{genter85} is based on the
log-gamma density by \citet{farewell77}. The cumulative distribution
function and hence inverse link function reads
\begin{equation*}
  F_\lambda(\eta) =
  \begin{cases}
    1 - G(q; \lambda^{-2}) & \lambda < 0 \\
    \Phi(\eta) & \lambda = 0 \\
    G(q; \lambda^{-2}) & \lambda > 0
  \end{cases}
\end{equation*}
where $q = \lambda^{-2}\exp(\lambda \eta)$ and $G(\cdot; \alpha)$
denotes the Gamma distribution with shape parameter $\alpha$ and unit
rate parameter, and $\Phi$ denotes the standard normal cumulative
distribution function. The corresponding density function reads
\begin{equation*}
  f_\lambda(\eta) =
  \begin{cases}
    |\lambda| k^k \Gamma(k)^{-1} \exp\{ k(\lambda\eta -
    \exp(\lambda\eta)) \} & \lambda \neq 0 \\
    \phi(\eta) & \lambda = 0
  \end{cases}
\end{equation*}
where $k=\lambda^{-2}$, $\Gamma(\cdot)$ is the gamma function and $\phi$
is the standard normal density function.

By attaining the Gumbel(max) distribution at $\lambda = -1$, the
standard normal distribution at $\lambda = 0$ and the Gumbel(min)
distribution at $\lambda = 1$, the log-gamma link bridges the log-log,
probit and complementary log-log links, providing right-skewed,
symmetric and left-skewed latent distributions in a single family of
link functions.

Note that choice and parameterization of the predictor, $\eta_{ij}$,
e.g., the use of scale effects, can affect the evidence about the shape
of the latent distribution. There are usually several link functions
which provide essentially the same fit to the data and choosing among
the good candidates is often better done by appealing to arguments such
as ease of interpretation rather than arguments related to fit.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section[Implementation of ML Estimation of CLMs in ordinal]{Implementation of ML Estimation of CLMs in \pkg{ordinal}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In the \pkg{ordinal} package cumulative link models are (by default)
estimated with a regularized Newton-Raphson (NR) algorithm with
step-halving (line search) using analytical expressions for the gradient
and Hessian of the negative log-likelihood function. This NR algorithm
with analytical derivatives is used irrespective of whether the model
contains structured thresholds, nominal effects or scale effects; the
only exception being models with flexible link functions, for which a
general-purpose quasi-Newton optimizer is used.

Due to the computationally cheap and efficient evaluation of the
analytical derivatives, the relatively well-behaved log-likelihood
function (with exceptions described below) and the speedy convergence of
the Newton-Raphson algorithm, the estimation of CLMs is virtually
instant on a modern computer even with complicated models on large
datasets. This also facilitates simulation studies. More important than
speed is perhaps that the algorithm is reliable and accurate.
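The progress of the algorithm can be followed directly by asking for a
positive \code{trace} value (cf. the description of \code{clm.control}
in section~\ref{sec:fitting-clms}); a minimal sketch using the wine
example introduced there, which prints progress information for each
iteration:
<<eval = FALSE>>=
fm <- clm(rating ~ temp + contact, data = wine,
          control = clm.control(trace = 1))
@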
Technical aspects of the regularized NR algorithm with step-halving
(line search) are described in appendix~\ref{sec:algorithm} and
analytical gradients are described in detail in \citet{mythesis}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Properties of the log-likelihood function for extended CLMs}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\citet{pratt81} and \citet{burridge81} showed (seemingly independently
of each other) that the log-likelihood function of the basic cumulative
link model~(\ref{eq:BasicCLM}) is concave. This means that there is a
unique global optimum of the log-likelihood function and therefore no
risk of convergence to a local optimum. It also means that the Hessian
matrix for the negative log-likelihood is strictly positive definite and
therefore also that the Newton step is always in the direction of higher
likelihood. The genuine Newton step may be too long to actually cause an
increase in likelihood from one iteration to the next (this is called
``overshoot''). This is easily overcome by successively halving the
length of the Newton step until an increase in likelihood is achieved.

Exceptions to the strict concavity of the log-likelihood function
include models using the cauchit link, flexible link functions as well
as models with scale effects. Notably, models with structured thresholds
as well as nominal effects do not affect the linearity of the predictor
$\eta_{ij}$, and so are also guaranteed to have concave log-likelihoods.

The restriction of the threshold parameters $\{\theta_j\}$ being
non-decreasing is dealt with by defining $\ell(\bm\theta, \bm\beta; y) =
-\infty$ when $\{\theta_j\}$ are not in a non-decreasing sequence. If
the algorithm attempts evaluation at such illegal values, step-halving
effectively brings the algorithm back on track.

Other implementations of CLMs re-parameterize $\{\theta_j\}$ such that
the non-decreasing nature of $\{\theta_j\}$ is enforced by the
parameterization, for example, \code{MASS::polr} (package version
7.3.49) optimizes the likelihood using
\begin{equation*}
  \tilde\theta_1 = \theta_1, ~\tilde{\theta}_2 = \log(\theta_2 -
  \theta_1),~\ldots, ~ \tilde{\theta}_{J-1} = \log(\theta_{J-1} -
  \theta_{J-2})
\end{equation*}
This is deliberately not used in \pkg{ordinal} because the
log-likelihood function is generally closer to quadratic in the original
parameterization in our experience, which facilitates faster
convergence.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Starting values}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

For the basic CLMs~(\ref{eq:BasicCLM}) the threshold parameters are
initialized to an increasing sequence such that the probability mass of
a logistic distribution between consecutive thresholds (and below the
lowest or above the highest threshold) is constant. The regression
parameters $\bm\beta$, scale parameters $\bm\zeta$ as well as nominal
effects $\tilde{\bm\beta}$ are initialized to 0. If the model specifies
a cauchit link or includes scale parameters, estimation starts at the
parameter estimates of a model using the probit link and/or without the
scale-part of the model.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Estimation problems}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

With many nominal effects it may be difficult to find a model in which
the threshold parameters are strictly increasing for all combinations of
the parameters. Upon convergence of the NR algorithm the model evaluates
the $\bm\Theta$-matrix and checks that each row of threshold estimates
is increasing.
When a continuous variable is included among the nominal effects it is
often helpful if the continuous variable is centered at an appropriate
value (at least within the observed range of the data). This is because
$\{\theta_j\}$ represent the thresholds when the continuous variable is
zero and $\{\theta_j\}$ are enforced to be a non-decreasing sequence.
Since the nominal effects represent different slopes for the continuous
variable, the thresholds will necessarily be ordered differently at some
other value of the continuous variable.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Convergence codes}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Irrespective of the fitting algorithm, \pkg{ordinal} reports the
following convergence codes for CLMs in which negative values indicate
convergence failure:
%
\begin{description}
\item[-3] Not all thresholds are increasing. This is only possible with
  nominal effects and the resulting fit is invalid.
\item[-2] The Hessian has at least one negative eigenvalue. This means
  that the point at which the algorithm terminated does not represent an
  optimum.
\item[-1] Absolute convergence criterion (maximum absolute gradient) was
  not satisfied. This means that the algorithm couldn't get close enough
  to a stationary point of the log-likelihood function.
\item[0] Successful convergence.
\item[1] The Hessian is singular (i.e., at least one eigenvalue is
  zero). This means that some parameters are not uniquely determined.
\end{description}
%
Note that with convergence code \textbf{1} the optimum of the
log-likelihood function has been found although it is not a single point
but a line (or in general a (hyper)plane), so while some parameters are
not uniquely determined, the value of the likelihood is valid and can be
compared to that of other models.

In addition to these convergence codes, the NR algorithm in
\pkg{ordinal} reports the following messages:
\begin{description}
\item[0] Absolute and relative convergence criteria were met
\item[1] Absolute convergence criterion was met, but relative criterion
  was not met
\item[2] Iteration limit reached
\item[3] Step factor reduced below minimum
\item[4] Maximum number of consecutive Newton modifications reached
\end{description}

Note that convergence is assessed irrespective of potential messages
from the fitting algorithm and irrespective of whether the tailored NR
algorithm or a general-purpose quasi-Newton optimizer is used.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section[Fitting cumulative link models in ordinal with clm]{Fitting cumulative link models in \pkg{ordinal} with \code{clm}}
\label{sec:fitting-clms}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The \code{clm} function takes the following arguments:
%
<<echo = FALSE>>=
clm_args <- gsub("function ", "clm", deparse(args(clm)))
cat(paste(clm_args[-length(clm_args)], "\n"))
@
%
Several arguments are standard and well-known from \code{lm} and
\code{glm} and will not be described in detail; \code{formula},
\code{data}, \code{weights}, \code{subset} and \code{na.action} are all
parts of the standard model specification in \proglang{R}.
\code{scale} and \code{nominal} are interpreted as \proglang{R}-formulae
with no left hand sides and specify the scale and nominal effects of the
model, respectively; see sections~\ref{sec:scale-effects} and
\ref{sec:nominal-effects} for details. \code{start} is an optional
vector of starting values; \code{doFit} can be set to \code{FALSE} to
prompt \code{clm} to return a model \emph{environment}, for details see
section~\ref{sec:customized-modelling}; \code{model} controls whether
the \code{model.frame} should be included in the returned model fit;
\code{link} specifies the link function and \code{threshold} specifies
an optional threshold structure, for details see
section~\ref{sec:threshold-effects}.

Note the absence of a separate \code{offset} argument. Since \code{clm}
allows for different offsets in \code{formula} and \code{scale}, offsets
have to be specified within each formula, e.g.,
\verb!scale = ~ x1 + offset(x2)!.

Methods for \code{clm} model fits are summarized in
Table~\ref{tab:clm_methods} and introduced in the following sections.

Control parameters can either be specified as a named list, among the
optional \code{...} arguments, or directly as a call to
\code{clm.control}; in the first two cases the arguments are passed on
to \code{clm.control}. \code{clm.control} takes the following arguments:
%
<<echo = FALSE>>=
cc_args <- gsub("function ", "clm.control", deparse(args(clm.control)))
cat(paste(cc_args[-length(cc_args)], "\n"))
@
%
The \code{method} argument specifies the optimization and/or return
method. The default estimation method (\code{Newton}) is the regularized
Newton-Raphson estimation scheme described in
section~\ref{sec:algorithm}; options \code{model.frame} and \code{design}
prompt \code{clm} to return, respectively, the \code{model.frame} and a
list of objects that represent the internal representation instead of
fitting the model; options \code{ucminf}, \code{nlminb} and \code{optim}
represent different general-purpose optimizers which may be used to fit
the model (the former from package \pkg{ucminf} \citep{ucminf}, the
latter two from package \pkg{stats}). The \code{sign.location} and
\code{sign.nominal} options allow the user to flip the signs on the
location and nominal model terms. The \code{convergence} argument
instructs \code{clm} how to alert the user of potential convergence
problems; \code{...} are optional arguments passed on to the general
purpose optimizers; \code{trace} applies across all optimizers and
positive values lead to printing of progress during iterations; the
remaining arguments (\code{maxIter, gradTol, maxLineIter, relTol, tol})
control the behavior of the regularized NR algorithm described in
appendix~\ref{sec:algorithm}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection[Fitting a basic cumulative link model with clm]{Fitting a basic cumulative link model with \code{clm}}
\label{sec:fitting-basic-clm}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In the following examples we will use the wine data from
\citet{randall89} available in the object \code{wine} in package
\pkg{ordinal}, cf., Table~\ref{tab:wineData}. The data represent a
factorial experiment on factors determining the bitterness of wine with
1 = ``least bitter'' and 5 = ``most bitter''. Two treatment factors
(temperature and contact) each have two levels. Temperature and contact
between juice and skins can be controlled when crushing grapes during
wine production. Nine judges each assessed wine from two bottles from
each of the four treatment conditions, hence there are 72 observations
in all.
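The structure of the data can be inspected directly before modelling;
for example:
<<eval = FALSE>>=
str(wine)
head(wine)
@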
The main objective is to examine the effect of contact and temperature
on the perceived bitterness of wine.

\begin{table}[t!]
\centering
\begin{tabular}{llrrrrr}
  \hline
  & & \multicolumn{5}{c}{Least---Most bitter} \\
  \cline{3-7}
<<echo = FALSE, results = tex>>=
## data(wine)
tab <- with(wine, table(temp:contact, rating))
mat <- cbind(rep(c("cold", "warm"), each = 2),
             rep(c("no", "yes"), 2), tab)
colnames(mat) <- c("Temperature", "Contact", paste("~~", 1:5, sep = ""))
xtab <- xtable(mat)
print(xtab, only.contents = TRUE, include.rownames = FALSE,
      sanitize.text.function = function(x) x)
@
\end{tabular}
\caption{The number of ratings from nine judges in bitterness categories
  1 --- 5. Wine data from \citet{randall89} aggregated over bottles and
  judges.%
  \label{tab:wineData}}
\end{table}%

Initially we consider the following cumulative link model for the wine
data:
\begin{equation}
  \label{eq:CLM}
  \begin{array}{c}
    \textup{logit}(P(Y_i \leq j)) = \theta_j - \beta_1 (\mathtt{temp}_i)
    - \beta_2(\mathtt{contact}_i) \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{array}
\end{equation}%
%
where $\beta_1(\mathtt{temp}_i)$ attains the values
$\beta_1(\mathtt{cold})$ and $\beta_1(\mathtt{warm})$, and
$\beta_2(\mathtt{contact}_i)$ attains the values $\beta_2(\mathtt{no})$
and $\beta_2(\mathtt{yes})$. The effect of temperature in this model is
illustrated in Figure~\ref{fig:standard_clm}. This is a model for the
cumulative probability of the $i$th rating falling in the $j$th category
or below, where $i$ indexes all observations ($n=72$), $j = 1, \ldots,
J$ indexes the response categories ($J = 5$) and $\theta_j$ is the
intercept or threshold for the $j$th cumulative logit:
$\textup{logit}(P(Y_i \leq j))$.

Fitting the model with \code{clm} we obtain:
<<>>=
library("ordinal")
fm1 <- clm(rating ~ temp + contact, data = wine)
summary(fm1)
@

The \code{summary} method prints basic information about the fitted
model.
% most of which is self explanatory.
%
The primary result is the coefficient table with parameter estimates,
standard errors and Wald based $p$~values for tests of the parameters
being zero. If one of the flexible link functions (\code{link =
  "log-gamma"} or \code{link = "Aranda-Ordaz"}) is used, a coefficient
table for the link parameter $\lambda$ is also included.

The maximum likelihood estimates of the model coefficients are:%
%
\begin{equation}
  \label{eq:parameters}
  \begin{gathered}
    \hat\beta_1(\mathtt{warm} - \mathtt{cold})= 2.50,
    ~~\hat\beta_2(\mathtt{yes} - \mathtt{no}) = 1.53, \\
    \{\hat\theta_j\} = \{-1.34,~ 1.25,~ 3.47,~ 5.01\}.
  \end{gathered}
\end{equation}
%
The coefficients for \code{temp} and \code{contact} are positive
indicating that higher temperature and contact increase the bitterness
of wine, i.e., rating in higher categories is more likely.
%
Because treatment contrast coding, which is the default in \proglang{R},
was used, $\{\hat\theta_j\}$ refers to the thresholds at the setting
with $\mathtt{temp}_i = \mathtt{cold}$ and $\mathtt{contact}_i =
\mathtt{no}$.
%
Three natural and complementary interpretations of this model are
%
\begin{enumerate}
\item The thresholds $\{ \hat\theta_j \}$ at $\mathtt{contact}_i =
  \mathtt{yes}$ conditions have been shifted a constant amount $1.53$
  relative to the thresholds $\{ \hat\theta_j \}$ at
  $\mathtt{contact}_i = \mathtt{no}$ conditions.
\item The location of the latent distribution has been shifted $+1.53
  \sigma^*$ (scale units) at $\mathtt{contact}_i = \mathtt{yes}$
  relative to $\mathtt{contact}_i = \mathtt{no}$.
\item The odds ratio of bitterness being rated in category $j$ or above
  ($\mathrm{OR}(Y \geq j)$) is $\exp(\hat\beta_2(\mathtt{yes} -
  \mathtt{no})) = 4.61$.
\end{enumerate}
%
Note that there are no $p$~values displayed for the threshold coefficients
because it usually does not make sense to test the hypothesis that they
equal zero.

\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-fig2}
  \caption{Illustration of the effect of temperature in the standard
    cumulative link model in Equation~\ref{eq:CLM} for the wine data in
    Table~\ref{tab:wineData} through a latent variable
    interpretation.\label{fig:standard_clm}}
\end{figure}

The number of Newton-Raphson iterations is given below \code{niter} with
the number of step-halvings in parenthesis. \code{max.grad} is the maximum
absolute gradient of the log-likelihood function with respect to the
parameters.
%
The condition number of the Hessian (\code{cond.H}) is well below $10^4$
and so does not indicate a problem with the model.

The \code{anova} method produces an analysis of deviance (ANODE) table
also based on Wald $\chi^2$-tests and provides tables with type I, II and
III hypothesis tests using the \proglang{SAS} definitions. A type I table,
the \proglang{R} default for linear models fitted with \code{lm},
sequentially tests terms from first to last; type II tests attempt to
respect the principle of marginality and test each term after all others
while ignoring higher-order interactions; and type III tables are based on
orthogonalized contrasts, so tests of main effects or lower-order terms
can often be interpreted as averaged over higher-order terms. Note that in
this implementation any type of contrasts (e.g., \code{contr.treatment} or
\code{contr.SAS} as well as \code{contr.sum}) can be used to produce type
III tests. For further details on the interpretation and definition of
type I, II and III tests, please see \citet{kuznetsova17} and
\citet{SAStype}.

Here we illustrate with a type III ANODE table, which in this case is
equivalent to type I and II tables since the variables are balanced:
<<>>=
anova(fm1, type = "III")
@

Likelihood ratio tests, though asymptotically equivalent to the Wald
tests, usually better reflect the evidence in the data. These tests can be
obtained by comparing nested models with the \code{anova} method, for
example, the likelihood ratio test of \code{contact} is
<<>>=
fm2 <- clm(rating ~ temp, data = wine)
anova(fm2, fm1)
@
which in this case produces a slightly lower $p$~value. Equivalently we
can use \code{drop1} to obtain likelihood ratio tests of the explanatory
variables while \emph{controlling} for the remaining variables:
<<>>=
drop1(fm1, test = "Chi")
@
Likelihood ratio tests of the explanatory variables while \emph{ignoring}
the remaining variables are provided by the \code{add1} method:
<<>>=
fm0 <- clm(rating ~ 1, data = wine)
add1(fm0, scope = ~ temp + contact, test = "Chi")
@
%
Confidence intervals of the parameter estimates are provided by the
\code{confint} method, which by default computes the so-called profile
likelihood confidence intervals:
<<>>=
confint(fm1)
@
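Since the regression parameters are log odds-ratios in this model,
odds-ratio estimates with profile likelihood confidence limits follow by
exponentiation; a minimal sketch (the column name \code{OR} is just an
illustrative label):
%
<<eval=FALSE>>=
exp(cbind(OR = coef(fm1)[5:6], confint(fm1)))
@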
The cumulative link model in Equation~\ref{eq:CLM} assumes that the
thresholds $\{\theta_j\}$ are constant for all values of the remaining
explanatory variables, here \code{temp} and \code{contact}. This is
generally referred to as the \emph{proportional odds assumption} or
\emph{equal slopes assumption}. We can relax this assumption in two
general ways: with nominal effects and scale effects, examples of which
will now be presented in turn.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Partial and non-proportional odds: nominal effects}
\label{sec:nominal-effects}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The CLM in Equation~\ref{eq:CLM} specifies a structure in which the
regression parameters $\bm\beta$ are not allowed to vary with $j$, or
equivalently that the threshold parameters $\{\theta_j\}$ are not allowed
to depend on regression variables. In the following model this assumption
is relaxed and the threshold parameters are allowed to depend on
\code{contact}. This leads to so-called partial proportional odds for
\code{contact}:
%
\begin{equation}
  \label{eq:CLM_nominal}
  \begin{array}{c}
    \textup{logit}(P(Y_i \leq j)) = \theta_j + \tilde{\beta}_{j}
    (\mathtt{contact}_i) - \beta (\mathtt{temp}_i) \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{array}
\end{equation}
%
One way to view this model is to think of two sets of thresholds being
applied at conditions with and without contact, as illustrated in
Figure~\ref{fig:clm_nominal}. The model is specified as follows with
\code{clm}:
<<>>=
fm.nom <- clm(rating ~ temp, nominal = ~ contact, data = wine)
summary(fm.nom)
@
As can be seen from the output of \code{summary} there is no regression
coefficient estimated for \code{contact}, but there are additional
threshold coefficients estimated instead.
%
The naming and meaning of the threshold coefficients depend on the
contrast coding applied to \code{contact}. Here the \proglang{R} default
treatment contrasts (\code{"contr.treatment"}) are used, and the
coefficients translate to the following parameter functions:
\begin{equation}
  \label{eq:nom_parameters}
  \begin{gathered}
    \hat\beta(\mathtt{warm} - \mathtt{cold})= 2.52, \\
    \{\hat\theta_j\} = \{-1.32,~ 1.25,~ 3.55,~ 4.66\}, \\
    \{ \hat{\tilde{\beta}}_j(\mathtt{yes} - \mathtt{no}) \} = \{-1.62,~
    -1.51,~ -1.67,~ -1.05\}.
  \end{gathered}
\end{equation}
%
Again $\{ \hat\theta_j \}$ refer to the thresholds at $\mathtt{temp}_i =
\mathtt{cold}$ and $\mathtt{contact}_i = \mathtt{no}$ settings while the
thresholds at $\mathtt{temp}_i = \mathtt{cold}$ and $\mathtt{contact}_i =
\mathtt{yes}$ are $\{ \hat\theta_j + \hat{\tilde{\beta}}_j(\mathtt{yes} -
\mathtt{no}) \}$.
%
The odds ratio of bitterness being rated in category $j$ or above
($\mathrm{OR}(Y \geq j)$) now depends on $j$:
$\{\exp(-\hat{\tilde{\beta}}_j(\mathtt{yes} - \mathtt{no}))\} = \{ 5.03,~
4.53,~ 5.34,~ 2.86\}$.
%
\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-figNom2}
  \caption{Illustration of nominal effects leading to different sets of
    thresholds being applied for each level of \code{contact} in a latent
    variable interpretation, cf.,
    Equation~\ref{eq:CLM_nominal}.\label{fig:clm_nominal}}
\end{figure}

The resulting thresholds for each level of \code{contact}, i.e., the
estimated $\bm\Theta$-matrix, can be extracted with:
<<>>=
fm.nom$Theta
@
As part of the convergence checks, \code{clm} checks the validity of
$\bm\Theta$, i.e., that each row of the threshold matrix is
non-decreasing.

We can perform a likelihood ratio test of the proportional odds assumption
for \code{contact} by comparing the likelihoods of models (\ref{eq:CLM})
and (\ref{eq:CLM_nominal}) as follows:
<<>>=
anova(fm1, fm.nom)
@
There is only a small difference in the log-likelihoods of the two models
and the test is insignificant.
Thus there is no evidence that the proportional odds assumption is
violated for \code{contact}.

It is not possible to estimate both $\beta_2(\mathtt{contact}_i)$ and
$\tilde{\beta}_{j}(\mathtt{contact}_i)$ in the same model. Consequently,
variables that appear in \code{nominal} cannot also enter \code{formula}.
For instance, not all parameters are identifiable in the following model:
<<>>=
fm.nom2 <- clm(rating ~ temp + contact, nominal = ~ contact, data = wine)
@
We are made aware of this when summarizing or printing the model, in which
the coefficient for \code{contactyes} is \code{NA}:
<<>>=
fm.nom2
@

To test the proportional odds assumption for all variables, we can use
<<>>=
nominal_test(fm1)
@
This function \emph{moves} all terms in \code{formula} to \code{nominal}
and \emph{copies} all terms in \code{scale} to \code{nominal} one by one,
and produces an \code{add1}-like table with likelihood ratio tests of each
term.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Modelling scale effects}
\label{sec:scale-effects}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
To allow the scale of the latent variable distribution to depend on
explanatory variables we could, for instance, consider the following model
where the scale is allowed to differ between cold and warm conditions. The
location of the latent distribution is allowed to depend on both
temperature and contact:
\begin{equation}
  \label{eq:CLM_scale_wine}
  \begin{gathered}
    \textup{logit}(P(Y_i \leq j)) = \frac{\theta_j - \beta_1
      (\mathtt{temp}_i) - \beta_{2} (\mathtt{contact}_i)}
    {\exp( \zeta (\mathtt{temp}_i))} \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{gathered}
\end{equation}

This model structure is illustrated in Figure~\ref{fig:clm_scale} and can
be estimated with:
<<>>=
fm.sca <- clm(rating ~ temp + contact, scale = ~ temp, data = wine)
summary(fm.sca)
@
In a latent variable interpretation the location of the latent
distribution is shifted $2.63\sigma^*$ (scale units) from cold to warm
conditions and $1.59\sigma^*$ from absence to presence of contact. The
scale of the latent distribution is $\sigma^*$ at cold conditions but
$\sigma^* \exp(\zeta(\mathtt{warm} - \mathtt{cold})) = \sigma^*\exp(0.095)
= 1.10 \sigma^*$, i.e., 10\% higher, at warm conditions. However, observe
that the $p$~value for the scale effect in the summary output shows that
the ratio of scales is not significantly different from 1 (or equivalently
that the difference on the log-scale is not different from 0).

Scale effects offer an alternative to nominal effects (partial
proportional odds) when non-proportional odds structures are encountered
in the data. Using scale effects is often a better approach because the
model is well-defined for all values of the explanatory variables
irrespective of translation and scaling of covariates. Scale effects also
use fewer parameters, which often leads to more sensitive tests than
nominal effects.

Potential scale effects of variables already included in \code{formula}
can be discovered using \code{scale_test}. This function adds each model
term in \code{formula} to \code{scale} in turn and reports the likelihood
ratio statistic in an \code{add1}-like fashion:
<<>>=
scale_test(fm1)
@

\code{confint} and \code{anova} methods apply with no change to models
with scale and nominal parts, but \code{drop1}, \code{add1} and
\code{step} methods will only drop or add terms to the (location)
\code{formula}.
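Since \code{fm1} is nested in \code{fm.sca}, the contribution of the scale
effect can also be assessed directly in a likelihood ratio test; a short
sketch:
%
<<eval=FALSE>>=
anova(fm1, fm.sca)
@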
\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-figSca}
  \caption{Illustration of scale effects leading to different scales of
    the latent variable, cf.,
    Equation~\ref{eq:CLM_scale_wine}.\label{fig:clm_scale}}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Structured thresholds}
\label{sec:threshold-effects}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In section~\ref{sec:nominal-effects} nominal effects were described, where
the assumption that regression parameters have the same effect across all
thresholds was relaxed. In this section additional restrictions on the
thresholds will be imposed instead. The following model requires that the
thresholds $\{ \theta_j \}$ are equidistant or equally spaced. This allows
us to assess the assumption that judges are using the response scale in
such a way that there is the same distance between adjacent response
categories, i.e., that $\theta_j - \theta_{j-1} = \textup{constant}$ for
$j = 2, \ldots, J-1$. The effect of equidistant thresholds is illustrated
in Figure~\ref{fig:clm_structured_thresholds} and the model can be fitted
with:
<<>>=
fm.equi <- clm(rating ~ temp + contact, data = wine,
               threshold = "equidistant")
summary(fm.equi)
@
The parameters determining the thresholds are now the first threshold
(\code{threshold.1}) and the spacing among consecutive thresholds
(\code{spacing}). The mapping to this parameterization is stored in the
transpose of the Jacobian matrix (\code{tJac}) component of the model fit.
This makes it possible to extract the thresholds imposed by the
equidistance structure with
<<>>=
drop(fm.equi$tJac %*% coef(fm.equi)[c("threshold.1", "spacing")])
@
These thresholds are in fact already stored in the \code{Theta} component
of the model fit.
%
The following shows that the average distance between consecutive
thresholds in \code{fm1}, which did not restrict the thresholds, is very
close to the \code{spacing} parameter from \code{fm.equi}:
<<>>=
mean(diff(coef(fm1)[1:4]))
@

One advantage of imposing additional restrictions on the thresholds is the
use of fewer parameters. Whether the restrictions are warranted by the
data can be assessed in a likelihood ratio test:
<<>>=
anova(fm1, fm.equi)
@
In this case the test is non-significant: there is no considerable loss of
fit to offset the saving of two parameters, so we may retain the model
with equally spaced thresholds.

Note that the shape of the latent distribution (determined by the choice
of link function) also affects the distances between the thresholds. If
thresholds are equidistant under a normal distribution (i.e., with the
probit link) they will in general\footnote{The exception is perfect fits
  such as CLMs with flexible thresholds and no predictors where models
  have the same likelihood irrespective of link function.} not be
equidistant under a differently shaped latent distribution such as a skew
latent distribution (e.g., with the log-log or clog-log link).

\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-figFlex}
  \includegraphics[width=6cm]{./static_figs/fig-figEqui}
  \caption{Illustration of flexible (left) and equidistant (right)
    thresholds being applied in a cumulative link model in a latent
    variable interpretation.\label{fig:clm_structured_thresholds}}
\end{figure}
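Equidistance is not the only structure available: \code{clm} also supports
\code{threshold = "symmetric"}, which restricts the thresholds to be
symmetric around the central one(s). A minimal sketch for the wine data:
%
<<eval=FALSE>>=
fm.sym <- clm(rating ~ temp + contact, data = wine,
              threshold = "symmetric")
anova(fm1, fm.sym)
@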
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Scale effects, nominal effects and link functions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

This section presents an example that connects aspects of scale effects,
nominal effects and link functions. The example is based on the
\code{soup} data available in the \pkg{ordinal} package. This dataset
represents a sensory discrimination study of packet soup in which 185
respondents assessed a reference product and one of 5 test products on an
ordinal sureness-scale with 6 levels from ``reference, sure'' to ``test,
sure''. The two key explanatory variables in this example are
\code{PRODID} and \code{PROD}. \code{PRODID} identifies all 6 products
while \code{PROD} distinguishes test and reference products:
<<>>=
with(soup, table(PROD, PRODID))
@

The so-called bi-normal model plays a special role in the field of signal
detection theory \citep{decarlo98, macmillan05} and in sensometrics
\citep{christensen11} and assumes the existence of normal latent
distributions, potentially with different variances. The bi-normal model
can be fitted to ordinal data by identifying it as a CLM with a probit
link. The following bi-normal model assumes that the location of the
normal latent distribution depends on \code{PRODID} while the scale only
varies with \code{PROD}:
<<>>=
fm_binorm <- clm(SURENESS ~ PRODID, scale = ~ PROD, data = soup,
                 link = "probit")
summary(fm_binorm)
@
Here we observe significant differences in scale for reference and test
products; this is an example of what would have been denoted
non-proportional odds had the link function been the logit function. In
this context differences in scale are interpreted to mean that a location
shift of the latent normal distribution is not enough to represent the
data. Another test of such non-location effects is provided by the nominal
effects:
<<>>=
fm_nom <- clm(SURENESS ~ PRODID, nominal = ~ PROD, data = soup,
              link = "probit")
@
A comparison of these models shows that the scale effects increase the
likelihood substantially using only one extra parameter. The addition of
nominal effects provides a smaller increase in likelihood using three
extra parameters:
<<>>=
fm_location <- update(fm_binorm, scale = ~ 1)
anova(fm_location, fm_binorm, fm_nom)
@
Note that both the location-only and bi-normal models are nested under the
model with nominal effects, making these models comparable in likelihood
ratio tests. This example illustrates an often-seen aspect: models
allowing for scale differences frequently capture, with fewer parameters,
the majority of the deviations from location-only effects that could
otherwise be captured by nominal effects.

The role of link functions in relation to the evidence of non-location
effects is also illustrated by this example. If we consider the
complementary log-log link it is apparent that there is no evidence of
scale differences. Furthermore, the likelihood of a complementary log-log
model with constant scale is almost the same as that of the bi-normal
model:
<<>>=
fm_cll_scale <- clm(SURENESS ~ PRODID, scale = ~ PROD, data = soup,
                    link = "cloglog")
fm_cll <- clm(SURENESS ~ PRODID, data = soup, link = "cloglog")
anova(fm_cll, fm_cll_scale, fm_binorm)
@

Using the log-gamma link we can also confirm that a left-skewed latent
distribution ($\lambda > 0$) is best supported by the data and that the
estimate of $\lambda$ is close to 1, at which the complementary log-log
link is obtained:
<<>>=
fm_loggamma <- clm(SURENESS ~ PRODID, data = soup, link = "log-gamma")
summary(fm_loggamma)
@

The analysis of link functions shown here can be thought of as providing a
framework analogous to that of Box-Cox transformations for linear models.
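A quick way to compare latent distributions on an equal footing is to fit
the same model under each link and compare AIC values; a sketch using link
names supported by \code{clm}:
%
<<eval=FALSE>>=
links <- c("logit", "probit", "cloglog", "loglog")
sapply(links, function(lnk)
  AIC(clm(SURENESS ~ PRODID, scale = ~ PROD, data = soup, link = lnk)))
@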
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Profile likelihood}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In addition to facilitating the generally quite accurate profile
likelihood confidence intervals which were illustrated in
section~\ref{sec:fitting-basic-clm}, the profile likelihood function can
also be used to illustrate the relative support for different parameter
values. As an example, the profile likelihood of the model coefficients
for \code{temp} and \code{contact} in \code{fm1} can be obtained with
%
<>=
pr1 <- profile(fm1, alpha = 1e-4)
plot(pr1)
@
The resulting plots are provided in Figure~\ref{fig:ProfileLikelihood}.
The \code{alpha} argument controls how far from the maximum likelihood
estimate the likelihood function should be profiled: the profile extends
no further from the MLE than the limits of a (\code{1 - alpha})-level
profile likelihood confidence interval. From the relative profile
likelihood in Figure~\ref{fig:ProfileLikelihood} for \code{tempwarm} we
see that parameter values between 1 and 4 are reasonably well supported by
the data, and values outside this range have little likelihood. Values
between 2 and 3 are very well supported by the data and have high
likelihood.

\setkeys{Gin}{width=.45\textwidth}
\begin{figure}
  \centering
<>=
plot(pr1, which.par = 1)
@
<>=
plot(pr1, which.par = 2)
@
\caption{Relative profile likelihoods for the regression parameters in
  \code{fm1} for the wine data. Horizontal lines indicate 95\% and 99\%
  confidence bounds.}
\label{fig:ProfileLikelihood}
\end{figure}

Profiling is implemented for regression ($\beta$) and scale ($\zeta$)
parameters but is not available for threshold, nominal and flexible link
parameters.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Assessment of model convergence}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Likelihood slices}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The maximum likelihood estimates of the parameters in cumulative link
models do not have closed-form expressions, so iterative methods have to
be applied to fit the models. Further, CLMs are non-linear models and in
general the likelihood function is not guaranteed to be well-behaved or
even unimodal. In addition, the special role of the threshold parameters
and the restriction that they be ordered can affect the appearance of the
likelihood function.

To confirm that an unequivocal optimum has been reached, and that the
likelihood function is reasonably well-behaved around the reported
optimum, we can inspect the likelihood function in a neighborhood around
the reported optimum. For these purposes we can display slices of the
likelihood function. The following code produces the slices shown in
Figure~\ref{fig:slice1}, which displays the shape of the log-likelihood
function together with its quadratic approximation in a fairly wide
neighborhood around the reported MLE; here we use $\lambda=5$ curvature
units.
<<>>=
slice.fm1 <- slice(fm1, lambda = 5)
par(mfrow = c(2, 3))
plot(slice.fm1)
@

Figure~\ref{fig:slice1} shows that the log-likelihood function is fairly
well-behaved and close to quadratic for most parameters.

\setkeys{Gin}{width=.32\textwidth}
\begin{figure}
  \centering
<>=
plot(slice.fm1, parm = 1)
@
<>=
plot(slice.fm1, parm = 2)
@
<>=
plot(slice.fm1, parm = 3)
@
<>=
plot(slice.fm1, parm = 4)
@
<>=
plot(slice.fm1, parm = 5)
@
<>=
plot(slice.fm1, parm = 6)
@
\caption{Slices of the (negative) log-likelihood function (solid) for
  parameters in \code{fm1} for the wine data.
  Dashed lines indicate quadratic approximations to the log-likelihood
  function and vertical bars indicate maximum likelihood estimates.}
\label{fig:slice1}
\end{figure}

Looking at the log-likelihood function much closer to the reported optimum
(using $\lambda = 10^{-5}$) we can probe how accurately the parameter
estimates are determined. The likelihood slices in
Figure~\ref{fig:slice2}, which are produced with the following code, show
that the parameters are determined accurately with at least 5 correct
decimals. Slices are shown for two parameters; the slices for the
remaining four parameters are very similar.
<>=
slice2.fm1 <- slice(fm1, parm = 4:5, lambda = 1e-5)
par(mfrow = c(1, 2))
plot(slice2.fm1)
@

\setkeys{Gin}{width=.45\textwidth}
\begin{figure}
  \centering
<>=
plot(slice2.fm1, parm = 1)
@
<>=
plot(slice2.fm1, parm = 2)
@
\caption{Slices of the (negative) log-likelihood function (solid) for
  parameters in \code{fm1} for the wine data very close to the MLEs.
  Dashed lines (indistinguishable from the solid lines) indicate quadratic
  approximations to the log-likelihood function and vertical bars indicate
  the maximum likelihood estimates.}
\label{fig:slice2}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Parameter accuracy}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

As discussed in appendix~\ref{sec:algorithm}, the method independent error
estimate provides an assessment of the accuracy with which the ML
estimates of the parameters have been determined by the fitting algorithm.
This error estimate is implemented in the \code{convergence} method which
we now illustrate on a model fit:
<<>>=
convergence(fm1)
@
The most important information is the number of correct decimals
(\code{Cor.Dec}) and the number of significant digits (\code{Sig.Dig})
with which the parameters are determined. In this case all parameters are
very accurately determined, so there is no reason to lower the convergence
tolerance. The \code{logLik.error} shows that the error in the reported
value of the log-likelihood is below $10^{-10}$, which is more than small
enough for likelihood ratio tests based on this model to be accurate. Note
that the assessment of the number of correctly determined decimals and
significant digits is only reliable sufficiently close to the optimum, so
in practice we caution against relying on it if the algorithm did not
converge successfully.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Fitted values and predictions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Several types of fitted values and predictions can be extracted from a CLM
depending on how it is viewed. By \emph{fitted values} we denote the
values ($i=1, \ldots, n$)
\begin{equation*}
  \hat{\tilde\pi}_i = \tilde\pi_i(\hat{\bm\psi})
\end{equation*}
that is, the values of $\tilde\pi_i$, cf.,
Equation~\ref{eq:clm-log-likelihood}, evaluated at the ML estimates
$\hat{\bm\psi}$. These are the values returned by the \code{fitted} and
\code{fitted.values} extractor methods and stored in the
\code{fitted.values} component of the model fit.

The values of $\pi_{ij}$ (cf., Equation~\ref{eq:multinom_pmf}) evaluated
at the ML estimates of the parameters (i.e., $\hat\pi_{ij}$) can also be
thought of as fitted values for the multinomially distributed variable
$\bm Y_i^*$.
These values can be obtained from the model fit by use of the
\code{predict} method:
<<>>=
head(pred <- predict(fm1, newdata = subset(wine, select = -rating))$fit)
@
Note that the original data set should be supplied in the \code{newdata}
argument \emph{without} the response variable (here \code{rating}). If the
response variable is \emph{present} in \code{newdata}, predictions are
only produced for the rating category that was observed for each
observation, and we get back the fitted values:
<<>>=
stopifnot(isTRUE(all.equal(fitted(fm1),
                           t(pred)[t(col(pred) == wine$rating)])),
          isTRUE(all.equal(fitted(fm1),
                           predict(fm1, newdata = wine)$fit)))
@

Class predictions are also available and are defined here as the response
class with the highest probability, that is, for the $i$'th observation
the class prediction is the mode of $\bm\pi_{i}$. To obtain class
predictions use \code{type = "class"} as illustrated in the following
small table:
<<>>=
newData <- expand.grid(temp = levels(wine$temp),
                       contact = levels(wine$contact))
cbind(newData, round(predict(fm1, newdata = newData)$fit, 3),
      "class" = predict(fm1, newdata = newData, type = "class")$fit)
@

Other definitions of class predictions can be applied, e.g., nearest mean
predictions:
<<>>=
head(apply(pred, 1, function(x) round(weighted.mean(1:5, x))))
@
which in this case happen to be identical to the default class
predictions.
<>=
p1 <- apply(predict(fm1, newdata = subset(wine, select = -rating))$fit, 1,
            function(x) round(weighted.mean(1:5, x)))
p2 <- as.numeric(as.character(predict(fm1, type = "class")$fit))
stopifnot(isTRUE(all.equal(p1, p2, check.attributes = FALSE)))
@

Standard errors and confidence intervals of predictions are also
available, for example:
<<>>=
predictions <- predict(fm1, se.fit = TRUE, interval = TRUE)
head(do.call("cbind", predictions))
@
where the default 95\% confidence level can be changed with the
\code{level} argument. Here the standard errors of fitted values or
predictions, $\hat{\tilde{\pi}} = \tilde{\pi}(\hat{\bm\psi})$, are
obtained by application of the delta method:
\begin{equation*}
  \mathsf{Var}(\hat{\tilde{\bm\pi}}) = \bm C \mathsf{Var}(\hat{\bm\psi})
  \bm C^\top, \quad \bm C = \frac{\partial
    \tilde{\bm\pi}(\bm\psi)}{\partial \bm\psi} \Big|_{\bm\psi =
    \hat{\bm\psi}}
\end{equation*}
where $\mathsf{Var}(\hat{\bm\psi})$ is the estimated variance-covariance
matrix of the parameters $\bm\psi$ evaluated at the ML estimates
$\hat{\bm\psi}$ as given by the observed Fisher information matrix, and
finally the standard errors are extracted as the square root of the
diagonal elements of $\mathsf{Var}(\hat{\tilde{\bm\pi}})$.

Since symmetric confidence intervals are not appropriate for probabilities
unless perhaps these are close to one half, a more generally applicable
approach is to form symmetric Wald intervals on the logit scale and then
subsequently transform the confidence bounds to the probability scale.
\code{predict.clm} takes this approach and computes the standard error of
$\hat\kappa_i = \mathrm{logit}(\hat{\tilde{\pi}}_i)$ by another
application of the delta method:
\begin{equation*}
  \mathrm{se}(\hat\kappa_i) = \frac{\partial
    g(\hat{\tilde{\pi}}_i)}{\partial \hat{\tilde{\pi}}_i}
  \mathrm{se}(\hat{\tilde{\pi}}_i) =
  \frac{\mathrm{se}(\hat{\tilde{\pi}}_i)}{%
    \hat{\tilde{\pi}}_i(1 - \hat{\tilde{\pi}}_i)}, \quad
  g(\hat{\tilde{\pi}}_i) = \log \frac{\hat{\tilde{\pi}}_i}{1 -
    \hat{\tilde{\pi}}_i}.
\end{equation*}
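This construction can be mimicked directly from the \code{predict} output;
a sketch which assumes, as in current versions of \pkg{ordinal}, that the
interval bounds are returned in \code{lwr} and \code{upr} components:
%
<<eval=FALSE>>=
pr <- predict(fm1, se.fit = TRUE, interval = TRUE)
se.kappa <- pr$se.fit / (pr$fit * (1 - pr$fit))  # se on the logit scale
lwr <- plogis(qlogis(pr$fit) - qnorm(0.975) * se.kappa)
head(cbind(manual = lwr, predict = pr$lwr))      # the two should agree
@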
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Model identifiability}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Unidentifiable models or unidentifiable parameters may occur in CLMs for
several reasons, some of which are special to the model class. In this
section we describe issues around model identifiability and how they are
handled by \code{ordinal::clm}. Material in the remainder of this section
is generally at a more advanced level than the preceding sections.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Complete separation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In binary logistic regression the issue of \emph{complete separation} is
well known. This may happen, for example, if only ``success'' or only
``failure'' is observed for a level of a treatment factor. In CLMs the
issue may appear even when outcomes are observed in more than one response
category. This can be illustrated using the \code{wine} data set if we
combine the three central categories:
<<>>=
wine <- within(wine, {
  rating_comb3 <- factor(rating, labels = c("1", "2-4", "2-4", "2-4", "5"))
})
ftable(rating_comb3 ~ temp, data = wine)
fm.comb3 <- clm(rating_comb3 ~ temp, data = wine)
summary(fm.comb3)
@
Here the true ML estimates of the coefficients for \code{temp} and the
second threshold are at infinity, but the algorithm in \code{clm}
terminates when the likelihood function is sufficiently flat. This means
that the reported values of the coefficients for \code{temp} and the
second threshold are arbitrary and will change if the convergence criteria
are changed or a different optimization method is used. The standard
errors of the coefficients are not available because the Hessian is
effectively singular and so cannot be inverted to produce the
variance-covariance matrix of the parameters. The ill-determined nature of
the Hessian is seen from its very large condition number, \code{cond.H}.

Note, however, that while the model parameters cannot be uniquely
determined, the likelihood of the model is well defined and as such it can
be compared to the likelihood of other models. For example, we could
compare it to a model that excludes \code{temp}
<<>>=
fm.comb3_b <- clm(rating_comb3 ~ 1, data = wine)
anova(fm.comb3, fm.comb3_b)
@
The difference in log-likelihood is substantial; however, the criteria for
the validity of the likelihood ratio test are not fulfilled, so the
$p$~value should not be taken at face value.

The complete-separation issue may also appear in less obvious situations.
If, for example, the following model allowing for nominal effects of
\code{temp} is considered, the issue shows up:
<<>>=
fm.nom2 <- clm(rating ~ contact, nominal = ~ temp, data = wine)
summary(fm.nom2)
@
Analytical detection of which coefficients suffer from unidentifiability
due to \emph{complete separation} is a topic for future research and
therefore unavailable in current versions of \pkg{ordinal}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Aliased coefficients}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Aliased coefficients can occur in all kinds of models that build on a
design matrix, including linear models as well as generalized linear
models. \code{lm} and \code{glm} determine the rank deficiency of the
design matrix using the rank-revealing implementation of the
QR-decomposition in \code{LINPACK} and display the aliased coefficients as
\code{NA}s\footnote{if \code{singular.ok = TRUE}, which is the default.}.
Though the QR decomposition is not used during iterations in \code{clm},
it is used initially to determine aliased coefficients. An example is
provided using the \code{soup} data available in the \pkg{ordinal}
package:
<<>>=
fm.soup <- clm(SURENESS ~ PRODID * DAY, data = soup)
summary(fm.soup)
@
The source of the singularity is revealed in the following table:
<<>>=
with(soup, table(DAY, PRODID))
@
which shows that the third \code{PRODID} was not presented on the second
day.

The issue of aliased coefficients extends in CLMs to nominal effects since
the joint design matrix for location and nominal effects will be singular
if the same variables are included in both location and nominal formulae.
\code{clm} handles this by not estimating the offending coefficients in
the location formula, as illustrated with the \code{fm.nom2} model fit in
section~\ref{sec:nominal-effects}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Over-parameterization}
\label{sec:over-parameterization}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The scope of model structures allowed in \code{clm} makes it possible to
specify models which are over-parameterized in ways that do not lead to
rank-deficient design matrices and as such are not easily detected before
fitting the model. An example is given here which includes both additive
(location) and multiplicative (scale) effects of \code{contact} for a
binary response variable, but the issue can also occur with more than two
response categories:
<<>>=
wine <- within(wine, {
  rating_comb2 <- factor(rating,
                         labels = c("1-2", "1-2", "3-5", "3-5", "3-5"))
})
ftable(rating_comb2 ~ contact, data = wine)
fm.comb2 <- clm(rating_comb2 ~ contact, scale = ~ contact, data = wine)
summary(fm.comb2)
@
<>=
## Example with unidentified parameters with 3 response categories
## not shown in paper:
wine <- within(wine, {
  rating_comb3b <- rating
  levels(rating_comb3b) <- c("1-2", "1-2", "3", "4-5", "4-5")
})
wine$rating_comb3b[1] <- "4-5" # Remove the zero here to avoid inf MLE
ftable(rating_comb3b ~ temp + contact, data = wine)
fm.comb3_c <- clm(rating_comb3b ~ contact * temp,
                  scale = ~ contact * temp,
                  nominal = ~ contact, data = wine)
summary(fm.comb3_c)
convergence(fm.comb3_c)
@

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Customized modelling}
\label{sec:customized-modelling}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Using the \code{doFit} argument, \code{clm} can be instructed to return a
\emph{model environment} that we denote \code{rho}:
<<>>=
rho <- update(fm1, doFit = FALSE)
names(rho)
@
This environment holds a complete specification of the cumulative link
model, including the design matrices \code{B1}, \code{B2}, \code{S} and
other components. The environment also contains the cumulative
distribution function that defines the inverse link function
(\code{pfun}) and its first and second derivatives, i.e., the
corresponding density function (\code{dfun}) and gradient (\code{gfun}).
Of direct interest here is the parameter vector \code{par} and functions
that readily evaluate the negative log-likelihood (\code{clm.nll}), its
gradient with respect to the parameters (\code{clm.grad}) and the Hessian
(\code{clm.hess}).
The negative log-likelihood and the gradient at the starting values are
therefore
<<>>=
rho$clm.nll(rho)
c(rho$clm.grad(rho))
@
Similarly at the MLE they are:
<<>>=
rho$clm.nll(rho, par = coef(fm1))
print(c(rho$clm.grad(rho)), digits = 3)
@
Note that the gradient function \code{clm.grad} assumes that
\code{clm.nll} has been evaluated at the current parameter values;
similarly, \code{clm.hess} assumes that \code{clm.grad} has been evaluated
at the current parameter values. The NR algorithm in \pkg{ordinal} takes
advantage of this so as to minimize the computational load.

If interest is in fitting a \emph{custom} CLM with, say, restrictions on
the parameter space, this can be achieved by a combination of a
general-purpose optimizer and the functions \code{clm.nll} and,
optionally, \code{clm.grad}. Assume, for instance, that we know the
regression parameters can be no larger than 2; then the model can be
fitted with the following code:
<<>>=
nll <- function(par, envir) {
  ## update the parameters and evaluate the negative log-likelihood:
  envir$par <- par
  envir$clm.nll(envir)
}
grad <- function(par, envir) {
  ## clm.grad assumes clm.nll has been evaluated at the current values:
  envir$par <- par
  envir$clm.nll(envir)
  envir$clm.grad(envir)
}
nlminb(rho$par, nll, grad, upper = c(rep(Inf, 4), 2, 2), envir = rho)$par
@

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Constrained partial proportional odds}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A type of model which is not implemented in full generality in
\pkg{ordinal} is the so-called \emph{constrained} partial proportional
odds model proposed by \citet{peterson90}. These models impose
restrictions on the nominal effects considered in
section~\ref{sec:nominal-effects} and are well suited to illustrate the
customizable modelling options available in the \pkg{ordinal} package.

We consider an example from \citet{peterson90} in which disease status is
tabulated by smoking status:
<<>>=
artery <- data.frame(disease = factor(rep(0:4, 2), ordered = TRUE),
                     smoker = factor(rep(c("no", "yes"), each = 5)),
                     freq = c(334, 99, 117, 159, 30,
                              350, 307, 345, 481, 67))
addmargins(xtabs(freq ~ smoker + disease, data = artery), margin = 2)
@
The overall odds-ratio of smoking is
<<>>=
fm <- clm(disease ~ smoker, weights = freq, data = artery)
exp(fm$beta)
@
showing that, overall, the odds of a worse disease rating are twice as
high for smokers as for non-smokers.

Allowing for nominal effects we see that the log odds-ratio for smoking
clearly changes with disease status, and that it does so in an almost
linearly decreasing manner:
<<>>=
fm.nom <- clm(disease ~ 1, nominal = ~ smoker, weights = freq,
              data = artery, sign.nominal = "negative")
coef(fm.nom)[5:8]
@

\citet{peterson90} suggested a model which restricts the log odds-ratios
to be linearly decreasing with disease status, modelling only the
intercept and slope of the log odds-ratios:
<<>>=
coef(fm.lm <- lm(I(coef(fm.nom)[5:8]) ~ I(0:3)))
@
We can implement the log-likelihood of this model as follows. As starting
values we combine parameter estimates from \code{fm.nom} and the linear
model \code{fm.lm}, and finally we optimize the log-likelihood utilizing
the \code{fm.nom} model environment:
<<>>=
nll2 <- function(par, envir) {
  ## restrict the nominal effects to be linear in the category index:
  envir$par <- c(par[1:4], par[5] + par[6] * (0:3))
  envir$clm.nll(envir)
}
start <- unname(c(coef(fm.nom)[1:4], coef(fm.lm)))
fit <- nlminb(start, nll2, envir = update(fm.nom, doFit = FALSE))
round(fit$par[5:6], 2)
@
Thus the log odds-ratios decrease linearly from 1.02 at the first
threshold by 0.3 per disease category.
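As a rough check of the linearity restriction, the constrained fit can be
compared with the unconstrained nominal-effects model in a likelihood
ratio test on 2 degrees of freedom; a sketch using the objects from above,
where \code{fit\$objective} is the negative log-likelihood at the optimum:
%
<<eval=FALSE>>=
LR <- 2 * (fit$objective + as.numeric(logLik(fm.nom)))
pchisq(LR, df = 2, lower.tail = FALSE)
@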
%% -- Summary/conclusions/discussion -------------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Conclusions}
\label{sec:conclusions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

This paper has described the class of cumulative link models for the
analysis of ordinal data and the implementation of such models in the
\proglang{R} package \pkg{ordinal}. It is shown how the package supports
model building and assessment of CLMs with scale effects, partial
proportional odds, structured thresholds and flexible link functions, and
how models can be customized to specific needs. A number of examples have
been given illustrating analyses of ordinal data using \code{clm} in
practice.

The significant flexibility of model structures available in \pkg{ordinal}
is in one respect a clear advantage, but it can also be a challenge when
particular model variants turn out to be unidentifiable. Analytical
detection of unidentifiable models could prove very useful in the analysis
of ordinal data, but it is, unfortunately, a difficult question that
remains a topic of future research.

In a wider data analysis perspective, cumulative link models have been
described as a very rich model class---a class that sits, in a sense,
between perhaps the two most important model classes in statistics: linear
models and logistic regression models. The greater flexibility of CLMs
relative to binary logistic regression models makes it possible to check
assumptions such as the proportional odds assumption. A latent variable
interpretation connects cumulative link models to linear models in a
natural way and also motivates non-linear structures such as scale
effects. In addition to nominal effects and the non-linear scale effects,
the ordered nature of the thresholds gives rise to computational
challenges that we have described here and addressed in the \pkg{ordinal}
package.

In addition to computational challenges, practical data analysis with CLMs
can also be challenging. In our experience a top-down approach in which a
``full'' model is fitted and gradually simplified is often problematic,
not only because this easily leads to unidentifiable models but also
because there are many different ways in which models can be reduced or
expanded. A more pragmatic approach is often preferred: understanding the
data through plots, tables, and even linear models can aid in finding a
suitable intermediate ordinal starting model. Attempts to identify a
``correct'' model will also often lead to frustrations; the greater the
model framework, the greater the risk that there are multiple models which
fit the data (almost) equally well.
It is well-known statistical wisdom that with enough data many
goodness-of-fit tests become sensitive to even minor deviations of little
practical relevance. This is particularly true for tests of partial
proportional odds: in the author's experience almost all CLMs on real data
show some evidence of non-proportional odds for one or more variables, but
it is not always the case that models with partial or non-proportional
odds are the most useful. Such effects complicate interpretation and often
generalize poorly outside the observed data, and models assuming
proportional odds or including scale effects are often more appropriate.

%% -- Optional special unnumbered sections -------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section*{Computational details}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The results in this paper were obtained using
\proglang{R}~\Sexpr{paste(R.Version()[6:7], collapse = ".")} with
\pkg{ordinal}, version~\Sexpr{packageVersion("ordinal")}. \proglang{R}
itself and all packages used are available from CRAN at
\url{https://CRAN.R-project.org/}.

%% -- Bibliography --------------------------------------------------------------

\bibliography{clm_article_refs}

%% -- Appendix ------------------------------------------------------------------

\newpage
\begin{appendix}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{A regularized Newton-Raphson algorithm with step halving}
\label{sec:algorithm}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The regularized NR algorithm is an iterative method that produces a
sequence of estimates $\bm\psi^{(0)}, \ldots, \bm\psi^{(i)}, \ldots$,
where parenthesized superscripts denote iterations.
From the $i$th estimate, the $(i+1)$'th estimate is given by
%
\begin{equation*}
  \bm\psi^{(i+1)} = \bm\psi^{(i)} - c_1 \bm h^{(i)}, \quad \bm h^{(i)} =
  \tilde{\bm H}(\bm\psi^{(i)}; \bm y)^{-1} \bm g(\bm\psi^{(i)}; \bm y)
\end{equation*}
where
\begin{equation*}
  \tilde{\bm H}(\bm\psi^{(i)}; \bm y) = \bm H(\bm\psi^{(i)}; \bm y) + c_2
  (c_3 + |\min(\bm e^{(i)})|) \bm I,
\end{equation*}
%
in which $\bm H(\bm\psi^{(i)}; \bm y)$ and $\bm g(\bm\psi^{(i)}; \bm y)$
are the Hessian and gradient of the negative log-likelihood function with
respect to the parameters evaluated at the current estimates;
$\bm e^{(i)}$ is a vector of eigenvalues of $\bm H(\bm\psi^{(i)}; \bm y)$;
$\bm h^{(i)}$ is the $i$'th step; $c_1$ is a scalar parameter which
controls the step halving; and $c_2$, $c_3$ are scalar parameters which
control the regularization of the Hessian. Regularization is only enforced
when the Hessian is not positive definite, so $c_2 = 1$ when $\min(\bm
e^{(i)}) < \tau$ and zero otherwise, where $\tau$ is an appropriate
tolerance. The choice of $c_3$ is to some extent arbitrary (though it is
required to be positive) and the algorithm in \pkg{ordinal} sets $c_3 =
1$.

Step halving is enforced when the full step $\bm h^{(i)}$ causes a
decrease in the likelihood function, in which case $c_1$ is consecutively
halved, $c_1 = \frac{1}{2}, \frac{1}{4}, \frac{1}{8}, \ldots$ until the
step $c_1 \bm h^{(i)}$ is small enough to cause an increase in the
likelihood or until the maximum allowed number of consecutive
step-halvings has been reached.

The algorithm in \pkg{ordinal} also deals with a couple of numerical
issues that may occur. For example, the likelihood function may be
sufficiently flat that the change in log-likelihood is smaller than what
can be represented in double precision; in that case, while the new
parameters may be closer to the true ML estimates and be associated with a
smaller gradient, it is not possible to measure progress by the change in
log-likelihood.

The NR algorithm in \pkg{ordinal} has two convergence criteria: (1) an
absolute criterion requiring that
$\max | \bm g(\bm\psi^{(i)}; \bm y) | < \tau_1$
and (2) a relative criterion requiring that
$\max | \bm h^{(i)} | < \tau_2$,
where the default thresholds are $\tau_1 = \tau_2 = 10^{-6}$. Here the
first criterion attempts to establish closeness of $\bm\psi^{(i)}$ to the
true ML estimates in absolute terms; the second criterion provides an
estimate of the relative closeness to the true ML estimates.
%
Both convergence criteria are needed if both small (e.g., $\approx
0.0001$) and large (e.g., $\approx 1000$) parameter estimates are to be
determined accurately with an appropriate number of correct decimals as
well as significant digits. The NR algorithm in \pkg{ordinal} attempts to
satisfy the absolute criterion first and will then only attempt to satisfy
the relative criterion if it can take the full un-regularized NR step, and
then only for a maximum of 5 steps.
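The following is a minimal sketch in \proglang{R} of such a regularized NR
iteration with step halving, assuming user-supplied functions \code{nll},
\code{grad} and \code{hess} for the negative log-likelihood and its
derivatives; it illustrates the scheme above and is not \pkg{ordinal}'s
actual implementation:
%
<<eval=FALSE>>=
newton_sketch <- function(psi, nll, grad, hess, maxIter = 100L,
                          gradTol = 1e-6, maxLineIter = 15L) {
  for (i in seq_len(maxIter)) {
    g <- grad(psi)
    if (max(abs(g)) < gradTol) break  # absolute convergence criterion
    H <- hess(psi)
    e.min <- min(eigen(H, symmetric = TRUE, only.values = TRUE)$values)
    if (e.min < 1e-6)                 # regularize non-pos. def. Hessian
      H <- H + (1 + abs(e.min)) * diag(length(psi))
    h <- solve(H, g)                  # full (regularized) NR step
    c1 <- 1
    for (j in seq_len(maxLineIter)) { # step halving
      if (nll(psi - c1 * h) < nll(psi)) break
      c1 <- c1 / 2
    }
    psi <- psi - c1 * h
  }
  psi
}
@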
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Convergence properties and parameter accuracy}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Convergence to a well-defined optimum is achieved when the gradient of the
negative log-likelihood function with respect to the parameters is small
and the Hessian is positive definite, i.e., has only positive eigenvalues
away from zero.
%
Identifiability problems occur when the likelihood function is flat in
directions of one or more parameters (or linear functions of the
parameters) while being well-defined, i.e., peaked, in other directions.

A parameter may be exactly unidentifiable, and \code{clm} is in some cases
(including rank-deficient design matrices) able to detect this and exclude
the parameter from the optimization procedure. In other cases the
likelihood is almost flat in one or more directions. These cases are not
uncommon in practice, and here it is not possible to reduce the parameter
space before optimizing the model.

To measure the degree of empirical identifiability, \code{clm} reports the
condition number of the Hessian, which is the ratio of the largest to the
smallest eigenvalue. A large condition number of the Hessian does not
necessarily mean there is a problem with the model, but it may indicate
one. A small condition number of the Hessian, say smaller than about
$10^4$ or $10^6$, on the other hand, is a good assurance that a
well-defined optimum has been reached.

A key problem for optimization methods is when to stop iterating: when
have the parameters that determine the optimum of the function been found
with sufficient accuracy? The \emph{method independent error estimate}
\citep{elden04} provides a way to approximate the error in the parameter
estimates. Sufficiently close to the optimum the Newton-Raphson step
provides this estimate:
\begin{equation*}
  |\hat{\bm\alpha}^{(i)} - \bm\alpha^*| \lesssim \bm h^{(i)}, \quad \bm
  h^{(i)} = \bm H(\bm\psi^{(i)}; \bm y)^{-1} \bm g(\bm\psi^{(i)}; \bm y)
\end{equation*}
where $\bm\alpha^*$ is the exact (but unknown) value of the ML estimate,
$\hat{\bm\alpha}^{(i)}$ is the ML estimator of $\bm\alpha$ at the $i$'th
iteration and $\bm h^{(i)}$ is the full unregularized NR step at the
$i$'th iteration.
%
Since the gradient and Hessian of the negative log-likelihood function
with respect to the parameters are already evaluated and part of the model
fit at convergence, it is essentially computationally cost-free to
approximate the error in the parameter estimates. Based on the error
estimate, the number of correctly determined decimals and significant
digits is determined for each parameter.

The assessment of the number of correctly determined decimals and
significant digits is only reliable sufficiently close to the optimum and
when the NR algorithm converges without regularization and step halving.
In practice we caution against this assessment if the algorithm did not
converge successfully.
\end{appendix}

%% -----------------------------------------------------------------------------

\end{document}
ordinal/vignettes/ordinal.bib0000644000176200001440000002472112431434103016021 0ustar liggesusers@Book{brazzale07,
  author =    {A R Brazzale and A C Davison and N Reid},
  title =     {Applied Asymptotics---case studies in small-sample statistics},
  publisher = {Cambridge University Press},
  year =      2007}

@Book{pawitan01,
  author =    {Yudi Pawitan},
  title =     {{In All Likelihood---Statistical Modelling and Inference Using
                  Likelihood}},
  publisher = {Oxford University Press},
  year =      2001 }

@Manual{R11,
  title =        {R: A Language and Environment for Statistical Computing},
  author =       {{R Development Core Team}},
  organization = {R Foundation for Statistical Computing},
  address =      {Vienna, Austria},
  year =         {2011},
  note =         {{ISBN} 3-900051-07-0},
  url =          {http://www.R-project.org/},
}

@Article{tutz96,
  author =  {Gerhard Tutz and Wolfgang Hennevogl},
  title =   {Random effects in ordinal regression models},
  journal = {Computational Statistics \& Data Analysis},
  year =    1996,
  volume =  22,
  pages =   {537-557} }

@Article{efron78,
  author =  {Bradley Efron and David V Hinkley},
  title =   {{Assessing the accuracy of the maximum likelihood estimator:
                  Observed versus expected Fisher information}},
  journal = {Biometrika},
  year =    1978,
  volume =  65,
  number =  3,
  pages =   {457-487}}

@article{bauer09,
  author =      {Bauer, Daniel},
  affiliation = {University of North Carolina Department of Psychology Chapel
                  Hill NC 27599-3270 USA},
  title =       {A Note on Comparing the Estimates of Models for
                  Cluster-Correlated or Longitudinal Data with Binary or
                  Ordinal Outcomes},
  journal =     {Psychometrika},
  publisher =   {Springer New York},
  issn =        {0033-3123},
  keyword =     {Humanities, Social Sciences and Law},
  pages =       {97-105},
  volume =      {74},
  issue =       {1},
  url =         {http://dx.doi.org/10.1007/s11336-008-9080-1},
  year =        {2009}
}

@article{fielding04,
  author =    {Fielding, Antony},
  title =     {Scaling for Residual Variance Components of Ordered Category
                  Responses in Generalised Linear Mixed Multilevel Models},
  journal =   {Quality \& Quantity},
  publisher = {Springer Netherlands},
  issn =      {0033-5177},
  keyword =   {Humanities, Social Sciences and Law},
  pages =     {425-433},
  volume =    {38},
  issue =     {4},
  url =       {http://dx.doi.org/10.1023/B:QUQU.0000043118.19835.6c},
  year =      {2004}
}

@article{winship84,
  jstor_articletype = {research-article},
  title =             {Regression Models with Ordinal Variables},
  author =            {Winship, Christopher and Mare, Robert D.},
  journal =           {American Sociological Review},
  jstor_issuetitle =
{}, volume = {49}, number = {4}, jstor_formatteddate = {Aug., 1984}, pages = {512-525}, url = {http://www.jstor.org/stable/2095465}, ISSN = {00031224}, abstract = {Most discussions of ordinal variables in the sociological literature debate the suitability of linear regression and structural equation methods when some variables are ordinal. Largely ignored in these discussions are methods for ordinal variables that are natural extensions of probit and logit models for dichotomous variables. If ordinal variables are discrete realizations of unmeasured continuous variables, these methods allow one to include ordinal dependent and independent variables into structural equation models in a way that (1) explicitly recognizes their ordinality, (2) avoids arbitrary assumptions about their scale, and (3) allows for analysis of continuous, dichotomous, and ordinal variables within a common statistical framework. These models rely on assumed probability distributions of the continuous variables that underly the observed ordinal variables, but these assumptions are testable. The models can be estimated using a number of commonly used statistical programs. As is illustrated by an empirical example, ordered probit and logit models, like their dichotomous counterparts, take account of the ceiling and floor restrictions on models that include ordinal variables, whereas the linear regression model does not.}, language = {English}, year = {1984}, publisher = {American Sociological Association}, copyright = {Copyright © 1984 American Sociological Association}, } @article{thompson81, jstor_articletype = {research-article}, title = {Composite Link Functions in Generalized Linear Models}, author = {Thompson, R. and Baker, R. J.}, journal = {Journal of the Royal Statistical Society. Series C (Applied Statistics)}, jstor_issuetitle = {}, volume = {30}, number = {2}, jstor_formatteddate = {1981}, pages = {125-131}, url = {http://www.jstor.org/stable/2346381}, ISSN = {00359254}, abstract = {In generalized linear models each observation is linked with a predicted value based on a linear function of some systematic effects. We sometimes require to link each observation with a linear function of more than one predicted value. We embed such models into the generalized linear model framework using composite link functions. The computer program GLIM-3 can be used to fit these models. Illustrative examples are given including a mixed-up contingency table and grouped normal data.}, language = {English}, year = {1981}, publisher = {Blackwell Publishing for the Royal Statistical Society}, copyright = {Copyright © 1981 Royal Statistical Society}, } @article{burridge81, jstor_articletype = {research-article}, title = {A Note on Maximum Likelihood Estimation for Regression Models Using Grouped Data}, author = {Burridge, J.}, journal = {Journal of the Royal Statistical Society. Series B (Methodological)}, jstor_issuetitle = {}, volume = {43}, number = {1}, jstor_formatteddate = {1981}, pages = {41-45}, url = {http://www.jstor.org/stable/2985147}, ISSN = {00359246}, abstract = {The estimation of parameters for a class of regression models using grouped or censored data is considered. It is shown that with a simple reparameterization some commonly used distributions, such as the normal and extreme value, result in a log-likelihood which is concave with respect to the transformed parameters. 
Apart from its theoretical implications for the existence and uniqueness of maximum likelihood estimates, this result suggests minor changes to some commonly used algorithms for maximum likelihood estimation from grouped data. Two simple examples are given.}, language = {English}, year = {1981}, publisher = {Blackwell Publishing for the Royal Statistical Society}, copyright = {Copyright © 1981 Royal Statistical Society}, } @article{pratt81, jstor_articletype = {research-article}, title = {Concavity of the Log Likelihood}, author = {Pratt, John W.}, journal = {Journal of the American Statistical Association}, jstor_issuetitle = {}, volume = {76}, number = {373}, jstor_formatteddate = {Mar., 1981}, pages = {103-106}, url = {http://www.jstor.org/stable/2287052}, ISSN = {01621459}, abstract = {For a very general regression model with an ordinal dependent variable, the log likelihood is proved concave if the derivative of the underlying response function has concave logarithm. For a binary dependent variable, a weaker condition suffices, namely, that the response function and its complement each have concave logarithm. The normal, logistic, sine, and extreme-value distributions, among others, satisfy the stronger condition, the t (including Cauchy) distributions only the weaker. Some converses and generalizations are also given. The model is that which arises from an ordinary linear regression model with a continuous dependent variable that is partly unobservable, being either grouped into intervals with unknown endpoints, or censored, or, more generally, grouped in some regions, censored in others, and observed exactly elsewhere.}, language = {English}, year = {1981}, publisher = {American Statistical Association}, copyright = {Copyright © 1981 American Statistical Association}, } @Manual{christensen11, title = {Analysis of ordinal data with cumulative link models --- estimation with the \textsf{ordinal} package}, author = {Rune Haubo Bojesen Christensen}, note = {R-package version 2011.09-13}, year = 2011} @Book{agresti10, author = {Alan Agresti}, title = {Analysis of ordinal categorical data}, publisher = {Wiley}, year = 2010, edition = {2nd}} @Book{agresti02, author = {Alan Agresti}, title = {Categorical Data Analysis}, publisher = {Wiley}, year = 2002, edition = {2nd} } @Article{mccullagh80, author = {Peter McCullagh}, title = {Regression Models for Ordinal Data}, journal = {Journal of the Royal Statistical Society, Series B}, year = 1980, volume = 42, pages = {109-142} } @Article{randall89, author = {J.H. 
Randall}, title = {The Analysis of Sensory Data by Generalised Linear Model}, journal = {Biometrical journal}, year = 1989, volume = 7, pages = {781-793} } @Book{fahrmeir01, author = {Ludwig Fahrmeir and Gerhard Tutz}, title = {Multivariate Statistical Modelling Based on Generalized Linear Models}, publisher = {Springer-Verlag New York, Inc.}, year = 2001, series = {Springer series in statistics}, edition = {2nd} } @Book{greene10, author = {William H Greene and David A Hensher}, title = {Modeling Ordered Choices: A Primer}, publisher = {Cambridge University Press}, year = 2010} @Book{mccullagh89, author = {Peter McCullagh and John Nelder}, title = {Generalized Linear Models}, publisher = {Chapman \& Hall/CRC}, year = 1989, edition = {Second} } @Book{collett02, author = {David Collett}, title = {Modelling binary data}, publisher = {London: Chapman \& Hall/CRC}, year = 2002, edition = {2nd} } ordinal/vignettes/clm_article_refs.bib0000644000176200001440000003776613654745104017721 0ustar liggesusers@Misc{ordinal-pkg, title = {\pkg{ordinal}---Regression Models for Ordinal Data }, author = {R. H. B. Christensen}, year = {2019}, note = {\proglang{R} package version 2019.12-10}, url = {http://www.cran.r-project.org/package=ordinal/}, } @Manual{emmeans, title = {\pkg{emmeans}: Estimated Marginal Means, aka Least-Squares Means}, author = {Russell Lenth}, year = {2020}, note = {R package version 1.4.6}, url = {https://CRAN.R-project.org/package=emmeans}, } @Manual{margins, title = {\pkg{margins}: Marginal Effects for Model Objects}, author = {Thomas J. Leeper}, year = {2018}, note = {R package version 0.3.23}, } @Article{ggeffects, title = {\pkg{ggeffects}: Tidy Data Frames of Marginal Effects from Regression Models.}, volume = {3}, doi = {10.21105/joss.00772}, number = {26}, journal = {Journal of Open Source Software}, author = {Daniel Lüdecke}, year = {2018}, pages = {772}, } @Article{effects1, title = {Visualizing Fit and Lack of Fit in Complex Regression Models with Predictor Effect Plots and Partial Residuals}, author = {John Fox and Sanford Weisberg}, journal = {Journal of Statistical Software}, year = {2018}, volume = {87}, number = {9}, pages = {1--27}, doi = {10.18637/jss.v087.i09}, url = {https://www.jstatsoft.org/v087/i09}, } @Article{effects2, title = {Effect Displays in \proglang{R} for Multinomial and Proportional-Odds Logit Models: Extensions to the \pkg{effects} Package}, author = {John Fox and Jangman Hong}, journal = {Journal of Statistical Software}, year = {2009}, volume = {32}, number = {1}, pages = {1--24}, url = {http://www.jstatsoft.org/v32/i01/}, } @Manual{generalhoslem, title = {\pkg{generalhoslem}: Goodness of Fit Tests for Logistic Regression Models}, author = {Matthew Jay}, year = {2019}, note = {R package version 1.3.4}, url = {https://CRAN.R-project.org/package=generalhoslem}, } @article{ananth97, author = {Ananth, C V and Kleinbaum, D G}, title = "{Regression Models for Ordinal Responses: A Review of Methods and Applications.}", journal = {International Journal of Epidemiology}, volume = {26}, number = {6}, pages = {1323-1333}, year = {1997}, month = {12}, issn = {0300-5771}, doi = {10.1093/ije/26.6.1323}, url = {https://doi.org/10.1093/ije/26.6.1323}, eprint = {https://academic.oup.com/ije/article-pdf/26/6/1323/18477637/261323.pdf}, } @Article{ordinalgmifs, title = {\pkg{ordinalgmifs}: An \proglang{R} Package for Ordinal Regression in High-dimensional Data Settings}, author = {Kellie J. Archer and Jiayi Hou and Qing Zhou and Kyle Ferber and John G. 
Layne and Amanda Elswick Gentry}, journal = {Cancer Informatics}, year = {2014}, volume = {13}, pages = {187-195}, url = {http://www.la-press.com/article.php?article_id=4569}, doi = {10.4137/CIN.S20806} } @Manual{oglmx, title = {\pkg{oglmx}: Estimation of Ordered Generalized Linear Models}, author = {Nathan Carroll}, year = {2018}, note = {R package version 3.0.0.0}, url = {https://CRAN.R-project.org/package=oglmx}, } @Article{mvord, title = {\pkg{mvord}: An \proglang{R} Package for Fitting Multivariate Ordinal Regression Models}, author = {Rainer Hirk and Kurt Hornik and Laura Vana}, journal = {Journal of Statistical Software}, year = {2020}, volume = {93}, number = {4}, pages = {1--41}, doi = {10.18637/jss.v093.i04}, } @Manual{CUB, title = {\pkg{CUB}: A Class of Mixture Models for Ordinal Data}, author = {Maria Iannario and Domenico Piccolo and Rosaria Simone}, year = {2020}, note = {R package version 1.1.4}, url = {https://CRAN.R-project.org/package=CUB}, } @Article{MCMCpack, title = {\pkg{MCMCpack}: Markov Chain Monte Carlo in \proglang{R}}, author = {Andrew D. Martin and Kevin M. Quinn and Jong Hee Park}, journal = {Journal of Statistical Software}, year = {2011}, volume = {42}, number = {9}, pages = {22}, url = {http://www.jstatsoft.org/v42/i09/}, doi = {10.18637/jss.v042.i09}, } @Article{decarlo98, author = {Lawrence T DeCarlo}, title = {{Signal Detection Theory and Generalized Linear Models}}, journal = {Psychological Methods}, year = 1998, volume = 3, number = 2, doi = {10.1037/1082-989X.3.2.186}, pages = {185-205}} @Article{christensen11, author = {Rune Haubo Bojesen Christensen and Graham Cleaver and Per Bruun Brockhoff}, title = {{Statistical and Thurstonian Models for the A-not A Protocol with and without Sureness}}, journal = {Food Quality and Preference}, year = 2011, pages = {542-549}, volume = {22}, doi = {10.1016/j.foodqual.2011.03.003}} @Book{macmillan05, author = {Neil A Macmillan and C Douglas Creelman}, title = {Detection Theory, A User's Guide}, publisher = {Lawrence Erlbaum Associates, Publishers}, year = 2005, edition = {2nd}, ISBN = {978-0805842319} } @article{kuznetsova17, author = {Alexandra Kuznetsova and Per Brockhoff and Rune Christensen}, title = {\pkg{lmerTest} Package: Tests in Linear Mixed Effects Models}, journal = {Journal of Statistical Software, Articles}, volume = {82}, number = {13}, year = {2017}, keywords = {denominator degree of freedom, Satterthwaite's approximation, ANOVA, R, linear mixed effects models, lme4}, abstract = {One of the frequent questions by users of the mixed model function lmer of the lme4 package has been: How can I get p values for the F and t tests for objects returned by lmer? The lmerTest package extends the 'lmerMod' class of the lme4 package, by overloading the anova and summary functions by providing p values for tests for fixed effects. We have implemented the Satterthwaite's method for approximating degrees of freedom for the t and F tests. We have also implemented the construction of Type I - III ANOVA tables. Furthermore, one may also obtain the summary as well as the anova table using the Kenward-Roger approximation for denominator degrees of freedom (based on the KRmodcomp function from the pbkrtest package).
Some other convenient mixed model analysis tools such as a step method, that performs backward elimination of nonsignificant effects - both random and fixed, calculation of population means and multiple comparison tests together with plot facilities are provided by the package as well.}, issn = {1548-7660}, pages = {1--26}, doi = {10.18637/jss.v082.i13}, url = {https://www.jstatsoft.org/v082/i13} } @Article{cox95, author = {Christopher Cox}, title = {Location-Scale Cumulative Odds Models for Ordinal Data: A Generalized Non-Linear Model Approach}, journal = {Statistics in Medicine}, year = 1995, volume = 14, doi = {10.1002/sim.4780141105}, pages = {1191-1203}, } @Book{elden04, author = {Lars Eld\'en and Linde Wittmeyer-Koch and Hans Bruun Nielsen}, title = {Introduction to Numerical Computation --- Analysis and \proglang{MATLAB} Illustrations}, publisher = {Studentlitteratur}, ISBN = {978-9144037271}, year = 2004} @Article{farewell77, author = {Vernon T Farewell and R L Prentice}, title = {A Study of Distributional Shape in Life Testing}, journal = {{Technometrics}}, year = 1977, volume = 19, doi = {10.2307/1268257}, pages = {69-77}} @Article{genter85, author = {Frederic C Genter and Vernon T Farewell}, title = {Goodness-of-Link Testing in Ordinal Regression Models}, journal = {{The Canadian Journal of Statistics}}, year = 1985, volume = 13, number = 1, doi = {10.2307/3315165}, pages = {37-44}, } @Article{aranda-ordaz83, author = {Francisco J Aranda-Ordaz}, title = {An Extension of the Proportional-Hazards Model for Grouped Data}, journal = {Biometrics}, year = 1983, volume = 39, doi = {10.2307/2530811}, pages = {109-117}} @Article{peterson90, author = {Bercedis Peterson and Frank E {Harrell Jr.}}, title = {Partial Proportional Odds Models for Ordinal Response Variables}, journal = {Applied Statistics}, year = 1990, volume = 39, doi = {10.2307/2347760}, pages = {205-217} } @Article{peterson92, author = {Bercedis Peterson and Frank E {Harrell Jr.}}, title = {Proportional Odds Model}, journal = {Biometrics}, year = 1992, month = {March}, note = {Letters to the Editor} } @Book{brazzale07, author = {A R Brazzale and A C Davison and N Reid}, title = {Applied Asymptotics---Case Studies in Small-Sample Statistics}, ISBN = {9780521847032}, publisher = {Cambridge University Press}, year = 2007} @Book{pawitan01, author = {Yudi Pawitan}, title = {{In All Likelihood---Statistical Modelling and Inference Using Likelihood}}, publisher = {Oxford University Press}, ISBN = {978-0198507659}, year = 2001 } @Article{efron78, author = {Bradley Efron and David V Hinkley}, title = {{Assessing the Accuracy of the Maximum Likelihood Estimator: Observed versus Expected Fisher Information}}, journal = {Biometrika}, year = 1978, volume = 65, number = 3, doi = {10.1093/biomet/65.3.457}, pages = {457-487}, } @article{burridge81, title = {A Note on Maximum Likelihood Estimation for Regression Models Using Grouped Data}, author = {Burridge, J.}, journal = {Journal of the Royal Statistical Society B}, volume = {43}, number = {1}, pages = {41-45}, ISSN = {00359246}, language = {English}, year = {1981}, publisher = {Blackwell Publishing for the Royal Statistical Society}, } @article{pratt81, title = {Concavity of the Log Likelihood}, author = {Pratt, John W.}, journal = {Journal of the American Statistical Association}, volume = {76}, number = {373}, pages = {103-106}, ISSN = {01621459}, language = {English}, year = {1981}, doi = {10.2307/2287052}, } @Book{agresti10, author = {Alan Agresti}, title = {Analysis of Ordinal 
Categorical Data}, publisher = {John Wiley \& Sons}, year = 2010, edition = {2nd}, doi = {10.1002/9780470594001} } @Book{agresti02, author = {Alan Agresti}, title = {Categorical Data Analysis}, publisher = {John Wiley \& Sons}, year = 2002, edition = {3rd}, ISBN = {978-0470463635}, } @Article{mccullagh80, author = {Peter McCullagh}, title = {Regression Models for Ordinal Data}, journal = {Journal of the Royal Statistical Society B}, year = 1980, volume = 42, pages = {109-142} } @Article{randall89, author = {J.H. Randall}, title = {The Analysis of Sensory Data by Generalised Linear Model}, journal = {Biometrical journal}, year = 1989, volume = 7, pages = {781-793}, doi = {10.1002/bimj.4710310703}, } @phdthesis{mythesis, title = "Sensometrics: Thurstonian and Statistical Models", author = "Christensen, Rune Haubo Bojesen", year = "2012", publisher = "Technical University of Denmark (DTU)", school = "Technical University of Denmark (DTU)", url = "http://orbit.dtu.dk/files/12270008/phd271_Rune_Haubo_net.pdf" } @Manual{SAStype, title = {The Four Types of Estimable Functions -- \proglang{SAS/STAT} \textregistered 9.22 User's Guide}, author = {\proglang{SAS} Institute Inc.}, organization = {\proglang{SAS} Institute Inc.}, address = {Cary, NC}, year = {2008}, url = {https://support.sas.com/documentation/cdl/en/statugestimable/61763/PDF/default/statugestimable.pdf}, } @Manual{SAS, title = {\proglang{SAS/STAT} \textregistered 9.22 User's Guide}, author = {\proglang{SAS} Institute Inc.}, organization = {\proglang{SAS} Institute Inc.}, address = {Cary, NC}, year = {2010}, url = {https://support.sas.com/documentation/}, } @Manual{ucminf, title = {\pkg{ucminf}: General-Purpose Unconstrained Non-Linear Optimization}, author = {Hans Bruun Nielsen and Stig Bousgaard Mortensen}, year = {2016}, note = {\proglang{R} package version 1.1-4}, url = {https://CRAN.R-project.org/package=ucminf}, } @Book{fahrmeir01, author = {Ludwig Fahrmeir and Gerhard Tutz}, title = {Multivariate Statistical Modelling Based on Generalized Linear Models}, publisher = {Springer-Verlag}, year = 2001, series = {Springer series in statistics}, edition = {2nd} } @Book{greene10, author = {William H Greene and David A Hensher}, title = {Modeling Ordered Choices: A Primer}, publisher = {Cambridge University Press}, year = 2010} @Book{mccullagh89, author = {Peter McCullagh and John A. 
Nelder}, title = {Generalized Linear Models}, edition = {2nd}, year = {1989}, publisher = {Chapman \& Hall}, address = {London}, doi = {10.1007/978-1-4899-3242-6}, } @Manual{Stata, title = {\proglang{Stata} 15 Base Reference Manual}, author = {{StataCorp}}, publisher = "\proglang{Stata} Press", address = "College Station, TX", year = {2017}, url = {https://www.stata.com/}, } @article{oglm, author = "Williams, R.", title = "Fitting Heterogeneous Choice Models with \pkg{oglm}", journal = "Stata Journal", publisher = "\proglang{Stata} Press", address = "College Station, TX", volume = "10", number = "4", year = "2010", pages = "540-567(28)", url = "http://www.stata-journal.com/article.html?article=st0208" } @Article{gllamm, author="Rabe-Hesketh, Sophia and Skrondal, Anders and Pickles, Andrew", title="Generalized Multilevel Structural Equation Modeling", journal="Psychometrika", year="2004", month="Jun", day="01", volume="69", number="2", pages="167--190", issn="1860-0980", doi="10.1007/BF02295939", url="https://doi.org/10.1007/BF02295939" } @Manual{SPSS, title = {\proglang{IBM SPSS} Statistics for Windows, Version 25.0}, author = {{IBM Corp.}}, organization = {IBM Corp.}, address = {Armonk, NY}, year = {2017}, } @manual{Matlab, author = {\proglang{Matlab}}, address = {Natick, Massachusetts}, organization = {The Mathworks, Inc.}, title = {{\proglang{Matlab} version 9.8 (R2020a)}}, year = {2020} } @phdthesis{mord, author = {Fabian Pedregosa-Izquierdo}, title = {Feature Extraction and Supervised Learning on fMRI: From Practice to Theory}, school = {Université Pierre et Marie Curie}, year = 2015, address = {Paris VI}, url = {https://pythonhosted.org/mord/} } @Manual{R, title = {\proglang{R}: {A} Language and Environment for Statistical Computing}, author = {{\proglang{R} Core Team}}, organization = {\proglang{R} Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2020}, url = {https://www.R-project.org/}, } @Article{brms, title = {\pkg{brms}: An \proglang{R} Package for {Bayesian} Multilevel Models Using \pkg{Stan}}, author = {Paul-Christian Bürkner}, journal = {Journal of Statistical Software}, year = {2017}, volume = {80}, number = {1}, pages = {1--28}, doi = {10.18637/jss.v080.i01}, encoding = {UTF-8}, } @Manual{rms, title = {\pkg{rms}: Regression Modeling Strategies}, author = {Frank E {Harrell Jr}}, year = {2018}, note = {\proglang{R} package version 5.1-2}, url = {https://CRAN.R-project.org/package=rms}, } @Book{MASS, author = {William N. Venables and Brian D. Ripley}, title = {Modern Applied Statistics with \proglang{S}}, edition = {4th}, year = {2002}, pages = {495}, publisher = {Springer-Verlag}, address = {New York}, doi = {10.1007/978-0-387-21706-2}, } @Article{VGAM, author = {Thomas W. Yee}, title = {The \pkg{VGAM} Package for Categorical Data Analysis}, journal = {Journal of Statistical Software}, year = {2010}, volume = {32}, number = {10}, pages = {1--34}, doi = {10.18637/jss.v032.i10}, } @Article{Zeileis+Kleiber+Jackman:2008, author = {Achim Zeileis and Christian Kleiber and Simon Jackman}, title = {Regression Models for Count Data in \proglang{R}}, journal = {Journal of Statistical Software}, year = {2008}, volume = {27}, number = {8}, pages = {1--25}, doi = {10.18637/jss.v027.i08}, } ordinal/vignettes/clmm2_tutorial.Rnw0000644000176200001440000004375212431104052017342 0ustar liggesusers\documentclass[a4paper]{article} \usepackage{amsmath}%the AMS math extension of LaTeX. \usepackage{amssymb}%the extended AMS math symbols. 
%% \usepackage{amsthm} \usepackage{bm}%Use 'bm.sty' to get `bold math' symbols \usepackage{natbib} \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{Sweave} \usepackage{url} \usepackage{float}%Use `float.sty' \usepackage[left=3.5cm,right=3.5cm]{geometry} \usepackage{algorithmic} \usepackage[amsmath,thmmarks,standard,thref]{ntheorem} %%\VignetteIndexEntry{clmm2 tutorial} %%\VignetteDepends{ordinal, xtable} \title{A Tutorial on fitting Cumulative Link Mixed Models with \texttt{clmm2} from the \textsf{ordinal} Package} \author{Rune Haubo B Christensen} %% \numberwithin{equation}{section} \setlength{\parskip}{2mm}%.8\baselineskip} \setlength{\parindent}{0in} %% \DefineVerbatimEnvironment{Sinput}{Verbatim}%{} %% {fontshape=sl, xleftmargin=1em} %% \DefineVerbatimEnvironment{Soutput}{Verbatim}%{} %% {xleftmargin=1em} %% \DefineVerbatimEnvironment{Scode}{Verbatim}%{} %% {fontshape=sl, xleftmargin=1em} \fvset{listparameters={\setlength{\topsep}{0pt}}} %% \fvset{listparameters={\setlength{\botsep}{0pt}}} \renewenvironment{Schunk}{\vspace{-1mm}}{\vspace{-1mm}} %RE-DEFINE marginpar \setlength{\marginparwidth}{1in} \let\oldmarginpar\marginpar \renewcommand\marginpar[1]{\oldmarginpar[\-\raggedleft\tiny #1]% {\tiny #1}} %uncomment to _HIDE_MARGINPAR_: %\renewcommand\marginpar[1]{} \newcommand{\var}{\textup{var}} \newcommand{\I}{\mathcal{I}} \newcommand{\bta}{\bm \theta} \newcommand{\ta}{\theta} \newcommand{\tah}{\hat \theta} \newcommand{\di}{~\textup{d}} \newcommand{\td}{\textup{d}} \newcommand{\Si}{\Sigma} \newcommand{\si}{\sigma} \newcommand{\bpi}{\bm \pi} \newcommand{\bmeta}{\bm \eta} \newcommand{\tdots}{\hspace{10mm} \texttt{....}} \newcommand{\FL}[1]{\fvset{firstline= #1}} \newcommand{\LL}[1]{\fvset{lastline= #1}} \newcommand{\s}{\square} \newcommand{\bs}{\blacksquare} % figurer bagerst i artikel %% \usepackage[tablesfirst, nolists]{endfloat} %% \renewcommand{\efloatseparator}{\vspace{.5cm}} \theoremstyle{plain} %% {break} \theoremseparator{:} \theoremsymbol{{\tiny $\square$}} %%\theoremstyle{plain} \theorembodyfont{\small} \theoremindent5mm \renewtheorem{example}{Example} %% \newtheoremstyle{example}{\topsep}{\topsep}% %% {}% Body font %% {}% Indent amount (empty = no indent, \parindent = para indent) %% {\bfseries}% Thm head font %% {}% Punctuation after thm head %% {\newline}% Space after thm head (\newline = linebreak) %% {\thmname{#1}\thmnumber{ #2}\thmnote{ #3}}% Thm head spec %% %% \theoremstyle{example} %% %% \newtheorem{example}{Example}[subsection] %% \newtheorem{example}{Example}[section] \usepackage{lineno} % \linenumbers \newcommand*\patchAmsMathEnvironmentForLineno[1]{% \expandafter\let\csname old#1\expandafter\endcsname\csname #1\endcsname \expandafter\let\csname oldend#1\expandafter\endcsname\csname end#1\endcsname \renewenvironment{#1}% {\linenomath\csname old#1\endcsname}% {\csname oldend#1\endcsname\endlinenomath}}% \newcommand*\patchBothAmsMathEnvironmentsForLineno[1]{% \patchAmsMathEnvironmentForLineno{#1}% \patchAmsMathEnvironmentForLineno{#1*}}% \AtBeginDocument{% \patchBothAmsMathEnvironmentsForLineno{equation}% \patchBothAmsMathEnvironmentsForLineno{align}% \patchBothAmsMathEnvironmentsForLineno{flalign}% \patchBothAmsMathEnvironmentsForLineno{alignat}% \patchBothAmsMathEnvironmentsForLineno{gather}% \patchBothAmsMathEnvironmentsForLineno{multline}% } \begin{document} \bibliographystyle{chicago} \maketitle \begin{abstract} It is shown by example how a cumulative link mixed model is fitted with the \texttt{clmm2} function in package \textsf{ordinal}. 
Model interpretation and inference are briefly discussed. A tutorial for the more recent \texttt{clmm} function is work in progress. \end{abstract} %% \newpage %% \tableofcontents %% \newpage \SweaveOpts{echo=TRUE, results=verb, width=4.5, height=4.5} \SweaveOpts{prefix.string=figs} \fvset{listparameters={\setlength{\topsep}{0pt}}, gobble=0, fontsize=\small} %% \fvset{gobble=0, fontsize=\small} \setkeys{Gin}{width=.49\textwidth} <>= ## Load common packages, functions and set settings: library(ordinal) library(xtable) ## RUN <- FALSE #redo computations and write .RData files ## Change options: op <- options() ## To be able to reset settings options("digits" = 7) options(help_type = "html") ## options("width" = 75) options("SweaveHooks" = list(fig=function() par(mar=c(4,4,.5,0)+.5))) options(continue=" ") @ We will consider the data on the bitterness of wine from \citet{randall89} presented in Table~\ref{tab:winedata} and available as the object \texttt{wine} in package \textsf{ordinal}. The data were also analyzed with mixed effects models by \citet{tutz96}. The following gives an impression of the wine data object: <<>>= data(wine) head(wine) str(wine) @ The data represent a factorial experiment on factors determining the bitterness of wine with 1 = ``least bitter'' and 5 = ``most bitter''. Two treatment factors (temperature and contact) each have two levels. Temperature and contact between juice and skins can be controlled when crushing grapes during wine production. Nine judges each assessed wine from two bottles from each of the four treatment conditions, hence there are 72 observations in all. For more information see the manual entry for the wine data: \texttt{help(wine)}. \begin{table} \centering \caption{Ratings of the bitterness of some white wines. Data are adapted from \citet{randall89}.} \label{tab:winedata} \begin{tabular}{lllrrrrrrrrr} \hline & & & \multicolumn{9}{c}{Judge} \\ \cline{4-12} <>= data(wine) temp.contact.bottle <- with(wine, temp:contact:bottle)[drop=TRUE] tab <- xtabs(as.numeric(rating) ~ temp.contact.bottle + judge, data=wine) class(tab) <- "matrix" attr(tab, "call") <- NULL mat <- cbind(rep(c("cold", "warm"), each = 4), rep(rep(c("no", "yes"), each=2), 2), 1:8, tab) colnames(mat) <- c("Temperature", "Contact", "Bottle", 1:9) xtab <- xtable(mat) print(xtab, only.contents=TRUE, include.rownames=FALSE, sanitize.text.function = function(x) x) @ \end{tabular} \end{table} We will fit the following cumulative link mixed model to the wine data: \begin{equation} \label{eq:mixedModel} \begin{array}{c} \textup{logit}(P(Y_i \leq j)) = \theta_j - \beta_1 (\mathtt{temp}_i) - \beta_2(\mathtt{contact}_i) - u(\mathtt{judge}_i) \\ i = 1,\ldots, n, \quad j = 1, \ldots, J-1 \end{array} \end{equation} This is a model for the cumulative probability of the $i$th rating falling in the $j$th category or below, where $i$ indexes all observations and $j = 1, \ldots, J$ indexes the response categories ($J = 5$). $\{\theta_j\}$ are known as threshold parameters or cut-points. We take the judge effects to be random, and assume that the judge effects are IID normal: $u(\mathtt{judge}_i) \sim N(0, \sigma_u^2)$. We fit this model with the \texttt{clmm2} function in package \textsf{ordinal}.
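As an aside, simulating from Model~\eqref{eq:mixedModel} can help build intuition for what the parameters mean. The following sketch is not part of the original analysis; the parameter values are arbitrary illustrative choices, not estimates:
<<eval=FALSE>>=
## Hedged sketch: simulate one rating from the cumulative logit mixed
## model above, using arbitrary illustrative parameter values:
theta <- c(-1.5, 1.5, 4, 6)         # thresholds theta_j
beta <- c(temp = 3, contact = 1.8)  # treatment effects
u <- rnorm(1, sd = 1.1)             # a random judge effect
eta <- beta["temp"] * 1 + beta["contact"] * 0 + u  # warm, no contact
prob <- diff(c(0, plogis(theta - eta), 1))  # P(Y = j), j = 1, ..., 5
sample(1:5, size = 1, prob = prob)
@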
Here we save the fitted \texttt{clmm2} model in the object \texttt{fm1} (short for \texttt{f}itted \texttt{m}odel \texttt{1}) and \texttt{print} the model by simply typing its name: <<>>= fm1 <- clmm2(rating ~ temp + contact, random=judge, data=wine) fm1 @ Maximum likelihood estimates of the parameters are provided using the Laplace approximation to compute the likelihood function. A more accurate approximation is provided by the adaptive Gauss-Hermite quadrature method. Here we use 10 quadrature nodes and use the \texttt{summary} method to display additional information: <<>>= fm2 <- clmm2(rating ~ temp + contact, random=judge, data=wine, Hess=TRUE, nAGQ=10) summary(fm2) @ The small changes in the parameter estimates show that the Laplace approximation was in fact rather accurate in this case. Observe that we set the option \texttt{Hess = TRUE}. This is needed if we want to use the \texttt{summary} method since the Hessian is needed to compute standard errors of the model coefficients. The results contain the maximum likelihood estimates of the parameters: \begin{equation} \label{eq:parameters} \hat\beta_1 = 3.06, ~~\hat\beta_2 = 1.83, ~~\hat\sigma_u^2 = 1.29 = 1.13^2, ~~\{\hat\theta_j\} = [-1.62,~ 1.51,~ 4.23,~ 6.09]. \end{equation} Observe that the number under \texttt{Std.Dev} for the random effect is \textbf{not} the standard error of the random effects variance, \texttt{Var}. Rather, it is the standard deviation of the random effects, i.e., it is the square root of the variance. In our example $\sqrt{1.29} \simeq 1.13$. The condition number of the Hessian measures the empirical identifiability of the model. High numbers, say larger than $10^4$ or $10^6$, indicate that the model is ill-defined. This would indicate that the model can be simplified, that possibly some parameters are not identifiable, and that optimization of the model can be difficult. In this case the condition number of the Hessian does not indicate a problem with the model. The coefficients for \texttt{temp} and \texttt{contact} are positive, indicating that higher temperature and more contact increase the bitterness of wine, i.e., ratings in higher categories are more likely. The odds ratio of the event $Y \geq j$ is $\exp(\beta_{\textup{treatment}})$, thus the odds ratio of bitterness being rated in category $j$ or above at warm relative to cold temperatures is <<>>= exp(coef(fm2)[5]) @ The $p$-values for the location coefficients provided by the \texttt{summary} method are based on the so-called Wald statistic. More accurate tests are provided by likelihood ratio tests. These can be obtained with the \texttt{anova} method; for example, the likelihood ratio test of \texttt{contact} is <<>>= fm3 <- clmm2(rating ~ temp, random=judge, data=wine, nAGQ=10) anova(fm3, fm2) @ which in this case is slightly more significant. The Wald test is not reliable for variance parameters, so the \texttt{summary} method does not provide a test of $\sigma_u$, but a likelihood ratio test can be obtained with \texttt{anova}: <<>>= fm4 <- clm2(rating ~ temp + contact, data=wine) anova(fm4, fm2) @ showing that the judge term is significant. Since this test of $\sigma_u = 0$ is on the boundary of the parameter space (a variance cannot be negative), it is often argued that a more correct $p$-value is obtained by halving the $p$-value produced by the conventional likelihood ratio test. In this case halving the $p$-value is of little relevance.
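For completeness, the halved boundary $p$-value can be computed by hand from the likelihood ratio statistic. The following is only a sketch: the column name \texttt{"LR stat."} in the \texttt{anova} table is an assumption, so inspect the returned object (e.g., with \texttt{str}) before relying on it:
<<eval=FALSE>>=
## Hedged sketch: halved p-value for the boundary test of sigma_u = 0.
## The column name "LR stat." is assumed -- check str(anova(fm4, fm2)).
LR <- anova(fm4, fm2)[2, "LR stat."]
pchisq(LR, df = 1, lower.tail = FALSE) / 2
@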
A profile likelihood confidence interval of $\sigma_u$ is obtained with: <<>>= pr2 <- profile(fm2, range=c(.1, 4), nSteps=30, trace=0) confint(pr2) @ The profile likelihood can also be plotted: <>= plot(pr2) @ The result is shown in Fig.~\ref{fig:PRsigma_u} where horizontal lines indicate 95\% and 99\% confidence bounds. Clearly the profile likelihood function is asymmetric and symmetric confidence intervals would be inaccurate. \begin{figure} \centering <>= <> @ \caption{Profile likelihood of $\sigma_u$.} \label{fig:PRsigma_u} \end{figure} The judge effects, $u(\mathtt{judge}_i)$, are not parameters, so they cannot be \emph{estimated} in the conventional sense, but a ``best guess'' is provided by the \emph{conditional modes}. Similarly, the \emph{conditional variance} provides an uncertainty measure of the conditional modes. These quantities are included in \texttt{clmm2} objects as the \texttt{ranef} and \texttt{condVar} components. The following code generates the plot in Fig.~\ref{fig:ranef} illustrating judge effects via conditional modes with 95\% confidence intervals based on the conditional variance: <>= ci <- fm2$ranef + qnorm(0.975) * sqrt(fm2$condVar) %o% c(-1, 1) ord.re <- order(fm2$ranef) ci <- ci[order(fm2$ranef),] plot(1:9, fm2$ranef[ord.re], axes=FALSE, ylim=range(ci), xlab="Judge", ylab="Judge effect") axis(1, at=1:9, labels = ord.re) axis(2) for(i in 1:9) segments(i, ci[i,1], i, ci[i, 2]) abline(h = 0, lty=2) @ The seventh judge gave the lowest ratings of bitterness while the first judge gave the highest ratings of bitterness. The significant judge effect indicates that judges perceived the bitterness of the wines differently. Two natural interpretations are that either a bitterness of, say, 3 means different things to different judges, or the judges actually perceived the bitterness of the wines differently. Possibly both effects play their part. \begin{figure} \centering <>= <> @ \caption{Judge effects given by conditional modes with 95\% confidence intervals based on the conditional variance.} \label{fig:ranef} \end{figure} The fitted or predicted probabilities can be obtained with the judge effects at their conditional modes or for an average judge ($u = 0$). The former are available with \texttt{fitted(fm)} or with \texttt{predict(fm)}, where \texttt{fm} is a \texttt{f}itted \texttt{m}odel object. In our example we get <<>>= head(cbind(wine, fitted(fm2))) @ Predicted probabilities for an average judge can be obtained by including the data used to fit the model in the \texttt{newdata} argument of \texttt{predict}: <<>>= head(cbind(wine, pred=predict(fm2, newdata=wine))) @ Model~\eqref{eq:mixedModel} says that for an average judge at cold temperature the cumulative probability of a bitterness rating in category $j$ or below is \begin{equation*} P(Y_i \leq j) = \textup{logit}^{-1} [ \theta_j - \beta_2(\mathtt{contact}_i) ] \end{equation*} since $u$ is set to zero and $\beta_1(\mathtt{temp}_i) = 0$ at cold conditions. Further, $\textup{logit}^{-1}(\eta) = 1 / [1 + \exp(-\eta)]$ is the cumulative distribution function of the logistic distribution, available as the \texttt{plogis} function.
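As a quick check of this identity, the cumulative probabilities for an average judge at the baseline conditions (cold temperature, no contact), where the linear predictor is zero, can be sketched as follows, using the threshold estimates stored in \texttt{fm2\$Theta} (as in the computations below):
<<eval=FALSE>>=
## Sketch: P(Y <= j) for an average judge (u = 0) at cold, no-contact
## conditions where eta = 0; fm2$Theta holds the four threshold estimates.
plogis(fm2$Theta)
@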
The (non-cumulative) probability of a bitterness rating in category $j$ is $\pi_j = P(Y_i \leq j) - P(Y_i \leq j-1)$; for instance, the probability of a bitterness rating in the third category at these conditions can be computed as <<>>= plogis(fm2$Theta[3] - fm2$beta[2]) - plogis(fm2$Theta[2] - fm2$beta[2]) @ This corresponds to the third entry of \texttt{predict(fm2, newdata=wine)} given above. Judge effects are random and normally distributed, so an average judge effect is 0. Extreme judge effects, say the 5th and 95th percentile judge effects, are given by <<>>= qnorm(0.95) * c(-1, 1) * fm2$stDev @ At the baseline experimental conditions (cold and no contact) the probabilities of bitterness ratings in the five categories for a 5th percentile judge are <<>>= pred <- function(eta, theta, cat = 1:(length(theta)+1), inv.link = plogis) { Theta <- c(-1e3, theta, 1e3) sapply(cat, function(j) inv.link(Theta[j+1] - eta) - inv.link(Theta[j] - eta) ) } pred(qnorm(0.05) * fm2$stDev, fm2$Theta) @ We can compute these probabilities for average, 5th and 95th percentile judges at the four experimental conditions. The following code plots these probabilities and the results are shown in Fig.~\ref{fig:ratingProb}. <>= mat <- expand.grid(judge = qnorm(0.95) * c(-1, 0, 1) * fm2$stDev, contact = c(0, fm2$beta[2]), temp = c(0, fm2$beta[1])) pred.mat <- pred(eta=rowSums(mat), theta=fm2$Theta) lab <- paste("contact=", rep(levels(wine$contact), 2), ", ", "temp=", rep(levels(wine$temp), each=2), sep="") par(mfrow=c(2, 2)) for(k in c(1, 4, 7, 10)) { plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1), xlab="Bitterness rating scale", axes=FALSE, ylab="Probability", main=lab[ceiling(k/3)], las=1) axis(1); axis(2) lines(1:5, pred.mat[k+1, ], lty=1) lines(1:5, pred.mat[k+2, ], lty=3) legend("topright", c("avg. judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n") } @ \begin{figure} \centering <>= k <- 1 plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1), xlab="Bitterness rating scale", axes=FALSE, ylab="Probability", main=lab[ceiling(k/3)], las=1) axis(1); axis(2) lines(1:5, pred.mat[k+1, ], lty=1) lines(1:5, pred.mat[k+2, ], lty=3) legend("topright", c("avg. judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n") @ <>= k <- 4 plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1), xlab="Bitterness rating scale", axes=FALSE, ylab="Probability", main=lab[ceiling(k/3)], las=1) axis(1); axis(2) lines(1:5, pred.mat[k+1, ], lty=1) lines(1:5, pred.mat[k+2, ], lty=3) legend("topright", c("avg. judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n") @ <>= k <- 7 plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1), xlab="Bitterness rating scale", axes=FALSE, ylab="Probability", main=lab[ceiling(k/3)], las=1) axis(1); axis(2) lines(1:5, pred.mat[k+1, ], lty=1) lines(1:5, pred.mat[k+2, ], lty=3) legend("topright", c("avg. judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n") @ <>= k <- 10 plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1), xlab="Bitterness rating scale", axes=FALSE, ylab="Probability", main=lab[ceiling(k/3)], las=1) axis(1); axis(2) lines(1:5, pred.mat[k+1, ], lty=1) lines(1:5, pred.mat[k+2, ], lty=3) legend("topright", c("avg. judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n") @ \caption{Rating probabilities for average and extreme judges at different experimental conditions.} \label{fig:ratingProb} \end{figure} At constant experimental conditions, the odds ratio for a bitterness rating in category $j$ or above for a 95th percentile judge relative to a 5th percentile judge is <<>>= exp(2*qnorm(0.95) * fm2$stDev) @ The differences between judges can also be expressed in terms of the interquartile range: the odds ratio for a bitterness rating in category $j$ or above for a third quartile judge relative to a first quartile judge is <<>>= exp(2*qnorm(0.75) * fm2$stDev) @ \newpage \bibliography{ordinal} %% \newpage \end{document} <>= @
ordinal/vignettes/static_figs/ [directory]
ordinal/vignettes/static_figs/fig-figEqui.pdf [binary PDF figure (R graphics output) -- content omitted]
ordinal/vignettes/static_figs/fig-figFlex.pdf [binary PDF figure (R graphics output) -- content omitted]
ordinal/vignettes/static_figs/fig-figNom2.pdf [binary PDF figure (R graphics output) -- content omitted]
ordinal/vignettes/static_figs/fig-fig2.pdf [binary PDF figure (R graphics output) -- content omitted]
ordinal/vignettes/static_figs/fig-figSca.pdf [binary PDF figure (R graphics output) -- content omitted]
ordinal/NEWS0000644000176200001440000002633114533322411012404 0ustar liggesusersThis file documents updates and changes in package ordinal since version 2010.03-04

March 04 2010:

- First version of the package is created.

2010-04-06:

- removing class "clm.fit" from results of finalizeRho.
- moving offset computations from logLik and gradient functions to newRho function.
- Bug fixed in grad.lambda
- checks and warning messages added to profile.clm
- a warning is now given if the profile fits do not converge
- profile.clm has grown the argument 'stepWarn', which gives a warning if the no. profile steps in each direction (up or down) is less than stepWarn (default 8), which indicates that the profile is unreliable.
- Bug in loglog-link for clmm fits fixed.
- Missing values are handled better in clm and clmm.
- clmm has grown an argument 'sdFixed' which assigns a fixed value of the standard deviation of the random effects. Optimization is performed with respect to the remaining parameters.
- profile.clmm, confint.profile.clmm and plot.profile.clmm are now available. Profiling is restricted to the standard deviation parameter of the random effects.
- control.clm and control.clmm now handle the control parameters.

2010-05-06:

- allowing the formulas to be constructed outside clm and clmm (the formulas are evaluated in the parent frame before the variable names are extracted)

2010-05-17:

- Better evaluation in case of non-standard formula usage allowing e.g. clm(data$y ~ data$x).
- Better handling of ill-defined variance-covariance matrix of the parameters in summary methods for clm and clmm objects.

2010-06-12:

- Standard Gauss-Hermite quadrature is now available via the nAGQ argument to clmm.
- Core functions implemented in C for speed.
  This includes all link functions, update of the conditional modes of the random effects, adaptive Gauss-Hermite quadrature and standard, i.e. non-adaptive, Gauss-Hermite quadrature. Select the R or C implementation via the argument doFit to clmm.
- Bug in random effects estimates and their conditional modes corrected.

2010-07-06:

- Bug in predict when 'newdata' was supplied is now corrected.

2010-07-23:

- Better descriptions of random effect estimates and fitted values in the clmm help page.

2010-10-22:

- Updated help page for predict.clm/clmm.

2010-12-13:

- Bug in predict.clm corrected for models with nominal effects and newdata supplied (thanks to Simon Blomberg for the bug report).

2011-04-21:

- Better message from summary.clmm when Hess = FALSE
- endpoint thresholds are now closer to infinity. This is due to a bug report from Ioannis Kosmidis (March 30, 2011); the model estimates weren't right with very large scale effects. Tests are added to testCLM.R
- gradTol in clm.control now defaults to 1e-5 rather than 1e-4. convTol is retained at 1e-4, so we are asking for closer convergence than we require.
- getGnll no longer returns Inf if !all(pr > 0)
- link utility functions are moved from clm.R to linkUtils.R
- extensive testing for NaN-specials in C-code for the link functions is added.
- details section added to clmm.control.Rd with a comment about using "central" gradients with the ucminf optimizer.
- examples updated in confint.Rd

2012-01-19:

- Changed evaluation of formula in clm to make clm more forgiving for evaluation inside other functions.

2012-05-09:

- Updated evaluation of formula in clmm, cf. recent update of clm.

2012-05-22:

- Better evaluation of fitted probabilities. This should reduce the occurrence of the "sqrt(phi2) : NaNs produced" error message.
- Improved evaluation of control parameters in clmm using the new function getCtrlArgs.
- Better warning if removal of the intercept is attempted in clmm.

2012-05-23:

- Adding useMatrix argument to clmm.control
- Using getFitted in clm
- Implementing getFittedC in C and updating C code for fit.clmm.ssr with better and faster evaluation of fitted values
- Introduction of links.h, links.c and get_fitted.c in /src

2012-05-29:

- Correcting formula interpretation in clm to allow for really long formulas.
- Better evaluation of control arguments in clmm (adjustment of getCtrlArgs).
- Adding clmm.control.R to ./test

2012-09-10:

- Computing Newton step in clm with solve() rather than .Call("La_dgesv", ...) to accommodate changes in R base.

2012-09-11:

- Using globalVariables() conditional on getRversion() >= '2.15.1'.

2013-03-20:

- Adding symmetric2 threshold function, which restricts the latent mean in the reference group to zero. This means that the central threshold (ylev even) is zero or that the two central thresholds are equal apart from their sign (ylev uneven).

2013-04-08:

- Allowing zero weights in clm unless there are no observations with a positive weight in one or more response categories.

2013-04-11:

- clm now computes Theta and alpha.mat tables of thresholds and threshold-parameters if nominal effects are specified.

2013-04-17:

- anova.clm and anova.clmm now test for illegal arguments 'test' and 'type' (wish from Ben Bolker and Jonathan Dushoff)
- introducing convergence code 3 in clm: Thresholds are not increasing, which can happen with nominal effects.

2013-06-21:

- Allowing zero weights in clm even if an entire response category is zeroed out.
2013-07-23:

- Newton-Raphson fitting algorithm for CLMs has been redesigned: clm.fit.env is now deprecated (and removed from the code base) and all fitting of CLMs takes place in a new version of clm.fit.NR
- Convergence assessment has been improved with a new set of convergence codes and new message handling.
- clm.control has gained several arguments to accommodate this.
- in clm the new function conv.check assesses convergence and computes the variance-covariance matrix of the parameters. Thus vcov is always part of a clm object.
- vcov.clm has been redesigned and can now compute the variance-covariance matrix with Cholesky, SVD, EIGEN and QR methods or just grab it from the clm object (default).
- nominal_test and scale_test functions added: they add all terms in a model to the nominal and scale formulae respectively and perform likelihood ratio tests. These functions can be helpful in model development and model/GOF testing, e.g. of non-proportional odds.
- Lazy-loading of data enabled.
- MASS moved from Depends to Imports.
- In the clm man-page the 'value' list is ordered alphabetically as are the elements in a clm object.
- clmm now computes the variance-covariance matrix with the Cholesky decomposition.
- makeThresholds now takes ylevels rather than y as argument.
- clm.control and clmm.control are moved to control.R
- drop.cols has gained the argument drop.scale which controls whether columns in the scale design matrix are dropped if they are linearly dependent on columns in the nominal design matrix. This was previously implicitly TRUE but is now FALSE to allow fits of certain models.
- The list of control arguments is now stored as part of the clm output.
- weights, offset and S.offset can now be missing or NULL in clm.fit.
- predict.clm now allows type="eta".

2013-08-22:

- Exporting S3 print method for convergence.clm objects.

2013-08-23:

- Fixing an issue in the Hessian computation for boundary fits with useMatrix=FALSE and a single scalar random-effects term.
- Allowing control parameters to be passed on to nlminb (when it is used). A bug was fixed in getCtrlArgs and clmm.control now includes method="nlminb".
- Adding test for no. random effects >= no. observations for each r.e. term.

2013-08-25:

- changing default optimizer from ucminf to nlminb
- adding grad.ctr4 to the list of gradient functions
- explicitly computing the number of objective function evaluations rather than relying on the optimizer's count.
- wrapping calls to optimizers in try() to catch errors that occur here
- adding test for non-finite parameter values in *.ssr objective functions.
- adding list of control parameters to list of clmm output.
- refining test of no. random effects > no. observations.
- removing ucminf control settings from clmm.control when fitting with nlminb.
- fixing bug with C version of NRalgv3 (conditional mode update): Hessian (D) values are now initialized to 1 rather than 0.

2013-08-26:

- registering global variables.
- removing use of ':::'.

2013-08-27:

- documenting list of control parameters in clmm objects.

2013-09-27:

- no longer importing numDeriv as we now use our own gradient and hessian functions
- moving Matrix package from Depends to Imports

2013-10-01:

- Updating convergence checking in clm.fit and simple_clm to the clm standard
- Removing distinction between (non-S3/4) sclm, eclm and clm model classes

2013-10-31:

- Now having 'methods' in depends since this is needed to run clmm.
  This was most likely only a problem when using Rscript where the methods package is not loaded by default.

2014-11-12:

- Reimplementation of formula, model.frame and design matrix processing motivated by a bug in model.matrix.clm and predict.clm reported by Russell Lenth 2014-11-07 when implementing lsmeans support for ordinal::clm.

2014-11-14:

- Fixing bug in convergence checking (conv.check) and added test to /tests/test.clm.convergence.R
- Improved the efficiency (i.e. speed) in the evaluation of standard errors for predictions using predict.clm (based on feature request by Thomas Jagger).

2015-01-21:

- Updating Citation information per CRAN request.

2015-06-28:

- Updating maintainer email address

2016-12-12:

- Fixing a couple of errors in the CLM tutorial vignette
- Correcting description of the threshold argument to clmm
- qgumbel did not respect its lower.tail argument (thanks to John Fox for reporting)
- Test for no. random effects less than the no. observations now gives a warning instead of an error and is now manageable via clmm.control.

2018-04-19:

- Fixed insufficient PROTECT in get_fitted
- Registration of native routines (C-code)
- Reduce exec time for clmm examples
- change rBind -> rbind

2018-08-25:

- Added sign.location and sign.nominal to clm.control()
- Implemented type I, II and III ANODE tables for clm fits
- Implemented flexible link functions for clm()s
- Added new article submitted to JSS as vignette and moved old vignettes to GitHub

2019-03-09:

- Massage tests to check out on R-devel

2019-04-25:

- Change in formula evaluation in base R prompts this update - very kindly fixed by Martin Maechler (R core) in PR#18

2019-12-11:

- Get rid of S3 class checks with class() - now using inherits() instead.

2020-08-22:

- Fix evaluation of long formulae in clmm - thanks to Stéphane Guillou, Stefan Th. Gries and Tingting Zhan for reporting.

2022-11-13:

- Fix function declaration without a prototype in utilityFuns.c per CRAN request.
- Add model.matrix method for clmm-objects.
- Enable evaluation of anova.clmm in separate environments - thanks to Karl Ove Hufthammer for reporting and Jack Taylor for a detailed analysis and suggestion for a fix.
- Allow models with an implicit intercept and only random effects in clmm().
- Fixed index in equation (7) of the clm-vignette.
- Fix import of ranef and VarCorr methods from nlme and lme4 packages

2023-12-04:

- Change NCOL usage because Kurt Hornik wants to change the behavior of NCOL(NULL) in base R
- Names of functions clm.fit.NR, clm.fit.flex and clm.fit.optim changed to clm_fit_NR, clm_fit_flex and clm_fit_optim to avoid hiccup from CRAN checks.
ordinal/R/0000755000176200001440000000000014533322576012115 5ustar liggesusersordinal/R/contrast_utils.R0000644000176200001440000002573714533321514015320 0ustar liggesusers#############################################################################
## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen
##
## This file is part of the ordinal package for R (*ordinal*)
##
## *ordinal* is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## *ordinal* is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# # contrast-utils.R - utility functions for contrasts, terms and anova # -------- Contents: -------- # # containment # term_contain # relatives # doolittle # ensure_full_rank # get_rdX # extract_contrasts_type3 ############################################## ######## containment() ############################################## containment <- function(object) { # lm or merMod # For all terms 'T' in object compute the terms # Return a list: # for each term 'T' a vector of terms that contain 'T'. terms <- terms(object) data_classes <- attr(terms(object), "dataClasses") # Note: need fixed.only for merMod objects to get dataClasses term_names <- attr(terms, "term.labels") factor_mat <- attr(terms, "factors") lapply(setNames(term_names, term_names), function(term) { term_names[term_contain(term, factor_mat, data_classes, term_names)] }) } ############################################## ######## term_contain() ############################################## #' Determine which Terms Contain a Term #' #' The definition of \emph{containment} follows from the SAS documentation on #' "The Four Types of Estimable Functions". #' #' Containment is defined for two model terms, say, F1 and F2 as: #' F1 is contained in F2 (F2 contains F1) if #' \enumerate{ #' \item F1 and F2 involve the same continuous variables (if any) #' \item F2 involve more factors than F1 #' \item All factors in F1 (if any) are part of F2 #' } #' The intercept, though not really a model term, is defined by SAS to be #' contained in all factor terms, but it is not contained in any #' effect involving a continuous variable. #' #' @param term character; name of a model term and one of \code{term_names}. #' @param factors the result of \code{attr(terms_object, "factors")}. #' @param dataClasses the result of #' \code{attr(terms(model, fixed.only=FALSE), "dataClasses")}. Note that #' \code{fixed.only=FALSE} is only needed for \code{merMod} objects, but does #' no harm for \code{lm} objects. #' @param term_names the result of \code{attr(terms_object, "term.labels")}. #' #' @return a logical vector indicating for each term in \code{term_names} if #' it contains \code{term}. #' @importFrom stats setNames #' @keywords internal term_contain <- function(term, factors, dataClasses, term_names) { get_vars <- function(term) # Extract vector of names of all variables in a term rownames(factors)[factors[, term] == 1] contain <- function(F1, F2) { # Returns TRUE if F1 is contained in F2 (i.e. if F2 contains F1) # F1, F2: Names of terms, i.e. attr(terms_object, "term.labels") all(vars[[F1]] %in% vars[[F2]]) && # all variables in F1 are also in F2 length(setdiff(vars[[F2]], vars[[F1]])) > 0L && # F2 involve more variables than F1 setequal(numerics[[F1]], numerics[[F2]]) # F1 and F2 involve the same covariates (if any) } # Get (named) list of all variables in terms: vars <- lapply(setNames(term_names, term_names), get_vars) # Get (named) list of all _numeric_ variables in all terms: numerics <- lapply(vars, function(varnms) varnms[which(dataClasses[varnms] == "numeric")]) # Check if 'term' is contained in each model term: sapply(term_names, function(term_nm) contain(term, term_nm)) } ############################################## ######## doolittle() ############################################## #' Doolittle Decomposition #' #' @param x a numeric square matrix with at least 2 columns/rows. 
#' @param eps numerical tolerance on the whether to normalize with components #' in \code{L} with the diagonal elements of \code{U}. #' #' @return a list with two matrices of the same dimension as \code{x}: #' \item{L}{lower-left unit-triangular matrix} #' \item{U}{upper-right triangular matrix (\emph{not} unit-triangular)} #' #' @keywords internal doolittle <- function(x, eps = 1e-6) { if(!is.matrix(x) || ncol(x) != nrow(x) || !is.numeric(x)) stop("argument 'x' should be a numeric square matrix") stopifnot(ncol(x) > 1L) n <- nrow(x) L <- U <- matrix(0, nrow=n, ncol=n) diag(L) <- rep(1, n) for(i in 1:n) { ip1 <- i + 1 im1 <- i - 1 for(j in 1:n) { U[i,j] <- x[i,j] if (im1 > 0) { for(k in 1:im1) { U[i,j] <- U[i,j] - L[i,k] * U[k,j] } } } if ( ip1 <= n ) { for ( j in ip1:n ) { L[j,i] <- x[j,i] if ( im1 > 0 ) { for ( k in 1:im1 ) { L[j,i] <- L[j,i] - L[j,k] * U[k,i] } } L[j, i] <- if(abs(U[i, i]) < eps) 0 else L[j,i] / U[i,i] } } } L[abs(L) < eps] <- 0 U[abs(U) < eps] <- 0 list( L=L, U=U ) } ############################################## ######## ensure_full_rank() ############################################## #' Ensure a Design Matrix has Full (Column) Rank #' #' Determine and drop redundant columns using the \code{\link{qr}} #' decomposition. #' #' @param X a design matrix as produced by \code{model.matrix}. #' @param tol \code{qr} tolerance. #' @param silent throw message if columns are dropped from \code{X}? Default #' is \code{FALSE}. #' @param test.ans Test if the resulting/returned matrix has full rank? Default #' is \code{FALSE}. #' #' @return A design matrix in which redundant columns are dropped #' @keywords internal ensure_full_rank <- function(X, tol = 1e-7, silent = FALSE, test.ans = FALSE) { ### works if ncol(X) >= 0 and nrow(X) >= 0 ## test and match arguments: stopifnot(is.matrix(X)) silent <- as.logical(silent)[1] ## perform the qr-decomposition of X using LINPACK methods: qr.X <- qr(X, tol = tol, LAPACK = FALSE) if(qr.X$rank == ncol(X)) { ## return X if X has full column rank return(X) } if(!silent) ## message the no. dropped columns: message(gettextf("Design is column rank deficient so dropping %d coef", ncol(X) - qr.X$rank)) ## return the columns correponding to the first qr.x$rank pivot ## elements of X: keep <- with(qr.X, pivot[seq_len(rank)]) newX <- X[, keep, drop = FALSE] sel <- with(qr.X, pivot[-seq_len(rank)]) ## Copy old attributes: if(!is.null(contr <- attr(X, "contrasts"))) attr(newX, "contrasts") <- contr if(!is.null(asgn <- attr(X, "assign"))) attr(newX, "assign") <- asgn[-sel] ## did we succeed? stop-if-not: if(test.ans && qr.X$rank != qr(newX)$rank) stop(gettextf("Determination of full column rank design matrix failed"), call. = FALSE) return(newX) } ############################################## ######## get_rdX() ############################################## #' Compute the 'Full' Rank-Deficient Design Matrix #' #' #' @param model a model object; lmerMod or lmerModLmerTest. #' @param do.warn throw a message if there is no data for some factor #' combinations. #' #' @return the rank-deficien design matrix #' @author Rune Haubo B. Christensen #' @keywords internal #' #' @importFrom stats as.formula model.frame terms model.matrix get_rdX <- function(model, do.warn=TRUE) { # Compute rank-deficient design-matrix X usign contr.treatment coding. 
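  # Unlike the model matrices used for fitting, no contrasts are applied,
  # so each factor gets one indicator column per level and the resulting
  # matrix is column rank deficient. A minimal sketch of the building
  # block (hypothetical data; not run):
  #
  #   df <- data.frame(f = factor(c("a", "b", "a", "b")))
  #   model.matrix(~ 0 + f, data = df)   # columns 'fa' and 'fb'
  #
  # Binding such per-term blocks together (plus an intercept column)
  # gives the 'full' rank-deficient design matrix returned below.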
# # model: terms(model), model.frame(model), fixef(model) Terms <- terms(model, fixed.only=TRUE) term_names <- attr(Terms, "term.labels") df <- model.frame(model) # Compute rank-deficient (full) design-matrix, X: rdXi <- if(length(term_names)) lapply(term_names, function(trm) { form <- as.formula(paste0("~ 0 + ", trm)) model.matrix(form, data=df) # no contrast arg }) else list(model.matrix(~ 1, data=df)[, -1, drop=FALSE]) rdX <- do.call(cbind, rdXi) param_names <- unlist(lapply(rdXi, colnames)) # Potentially add intercept: has_intercept <- attr(Terms, "intercept") != 0 if(has_intercept) { rdX <- cbind('(Intercept)'=rep(1, nrow(rdX)), rdX) param_names <- c("(Intercept)", param_names) } colnames(rdX) <- param_names # Warn/message if there are cells without data: is_zero <- which(colSums(rdX) == 0) if(do.warn && length(is_zero)) { txt <- sprintf("Missing cells for: %s. ", paste(param_names[is_zero], collapse = ", ")) # warning(paste(txt, "\nInterpret type III hypotheses with care."), call.=FALSE) message(paste(txt, "\nInterpret type III hypotheses with care.")) } rdX } ############################################## ######## extract_contrasts_type3 ############################################## #' @importFrom MASS ginv #' @importFrom stats terms resid lm.fit extract_contrasts_type3 <- function(model, X=NULL) { # Computes contrasts for type III tests with reference to treatment contrast coding # X: Optional full rank design matrix in contr.treatment coding Terms <- terms(model) term_names <- attr(Terms, "term.labels") if(is.null(X)) { X <- get_model_matrix(model, type="remake", contrasts="contr.treatment") X <- ensure_full_rank(X) } # Get 'complete' design matrix: rdX <- get_rdX(model, do.warn = TRUE) # treatment contrasts # cols for aliased coefs should be removed in X; not in rdX. # This makes ginv(X) unique! L <- zapsmall(t(MASS::ginv(X) %*% rdX)) # basic contrast matrix dimnames(L) <- list(colnames(rdX), colnames(X)) # Orthogonalize contrasts for terms which are contained in other terms: map <- term2colX(Terms, X) is_contained <- containment(model) # Orthogonalize higher order terms before lower order terms: terms_order <- attr(Terms, "order") orthog_order <- term_names[order(terms_order, decreasing = TRUE)] # Only orthogonalize terms with columns in X: keep <- names(which(sapply(map[orthog_order], length) > 0)) for(term in orthog_order[keep]) { # if term is contained in other terms: if(length(contains <- is_contained[[term]]) > 0) { # orthogonalize cols in L for 'term' wrt. cols that contain 'term': L[, map[[term]]] <- zapsmall(resid(lm.fit(x=L[, unlist(map[contains]), drop=FALSE], y=L[, map[[term]], drop=FALSE]))) } } # Keep rows in L corresponding to model coefficients: L <- L[colnames(X), , drop=FALSE] # Extract list of contrast matrices from L - one for each term: Llist <- lapply(map[term_names], function(term) t(L[, term, drop=FALSE])) # Keep all non-zero rows: lapply(Llist, function(L) L[rowSums(abs(L)) > 1e-8, , drop=FALSE]) } ordinal/R/clm.simple.R0000644000176200001440000001323414533321514014275 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. 
## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## A implementation of simple CLMs (simple_clm), i.e., CLMs without ## scale and nominal effects. simple_clm <- function(formula, data, weights, start, subset, offset, doFit = TRUE, na.action, contrasts, model = TRUE, control = list(), link = c("logit", "probit", "cloglog", "loglog"), threshold = c("flexible", "symmetric", "symmetric2", "equidistant"), ...) { ## Initial argument matching and testing: mc <- match.call(expand.dots = FALSE) link <- match.arg(link) threshold <- match.arg(threshold) ## check for presence of formula: if(missing(formula)) stop("Model needs a formula") if(missing(contrasts)) contrasts <- NULL ## set control parameters: control <- do.call(clm.control, c(control, list(...))) ## Compute: y, X, wts, off, mf: if (missing(data)) data <- environment(formula) mf <- match.call(expand.dots = FALSE) m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"), names(mf), 0L) mf <- mf[c(1L, m)] mf$drop.unused.levels <- TRUE mf[[1L]] <- as.name("model.frame") mf <- eval(mf, parent.frame()) ## Return model.frame? if(control$method == "model.frame") return(mf) y <- model.response(mf, "any") ## any storage mode if(!is.factor(y)) stop("response needs to be a factor", call.=FALSE) ## design matrix: mt <- attr(mf, "terms") X <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else cbind("(Intercept)" = rep(1, NROW(y))) ## Test for intercept in X: Xint <- match("(Intercept)", colnames(X), nomatch = 0) if(Xint <= 0) { X <- cbind("(Intercept)" = rep(1, NROW(y)), X) warning("an intercept is needed and assumed in 'formula'", call.=FALSE) } ## intercept in X is guaranteed. wts <- getWeights(mf) off <- getOffsetStd(mf) ylevels <- levels(droplevels(y[wts > 0])) frames <- list(y=y, ylevels=ylevels, X=X) ## Compute the transpose of the Jacobian for the threshold function, ## tJac and the names of the threshold parameters, alpha.names: frames <- c(frames, makeThresholds(ylevels, threshold)) ## test for column rank deficiency in design matrices: frames <- drop.cols(frames, silent=TRUE) ## Set envir rho with variables: B1, B2, o1, o2, wts, fitted: rho <- clm.newRho(parent.frame(), y=frames$y, X=frames$X, NOM=NULL, S=NULL, weights=wts, offset=off, S.offset=NULL, tJac=frames$tJac, control=control) ## Set starting values for the parameters: start <- set.start(rho, start=start, get.start=missing(start), threshold=threshold, link=link, frames=frames) rho$par <- as.vector(start) ## remove attributes ## Set pfun, dfun and gfun in rho: setLinks(rho, link) ## Possibly return the environment rho without fitting: if(!doFit) return(rho) ## Fit the clm: if(control$method == "Newton") fit <- clm_fit_NR(rho, control) else fit <- clm_fit_optim(rho, control$method, control$ctrl) ### NOTE: we could add arg non.conv = c("error", "warn", "message") to ### allow non-converged fits to be returned. 
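### A rough sketch of how that could look (hypothetical; 'non.conv' is
### not an actual argument here, and the convergence flag may be named
### differently):
###   non.conv <- match.arg(non.conv)
###   if(fit$convergence > 0L)
###     switch(non.conv,
###            error = stop("simple_clm failed to converge"),
###            warn = warning("simple_clm failed to converge"),
###            message = message("simple_clm failed to converge"))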
## Modify and return results: res <- clm.finalize(fit, weights=wts, coef.names=frames$coef.names, aliased=frames$aliased) res$control <- control res$link <- link res$start <- start if(control$method == "Newton" && !is.null(start.iter <- attr(start, "start.iter"))) res$niter <- res$niter + start.iter res$threshold <- threshold res$call <- match.call() res$contrasts <- attr(frames$X, "contrasts") res$na.action <- attr(mf, "na.action") res$terms <- mt res$xlevels <- .getXlevels(mt, mf) res$tJac <- frames$tJac res$y.levels <- frames$ylevels ## Check convergence: conv <- conv.check(res, Theta.ok=TRUE, tol=control$tol) print.conv.check(conv, action=control$convergence) ## print convergence message res$vcov <- conv$vcov res$cond.H <- conv$cond.H res$convergence <- conv[!names(conv) %in% c("vcov", "cond.H")] res$info <- with(res, { data.frame("link" = link, "threshold" = threshold, "nobs" = nobs, "logLik" = formatC(logLik, digits=2, format="f"), "AIC" = formatC(-2*logLik + 2*edf, digits=2, format="f"), "niter" = paste(niter[1], "(", niter[2], ")", sep=""), ### NOTE: iterations to get starting values for scale models *are* ### included here. "max.grad" = formatC(maxGradient, digits=2, format="e") ## BIC is not part of output since it is not clear what ## the no. observations are. ) }) class(res) <- "clm" ## add model.frame to results list? if(model) res$model <- mf return(res) } ordinal/R/lgamma.R0000644000176200001440000000515714533321514013475 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## [pdg]lgamma functions for the log-gamma distribution [lgamma]. ## Here glgamma is the gradient of the density function, dlgamma. ## The log-gamma distribution is ## used as a flexible link function in clm2() and clmm2(). 
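## As can be verified from plgammaR below, lambda = 0 corresponds to the
## probit link, while lambda = 1 and lambda = -1 recover the cloglog and
## loglog links, respectively. A usage sketch (assuming the 'wine' data
## shipped with the package; not run):
##
##   fm <- clm2(rating ~ temp + contact, data = wine, link = "log-gamma")
##   summary(fm) # lambda is estimated alongside the other parameters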
plgamma <- function(q, lambda, lower.tail = TRUE) .C("plgamma_C", q = as.double(q), length(q), as.double(lambda[1]), as.integer(lower.tail[1]), NAOK = TRUE)$q plgammaR <- function(eta, lambda, lower.tail = TRUE) { q <- lambda v <- q^(-2) * exp(q * eta) if(q < 0) p <- 1 - pgamma(v, q^(-2)) if(q > 0) p <- pgamma(v, q^(-2)) if(isTRUE(all.equal(0, q, tolerance = 1e-6))) p <- pnorm(eta) if(!lower.tail) 1 - p else p } dlgamma <- function(x, lambda, log = FALSE) { stopifnot(length(lambda) == 1 && length(log) == 1) .C("dlgamma_C", x = as.double(x), length(x), as.double(lambda), as.integer(log), NAOK = TRUE)$x } dlgammaR <- function(x, lambda, log = FALSE) { q <- lambda q.2 <- q^(-2) qx <- q * x log.d <- log(abs(q)) + q.2 * log(q.2) - lgamma(q.2) + q.2 * (qx - exp(qx)) if (!log) exp(log.d) else log.d } glgamma <- function(x, lambda) { stopifnot(length(lambda) == 1) .C("glgamma_C", x = as.double(x), length(x), as.double(lambda[1]), NAOK = TRUE)$x } glgammaR <- function(x, lambda) { stopifnot(length(lambda) == 1) (1 - exp(lambda * x))/lambda * dlgamma(x, lambda) } glgammaR2 <- function(x, lambda) { stopifnot(length(lambda == 1)) if(lambda == 0) return(gnorm(x)) y <- dlgamma(x, lambda) y[!is.na(y) && y > 0] <- y * (1 - exp(lambda * x)) return(y) } ordinal/R/clm.R0000644000176200001440000001366514533321514013015 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## The main clm function and some auxiliary functions to generate the ## model frame and handle the model environment. checkArgs.clm <- function(mc) { nm <- names(as.list(mc)) if(!"formula" %in% nm) stop("Model needs a formula", call.=FALSE) if("offset" %in% nm) stop("offset argument not allowed: ", "specify 'offset' in formula or scale arguments instead") invisible() } clm <- function(formula, scale, nominal, data, weights, start, subset, doFit = TRUE, na.action, contrasts, model = TRUE, control = list(), link = c("logit", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), threshold = c("flexible", "symmetric", "symmetric2", "equidistant"), ...) 
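## A typical call looks like this (sketch, not run; 'wine' is one of the
## package's example data sets):
##
##   fm <- clm(rating ~ temp + contact, scale = ~ temp, data = wine)
##   summary(fm)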
{ mc <- match.call(expand.dots = FALSE) link <- match.arg(link) threshold <- match.arg(threshold) if(missing(contrasts)) contrasts <- NULL if(missing(start)) start <- NULL checkArgs.clm(mc=match.call()) ## set control parameters: ## getControl.clm control <- do.call(clm.control, c(control, list(...))) ## Extract and process formulas: call.envir <- parent.frame(n=1) formulas <- get_clmFormulas(mc, call.envir) ## Get full model.frame and terms.objects: fullmf <- get_clm.mf(mc, formulas$fullForm, attr(formulas, "envir"), call.envir) if(control$method == "model.frame") return(fullmf) terms.list <- if(any(c("scale", "nominal") %in% names(formulas))) get_clmTerms(mc, formulas, call.envir) else list(formula=terms(fullmf)) ## Get y, X, weights, off etc.: design <- get_clmDesign(fullmf, terms.list, contrasts) lst <- namedList(doFit, control, link, threshold, start, formulas) if(control$method == "design") return(c(design, lst)) ## Get clm.struct: design <- c(design, makeThresholds(design$y.levels, threshold)) design <- drop.cols(design, silent=TRUE, drop.scale=FALSE) clm.struct <- c(design, lst) ## Fit model, check convergence, or return a model environment: fit <- clm.fit.default(clm.struct) if(doFit == FALSE) return(fit) ## Format output, prepare result: keep <- c("terms", "contrasts", "xlevels", # formula "S.terms", "S.contrasts", "S.xlevels", # scale "nom.terms", "nom.contrasts", "nom.xlevels", # nominal "na.action", "y", "y.levels", "control", "link", "threshold", "start", "formulas") res <- c(fit, clm.struct[match(keep, names(clm.struct), 0L)], list(formula=lst$formulas$formula, call=match.call())) ## res$tJac <- format_tJac(res$tJac, res$y.levels, clm.struct$alpha.names) res$info=get_clmInfoTab(res) if(model) res$model <- fullmf res <- res[sort(names(res))] class(res) <- "clm" res } clm.newRho <- function(parent=parent.frame(), y, X, NOM=NULL, S=NULL, weights, offset, S.offset=NULL, tJac, control=clm.control(), ...) ### Setting variables in rho: B1, B2, o1, o2, weights. { ## Make B1, B2, o1, o2 based on y, X and tJac: keep <- weights > 0 y[!keep] <- NA y <- droplevels(y) ntheta <- nlevels(y) - 1 y <- c(unclass(y)) y[is.na(y)] <- 0 n <- sum(keep) B2 <- 1 * (col(matrix(0, nrow(X), ntheta + 1)) == y) o1 <- c(1e5 * B2[keep, ntheta + 1]) - offset[keep] o2 <- c(-1e5 * B2[keep, 1]) - offset[keep] B1 <- B2[keep, -(ntheta + 1), drop = FALSE] B2 <- B2[keep, -1, drop = FALSE] ## adjust B1 and B2 for structured thresholds: B1 <- B1 %*% tJac B2 <- B2 %*% tJac ## update B1 and B2 with nominal effects: if(!is.null(NOM) && ncol(NOM) > 1) { ## if !is.null(NOM) and NOM is more than an intercept: if(control$sign.nominal == "negative") NOM[, -1] <- -NOM[, -1] LL1 <- lapply(1:ncol(NOM), function(x) B1 * NOM[keep, x]) B1 <- do.call(cbind, LL1) LL2 <- lapply(1:ncol(NOM), function(x) B2 * NOM[keep, x]) B2 <- do.call(cbind, LL2) } ## update B1 and B2 with location effects (X): nbeta <- ncol(X) - 1 if(nbeta > 0) { if(control$sign.location == "negative") X <- -X B1 <- cbind(B1, X[keep, -1, drop = FALSE]) B2 <- cbind(B2, X[keep, -1, drop = FALSE]) } dimnames(B1) <- NULL dimnames(B2) <- NULL n.psi <- ncol(B1) ## no. linear model parameters ## there may be scale offset without scale predictors: sigma <- Soff <- if(is.null(S.offset)) rep(1, n) else exp(S.offset[keep]) ## save scale model matrix: k <- 0 if(!is.null(S)) { S <- S[keep, -1, drop=FALSE] dimnames(S) <- NULL k <- ncol(S) ## no. scale parameters } has.scale <- ## TRUE if scale has to be considered. 
(!is.null(S) || any(S.offset != 0)) ## initialize fitted values and weights: fitted <- numeric(length = n) wts <- weights[keep] lst <- namedList(B1, B2, o1, o2, n.psi, S, Soff, k, sigma, has.scale, fitted, wts, clm.nll, clm.grad, clm.hess) list2env(x=lst, parent=parent) } ordinal/R/gdist.R0000644000176200001440000000414414533321514013344 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Gradients of densities of common distribution functions on the form ## g[dist], where "dist" can be one of "logis", "norm", and ## "cauchy". These functions are used in Newton-Raphson algorithms ## when fitting CLMs and CLMMs in clm(), clm2(), clmm() and ## clmm2(). Similar gradients are implemented for the gumbel, ## log-gamma, and Aranda-Ordaz distributions. glogis <- function(x) ### gradient of dlogis .C("glogis_C", x = as.double(x), length(x), NAOK = TRUE)$x gnorm <- function(x) ### gradient of dnorm(x) wrt. x .C("gnorm_C", x = as.double(x), length(x), NAOK = TRUE)$x gcauchy <- function(x) ### gradient of dcauchy(x) wrt. x .C("gcauchy_C", x = as.double(x), length(x), NAOK = TRUE)$x glogisR <- function(x) { ### glogis in R res <- rep(0, length(x)) isFinite <- !is.infinite(x) x <- x[isFinite] isNegative <- x < 0 q <- exp(-abs(x)) q <- 2*q^2*(1 + q)^-3 - q*(1 + q)^-2 q[isNegative] <- -q[isNegative] res[isFinite] <- q res } gnormR <- function(x) ### gnorm in R -x * dnorm(x) gcauchyR <- function(x) ### gcauchy(x) in R -2*x/pi*(1+x^2)^-2 ordinal/R/utils.R0000644000176200001440000004475514533321514013406 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Various utility functions. setLinks <- function(rho, link) { ### The Aranda-Ordaz and log-gamma links are not supported in this ### version of clm. 
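### setLinks() populates the environment 'rho' with the CDF (pfun), the
### density (dfun) and the density gradient (gfun) matching 'link'; for
### example link = "probit" sets rho$pfun = pnorm, rho$dfun = dnorm and
### rho$gfun = gnorm (the gradient of dnorm defined in gdist.R).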
rho$pfun <- switch(link, logit = plogis, probit = pnorm, cloglog = function(x, lower.tail=TRUE) pgumbel(x, lower.tail=lower.tail, max=FALSE), cauchit = pcauchy, loglog = pgumbel, "Aranda-Ordaz" = function(x, lambda) pAO(x, lambda), "log-gamma" = function(x, lambda) plgamma(x, lambda)) rho$dfun <- switch(link, logit = dlogis, probit = dnorm, cloglog = function(x) dgumbel(x, max=FALSE), cauchit = dcauchy, loglog = dgumbel, "Aranda-Ordaz" = function(x, lambda) dAO(x, lambda), "log-gamma" = function(x, lambda) dlgamma(x, lambda)) rho$gfun <- switch(link, logit = glogis, probit = gnorm, cloglog = function(x) ggumbel(x, max=FALSE), loglog = ggumbel, cauchit = gcauchy, "Aranda-Ordaz" = function(x, lambda) gAO(x, lambda), ## shouldn't happen "log-gamma" = function(x, lambda) glgamma(x, lambda) ) rho$link <- link rho$nlambda <- if(rho$link %in% c("Aranda-Ordaz", "log-gamma")) 1 else 0 if(rho$link == "Aranda-Ordaz") rho$lambda <- 1 if(rho$link == "log-gamma") rho$lambda <- 0.1 } makeThresholds <- function(y.levels, threshold) { ## , tJac) { ### Generate the threshold structure summarized in the transpose of ### the Jacobian matrix, tJac. Also generating nalpha and alpha.names. ### args: ### y - response variable, a factor ### threshold - one of "flexible", "symmetric" or "equidistant" ## stopifnot(is.factor(y)) lev <- y.levels ntheta <- length(lev) - 1 ## if(!is.null(tJac)) { ## stopifnot(nrow(tJac) == ntheta) ## nalpha <- ncol(tJac) ## alpha.names <- colnames(tJac) ## if(is.null(alpha.names) || anyDuplicated(alpha.names)) ## alpha.names <- as.character(1:nalpha) ## dimnames(tJac) <- NULL ## } ## else { ## threshold structure identified by threshold argument: if(threshold == "flexible") { tJac <- diag(ntheta) nalpha <- ntheta alpha.names <- paste(lev[-length(lev)], lev[-1], sep="|") } if(threshold == "symmetric") { if(!ntheta >=2) stop("symmetric thresholds are only meaningful for responses with 3 or more levels", call.=FALSE) if(ntheta %% 2) { ## ntheta is odd nalpha <- (ntheta + 1)/2 ## No. threshold parameters tJac <- t(cbind(diag(-1, nalpha)[nalpha:1, 1:(nalpha-1)], diag(nalpha))) tJac[,1] <- 1 alpha.names <- c("central", paste("spacing.", 1:(nalpha-1), sep="")) } else { ## ntheta is even nalpha <- (ntheta + 2)/2 tJac <- cbind(rep(1:0, each = ntheta / 2), rbind(diag(-1, ntheta / 2)[(ntheta / 2):1,], diag(ntheta / 2))) tJac[,2] <- rep(0:1, each = ntheta / 2) alpha.names <- c("central.1", "central.2") if(nalpha > 2) alpha.names <- c(alpha.names, paste("spacing.", 1:(nalpha-2), sep="")) } } ## Assumes latent mean is zero: if(threshold == "symmetric2") { if(!ntheta >=2) stop("symmetric thresholds are only meaningful for responses with 3 or more levels", call.=FALSE) if(ntheta %% 2) { ## ntheta is odd nalpha <- (ntheta - 1)/2 ## No. threshold parameters tJac <- rbind(apply(-diag(nalpha), 1, rev), rep(0, nalpha), diag(nalpha)) } else { ## ntheta is even nalpha <- ntheta/2 tJac <- rbind(apply(-diag(nalpha), 1, rev), diag(nalpha)) } alpha.names <- paste("spacing.", 1:nalpha, sep="") } if(threshold == "equidistant") { if(!ntheta >=2) stop("equidistant thresholds are only meaningful for responses with 3 or more levels", call.=FALSE) tJac <- cbind(1, 0:(ntheta-1)) nalpha <- 2 alpha.names <- c("threshold.1", "spacing") } ## } return(list(tJac = tJac, nalpha = nalpha, alpha.names = alpha.names)) } getFitted <- function(eta1, eta2, pfun, ...) 
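## Why the tail-handling below matters (illustrative numbers): with
## pfun = pnorm, eta1 = 10 and eta2 = 9, pnorm(10) - pnorm(9) subtracts
## two doubles that are both ~1 and the difference is lost to rounding,
## whereas pnorm(9, lower.tail = FALSE) - pnorm(10, lower.tail = FALSE)
## computes the same probability (~1.13e-19) from small tail values.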
{ ## eta1, eta2: linear predictors ## pfun: cumulative distribution function ## ## Compute fitted values while maintaining high precision in the ## result - if eta1 and eta2 are both large, fitted is the ## difference between two numbers very close to 1, which leads to ## imprecision and potentially errors. ## ## Note that (eta1 > eta2) always holds, hence (eta2 > 0) happens ## relatively rarely. k2 <- eta2 > 0 fitted <- pfun(eta1) - pfun(eta2) fitted[k2] <- pfun(eta2[k2], lower.tail=FALSE) - pfun(eta1[k2], lower.tail=FALSE) fitted } getFittedC <- function(eta1, eta2, link = c("logit", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), lambda=1) ### Same as getFitted only this is implemented in C and handles all ### link functions including the flexible ones. { link <- match.arg(link) .Call("get_fitted", eta1, eta2, link, lambda) } getWeights <- function(mf) { ### mf - model.frame n <- nrow(mf) if(is.null(wts <- model.weights(mf))) wts <- rep(1, n) ## if (any(wts <= 0)) ## stop(gettextf("non-positive weights are not allowed"), ## call.=FALSE) ### NOTE: We do not remove observations where weights == 0, because ### that could be a somewhat surprising behaviour. It would also ### require that the model.frame be evaluated all over again to get ### the right response vector with the right number of levels. if(length(wts) && length(wts) != n) stop(gettextf("number of weights is %d should equal %d (number of observations)", length(wts), n), call.=FALSE) if(any(wts < 0)) stop(gettextf("negative weights are not allowed"), call.=FALSE) ## if(any(wts == 0)) { ## y <- model.response(mf, "any") ## if(any(table(y[wts > 0]) == 0)) ## stop(gettextf("zero positive weights for one or more response categories"), ## call.=FALSE) ## } return(as.double(wts)) } getOffset <- function(mf, terms) { ### mf - model.frame n <- nrow(mf) off <- rep(0, n) if(!is.null(o <- attr(terms, "offset"))) { if(length(o) > 1) stop("only one offset term allowed in each formula", call.=FALSE) varnm <- attr(terms, "variables") ## deparse all variable names - character vector: varnm <- unlist(lapply(as.list(varnm), deparse)[-1]) off <- mf[, varnm[o]] } ## off <- as.vector(mf[, o]) if(length(off) && length(off) != n) stop(gettextf("number of offsets is %d should equal %d (number of observations)", length(off), n), call.=FALSE) return(as.double(off)) } getOffsetStd <- function(mf) { n <- nrow(mf) if(is.null(off <- model.offset(mf))) off <- rep(0, n) if(length(off) && length(off) != n) stop(gettextf("number of offsets is %d should equal %d (number of observations)", length(off), n), call.=FALSE) return(as.double(off)) } getFullForm <- function(form, ..., envir=parent.frame()) { ### collect terms in several formulas in a single formula ### sets the environment of the resulting formula to envir. forms <- list(...) 
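  ## Illustration (not run): getFullForm(y ~ x, ~ g, ~ z) deparses the
  ## extra right-hand sides and returns y ~ x + g + z, with the
  ## environment of the result set to 'envir'.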
if(lf <- length(forms)) { rhs <- character(0) ## Collect rhs terms in a single vector of rh-sides: for(i in 1:lf) { rhs <- c(rhs, Deparse(forms[[i]][[2]])) if(length(forms[[i]]) >= 3) rhs <- c(rhs, Deparse(forms[[i]][[3]])) } ## add '+' inbetween terms: rhs <- paste(rhs, collapse=" + ") ## combine if 'deparse(form)' is a (long) vector: form2 <- paste(deparse(form, width.cutoff=500L), collapse=" ") ## combine form2 and rhs into a single string: form <- paste(form2, rhs, sep=" + ") } return(as.formula(form, env=envir)) } ## getFullForm <- function(form, ..., envir=parent.frame()) { ## ### collect terms in several formulas in a single formula (on the rhs) ## ### sets the environment of the resulting formula to envir. ## forms <- list(form, ...) ## allVars <- unlist(sapply(forms, all.vars)) ## rhs <- paste(allVars, collapse=" + ") ## form <- paste("~", rhs) ## return(as.formula(form, env=envir)) ## } ## getCtrlArgs <- function(control, extras) { ## ### Recover control arguments from clmm.control and extras (...): ## ### ## ## Collect control arguments in list: ## ctrl.args <- c(extras, control$method, control$useMatrix, ## control$ctrl, control$optCtrl) ## ## Identify the two occurences "trace", delete them, and add trace=1 ## ## or trace=-1 to the list of arguments: ## which.trace <- which(names(ctrl.args) == "trace") ## trace.sum <- sum(unlist(ctrl.args[which.trace])) ## ctrl.args <- ctrl.args[-which.trace] ## ## remove duplicated arguments: ## ctrl.args <- ctrl.args[!duplicated(names(ctrl.args))] ## if(trace.sum >= 1) ctrl.args$trace <- 1 ## if(trace.sum >= 2 || trace.sum <= -1) ctrl.args$trace <- -1 ## ## return the updated list of control parameters: ## do.call("clmm.control", ctrl.args) ## } getCtrlArgs <- function(control, extras) { ### Recover control arguments from clmm.control and extras (...): ### if(!is.list(control)) stop("'control' should be a list") ## Collect control arguments in list: ## 1) assuming 'control' is a call to clmm.control: ctrl.args <- if(setequal(names(control), names(clmm.control()))) c(extras, control["method"], control["useMatrix"], control$ctrl, control$optCtrl) ## assuming 'control' is specified with control=list( 'args'): else c(extras, control) ### NOTE: having c(extras, control) rather than c(control, extras) ### means that extras have precedence over control. ## Identify the two occurences "trace", delete them, and add trace=1 ## or trace=-1 to the list of arguments: which.trace <- which(names(ctrl.args) == "trace") trace.sum <- sum(unlist(ctrl.args[which.trace])) if(trace.sum) ctrl.args <- ctrl.args[-which.trace] ## remove duplicated arguments: ctrl.args <- ctrl.args[!duplicated(names(ctrl.args))] if(trace.sum >= 1) ctrl.args$trace <- 1 if(trace.sum >= 2 || trace.sum <= -1) ctrl.args$trace <- -1 ## return the updated list of control parameters: do.call("clmm.control", ctrl.args) } Trace <- function(iter, stepFactor, val, maxGrad, par, first=FALSE) { t1 <- sprintf(" %3d: %-5e: %.3f: %1.3e: ", iter, stepFactor, val, maxGrad) t2 <- formatC(par) if(first) cat("iter: step factor: Value: max|grad|: Parameters:\n") cat(t1, t2, "\n") } response.name <- function(terms) { vars <- as.character(attr(terms, "variables")) vars[1 + attr(terms, "response")] } getB <- function(y, NOM=NULL, X=NULL, offset=NULL, tJac=NULL) { ### NOTE: Is this function ever used? ### NOTE: no tests that arguments conform. 
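### What is constructed here: B1 and B2 are indicator-based design
### matrices linking each observation to its lower and upper threshold,
### while o1 and o2 use +/- 1e5 as finite stand-ins for the infinite
### thresholds of the two extreme response categories.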
nlev <- nlevels(y) n <- length(y) B2 <- 1 * (col(matrix(0, n, nlev)) == c(unclass(y))) o1 <- c(1e5 * B2[, nlev]) - offset o2 <- c(-1e5 * B2[,1]) - offset B1 <- B2[, -(nlev), drop = FALSE] B2 <- B2[, -1, drop = FALSE] ## adjust B1 and B2 for structured thresholds: if(!is.null(tJac)) { B1 <- B1 %*% tJac B2 <- B2 %*% tJac } ## update B1 and B2 with nominal effects: if(!is.null(NOM) && ncol(NOM) > 1) { ## if !is.null(NOM) and NOM is more than an intercept: LL1 <- lapply(1:ncol(NOM), function(x) B1 * NOM[,x]) B1 <- do.call(cbind, LL1) LL2 <- lapply(1:ncol(NOM), function(x) B2 * NOM[,x]) B2 <- do.call(cbind, LL2) } ## update B1 and B2 with location effects (X): nbeta <- ncol(X) - 1 if(ncol(X) > 1) { B1 <- cbind(B1, -X[, -1, drop = FALSE]) B2 <- cbind(B2, -X[, -1, drop = FALSE]) } dimnames(B1) <- NULL dimnames(B2) <- NULL namedList(B1, B2, o1, o2) } Deparse <- function(expr, width.cutoff = 500L, backtick = mode(expr) %in% c("call", "expression", "(", "function"), control = c("keepInteger", "showAttributes", "keepNA"), nlines = -1L) paste(deparse(expr=expr, width.cutoff= width.cutoff, backtick=backtick, control=control, nlines=nlines), collapse = " ") getContrasts <- function(terms, contrasts) { if(is.null(contrasts)) return(NULL) term.labels <- attr(terms, "term.labels") contrasts[names(contrasts) %in% term.labels] } checkContrasts <- function(terms, contrasts) { ### Check that contrasts are not specified for absent factors and warn ### about them term.labels <- attr(terms, "term.labels") nm.contr <- names(contrasts) notkeep <- nm.contr[!nm.contr %in% term.labels] msg <- if(length(notkeep) > 2) "variables '%s' are absent: their contrasts will be ignored" else "variable '%s' is absent: its contrasts will be ignored" if(length(notkeep)) warning(gettextf(msg, paste(notkeep, collapse=", ")), call.=FALSE) invisible() } get_clmInfoTab <- function(object, ...) { names <- c("link", "threshold", "nobs", "logLik", "edf", "niter", "maxGradient", "cond.H") stopifnot(all(names %in% names(object))) info <- with(object, { data.frame("link" = link, "threshold" = threshold, "nobs" = nobs, "logLik" = formatC(logLik, digits=2, format="f"), "AIC" = formatC(-2*logLik + 2*edf, digits=2, format="f"), "niter" = paste(niter[1], "(", niter[2], ")", sep=""), ### NOTE: iterations to get starting values for scale models *are* ### included here. "max.grad" = formatC(maxGradient, digits=2, format="e"), "cond.H" = formatC(cond.H, digits=1, format="e") ## BIC is not part of output since it is not clear what ## the no. observations are. 
) }) info } format_tJac <- function(tJac, y.levels, alpha.names) { lev <- y.levels rownames(tJac) <- paste(lev[-length(lev)], lev[-1], sep="|") colnames(tJac) <- alpha.names tJac } extractFromFrames <- function(frames, fullmf) { lst <- list(y.levels=frames$y.levels, na.action=attr(fullmf, "na.action"), tJac=format_tJac(frames)) lstX <- list(contrasts=attr(frames$X, "contrasts"), terms=frames$terms, xlevels=.getXlevels(frames$terms, fullmf)) lst <- c(lst, lstX) if(!is.null(frames[["S"]])) lst <- c(lst, list(S.contrasts=attr(frames$S, "contrasts"), S.terms=frames$S.terms, S.xlevels=.getXlevels(frames$S.terms, fullmf))) if(!is.null(frames[["NOM"]])) lst <- c(lst, list(nom.contrasts=attr(frames$NOM, "contrasts"), nom.terms=frames$nom.terms, nom.xlevels=.getXlevels(frames$nom.terms, fullmf))) lst } formatTheta <- function(alpha, tJac, x, sign.nominal) { ## x: alpha, tJac, nom.terms, NOM, nom.contrasts, nom.xlevels, Theta.ok <- TRUE if(is.null(x[["NOM"]])) { ## no nominal effects Theta <- alpha %*% t(tJac) colnames(Theta) <- rownames(tJac) return(namedList(Theta, Theta.ok)) } x$nom.assign <- attr(x$NOM, "assign") args <- c("nom.terms", "nom.assign") args <- c("nom.terms") if(any(sapply(args, function(txt) is.null(x[[txt]])))) { ## Nominal effects, but we cannot compute Theta warning("Cannot assess if all thresholds are increasing", call.=FALSE) return(namedList(Theta.ok)) } ## Get matrix of thresholds; Theta: Theta.list <- getThetamat(terms=x$nom.terms, alpha=alpha, assign=attr(x$NOM, "assign"), contrasts=x$nom.contrasts, tJac=tJac, xlevels=x$nom.xlevels, sign.nominal=sign.nominal) ## Test that (finite) thresholds are increasing: if(all(is.finite(unlist(Theta.list$Theta)))) { th.increasing <- apply(Theta.list$Theta, 1, function(th) all(diff(th) >= 0)) if(!all(th.increasing)) Theta.ok <- FALSE } Theta <- if(length(Theta.list) == 2) with(Theta.list, cbind(mf.basic, Theta)) else Theta.list$Theta alpha.mat <- matrix(alpha, ncol=ncol(tJac), byrow=TRUE) colnames(alpha.mat) <- colnames(tJac) rownames(alpha.mat) <- attr(x$NOM, "orig.colnames") ## Return namedList(Theta, alpha.mat, Theta.ok) } ## We don't need this function anymore since the terms objects now ## always contain dataClasses and predvars attributes. ## get_dataClasses <- function(mf) { ## if(!is.null(Terms <- attr(mf, "terms")) && ## !is.null(dataCl <- attr(Terms, "dataClasses"))) ## return(dataCl) ## sapply(mf, .MFclass) ## } ## Returns a named list, where the names are the deparsed actual ## arguments: namedList <- function(...) { setNames(list(...), nm=sapply(as.list(match.call()), deparse)[-1]) } ## a <- 1 ## b <- 2 ## c <- 3 ## d <- list(e=2, f=factor(letters[rep(1:2, 2)])) ## g <- matrix(runif(9), 3) ## ## namedList(a, b, c) ## namedList(a, b, c, d, g) ## ## res <- namedList(d, g) ## names(res) ordinal/R/derivatives.R0000644000176200001440000001312614533321514014557 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Functions for finite difference computations of derivatives ## (gradient and Hessian) of user-specified functions. deriv12 <- function(fun, x, delta=1e-4, fx=NULL, ...) { ### Compute gradient and Hessian at the same time (to save computing ### time) nx <- length(x) fx <- if(!is.null(fx)) fx else fun(x, ...) stopifnot(length(fx) == 1) H <- array(NA, dim=c(nx, nx)) g <- numeric(nx) for(j in 1:nx) { ## Diagonal elements: xadd <- xsub <- x xadd[j] <- x[j] + delta xsub[j] <- x[j] - delta fadd <- fun(xadd, ...) fsub <- fun(xsub, ...) H[j, j] <- (fadd - 2 * fx + fsub) / delta^2 g[j] <- (fadd - fsub) / (2 * delta) ## Off diagonal elements: for(i in 1:nx) { if(i >= j) break ## Compute upper triangular elements: xaa <- xas <- xsa <- xss <- x xaa[c(i, j)] <- x[c(i, j)] + c(delta, delta) xas[c(i, j)] <- x[c(i, j)] + c(delta, -delta) xsa[c(i, j)] <- x[c(i, j)] + c(-delta, delta) xss[c(i, j)] <- x[c(i, j)] - c(delta, delta) H[i, j] <- H[j, i] <- (fun(xaa, ...) - fun(xas, ...) - fun(xsa, ...) + fun(xss, ...)) / (4 * delta^2) } } list(gradient = g, Hessian = H) } myhess <- function(fun, x, fx=NULL, delta=1e-4, ...) { nx <- length(x) fx <- if(!is.null(fx)) fx else fun(x, ...) stopifnot(length(fx) == 1) H <- array(NA, dim=c(nx, nx)) for(j in 1:nx) { ## Diagonal elements: xadd <- xsub <- x xadd[j] <- x[j] + delta xsub[j] <- x[j] - delta H[j, j] <- (fun(xadd, ...) - 2 * fx + fun(xsub, ...)) / delta^2 ## Upper triangular (off diagonal) elements: for(i in 1:nx) { if(i >= j) break xaa <- xas <- xsa <- xss <- x xaa[c(i, j)] <- x[c(i, j)] + c(delta, delta) xas[c(i, j)] <- x[c(i, j)] + c(delta, -delta) xsa[c(i, j)] <- x[c(i, j)] + c(-delta, delta) xss[c(i, j)] <- x[c(i, j)] - c(delta, delta) H[j, i] <- H[i, j] <- (fun(xaa, ...) - fun(xas, ...) - fun(xsa, ...) + fun(xss, ...)) / (4 * delta^2) } } H } mygrad <- function(fun, x, delta = 1e-4, method = c("central", "forward", "backward"), ...) { method <- match.arg(method) nx <- length(x) if(method %in% c("central", "forward")) { Xadd <- matrix(rep(x, nx), nrow=nx, byrow=TRUE) + diag(delta, nx) fadd <- apply(Xadd, 1, fun, ...) } if(method %in% c("central", "backward")) { Xsub <- matrix(rep(x, nx), nrow=nx, byrow=TRUE) - diag(delta, nx) fsub <- apply(Xsub, 1, fun, ...) ## eval.parent perhaps? } res <- switch(method, "forward" = (fadd - fun(x, ...)) / delta, "backward" = (fun(x, ...) - fsub) / delta, "central" = (fadd - fsub) / (2 * delta) ) res } grad.ctr3 <- function(fun, x, delta=1e-4, ...) { nx <- length(x) Xadd <- matrix(rep(x, nx), nrow=nx, byrow=TRUE) + diag(delta, nx) Xsub <- matrix(rep(x, nx), nrow=nx, byrow=TRUE) - diag(delta, nx) fadd <- apply(Xadd, 1, fun, ...) fsub <- apply(Xsub, 1, fun, ...) ## eval.parent perhaps? (fadd - fsub) / (2 * delta) } grad.ctr2 <- function(fun, x, delta=1e-4, ...) { ans <- x for(i in seq_along(x)) { xadd <- xsub <- x xadd[i] <- x[i] + delta xsub[i] <- x[i] - delta ans[i] <- (fun(xadd, ...) - fun(xsub, ...)) / (2 * delta) } ans } grad.ctr <- function(fun, x, delta=1e-4, ...) { sapply(seq_along(x), function(i) { xadd <- xsub <- x xadd[i] <- x[i] + delta xsub[i] <- x[i] - delta (fun(xadd, ...) - fun(xsub, ...)) / (2 * delta) }) } grad <- grad.ctr grad.ctr4 <- function(fun, x, delta=1e-4, ...) 
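### grad.ctr4 is a more careful variant of the central-difference
### functions above. A commented example (not run) of what they
### compute; 'fun' and 'x' are hypothetical:
## fun <- function(x) sum(x^2)
## grad.ctr(fun, x=c(1, 2)) ## approx. c(2, 4) since the gradient is 2x
## deriv12(fun, x=c(1, 2))  ## gradient and Hessian (approx. 2*diag(2))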
{
### - checking finiteness of x and fun-values
### - taking care to avoid floating point errors
### - using a fixed step h=delta rather than h=x*delta (could the
###   latter be important for small or large x?)
    if(!all(is.finite(x)))
        stop("Cannot compute gradient: non-finite argument")
    ans <- x ## return values
    for(i in seq_along(x)) {
        xadd <- xsub <- x ## reset fun arguments
        xadd[i] <- x[i] + delta
        xsub[i] <- x[i] - delta
        ans[i] <- (fun(xadd, ...) - fun(xsub, ...)) / (xadd[i] - xsub[i])
### NOTE: xadd[i] - xsub[i] != 2*delta with floating point arithmetic.
    }
    if(!all(is.finite(ans))) {
        warning("cannot compute gradient: non-finite function values occurred")
        ans[!is.finite(ans)] <- Inf
    }
    ans
}
ordinal/R/warning_functions.R0000644000176200001440000000252414533321514015767 0ustar liggesusers#############################################################################
##    Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen
##
##    This file is part of the ordinal package for R (*ordinal*)
##
##    *ordinal* is free software: you can redistribute it and/or modify
##    it under the terms of the GNU General Public License as published by
##    the Free Software Foundation, either version 2 of the License, or
##    (at your option) any later version.
##
##    *ordinal* is distributed in the hope that it will be useful,
##    but WITHOUT ANY WARRANTY; without even the implied warranty of
##    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##    GNU General Public License for more details.
##
##    A copy of the GNU General Public License is available at
##    <https://www.r-project.org/Licenses/> and/or
##    <http://www.gnu.org/licenses/>.
#############################################################################
givesWarnings <- function(expr)
    countWarnings(expr) > 0L

countWarnings <- function(expr)
{
    .number_of_warnings <- 0L
    frame_number <- sys.nframe()
    ans <- withCallingHandlers(expr, warning = function(w) {
        assign(".number_of_warnings", .number_of_warnings + 1L,
               envir = sys.frame(frame_number))
        invokeRestart("muffleWarning")
    })
    .number_of_warnings
}
ordinal/R/gumbel.R0000644000176200001440000000746414533321514013514 0ustar liggesusers#############################################################################
##    Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen
##
##    This file is part of the ordinal package for R (*ordinal*)
##
##    *ordinal* is free software: you can redistribute it and/or modify
##    it under the terms of the GNU General Public License as published by
##    the Free Software Foundation, either version 2 of the License, or
##    (at your option) any later version.
##
##    *ordinal* is distributed in the hope that it will be useful,
##    but WITHOUT ANY WARRANTY; without even the implied warranty of
##    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##    GNU General Public License for more details.
##
##    A copy of the GNU General Public License is available at
##    <https://www.r-project.org/Licenses/> and/or
##    <http://www.gnu.org/licenses/>.
#############################################################################
## This file contains:
## [pdqrg]gumbel functions for the Gumbel distribution.
## Here ggumbel is the gradient of the density function, dgumbel.

pgumbel <-
    function(q, location = 0, scale = 1, lower.tail = TRUE, max = TRUE)
### CDF for Gumbel max and min distributions
### Currently only unit length location and scale are supported.
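### A commented check (not run) against the closed-form max-Gumbel CDF,
### exp(-exp(-(q - location)/scale)); pgumbelR() below is the pure-R
### reference implementation:
## pgumbel(1)  ## = exp(-exp(-1)) = 0.6922...
## pgumbelR(1) ## same value, computed in R rather than C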
{ if(max) ## right skew, loglog link .C("pgumbel_C", q = as.double(q), length(q), as.double(location)[1], as.double(scale)[1], as.integer(lower.tail), NAOK = TRUE)$q else ## left skew, cloglog link .C("pgumbel2_C", q = as.double(q), length(q), as.double(location)[1], as.double(scale)[1], as.integer(lower.tail), NAOK = TRUE)$q } pgumbelR <- function(q, location = 0, scale = 1, lower.tail = TRUE) ### R equivalent of pgumbel() { q <- (q - location)/scale p <- exp(-exp(-q)) if (!lower.tail) 1 - p else p } pgumbel2R <- function(q, location = 0, scale = 1, lower.tail = TRUE) { q <- (-q - location)/scale p <- exp(-exp(-q)) if (!lower.tail) p else 1 - p } dgumbel <- function(x, location = 0, scale = 1, log = FALSE, max = TRUE) ### PDF for the Gumbel max and mon distributions { if(max) ## right skew, loglog link .C("dgumbel_C", x = as.double(x), length(x), as.double(location)[1], as.double(scale)[1], as.integer(log), NAOK = TRUE)$x else ## left skew, cloglog link .C("dgumbel2_C", x = as.double(x), length(x), as.double(location)[1], as.double(scale)[1], as.integer(log), NAOK = TRUE)$x } dgumbelR <- function(x, location = 0, scale = 1, log = FALSE) ### dgumbel in R { q <- (x - location)/scale log.d <- -exp(-q) - q - log(scale) if (!log) exp(log.d) else log.d } dgumbel2R <- function(x, location = 0, scale = 1, log = FALSE) { q <- (-x - location)/scale log.d <- -exp(-q) - q - log(scale) if (!log) exp(log.d) else log.d } ggumbel <- function(x, max = TRUE) { ### gradient of dgumbel(x) wrt. x if(max) ## right skew, loglog link .C("ggumbel_C", x = as.double(x), length(x), NAOK = TRUE)$x else ## left skew, cloglog link .C("ggumbel2_C", x = as.double(x), length(x), NAOK = TRUE)$x } ggumbelR <- function(x){ ### ggumbel in R q <- exp(-x) ifelse(q == Inf, 0, { eq <- exp(-q) -eq*q + eq*q*q }) } ggumbel2R <- function(x) -ggumbelR(-x) rgumbel <- function(n, location = 0, scale = 1, max = TRUE) { if(max) location - scale * log(-log(runif(n))) else location + scale * log(-log(runif(n))) } qgumbel <- function(p, location = 0, scale = 1, lower.tail = TRUE, max = TRUE) { if(!lower.tail) p <- 1 - p if(max) ## right skew, loglog link location - scale * log(-log(p)) else ## left skew, cloglog link location + scale * log(-log(1 - p)) } ordinal/R/clm.fitter.R0000644000176200001440000003746014533321514014310 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Functions to fit/estimate CLMs (clm_fit_NR, clm_fit_optim) and ## functions implementing the negative log-likelihood, its gradient ## and hessian (.nll, .grad, .hess). These functions are rarely to be ## called directly from outside the package. 
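## A minimal commented sketch (not run) of the modified Newton-Raphson
## scheme implemented in clm_fit_NR() below; 'nll', 'grd', 'hess',
## 'start' and 'gradTol' are hypothetical stand-ins for clm.nll,
## clm.grad, clm.hess, the starting values and control$gradTol:
## par <- start
## repeat {
##     g <- grd(par)
##     if(max(abs(g)) < gradTol) break ## absolute criterion
##     step <- solve(hess(par), g)     ## clm_fit_NR uses a Cholesky
##                                     ## factorization instead
##     fac <- 1
##     while(nll(par - fac * step) > nll(par))
##         fac <- fac/2                ## step halving
##     par <- par - fac * step
## } ## (Hessian inflation and other safeguards omitted here)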
clm_fit_NR <- function(rho, control = list()) ### The main work horse: Where the actual fitting of the clm goes on. ### Fitting the clm via modified Newton-Raphson with step halving. ### -------- Assumes the existence of the following functions: ### clm.nll - negative log-likelihood ### clm.grad - gradient of nll wrt. par ### clm.hess - hessian of nll wrt. par ### Trace - for trace information { control <- do.call(clm.control, control) stepFactor <- 1 innerIter <- modif.iter <- abs.iter <- 0L conv <- 2L ## Convergence flag (iteration limit reached) nll <- rho$clm.nll(rho) if(!is.finite(nll)) stop("Non-finite log-likelihood at starting value") ## do.newton <- ## rel.conv <- FALSE ## stephalf <- TRUE ## Newton-Raphson algorithm: for(i in 1:(control$maxIter + 1L)) { gradient <- rho$clm.grad(rho) maxGrad <- max(abs(gradient)) if(control$trace > 0) { Trace(iter=i+innerIter-1, stepFactor, nll, maxGrad, rho$par, first=(i==1)) if(control$trace > 1 && i > 1) { cat("\tgrad: ") cat(paste(formatC(gradient, digits=3, format="e"))) cat("\n\tstep: ") cat(paste(formatC(-step, digits=3, format="e"))) cat("\n\teigen: ") cat(paste(formatC(eigen(hessian, symmetric=TRUE, only.values=TRUE)$values, digits=3, format="e"))) cat("\n") } } abs.conv <- (maxGrad < control$gradTol) if(abs.conv) abs.iter <- abs.iter + 1L hessian <- rho$clm.hess(rho) ## Compute cholesky factor of Hessian: ch = Ut U ch <- try(chol(hessian), silent=TRUE) ### NOTE: solve(hessian, gradient) is not good enough because it will ### compute step for negative-definite Hessians and we don't want ### that. ### OPTION: What if Hessian is closely singular but slightly positive? ### Could we do something better in that case? if(inherits(ch, "try-error")) { if(abs.conv) { ## step.ok not true. conv <- 1L break ## cannot meet relative criterion. } ## If Hessian is non-positive definite: min.ev <- min(eigen(hessian, symmetric=TRUE, only.values=TRUE)$values) inflation.factor <- 1 ## Inflate diagonal of Hessian to make it positive definite: inflate <- abs(min.ev) + inflation.factor hessian <- hessian + diag(inflate, nrow(hessian)) if(control$trace > 0) cat(paste("Hessian is singular at iteration", i-1, "inflating diagonal with", formatC(inflate, digits=5, format="f"), "\n")) ch <- try(chol(hessian), silent=TRUE) if(inherits(ch, "try-error")) stop(gettextf("Cannot compute Newton step at iteration %d", i-1), call.=FALSE) modif.iter <- modif.iter + 1L ## do.newton <- FALSE } else modif.iter <- 0L if(modif.iter >= control$maxModIter) { conv <- 4L break } ## solve U'y = g for y, then ## solve U step = y for step: step <- c(backsolve(ch, backsolve(ch, gradient, transpose=TRUE))) rel.conv <- (max(abs(step)) < control$relTol) ## Test if step is in a descent direction - ## otherwise use step <- grad / max|grad|: ## if(crossprod(gradient, step) < 0) { ## if(control$trace > 0) ## cat("Newton step is not in descent direction; using gradient instead\n") ## step <- c(gradient / max(abs(gradient))) ## } else if(abs.conv && rel.conv) { conv <- 0L ## no need to step back as stephalf was false so the new ## par are just better. 
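## (recall: abs.conv is max|gradient| < gradTol and rel.conv is
## max|step| < relTol; the convergence flag 0L requires both)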
            break
        }
        ## update parameters:
        rho$par <- rho$par - stepFactor * step
        nllTry <- rho$clm.nll(rho)
        lineIter <- 0
        stephalf <- (nllTry > nll)
### NOTE: sometimes nllTry > nll just due to noise, so we also check
### reduction in gradient for small diffs:
        if(stephalf && abs(nll - nllTry) < 1e-10)
            stephalf <- maxGrad < max(abs(rho$clm.grad(rho)))
        ## Assess convergence:
        ## (only attempt to satisfy rel.conv if abs.conv is true and
        ## it is possible to take the full Newton step)
### OPTION: And if 'step' is not close to 1 or 1/2, but
### small. Otherwise this just indicates that the parameter is
### infinite.
        ## if(abs.conv && !step.ok) {
        if(abs.conv && stephalf) {
            conv <- 1L
            ## we need to step back to the par for which abs.conv
            ## was true:
            rho$par <- rho$par + stepFactor * step
            rho$clm.nll(rho)
            break
        }
        ## if(abs.conv && rel.conv) {
        ##     conv <- 0L
        ##     rho$par <- rho$par + stepFactor * step
        ##     rho$clm.nll(rho)
        ##     ## no need to step back as stephalf was false so the new
        ##     ## par are just better.
        ##     break
        ## }
        if(abs.conv && abs.iter >= 5L) {
            ## Cannot satisfy rel.conv in 5 iterations after satisfying
            ## abs.conv. Probably some parameters are unbounded.
            conv <- 1L
            break
        }
        ## Step halving if nll increases:
        while(stephalf) {
            stepFactor <- stepFactor/2
            rho$par <- rho$par + stepFactor * step
            nllTry <- rho$clm.nll(rho)
            lineIter <- lineIter + 1
            if(control$trace > 0) {
                cat("step halving:\n")
                cat("nll reduction: ",
                    formatC(nll - nllTry, digits=5, format="e"), "\n")
                Trace(i+innerIter-1, stepFactor, nll, maxGrad,
                      rho$par, first = FALSE)
            }
            if(lineIter > control$maxLineIter){
                conv <- 3L
                break
            }
            innerIter <- innerIter + 1
            stephalf <- (nllTry > nll)
            if(stephalf && abs(nll - nllTry) < 1e-10)
                stephalf <- (maxGrad < max(abs(rho$clm.grad(rho))))
        } ## end step halving
        if(conv == 3L) break
        if(control$trace > 0)
            cat("nll reduction: ",
                formatC(nll - nllTry, digits=5, format="e"), "\n")
        nll <- nllTry
        ## Double stepFactor if needed:
        stepFactor <- min(1, 2 * stepFactor)
    } ## end Newton iterations
    message <- switch(as.character(conv),
                      "0" = "Absolute and relative convergence criteria were met",
                      "1" = "Absolute convergence criterion was met, but relative criterion was not met",
                      "2" = "iteration limit reached",
                      "3" = "step factor reduced below minimum",
                      "4" = "maximum number of consecutive Newton modifications reached")
    if(conv <= 1L && control$trace > 0) {
        cat("\nOptimizer converged! 
", message, fill = TRUE) } if(conv > 1 && control$trace > 0) { cat("\nOptimization failed ", message, fill = TRUE) } ## return results: gradient <- c(rho$clm.grad(rho)) res <- list(par = rho$par, gradient = gradient, ##as.vector(gradient), ## Hessian = hessian, Hessian = rho$clm.hess(rho), ## ensure hessian is evaluated ## at optimum logLik = -nll, convergence = conv, ## 0: abs and rel criteria meet ## 1: abs criteria meet, rel criteria not meet ## 2: iteration limit reached ## 3: step factor reduced below minium message = message, maxGradient = max(abs(gradient)), niter = c(outer = i-1, inner = innerIter), fitted = rho$fitted) return(res) } clm_fit_optim <- function(rho, method = c("ucminf", "nlminb", "optim"), control=list()) { method <- match.arg(method) ## optimize the likelihood: optRes <- switch(method, "nlminb" = nlminb(rho$par, function(par) clm.nll(rho, par), function(par) clm.grad_direct(rho, par), control=control), "ucminf" = ucminf(rho$par, function(par) clm.nll(rho, par), function(par) clm.grad_direct(rho, par), control=control), "optim" = optim(rho$par, function(par) clm.nll(rho, par), function(par) clm.grad_direct(rho, par), method="BFGS", control=control) ) ## save results: rho$par <- optRes[[1]] res <- list(par = rho$par, logLik = -clm.nll(rho), gradient = clm.grad(rho), Hessian = clm.hess(rho), fitted = rho$fitted) res$maxGradient = max(abs(res$gradient)) res$optRes <- optRes res$niter <- switch(method, "nlminb" = optRes$evaluations, "ucminf" = c(optRes$info["neval"], 0), "optim" = optRes$counts) res$convergence <- switch(method, "nlminb" = optRes$convergence, "ucminf" = optRes$convergence, "optim" = optRes$convergence) return(res) } clm_fit_flex <- function(rho, control=list()) { lwr <- if(rho$link == "Aranda-Ordaz") c(rep(-Inf, length(rho$par) - 1), 1e-5) else rep(-Inf, length(rho$par)) ## optimize the likelihood: optRes <- nlminb(rho$par, function(par, rho) clm.nll.flex(rho, par), lower=lwr, rho=rho) ## save results: rho$par <- optRes$par res <- list(par = rho$par, lambda = setNames(rho$par[length(rho$par)], "lambda"), logLik = -clm.nll.flex(rho), gradient = numDeriv::grad(func=function(par, rho) clm.nll.flex(rho, par), x = rho$par, rho=rho), Hessian = numDeriv::hessian(func=function(par, rho) clm.nll.flex(rho, par), x = rho$par, rho=rho), fitted = rho$fitted) res$maxGradient = max(abs(res$gradient)) res$optRes <- optRes res$niter <- optRes$evaluations res$convergence <- optRes$convergence return(res) } clm.nll.flex <- function(rho, par) { if(!missing(par)) rho$par <- par with(rho, { if(k > 0) sigma <- Soff * exp(drop(S %*% par[n.psi + 1:k])) ### NOTE: we have to divide by sigma even if k=0 since there may be an ### offset but no predictors in the scale model: eta1 <- (drop(B1 %*% par[1:n.psi]) + o1)/sigma eta2 <- (drop(B2 %*% par[1:n.psi]) + o2)/sigma fitted <- pfun(eta1, par[length(par)]) - pfun(eta2, par[length(par)]) }) if(all(is.finite(rho$fitted)) && all(rho$fitted > 0)) ### NOTE: Need test here because some fitted <= 0 if thresholds are ### not ordered increasingly. 
-sum(rho$wts * log(rho$fitted)) else Inf } clm.nll <- function(rho, par) { if(!missing(par)) rho$par <- par with(rho, { if(k > 0) sigma <- Soff * exp(drop(S %*% par[n.psi + 1:k])) ### NOTE: we have to divide by sigma even if k=0 since there may be an ### offset but no predictors in the scale model: eta1 <- (drop(B1 %*% par[1:n.psi]) + o1)/sigma eta2 <- (drop(B2 %*% par[1:n.psi]) + o2)/sigma }) ### NOTE: getFitted is not found from within rho, so we have to ### evalueate it outside of rho rho$fitted <- getFittedC(rho$eta1, rho$eta2, rho$link, rho$par[length(rho$par)]) if(all(is.finite(rho$fitted)) && all(rho$fitted > 0)) ### NOTE: Need test here because some fitted <= 0 if thresholds are ### not ordered increasingly. -sum(rho$wts * log(rho$fitted)) else Inf } ## clm.nll <- function(rho) { ## negative log-likelihood ## ### For linear models ## with(rho, { ## eta1 <- drop(B1 %*% par) + o1 ## eta2 <- drop(B2 %*% par) + o2 ## }) ## ### NOTE: getFitted is not found from within rho, so we have to ## ### evalueate it outside of rho ## rho$fitted <- getFittedC(rho$eta1, rho$eta2, rho$link) ## if(all(rho$fitted > 0)) ## ### NOTE: Need test here because some fitted <= 0 if thresholds are ## ### not ordered increasingly. ## ### It is assumed that 'all(is.finite(pr)) == TRUE' ## -sum(rho$wts * log(rho$fitted)) ## else Inf ## } ## clm.grad <- function(rho) { ## gradient of the negative log-likelihood ## ### return: vector of gradients ## ### For linear models ## with(rho, { ## p1 <- dfun(eta1) ## p2 <- dfun(eta2) ## wtpr <- wts/fitted ## dpi.psi <- B1 * p1 - B2 * p2 ## -crossprod(dpi.psi, wtpr) ## ### NOTE: It is assumed that all(fitted > 0) == TRUE and that ## ### all(is.finite(c(p1, p2))) == TRUE ## }) ## } clm.grad <- function(rho) { ### requires that clm.nll has been called prior to ### clm.grad. with(rho, { p1 <- if(!nlambda) dfun(eta1) else dfun(eta1, lambda) p2 <- if(!nlambda) dfun(eta2) else dfun(eta2, lambda) wtpr <- wts/fitted C2 <- B1*p1/sigma - B2*p2/sigma if(k <= 0) return(-crossprod(C2, wtpr)) C3 <- -(eta1 * p1 - eta2 * p2) * S return(-crossprod(cbind(C2, C3), wtpr)) ### NOTE: C2 and C3 are used by clm.hess }) } clm.grad_direct <- function(rho, par) { ### does not require that clm.nll has been called prior to ### clm.grad. clm.nll(rho, par) clm.grad(rho) } ## clm.hess <- function(rho) { ## hessian of the negative log-likelihood ## ### return Hessian matrix ## ### For linear models ## with(rho, { ## dg.psi <- crossprod(B1 * gfun(eta1) * wtpr, B1) - ## crossprod(B2 * gfun(eta2) * wtpr, B2) ## -dg.psi + crossprod(dpi.psi, (dpi.psi * wtpr / fitted)) ## ### NOTE: It is assumed that all(fitted > 0) == TRUE and that ## ### all(is.finite(c(g1, g2))) == TRUE ## }) ## } clm.hess <- function(rho) { ### requires that clm.grad has been called prior to this. 
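### The Hessian is assembled in blocks from the location/threshold
### part (D), the location-scale cross derivatives (Et) and the scale
### part (F): H = [D, Et; t(Et), F], cf. the rbind/cbind below.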
with(rho, { g1 <- if(!nlambda) gfun(eta1) else gfun(eta1, lambda) g2 <- if(!nlambda) gfun(eta2) else gfun(eta2, lambda) wtprpr <- wtpr/fitted ## Phi3 dg.psi <- crossprod(B1 * g1 * wtpr / sigma^2, B1) - crossprod(B2 * g2 * wtpr / sigma^2, B2) ## upper left: D <- dg.psi - crossprod(C2, (C2 * wtprpr)) if(k <= 0) return(-D) ## no scale predictors ## upper right (lower left transpose): wtprsig <- wtpr/sigma epg1 <- p1 + g1*eta1 epg2 <- p2 + g2*eta2 Et <- crossprod(B1, -wtprsig * epg1 * S) - crossprod(B2, -wtprsig * epg2 * S) - crossprod(C2, wtprpr * C3) ## lower right: F <- -crossprod(S, wtpr * ((eta1*p1 - eta2*p2)^2 / fitted - (eta1*epg1 - eta2*epg2)) * S) ## combine and return hessian: H <- rbind(cbind(D , Et), cbind(t(Et), F)) return(-H) }) } ordinal/R/clmm.R0000644000176200001440000007707414533321514013176 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Implementation of Cumulative Link Mixed Models in clmm(). if(getRversion() >= '2.15.1') utils::globalVariables(c("ths", "link", "threshold", "optRes", "neval", "Niter", "tJac", "y.levels")) clmm <- function(formula, data, weights, start, subset, na.action, contrasts, Hess = TRUE, model = TRUE, link = c("logit", "probit", "cloglog", "loglog", "cauchit"), ##, "Aranda-Ordaz", "log-gamma"), ## lambda, doFit = TRUE, control = list(), nAGQ = 1L, threshold = c("flexible", "symmetric", "symmetric2", "equidistant"), ...) { ### Extract the matched call and initial testing: mc <- match.call(expand.dots = FALSE) ### OPTION: Possibly call clm() when there are no random effects? link <- match.arg(link) threshold <- match.arg(threshold) if(missing(formula)) stop("Model needs a formula") if(missing(contrasts)) contrasts <- NULL ## set control parameters: control <- getCtrlArgs(control, list(...)) nAGQ <- as.integer(round(nAGQ)) formulae <- clmm.formulae(formula=formula) ## mf, y, X, wts, off, terms: frames <- clmm.frames(modelcall=mc, formulae=formulae, contrasts) ### QUEST: What should 'method="model.frame"' return? Do we want Zt ### included here as well? if(control$method == "model.frame") return(frames) ## Test rank deficiency and possibly drop some parameters: ## X is guarantied to have an intercept at this point. frames$X <- drop.coef(frames$X, silent=FALSE) ## Compute the transpose of the Jacobian for the threshold function, ## tJac and the names of the threshold parameters, alpha.names: ths <- makeThresholds(levels(frames$y), threshold) ## Set rho environment: rho <- with(frames, { clm.newRho(parent.frame(), y=y, X=X, weights=wts, offset=off, tJac=ths$tJac) }) ## compute grouping factor list, and Zt and ST matrices: retrms <- getREterms(frames = frames, formulae$formula) ## For each r.e. 
term, test if Z has more columns than rows to detect ## unidentifiability: test_no_ranef(Zt_list=retrms$retrms, frames=frames, checkRanef=control$checkRanef) ### OPTION: save (the evaluated) formula in frames, so we only need the ### frames argument to getREterms() ? use.ssr <- (retrms$ssr && !control$useMatrix) ## Set inverse link function and its two first derivatives (pfun, ## dfun and gfun) in rho: setLinks(rho, link) ## Compute list of dimensions for the model fit: rho$dims <- getDims(frames=frames, ths=ths, retrms=retrms) ## Update model environment with r.e. information: if(use.ssr) { rho.clm2clmm.ssr(rho=rho, retrms = retrms, ctrl=control$ctrl) ## Set starting values for the parameters: if(missing(start)) start <- c(fe.start(frames, link, threshold), 0) rho$par <- start nbeta <- rho$nbeta <- ncol(frames$X) - 1 ## no. fixef parameters nalpha <- rho$nalpha <- ths$nalpha ## no. threshold parameters ntau <- rho$ntau <- length(retrms$gfList) ## no. variance parameters stopifnot(is.numeric(start) && length(start) == (nalpha + nbeta + ntau)) } else { rho.clm2clmm(rho=rho, retrms=retrms, ctrl=control$ctrl) if(missing(start)) { rho$fepar <- fe.start(frames, link, threshold) rho$ST <- STstart(rho$ST) start <- c(rho$fepar, ST2par(rho$ST)) } else { stopifnot(is.list(start) && length(start) == 2) stopifnot(length(start[[1]]) == rho$dims$nfepar) stopifnot(length(start[[2]]) == rho$dims$nSTpar) rho$fepar <- as.vector(start[[1]]) rho$ST <- par2ST(as.vector(start[[2]]), rho$ST) } } ### OPTION: set starting values in a more elegant way. ## Set AGQ parameters: set.AGQ(rho, nAGQ, control, use.ssr) ## Possibly return the environment, rho without fitting: if(!doFit) return(rho) ## Fit the clmm: fit <- if(use.ssr) clmm.fit.ssr(rho, control = control$optCtrl, method=control$method, Hess) else clmm.fit.env(rho, control = control$optCtrl, method=control$method, Hess) ## Modify and return results: fit$nAGQ <- nAGQ fit$link <- link fit$start <- start fit$threshold <- threshold fit$call <- match.call() fit$formula <- formulae$formula fit$gfList <- retrms$gfList fit$control <- control res <- clmm.finalize(fit=fit, frames=frames, ths=ths, use.ssr) ## add model.frame to results list? if(model) res$model <- frames$mf return(res) } clmm.formulae <- function(formula) { ## Evaluate the formula in the enviroment in which clmm was called ## (parent.frame(2)) to get it evaluated properly: form <- eval.parent(formula, 2) ## get the environment of the formula. If this does not have an ## environment (it could be a character), then use the calling environment. 
form.envir <- if(!is.null(env <- environment(form))) env else parent.frame(2) ## ensure 'formula' is a formula-object: form <- tryCatch(formula(if(is.character(form)) form else Deparse(form), env = form.envir), error = identity) ## report error if the formula cannot be interpreted if(inherits(form, "error")) stop("unable to interpret 'formula'") environment(form) <- form.envir ## Construct a formula with all (fixed and random) variables ## (fullForm) and a formula with only fixed-effects variables ## (fixedForm): fixedForm <- nobars(form) ## ignore terms with '|' # Handle case where formula is only response ~ RE: fixedForm <- if(length(fixedForm) == 1 || !inherits(fixedForm, "formula")) reformulate("1", response = form[[2]], env=form.envir) else fixedForm fullForm <- subbars(form) # substitute `+' for `|' ## Set the appropriate environments: environment(fullForm) <- environment(fixedForm) <- environment(form) <- form.envir list(formula = form, fullForm = fullForm, fixedForm = fixedForm) } clmm.frames <- function(modelcall, formulae, contrasts) { ## Extract full model.frame (fullmf): m <- match(c("data", "subset", "weights", "na.action", "offset"), names(modelcall), 0) mf <- modelcall[c(1, m)] mf$formula <- formulae$fullForm mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") fixedmf <- mf ## save call for later modification and evaluation fullmf <- eval(mf, envir = parent.frame(2)) ## '2' to get out of ## clmm.frames and clmm ### OPTION: Consider behavior if data is a matrix? fixedmf$formula <- formulae$fixedForm fixedmf <- eval(fixedmf, envir = parent.frame(2)) attr(fullmf, "terms") <- attr(fixedmf, "terms") ## return: list(mf = fullmf, y = getY(fullmf), X = getX(fullmf, fixedmf, contrasts), wts = getWeights(fullmf), off = getOffsetStd(fullmf), terms = attr(fixedmf, "terms") ) } getY <- function(mf) { ### Extract model response: y <- model.response(mf) if(!is.factor(y)) stop("response needs to be a factor") y } getX <- function(fullmf, fixedmf, contrasts) { fixedTerms <- attr(fixedmf, "terms") X <- model.matrix(fixedTerms, fullmf, contrasts) n <- nrow(X) ## remove intercept from X: Xint <- match("(Intercept)", colnames(X), nomatch = 0) if(Xint <= 0) { X <- cbind("(Intercept)" = rep(1, n), X) warning("an intercept is needed and assumed") } ## intercept in X is garanteed. X } getZt <- function(retrms) { ZtList <- lapply(retrms, '[[', "Zt") Zt <- do.call(rbind, ZtList) Zt@Dimnames <- vector("list", 2) Zt } getREterms <- function(frames, formula) { ### NOTE: Need to parse mf - not just fullmf because we need the model ### fits for an identifiability check below. fullmf <- droplevels(with(frames, mf[wts > 0, ])) barlist <- expandSlash(findbars(formula[[3]])) ### NOTE: make sure 'formula' is appropriately evaluated and returned ### by clmm.formulae if(!length(barlist)) stop("No random effects terms specified in formula") term.names <- unlist(lapply(barlist, function(x) Deparse(x))) names(barlist) <- unlist(lapply(barlist, function(x) Deparse(x[[3]]))) ### NOTE: Deliberately naming the barlist elements by grouping factors ### and not by r.e. terms. 
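### (e.g., for a term (1 + x | g) the barlist element is named "g",
### while the corresponding entry of term.names is "1 + x | g")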
## list of grouping factors for the random terms: rel <- lapply(barlist, function(x) { ff <- eval(substitute(as.factor(fac)[,drop = TRUE], list(fac = x[[3]])), fullmf) ## per random term transpose indicator matrix: Zti <- as(ff, "sparseMatrix") ## per random term model matrix: mm <- model.matrix(eval(substitute(~ expr, list(expr = x[[2]]))), fullmf) Zt = do.call(rbind, lapply(seq_len(ncol(mm)), function(j) { Zti@x <- mm[,j] Zti } )) ### QUEST: can we drop rows from Zt when g has missing values in terms ### of the form (1 + g | f)? ST <- matrix(0, ncol(mm), ncol(mm), dimnames = list(colnames(mm), colnames(mm))) list(f = ff, Zt = Zt, ST = ST) ### OPTION: return the i'th element of Lambda here. }) q <- sum(sapply(rel, function(x) nrow(x$Zt))) ### OPTION: If the model is nested (all gr.factors are nested), then ### order the columns of Zt, such that they come in blocks ### corresponding to the levels of the coarsest grouping factor. Each ### block of Zt-columns contain first the j'th level of the 1st gr.fac. ### followed by columns for the 2nd gr.fac. ### ## single simple random effect on the intercept? ssr <- (length(barlist) == 1 && as.character(barlist[[1]][[2]])[1] == "1") ## order terms by decreasing number of levels in the factor but don't ## change the order if this is already true: nlev <- sapply(rel, function(re) nlevels(re$f)) if (any(diff(nlev)) > 0) rel <- rel[rev(order(nlev))] nlev <- nlev[rev(order(nlev))] ## separate r.e. terms from the factor list: retrms <- lapply(rel, "[", -1) names(retrms) <- term.names ## list of grouping factors: gfl <- lapply(rel, "[[", "f") ## which r.e. terms are associated with which grouping factors: attr(gfl, "assign") <- seq_along(gfl) ## only save unique g.f. and update assign attribute: fnms <- names(gfl) ## check for repeated factors: if (length(fnms) > length(ufn <- unique(fnms))) { ## check that the lengths of the number of levels coincide gfl <- gfl[match(ufn, fnms)] attr(gfl, "assign") <- match(fnms, ufn) names(gfl) <- ufn } ## test that all variables for the random effects are factors and ## have at least 3 levels: stopifnot(all(sapply(gfl, is.factor))) stopifnot(all(sapply(gfl, nlevels) > 2)) ## no. r.e. per level for each of the r.e. terms qi <- unlist(lapply(rel, function(re) ncol(re$ST))) stopifnot(q == sum(nlev * qi)) dims <- list(n = nrow(fullmf), ## no. observations nlev.re = nlev, ## no. levels for each r.e. term nlev.gf = sapply(gfl, nlevels), ## no. levels for each grouping factor qi = qi, nretrms = length(rel), ## no. r.e. terms ngf = length(gfl), ## no. unique grouping factors ## total no. random effects: q = sum(nlev * qi), ## = sum(sapply(rel, function(re) nrow(re$Zt))) ## no. r.e. var-cov parameters: nSTpar = sum(sapply(qi, function(q) q * (q + 1) / 2)) ) ## c(retrms=retrms, list(gfList = gfl, dims = dims, ssr = ssr)) list(retrms=retrms, gfList = gfl, dims = dims, ssr = ssr) } test_no_ranef <- function(Zt_list, frames, checkRanef=c("warn", "error", "message")) { ## For each r.e. term, test if Z has more columns than rows to detect ## unidentifiability: checkfun <- switch(checkRanef, "warn" = function(...) warning(..., call.=FALSE), "error" = function(...) stop(..., call.=FALSE), "message" = message) nrow_fullmf <- with(frames, nrow(mf[wts > 0, ])) REterm.names <- names(Zt_list) for(i in seq_along(Zt_list)) { Zti <- Zt_list[[i]][["Zt"]] if(nrow(Zti) > ncol(Zti) || (all(frames$wts == 1) && nrow(Zti) == ncol(Zti))) checkfun(gettextf("no. random effects (=%d) >= no. 
observations (=%d) for term: (%s)", nrow(Zti), ncol(Zti), REterm.names[i])) } ## Test if total no. random effects >= total nobs: q <- sum(sapply(Zt_list, function(x) nrow(x$Zt))) if(all(frames$wts == 1) && q >= nrow_fullmf) checkfun(gettextf("no. random effects (=%d) >= no. observations (=%d)", q, nrow_fullmf)) invisible(NULL) ### NOTE: q > nrow(fullmf) is (sometimes) allowed if some frames$wts > 1 ### ### NOTE: if all(frames$wts == 1) we cannot have observation-level ### random effects so we error if nrow(Zti) >= ncol(Zti) ### ### NOTE: Could probably also throw an error if q >= sum(frames$wts), ### but I am not sure about that. ### ### NOTE: It would be better to test the rank of the Zt matrix, but ### also computationally more intensive. ### } fe.start <- function(frames, link, threshold) { ## get starting values from clm: fit <- with(frames, clm.fit(y=y, X=X, weights=wts, offset=off, link=link, threshold=threshold)) unname(coef(fit)) } getDims <- function(frames, ths, retrms) ### Collect and compute all relevant dimensions in a list { dims <- retrms$dims ## n is also on retrms$dims dims$n <- sum(frames$wts > 0) dims$nbeta <- ncol(frames$X) - 1 dims$nalpha <- ths$nalpha dims$nfepar <- dims$nalpha + dims$nbeta dims } rho.clm2clmm <- function(rho, retrms, ctrl) ### update environment, rho returned by clm.newRho(). { ### OPTION: write default list of control arguments? ## control arguments are used when calling update.u(rho) rho$ctrl = ctrl ## compute Zt design matrix: rho$Zt <- getZt(retrms$retrms) rho$ST <- lapply(retrms$retrms, `[[`, "ST") rho$allST1 <- all(sapply(rho$ST, ncol) == 1) ## Lambda <- getLambda(rho$ST, rho$dims$nlev.re) ## Vt <- crossprod(Lambda, rho$Zt) ## rho$L <- Cholesky(tcrossprod(Vt), ## LDL = TRUE, super = FALSE, Imult = 1) rho$L <- Cholesky(tcrossprod(crossprod(getLambda(rho$ST, rho$dims$nlev.re), rho$Zt)), LDL = TRUE, super = FALSE, Imult = 1) rho$Niter <- 0L ## no. conditional mode updates rho$neval <- 0L ## no. evaluations of the log-likelihood function rho$u <- rho$uStart <- rep(0, rho$dims$q) rho$.f <- if(package_version(packageDescription("Matrix")$Version) > "0.999375-30") 2 else 1 } getLambda <- function(ST, nlev) { ### ST: a list of ST matrices ### nlev: a vector of no. random effects levels .local <- function(ST, nlev) { if(ncol(ST) == 1) .symDiagonal(n=nlev, x = rep(as.vector(ST[1, 1]), nlev)) else kronecker(as(ST, "sparseMatrix"), .symDiagonal(n=nlev)) ## This would make sense if the columns in Z (rows in Zt) were ordered differently: ## kronecker(Diagonal(n=nlev), ST) ### NOTE: .symDiagonal() appears to be faster than Diagonal() here. } stopifnot(length(ST) == length(nlev)) res <- if(length(ST) == 1) .local(ST[[1]], nlev) else .bdiag(lapply(seq_along(ST), function(i) .local(ST[[i]], nlev[i]))) ## coerce to diagonal matrix if relevant: if(all(sapply(ST, ncol) == 1)) as(res, "diagonalMatrix") else as(res, "CsparseMatrix") ### QUESTION: Are there any speed gains by coerce'ing Lambda to ### 'diagonalMatrix' or 'CsparseMatrix'? ### QUESTION: What is the best way to form the kronecker product in .local()? 
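### A commented example (not run) of the simplest case: for a single
### random intercept with standard deviation 's' (a hypothetical
### scalar) and 'nlev' levels,
## getLambda(list(matrix(s)), nlev)
### returns a diagonal matrix with s on the diagonal.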
} getNLA <- function(rho, par, which=rep(TRUE, length(par))) { ### negative log-likelihood by the Laplace approximation if(!missing(par)) { setPar.clmm(rho, par, which) if(any(!is.finite(par))) stop(gettextf(paste(c("Non-finite parameters not allowed:", formatC(par, format="g")), collapse=" "))) } rho$neval <- rho$neval + 1L if(!update.u(rho)) return(Inf) if(any(rho$D < 0)) return(Inf) logDetD <- c(suppressWarnings(determinant(rho$L)$modulus)) - rho$dims$q * log(2*pi) / 2 rho$nll + logDetD } nll.u <- function(rho) { ## negative log-likelihood if(rho$allST1) { ## are all ST matrices scalars? rho$varVec <- rep.int(unlist(rho$ST), rho$dims$nlev.re) b.expanded <- as.vector(crossprod(rho$Zt, rho$varVec * rho$u)) ### NOTE: Working with Lambda when it is diagonal will slow things ### down significantly. } else { rho$ZLt <- crossprod(getLambda(rho$ST, rho$dims$nlev.re), rho$Zt) b.expanded <- as.vector(crossprod(rho$ZLt, rho$u)) } rho$eta1Fix <- drop(rho$B1 %*% rho$fepar) rho$eta2Fix <- drop(rho$B2 %*% rho$fepar) rho$eta1 <- as.vector(rho$eta1Fix - b.expanded + rho$o1) rho$eta2 <- as.vector(rho$eta2Fix - b.expanded + rho$o2) rho$fitted <- getFittedC(rho$eta1, rho$eta2, rho$link) if(any(!is.finite(rho$fitted)) || any(rho$fitted <= 0)) nll <- Inf else nll <- -sum(rho$wts * log(rho$fitted)) - sum(dnorm(x=rho$u, mean=0, sd=1, log=TRUE)) nll } nllFast.u <- function(rho) { ## negative log-likelihood ## Does not update X %*% beta - fixed effect part. if(rho$allST1) { rho$varVec <- rep.int(unlist(rho$ST), rho$dims$nlev.re) b.expanded <- as.vector(crossprod(rho$Zt, rho$varVec * rho$u)) } else { rho$ZLt <- crossprod(getLambda(rho$ST, rho$dims$nlev.re), rho$Zt) b.expanded <- as.vector(crossprod(rho$ZLt, rho$u)) } rho$eta1 <- as.vector(rho$eta1Fix - b.expanded + rho$o1) rho$eta2 <- as.vector(rho$eta2Fix - b.expanded + rho$o2) rho$fitted <- getFittedC(rho$eta1, rho$eta2, rho$link) if(any(!is.finite(rho$fitted)) || any(rho$fitted <= 0)) nll <- Inf else nll <- -sum(rho$wts * log(rho$fitted)) - sum(dnorm(x=rho$u, mean=0, sd=1, log=TRUE)) nll } grad.u <- function(rho){ ## gradient of nll wrt. u (random effects) ### should only be called with up to date values of eta1, eta2, par ## compute phi1: rho$p1 <- rho$dfun(rho$eta1) rho$p2 <- rho$dfun(rho$eta2) rho$wtpr <- rho$wts/rho$fitted phi1 <- as.vector(rho$wtpr * (rho$p1 - rho$p2)) if(rho$allST1) (rho$Zt %*% phi1) * rho$varVec + rho$u else rho$ZLt %*% phi1 + rho$u } hess.u <- function(rho) { ## Hessian of nll wrt. u (random effects) ### should only be called with up-to-date values of eta1, eta2, par, ### p1, p2 g1 <- rho$gfun(rho$eta1) ## does not need to be saved in rho g2 <- rho$gfun(rho$eta2) ## does not need to be saved in rho phi2 <- rho$wts * ( ((rho$p1 - rho$p2) / rho$fitted)^2 - ( (g1 - g2) / rho$fitted) ) ## This may happen if the link function [pfun, dfun and gfun] ## evaluates its arguments inaccurately: if(any(phi2 < 0)) return(FALSE) if(rho$allST1) Vt <- crossprod(Diagonal(x = rho$varVec), tcrossprod(rho$Zt, Diagonal(x = sqrt(phi2)))) else Vt <- rho$ZLt %*% Diagonal(x = sqrt(phi2)) rho$L <- update(rho$L, Vt, mult = 1) return(TRUE) } getPar.clmm <- function(rho) ### Extract vector of parameters from model-environment rho c(rho$fepar, ST2par(rho$ST)) setPar.clmm <- function(rho, par, which=rep(TRUE, length(par))) { ### Set parameters in model environment rho. 
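### (getPar.clmm() and setPar.clmm() are inverse operations:
### setPar.clmm(rho, getPar.clmm(rho)) leaves rho unchanged, and
### 'which' allows updating only a subset of the parameters)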
which <- as.logical(as.vector(which)) oldpar <- getPar.clmm(rho) stopifnot(length(which) == length(oldpar)) stopifnot(sum(which) == length(par)) ## over-wright selected elements of oldpar: oldpar[which] <- as.vector(par) ## assign oldpar to rho$fepar and rho$ST: rho$fepar <- oldpar[1:rho$dims$nfepar] rho$ST <- par2ST(oldpar[-(1:rho$dims$nfepar)], rho$ST) } ST2par <- function(STlist) { ### Compute parameter vector from list of ST matrices. unlist(lapply(STlist, function(ST) { ## if(ncol(ST) == 1) as.vector(ST) else as.vector(c(diag(ST), ST[lower.tri(ST)])) })) } par2ST <- function(STpar, STlist) { ### Fill in parameters in list of ST matrices. Reverse of ST2par(). nc <- sapply(STlist, ncol) asgn <- rep(1:length(nc), sapply(nc, function(qi) qi * (qi + 1) / 2)) STparList <- split(STpar, asgn) stopifnot(length(asgn) == length(ST2par(STlist))) for(i in 1:length(STlist)) { par <- STparList[[i]] if(nc[i] > 1) { diag(STlist[[i]]) <- par[1:nc[i]] STlist[[i]][lower.tri(STlist[[i]])] <- par[-(1:nc[i])] } else { STlist[[i]][] <- par } } STlist } STatBoundary <- function(STpar, STlist, tol=1e-3) { ### Compute dummy vector of which ST parameters are at the ### boundary of the parameters space (variance-parameters that are ### zero). STcon <- STconstraints(STlist) stopifnot(length(STpar) == length(STcon)) as.integer(STcon == 1 & STpar <= tol) } paratBoundary <- function(rho, tol=1e-3) ### Compute dummy vector of which parameters are at the boundary of ### the parameter space. c(rep(0, rho$dims$nfepar), STatBoundary(ST2par(rho$ST), rho$ST, tol)) paratBoundary2 <- function(rho, tol=1e-3) { STcon <- STconstraints(rho$ST) c(rep(0L, rho$dims$nfepar), as.integer(STcon == 1 & ST2par(rho$ST) < tol)) } STconstraints <- function(STlist) { ### Compute indicator vector of which variance parameters are constrained above zero. The ### variance parameters are non-negative, while the covariance parameters are not ### constrained. ### ### This function can also be used to generate starting values for the covar. parameters. nc <- sapply(STlist, ncol) unlist(lapply(nc, function(qi) { c(rep(1L, qi), rep(0L, qi * (qi - 1) / 2)) } )) } parConstraints <- function(rho) ### Returns a dummy vector of the same length as getPar.clmm(rho) ### indicating which parameters are contrained to be non-negative. c(rep(0, rho$dims$nfepar), STconstraints(rho$ST)) STstart <- function(STlist) par2ST(STconstraints(STlist), STlist) isNested <- function(f1, f2) ### Borrowed from lme4/R/lmer.R ### Checks if f1 is nested within f2. { f1 <- as.factor(f1) f2 <- as.factor(f2) stopifnot(length(f1) == length(f2)) sm <- as(new("ngTMatrix", i = as.integer(f2) - 1L, j = as.integer(f1) - 1L, Dim = c(length(levels(f2)), length(levels(f1)))), "CsparseMatrix") all(diff(sm@p) < 2) } set.AGQ <- function(rho, nAGQ, control, ssr) { ## Stop if arguments are incompatible: if(nAGQ != 1 && !ssr) stop("Quadrature methods are not available with more than one random effects term", call.=FALSE) if(nAGQ != 1 && control$useMatrix) stop("Quadrature methods are not available with 'useMatrix = TRUE'", call.=FALSE) rho$nAGQ <- nAGQ if(nAGQ %in% 0:1) return(invisible()) ghq <- gauss.hermite(abs(nAGQ)) rho$ghqns <- ghq$nodes rho$ghqws <- if(nAGQ > 0) ghq$weights ## AGQ else log(ghq$weights) + (ghq$nodes^2)/2 ## GHQ } clmm.fit.env <- function(rho, control = list(), method=c("nlminb", "ucminf"), Hess = FALSE) ### Fit the clmm by optimizing the Laplace likelihood. 
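### (getNLA() is minimized with nlminb(); the lower bounds from
### parConstraints() keep the variance parameters non-negative)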
### Returns a list with elements: ### ### coefficients ### ST ### logLik ### Niter ### dims ### u ### optRes ### fitted.values ### L ### Zt ### ranef ### condVar ### gradient ### (Hessian) { method <- match.arg(method) if(method == "ucminf") warning("cannot use ucminf optimizer for this model, using nlminb instead") ## Compute lower bounds on the parameter vector lwr <- c(-Inf, 0)[parConstraints(rho) + 1] ## hack to remove ucminf control settings: keep <- !names(control) %in% c("grad", "grtol") control <- if(length(keep)) control[keep] else list() ## Fit the model with Laplace: fit <- try(nlminb(getPar.clmm(rho), function(par) getNLA(rho, par), lower=lwr, control=control), silent=TRUE) ### OPTION: Make it possible to use the ucminf optimizer with ### log-transformed std-par instead. ## Check if optimizer converged without error: if(inherits(fit, "try-error")) stop("optimizer ", method, " failed to converge", call.=FALSE) ### OPTION: Could have an argument c(warn, fail, ignore) to optionally ### return the fitted model despite the optimizer failing. ## Ensure parameters in rho are set at the optimum: setPar.clmm(rho, fit$par) ## Ensure random mode estimation at optimum: nllFast.u(rho) update.u(rho) names(rho$ST) <- names(rho$dims$nlev.re) ## Prepare list of results: res <- list(coefficients = fit$par[1:rho$dims$nfepar], ST = rho$ST, logLik = -fit$objective, dims = rho$dims, ### OPTION: Should we evaluate hess.u(rho) to make sure rho$L contains ### the right values corresponding to the optimum? u = rho$u, optRes = fit, fitted.values = rho$fitted, L = rho$L, Zt = rho$Zt ) ## save ranef and condVar in res: if(rho$allST1) { res$ranef <- rep.int(unlist(rho$ST), rho$dims$nlev.re) * rho$u res$condVar <- as.vector(diag(solve(rho$L)) * rep.int(unlist(rho$ST)^2, rho$dims$nlev.re)) } else { Lambda <- getLambda(rho$ST, rho$dims$nlev.re) res$ranef <- Lambda %*% rho$u res$condVar <- tcrossprod(Lambda %*% solve(rho$L), Lambda) } ## Add gradient vector and optionally Hessian matrix: bound <- as.logical(paratBoundary2(rho)) optpar <- fit$par[!bound] if(Hess) { ### NOTE: This is the Hessian evaluated for all parameters that are ### not at the boundary at the parameter space. The likelihood for ### models with boundary parameters is still defined as a function of ### all the parameters, so standard errors will differ whether or not ### boundary terms are included or not. gH <- deriv12(function(par) getNLA(rho, par, which=!bound), x=optpar) res$gradient <- gH$gradient res$Hessian <- gH$Hessian } else { res$gradient <- grad.ctr(function(par) getNLA(rho, par, which=!bound), x=optpar) } ### OPTION: We could check that the (forward) gradient for variances at the ### boundary are not < -1e-5 (wrt. -logLik/nll/getNLA) ## Setting Niter and neval after gradient and Hessian evaluations: res$Niter <- rho$Niter res$neval <- rho$neval ## return value: res } update.u <- function(rho) { stepFactor <- 1 innerIter <- 0 rho$u <- rho$uStart rho$nll <- nll.u(rho) if(!is.finite(rho$nll)) return(FALSE) rho$gradient <- grad.u(rho) maxGrad <- max(abs(rho$gradient)) conv <- -1 ## Convergence flag message <- "iteration limit reached when updating the random effects" if(rho$ctrl$trace > 0) Trace(iter=0, stepFactor, rho$nll, maxGrad, rho$u, first=TRUE) ## Newton-Raphson algorithm: for(i in 1:rho$ctrl$maxIter) { if(maxGrad < rho$ctrl$gradTol) { message <- "max|gradient| < tol, so current iterate is probably solution" if(rho$ctrl$trace > 0) cat("\nOptimizer converged! 
", "max|grad|:", maxGrad, message, fill = TRUE) conv <- 0 break } if(!hess.u(rho)) return(FALSE) step <- as.vector(solve(rho$L, rho$gradient)) rho$u <- rho$u - stepFactor * step nllTry <- nllFast.u(rho) ## no 'X %*% beta' update lineIter <- 0 ## Step halfing: while(nllTry > rho$nll) { stepFactor <- stepFactor/2 rho$u <- rho$u + stepFactor * step nllTry <- nllFast.u(rho) ## no 'X %*% beta' update lineIter <- lineIter + 1 if(rho$ctrl$trace > 0) Trace(i+innerIter, stepFactor, rho$nll, maxGrad, rho$u, first=FALSE) if(lineIter > rho$ctrl$maxLineIter){ message <- "step factor reduced below minimum when updating the random effects" conv <- 1 break } innerIter <- innerIter + 1 } rho$nll <- nllTry rho$gradient <- grad.u(rho) maxGrad <- max(abs(rho$gradient)) if(rho$ctrl$trace > 0) Trace(i+innerIter, stepFactor, rho$nll, maxGrad, rho$u, first=FALSE) stepFactor <- min(1, 2 * stepFactor) } if(conv != 0 && rho$ctrl$innerCtrl == "warnOnly") { warning(message, "\n at iteration ", rho$Niter) utils::flush.console() } else if(conv != 0 && rho$ctrl$innerCtrl == "giveError") stop(message, "\n at iteration ", rho$Niter) rho$Niter <- rho$Niter + i - 1 if(!hess.u(rho)) return(FALSE) if(!is.finite(rho$nll)) return(FALSE) else return(TRUE) } clmm.finalize <- function(fit, frames, ths, use.ssr) { fit$tJac <- ths$tJac fit$contrasts <- attr(frames$X, "contrasts") fit$na.action <- attr(frames$mf, "na.action") fit$terms <- frames$terms ### QUEST: Should the terms object contain only the fixed effects ### terms? fit$xlevels <- .getXlevels(frames$terms, frames$mf) fit$y.levels <- levels(frames$y) fit <- within(fit, { ## extract coefficients from 'fit': names(coefficients) <- names(gradient) <- c(ths$alpha.names, colnames(frames$X)[-1]) alpha <- coefficients[1:dims$nalpha] beta <- if(dims$nbeta > 0) coefficients[dims$nalpha + 1:dims$nbeta] else numeric(0) ## set various fit elements: edf <- dims$edf <- dims$nfepar + dims$nSTpar dims$nobs <- sum(frames$wts) dims$df.residual <- dims$nobs - dims$edf Theta <- alpha %*% t(tJac) nm <- paste(y.levels[-length(y.levels)], y.levels[-1], sep="|") dimnames(Theta) <- list("", nm) rm(nm) info <- data.frame("link" = link, "threshold" = threshold, "nobs" = dims$nobs, "logLik" = formatC(logLik, digits=2, format="f"), "AIC" = formatC(-2*logLik + 2*dims$edf, digits=2, format="f"), ## "niter" = paste(optRes$info["neval"], "(", Niter, ")", ## sep=""), "niter" = paste(neval, "(", Niter, ")", sep=""), "max.grad" = formatC(max(abs(gradient)), digits=2, format="e") ## BIC is not part of output since it is not clear what ## the no. observations are. ) }) bound <- if(use.ssr) rep(FALSE, fit$dims$edf) else as.logical(paratBoundary2(fit)) dn <- c(names(fit$coefficients), paste("ST", seq_len(fit$dims$nSTpar), sep=""))[!bound] names(fit$gradient) <- dn if(!is.null(fit$Hessian)) dimnames(fit$Hessian) <- list(dn, dn) ## set class and return fit: class(fit) <- "clmm" return(fit) } ordinal/R/clmm.ssr.R0000644000176200001440000002421514533321514013771 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. 
## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Functions for fitting CLMMs with a single simple random-effects ## term (ssr). rho.clm2clmm.ssr <- function(rho, retrms, ctrl) ### Version of rho.clm2clmm that is set up to use the C ### implementations of Laplace, AGQ and GHQ for a single random ### effect. { gfList <- retrms$gfList rho$grFac <- gfList[[1]] rho$ctrl <- ctrl rho$sigma <- rep(1, nrow(rho$B1)) rho$lambda <- 0 rho$nlev <- as.vector(sapply(gfList, nlevels)) rho$random.names <- sapply(gfList, levels) rho$tau.names <- names(gfList) rho$nrandom <- sum(rho$nlev) ## no. random effects rho$Niter <- 0L rho$neval <- 0L rho$u <- rho$uStart <- rep(0, rho$nrandom) rho$linkInt <- switch(rho$link, logit = 1L, probit = 2L, cloglog = 3L, loglog = 4L, cauchit = 5L) rho$ST <- lapply(retrms$retrms, `[[`, "ST") } ## set.AGQ <- function(rho, nAGQ) { ## rho$nAGQ <- nAGQ ## if(nAGQ %in% c(0L, 1L)) return(invisible()) ## ghq <- gauss.hermite(abs(nAGQ)) ## rho$ghqns <- ghq$nodes ## rho$ghqws <- ## if(nAGQ > 0) ghq$weights ## AGQ ## else log(ghq$weights) + (ghq$nodes^2)/2 ## GHQ ## } clmm.fit.ssr <- function(rho, control = list(), method=c("nlminb", "ucminf"), Hess = FALSE) ### Fit a clmm with a single simple random effects term using AGQ, GHQ ### or Laplace. { optim.error <- function(fit, method) if(inherits(fit, "try-error")) stop("optimizer ", method, " terminated with an error", call.=FALSE) ### OPTION: Could have an argument c(warn, fail, ignore) to optionally ### return the fitted model despite the optimizer failing. 
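### The objective depends on nAGQ: nAGQ > 1 gives adaptive
### Gauss-Hermite quadrature, nAGQ < 0 standard Gauss-Hermite
### quadrature, and nAGQ in c(0, 1) the Laplace approximation: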
    method <- match.arg(method)
    ## Set appropriate objective function:
    obj.fun <- if(rho$nAGQ < 0) getNGHQ.ssr else
               if(rho$nAGQ > 1) getNAGQ.ssr else
               getNLA.ssr ## nAGQ %in% c(0, 1)
    init.val <- obj.fun(rho, rho$par)
    if(!is.finite(init.val))
        stop(gettextf("non-finite likelihood at starting value (%g)", init.val),
             call.=FALSE)
    ## Fit the model:
    if(method == "ucminf") {
        fit <- try(ucminf(rho$par, function(par) obj.fun(rho, par),
                          control = control), silent=TRUE)
        ## Check if optimizer converged without error:
        optim.error(fit, method)
        ## Save return value:
        value <- fit$value
    } else if(method == "nlminb") {
        ## hack to remove ucminf control settings:
        keep <- !names(control) %in% c("grad", "grtol")
        control <- if(length(keep)) control[keep] else list()
        fit <- try(nlminb(rho$par, function(par) obj.fun(rho, par),
                          control = control), silent=TRUE)
        ## Check if optimizer converged without error:
        optim.error(fit, method)
        ## Save return value:
        value <- fit$objective
    } else stop("unknown optimization method: ", method)
    ## Extract parameters from optimizer results:
    rho$par <- fit$par
    ## Ensure random mode estimation at optimum:
    nllBase.uC(rho)
    update.uC(rho)
    rho$ST <- par2ST(rho$tau, rho$ST)
    names(rho$ST) <- names(rho$dims$nlev.re)
    ## Format ranef modes and condVar:
    ranef <- rho$u * rho$tau
    condVar <- 1/rho$D * rho$tau^2
    ## names(ranef) <- names(condVar) <- rho$random.names
    ## ranef <- list(ranef)
    ## condVar <- list(condVar)
    ## names(ranef) <- names(condVar) <- rho$tau.names
    ## Prepare list of results:
    res <- list(coefficients = fit$par[1:rho$dims$nfepar],
                ST = rho$ST,
                optRes = fit,
                logLik = -value,
                fitted.values = rho$fitted,
                ranef = ranef,
                condVar = condVar,
                dims = rho$dims,
                u = rho$u)
    ## Add gradient vector and optionally Hessian matrix:
    ## bound <- as.logical(paratBoundary2(rho))
    ## optpar <- fit$par[!bound]
    if(Hess) {
        ## gH <- deriv12(function(par) obj.fun(rho, par, which=!bound),
        gH <- deriv12(function(par) obj.fun(rho, par),
                      x=fit$par)
        res$gradient <- gH$gradient
        res$Hessian <- gH$Hessian
    } else {
        ## res$gradient <- grad.ctr(function(par) getNLA(rho, par, which=!bound),
        res$gradient <- grad.ctr(function(par) obj.fun(rho, par),
                                 x=fit$par)
    }
    ## Setting Niter and neval after gradient and Hessian evaluations:
    res$Niter <- rho$Niter
    res$neval <- rho$neval
    return(res)
}

getNGHQ.ssr <- function(rho, par) {
### negative log-likelihood by standard Gauss-Hermite quadrature
### implemented in C:
    if(!missing(par)) {
        rho$par <- par
        if(any(!is.finite(par)))
            stop(gettextf(paste(c("Non-finite parameters occurred:",
                                  formatC(par, format="g")), collapse=" ")))
    }
    rho$neval <- rho$neval + 1L
    nllBase.uC(rho) ## Update tau, eta1Fix and eta2Fix
    with(rho, {
        .C("getNGHQ_C",
           nll = double(1),
           as.integer(grFac),
           as.double(tau),
           as.double(eta1Fix),
           as.double(eta2Fix),
           as.double(o1),
           as.double(o2),
           as.double(sigma),
           as.double(wts),
           length(sigma),
           length(uStart),
           as.double(ghqns),
           as.double(ghqws),
           as.integer(abs(nAGQ)),
           as.integer(linkInt),
           as.double(ghqns * tau),
           as.double(lambda))$nll
    })
}

getNAGQ.ssr <- function(rho, par) {
### negative log-likelihood by adaptive Gauss-Hermite quadrature
### implemented in C:
    if(!missing(par)) {
        rho$par <- par
        if(any(!is.finite(par)))
            stop(gettextf(paste(c("Non-finite parameters occurred:",
                                  formatC(par, format="g")), collapse=" ")))
    }
    rho$neval <- rho$neval + 1L
    if(!update.uC(rho)) return(Inf)
    if(any(rho$D < 0)) return(Inf)
    with(rho, {
        .C("getNAGQ",
           nll = double(1),
           as.integer(grFac),
           as.double(tau),
           as.double(eta1Fix),
           as.double(eta2Fix),
           as.double(o1),
           as.double(o2),
           as.double(sigma),
           as.double(wts),
length(sigma), length(uStart), as.double(ghqns), as.double(log(ghqws)), as.double(ghqns^2), as.double(u), as.double(D), as.integer(abs(nAGQ)), as.integer(linkInt), as.double(lambda))$nll }) } getNLA.ssr <- function(rho, par) { ### negative log-likelihood by the Laplace approximation ### (with update.u2 in C or R): if(!missing(par)) { rho$par <- par if(any(!is.finite(par))) stop(gettextf(paste(c("Non-finite parameters occured:", formatC(par, format="g")), collapse=" "))) } rho$neval <- rho$neval + 1L if(!update.uC(rho)) return(Inf) if(any(rho$D <= 0)) return(Inf) logDetD <- sum(log(rho$D)) rho$negLogLik - rho$nrandom*log(2*pi)/2 + logDetD/2 } nllBase.uC <- function(rho) { ### updates tau, eta1Fix and eta2Fix given new parameter values with(rho, { tau <- exp(par[nalpha + nbeta + 1:ntau]) eta1Fix <- drop(B1 %*% par[1:(nalpha + nbeta)]) eta2Fix <- drop(B2 %*% par[1:(nalpha + nbeta)]) }) return(invisible()) } update.uC <- function(rho) { ### C-implementation of NR-algorithm. nllBase.uC(rho) ## update: tau, eta1Fix, eta2Fix fit <- with(rho, { .C("NRalgv3", as.integer(ctrl$trace), as.integer(ctrl$maxIter), as.double(ctrl$gradTol), as.integer(ctrl$maxLineIter), as.integer(grFac), ## OBS as.double(tau), # stDev as.double(o1), as.double(o2), as.double(eta1Fix), as.double(eta2Fix), as.double(sigma), ## rep(1, n) as.integer(linkInt), ## as.double(wts), ## pre. weights u = as.double(uStart), fitted = as.double(fitted), ## pre. pr funValue = double(1), gradValues = as.double(uStart), hessValues = as.double(rep(1, length(uStart))), length(fitted), length(uStart), maxGrad = double(1), conv = 0L, as.double(lambda), ## Niter = as.integer(Niter) ## OBS )[c("u", "fitted", "funValue", "gradValues", "hessValues", "maxGrad", "conv", "Niter")] }) ## Get message: message <- switch(as.character(fit$conv), "1" = "max|gradient| < tol, so current iterate is probably solution", "0" = "Non finite negative log-likelihood", "-1" = "iteration limit reached when updating the random effects", "-2" = "step factor reduced below minimum when updating the random effects") ## Check for convergence and report warning/error: if(rho$ctrl$trace > 0 && fit$conv == 1) cat("\nOptimizer converged! ", "max|grad|:", fit$maxGrad, message, fill = TRUE) if(fit$conv != 1 && rho$ctrl$innerCtrl == "warnOnly") warning(message, "\n at iteration ", rho$Niter) else if(fit$conv != 1 && rho$ctrl$innerCtrl == "giveError") stop(message, "\n at iteration ", rho$Niter) ## Store values and return: rho$Niter <- fit$Niter rho$fitted <- fit$fitted rho$u <- fit$u rho$D <- fit$hessValues rho$gradient <- fit$gradValues if(!is.finite(rho$negLogLik <- fit$funValue)) return(FALSE) return(TRUE) } ordinal/R/clm.methods.R0000644000176200001440000003472714533321514014461 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . 
############################################################################# ## This file contains: ## Implementation of various methods for clm objects. print.clm <- function(x, digits = max(3, getOption("digits") - 3), ...) { cat("formula:", Deparse(formula(x$terms)), fill=TRUE) ### NOTE: deparse(x$call$formula) will not always work since this may ### not always be appropriately evaluated. if(!is.null(x$call$scale)) cat("scale: ", Deparse(formula(x$S.terms)), fill=TRUE) if(!is.null(x$call$nominal)) cat("nominal:", Deparse(formula(x$nom.terms)), fill=TRUE) if(!is.null(data.name <- x$call$data)) cat("data: ", Deparse(data.name), fill=TRUE) if(!is.null(x$call$subset)) cat("subset: ", Deparse(x$call$subset), fill=TRUE) cat("\n") print(x$info, row.names=FALSE, right=FALSE) if(length(x$beta)) { if(sum(x$aliased$beta) > 0) { cat("\nCoefficients: (", sum(x$aliased$beta), " not defined because of singularities)\n", sep = "") } else cat("\nCoefficients:\n") print.default(format(x$beta, digits = digits), quote = FALSE) } if(length(x$zeta)) { if(sum(x$aliased$zeta) > 0) cat("\nlog-scale coefficients: (", sum(x$aliased$zeta), " not defined because of singularities)\n", sep = "") else cat("\nlog-scale coefficients:\n") print.default(format(x$zeta, digits = digits), quote = FALSE) } if(length(x$lambda)) { cat("\nLink coefficient:\n") print.default(format(x$lambda, digits = digits), quote = FALSE) } if(length(x$alpha) > 0) { if(sum(x$aliased$alpha) > 0) cat("\nThreshold coefficients: (", sum(x$aliased$alpha), " not defined because of singularities)\n", sep = "") else cat("\nThreshold coefficients:\n") if(!is.null(x$call$nominal)) print.default(format(x$alpha.mat, digits = digits), quote = FALSE) else print.default(format(x$alpha, digits = digits), quote = FALSE) } if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") return(invisible(x)) } vcov.clm <- function(object, tol = sqrt(.Machine$double.eps), method = c("clm", "Cholesky", "svd", "eigen", "qr"), ...) 
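### Sketch of the alternatives handled below (illustration only; fm denotes
### a clm fit with Hess = TRUE; the default method = "clm" simply returns
### the vcov computed at fit time, the others invert the Hessian H):
###   vcov(fm, method = "Cholesky")  # chol2inv(chol(H)); requires H p.d.
###   vcov(fm, method = "svd")       # inverts H via its singular values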
{ method <- match.arg(method) if(method == "clm") return(object$vcov) if(is.null(object$Hessian)) stop("Model needs to be fitted with Hess = TRUE") dn <- dimnames(object$Hessian) H <- object$Hessian if(!all(is.finite(H))) stop("cannot compute vcov: non-finite values in Hessian") if(method == "svd") { Hsvd <- svd(H) ## positive <- Hsvd$d > max(tol * Hsvd$d[1L], tol) positive <- Hsvd$d > tol if(!all(positive)) stop(gettextf("Cannot compute vcov: \nHessian is numerically singular with min singular value = %g", min(Hsvd$d))) cov <- Hsvd$v %*% (1/Hsvd$d * t(Hsvd$u)) } else if(method == "eigen") { evd <- eigen(H, symmetric=TRUE) ## tol <- max(tol * evd$values[1L], tol) ## if evd$values[1L] < 0 if(any(evd$values < tol)) stop(gettextf("Cannot compute vcov: \nHessian is not positive definite with min eigenvalue = %g", min(evd$values))) cov <- with(evd, vectors %*% diag(1/values) %*% t(vectors)) } else if(method == "Cholesky") { cholH <- try(chol(H), silent=TRUE) if(inherits(cholH, "try-error")) stop("Cannot compute vcov: \nHessian is not positive definite") cov <- chol2inv(cholH) } else if(method == "qr") { qrH <- qr(H, tol=sqrt(.Machine$double.eps)) if(qrH$rank < nrow(H)) stop("Cannot compute vcov: \nHessian is numerically singular") cov <- solve.qr(qrH) } else stop("method not recognized") ## Need to test for negative variances, since some methods (svd, ## qr) may produce a vcov-matrix if the Hessian is *negative* ## definite: if(any(diag(cov) < 0)) { stop("Cannot compute vcov: \nHessian is not positive definite") } structure(cov, dimnames=dn) } summary.clm <- function(object, correlation = FALSE, ...) { vcov <- object$vcov coefs <- matrix(NA, length(object$coefficients), 4, dimnames = list(names(object$coefficients), c("Estimate", "Std. Error", "z value", "Pr(>|z|)"))) coefs[, 1] <- object$coefficients if(!all(is.finite(vcov))) { ## warning("Variance-covariance matrix of the parameters is not defined") coefs[, 2:4] <- NA if(correlation) warning("Correlation matrix is unavailable") } else { alias <- unlist(object$aliased) coefs[!alias, 2] <- sd <- sqrt(diag(vcov)) ## Cond is Inf if Hessian contains NaNs: object$cond.H <- if(any(is.na(object$Hessian))) Inf else with(eigen(object$Hessian, symmetric=TRUE, only.values = TRUE), abs(max(values) / min(values))) coefs[!alias, 3] <- coefs[!alias, 1]/coefs[!alias, 2] coefs[!alias, 4] <- 2 * pnorm(abs(coefs[!alias, 3]), lower.tail=FALSE) if(correlation) object$correlation <- cov2cor(vcov) } object$coefficients <- coefs class(object) <- "summary.clm" return(object) } print.summary.clm <- function(x, digits = max(3, getOption("digits") - 3), signif.stars = getOption("show.signif.stars"), ...) { cat("formula:", Deparse(formula(x$terms)), fill=TRUE) ### NOTE: deparse(x$call$formula) will not always work since this may ### not always be appropriately evaluated. 
if(!is.null(x$call$scale)) cat("scale: ", Deparse(formula(x$S.terms)), fill=TRUE) if(!is.null(x$call$nominal)) cat("nominal:", Deparse(formula(x$nom.terms)), fill=TRUE) if(!is.null(data.name <- x$call$data)) cat("data: ", Deparse(data.name), fill=TRUE) if(!is.null(x$call$subset)) cat("subset: ", Deparse(x$call$subset), fill=TRUE) cat("\n") print(x$info, row.names=FALSE, right=FALSE) nalpha <- length(x$alpha) nbeta <- length(x$beta) nzeta <- length(x$zeta) nlambda <- length(x$lambda) if(nbeta > 0) { if(sum(x$aliased$beta) > 0) cat("\nCoefficients: (", sum(x$aliased$beta), " not defined because of singularities)\n", sep = "") else cat("\nCoefficients:\n") printCoefmat(x$coefficients[nalpha + 1:nbeta, , drop=FALSE], digits=digits, signif.stars=signif.stars, has.Pvalue=TRUE, ...) } ## else cat("\nNo Coefficients\n") if(nzeta > 0) { if(sum(x$aliased$zeta) > 0) cat("\nlog-scale coefficients: (", sum(x$aliased$zeta), " not defined because of singularities)\n", sep = "") else cat("\nlog-scale coefficients:\n") printCoefmat(x$coefficients[nalpha + nbeta + 1:nzeta, , drop=FALSE], digits=digits, signif.stars=signif.stars, has.Pvalue=TRUE, ...) } if(nlambda > 0) { cat("\nLink coefficients:\n") printCoefmat(x$coefficients[nalpha + nbeta + nzeta + nlambda, , drop=FALSE], digits=digits, signif.stars=signif.stars, has.Pvalue=TRUE, ...) } if(nalpha > 0) { ## always true if(sum(x$aliased$alpha) > 0) cat("\nThreshold coefficients: (", sum(x$aliased$alpha), " not defined because of singularities)\n", sep = "") else cat("\nThreshold coefficients:\n") printCoefmat(x$coefficients[seq_len(nalpha), -4, drop=FALSE], digits=digits, has.Pvalue=FALSE, signif.stars=FALSE, ...) } if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") if(!is.null(correl <- x$correlation)) { cat("\nCorrelation of Coefficients:\n") ll <- lower.tri(correl) correl[ll] <- format(round(correl[ll], digits)) correl[!ll] <- "" print(correl[-1, -ncol(correl)], quote = FALSE, ...) } return(invisible(x)) } logLik.clm <- function(object, ...) structure(object$logLik, df = object$edf, nobs=object$nobs, class = "logLik") extractAIC.clm <- function(fit, scale = 0, k = 2, ...) { edf <- fit$edf c(edf, -2*fit$logLik + k * edf) } ### NOTE: AIC.clm implicitly defined via logLik.clm anova.clm <- function(object, ..., type = c("I", "II", "III", "1", "2", "3")) ### requires that clm objects have components: ### edf: no. parameters used ### call$formula ### link (character) ### threshold (character) ### logLik ### { mc <- match.call() dots <- list(...) ## remove 'test' and 'type' arguments from dots-list: not.keep <- which(names(dots) %in% c("test")) if(length(not.keep)) { message("'test' argument ignored in anova.clm\n") dots <- dots[-not.keep] } if(length(dots) == 0) { if(inherits(object, "clmm")) stop("anova not implemented for a single clmm fit") return(single_anova(object, type=type)) } ## Multi-model anova method proceeds: mlist <- c(list(object), dots) if(!all(sapply(mlist, function(model) inherits(model, c("clm", "clmm"))))) stop("only 'clm' and 'clmm' objects are allowed") nfitted <- sapply(mlist, function(x) length(x$fitted.values)) if(any(nfitted != nfitted[1L])) stop("models were not all fitted to the same dataset") ### OPTION: consider comparing y returned by the models for a better check? no.par <- sapply(mlist, function(x) x$edf) ## order list with increasing no. 
par: ord <- order(no.par, decreasing=FALSE) mlist <- mlist[ord] no.par <- no.par[ord] no.tests <- length(mlist) ## extract formulas, links, thresholds, scale formulas, nominal ## formulas: forms <- sapply(mlist, function(x) Deparse(x$call$formula)) links <- sapply(mlist, function(x) x$link) thres <- sapply(mlist, function(x) x$threshold) nominal <- sapply(mlist, function(x) Deparse(x$call$nominal)) scale <- sapply(mlist, function(x) Deparse(x$call$scale)) models <- data.frame(forms) models.names <- 'formula:' if(any(!nominal %in% c("~1", "NULL"))) { nominal[nominal == "NULL"] <- "~1" models$nominal <- nominal models.names <- c(models.names, "nominal:") } if(any(!scale %in% c("~1", "NULL"))) { scale[scale == "NULL"] <- "~1" models$scale <- scale models.names <- c(models.names, "scale:") } models.names <- c(models.names, "link:", "threshold:") models <- cbind(models, data.frame(links, thres)) ## extract AIC, logLik, statistics, df, p-values: AIC <- sapply(mlist, function(x) -2*x$logLik + 2*x$edf) logLiks <- sapply(mlist, function(x) x$logLik) statistic <- c(NA, 2*diff(sapply(mlist, function(x) x$logLik))) df <- c(NA, diff(no.par)) pval <- c(NA, pchisq(statistic[-1], df[-1], lower.tail=FALSE)) pval[!is.na(df) & df==0] <- NA ## collect results in data.frames: tab <- data.frame(no.par, AIC, logLiks, statistic, df, pval) tab.names <- c("no.par", "AIC", "logLik", "LR.stat", "df", "Pr(>Chisq)") mnames <- sapply(as.list(mc), Deparse)[-1] colnames(tab) <- tab.names rownames(tab) <- rownames(models) <- mnames[ord] colnames(models) <- models.names attr(tab, "models") <- models attr(tab, "heading") <- "Likelihood ratio tests of cumulative link models:\n" class(tab) <- c("anova.clm", "data.frame") tab } print.anova.clm <- function(x, digits=max(getOption("digits") - 2, 3), signif.stars=getOption("show.signif.stars"), ...) { if (!is.null(heading <- attr(x, "heading"))) cat(heading, "\n") models <- attr(x, "models") print(models, right=FALSE) cat("\n") printCoefmat(x, digits=digits, signif.stars=signif.stars, tst.ind=4, cs.ind=NULL, # zap.ind=2, #c(1,5), P.values=TRUE, has.Pvalue=TRUE, na.print="", ...) return(invisible(x)) } model.matrix.clm <- function(object, type = c("design", "B"), ...) { type <- match.arg(type) mf <- try(model.frame(object), silent=TRUE) if(inherits(mf, "try-error")) stop("Cannot extract model.matrix: refit model with 'model=TRUE'?") ### NOTE: we want to stop even if type="B" since the fullmf is needed ### in get_clmRho also and this way the error message is better. if(type == "design") { contr <- c(object$contrasts, object$S.contrasts, object$nom.contrasts) design <- get_clmDesign(fullmf=object$model, terms.list=terms(object, "all"), contrasts=contr) keep <- c("X", "NOM", "S") select <- match(keep, names(design), nomatch=0) ans <- design[select] } else { ## if type == "B": env <- get_clmRho.clm(object) ans <- list(B1 = env$B1, B2 = env$B2) ans$S <- env$S ## may not exist } return(ans) } model.frame.clm <- function(formula, ...) { ### returns a model frame with *all* variables used for fitting. if(is.null(mod <- formula$model)) stop("Cannot extract model.frame: refit model with 'model=TRUE'") else mod } coef.clm <- function(object, na.rm = FALSE, ...) { if(na.rm) { coefs <- object$coefficients coefs[!is.na(coefs)] } else object$coefficients } coef.summary.clm <- function(object, na.rm = FALSE, ...) { if(na.rm) { coefs <- object$coefficients coefs[!is.na(coefs[,1]), , drop=FALSE] } else object$coefficients } nobs.clm <- function(object, ...) 
object$nobs terms.clm <- function(x, type=c("formula", "scale", "nominal", "all"), ...) { type <- match.arg(type) term.nm <- c("terms", "S.terms", "nom.terms") Terms <- x[names(x) %in% term.nm] ind <- match(term.nm, names(Terms), 0L) Terms <- Terms[ind] names(Terms) <- c("formula", "scale", "nominal")[ind != 0] if(type == "all") return(Terms) if(!type %in% names(Terms)) stop(gettextf("no terms object for '%s'", type)) Terms[[type]] } ordinal/R/clm.slice2D.R0000644000176200001440000002060214533321514014266 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# slice2D <- function(object, ...) { UseMethod("slice2D") } slice2D.clm <- function(object, parm=seq_along(par), lambda=3, grid=20, ...) { ## argument matching and testing: stopifnot(is.numeric(lambda) && lambda > 0) stopifnot(is.numeric(grid) && grid >= 1) grid <- as.integer(round(grid)) par <- coef(object, na.rm=TRUE) par.names <- names(par) stopifnot(length(parm) == length(unique(parm))) if(is.character(parm)) parm <- match(parm, par.names, nomatch = 0) if(!all(parm %in% seq_along(par))) stop("invalid 'parm' argument") stopifnot(length(parm) >= 2L) parm <- as.integer(parm) nparm <- length(parm) ## parm is an integer vector indexing non-aliased coef. ml <- object$logLik parm.names <- par.names[parm] mle <- par[parm] ## get environment corresponding to object: env <- get_clmRho(object) ## env <- update(object, doFit=FALSE) names(par) <- NULL env$par <- as.vector(par) ## set env$par to mle stopifnot(isTRUE(all.equal(env$clm.nll(env), -object$logLik))) ## generate sequence of parameters at which to compute the ## log-likelihood: curv <- sqrt(1/diag(object$Hessian)) ## curvature in nll wrt. par par.range <- par + curv %o% (c(-1, 1) * lambda) ## All pairwise combinations: pairs <- t(combn(seq_len(nparm), 2)) ncombn <- nrow(pairs) ### Allow for sequential paired comparisons? 
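## Illustration of the 'pairs' construction above: for nparm = 3,
## t(combn(seq_len(3), 2)) has rows (1,2), (1,3) and (2,3), i.e. one
## 2D slice of the log-likelihood per pair of parameters.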
par.seq <- lapply(parm, function(ind) { seq(par.range[ind, 1], par.range[ind, 2], length = grid) }) names(par.seq) <- par.names zlist <- vector(mode="list", length=ncombn) names(zlist) <- paste(par.names[pairs[, 1]], par.names[pairs[, 2]], sep=".") for(k in 1:ncombn) { i <- pairs[k, 1] j <- pairs[k, 2] xx <- expand.grid(x=par.seq[[i]], y=par.seq[[j]]) ## Set parameter values to MLEs: env$par <- par ## Compute log-likelihood over entire grid: z <- apply(xx, 1, function(x) { env$par[c(i, j)] <- as.vector(x); env$clm.nll(env) }) ## Store log-likelihood values in a matrix: zlist[[k]] <- matrix(z, ncol=grid) } res <- list(zlist=zlist, par.seq=par.seq, par.range=par.range, pairs=pairs, original.fit=object, mle=mle) class(res) <- c("slice2D.clm") res } safe.as.int <- function(x) as.integer(round(x)) plot.slice2D.clm <- function(x, parm = seq_along(orig.par), ## How to specify default values ## of parm? plot.mle = TRUE, ask = prod(par("mfcol")) < nrow(pairs) && dev.interactive(), ...) ### parm: a character vector or integer vector of length >= 2 with ### those par-combinations to make contour plots. { ## stopifnot(all parm in names(par.seq)) orig.par <- coef(x$original.fit, na.rm=TRUE) ### More parm stuff here... stopifnot(is.numeric(parm) && length(parm) >= 2L) parm <- as.integer(round(parm)) par.names <- names(orig.par) ## of <- attr(x, "original.fit") ## par <- coef(of) ## ml <- of$logLik keep <- (x$pairs[, 1] %in% parm) & (x$pairs[, 2] %in% parm) pairs <- x$pairs[keep, , drop=FALSE] stopifnot(length(pairs) >= 2) if(ask) { oask <- devAskNewPage(TRUE) on.exit(devAskNewPage(oask)) } ## Plotting the contours: for(k in seq_len(nrow(pairs))) { i <- pairs[k, 1] j <- pairs[k, 2] contour(x$par.seq[[i]], x$par.seq[[j]], x$zlist[[k]], xlab = par.names[i], ylab = par.names[j]) points(orig.par[i], orig.par[j], pch = 4, col = "red", lwd = 2) } return(invisible()) } sliceg.clm <- function(object, parm = seq_along(par), lambda = 3, grid = 1e2, quad.approx = TRUE, ...) { ## argument matching and testing: stopifnot(is.numeric(lambda) && lambda > 0) stopifnot(is.numeric(grid) && grid >= 1) grid <- as.integer(round(grid)) par <- coef(object, na.rm=TRUE) par.names <- names(par) npar <- length(par) stopifnot(length(parm) == length(unique(parm))) if(is.character(parm)) parm <- match(parm, par.names, nomatch = 0) ### disallow character argument due to ambiguity? if(!all(parm %in% seq_along(par))) stop("invalid 'parm' argument") stopifnot(length(parm) > 0) parm <- as.integer(round(parm)) ## parm is an integer vector indexing non-aliased coef. ml <- object$logLik parm.names <- par.names[parm] ## get environment corresponding to object: rho <- get_clmRho(object) ## rho <- update(object, doFit = FALSE) names(par) <- NULL rho$par <- par ## set rho$par to mle stopifnot(isTRUE(all.equal(rho$clm.nll(rho), -object$logLik))) ## generate sequence of parameters at which to compute the ## log-likelihood: curv <- sqrt(1/diag(object$Hessian)) ## curvature in nll wrt. 
par
    par.range <- par + curv %o% c(-lambda, lambda)
    ## par.seq - list of length npar with a sequence of values for each
    ## parameter :
    par.seq <- lapply(parm, function(ind) {
        seq(par.range[ind, 1], par.range[ind, 2], length = grid) })
    ## compute relative logLik for all par.seq for each par:
    logLik <- lapply(seq_along(parm), function(i) { # for each par
        rho$par <- par ## reset par values to MLE
        sapply(par.seq[[ i ]], function(par.val) { # for each par.seq value
            rho$par[ parm[i] ] <- par.val
            rho$clm.nll(rho)
            rho$clm.grad(rho)[ parm[i] ]
        })
    })
    ## collect parameter sequences and relative logLik in a list of
    ## data.frames:
    res <- lapply(seq_along(parm), function(i) {
        structure(data.frame(par.seq[[ i ]], logLik[[ i ]]),
                  ## names = c(parm.names[i], "logLik"))
                  names = c(parm.names[i], "gradient"))
    })
    ## set attributes:
    names(res) <- parm.names
    attr(res, "original.fit") <- object
    attr(res, "mle") <- par[parm]
    ## class(res) <- "slice.clm"
    class(res) <- "sliceg.clm"
    ## if(!quad.approx) return(res)
    ## ## compute quadratic approx to *positive* logLik:
    ## Quad <- function(par, mle, curv)
    ##     -((mle - par)^2 / curv^2 / 2)
    ## for(i in seq_along(parm))
    ##     res[[ i ]]$quad <-
    ##         Quad(par.seq[[ i ]], par[ parm[i] ], curv[ parm[i] ])
    return(res)
}

plot.sliceg.clm <-
    function(x, parm = seq_along(x), type = c("quadratic", "linear"),
             plot.mle = TRUE,
             ask = prod(par("mfcol")) < length(parm) && dev.interactive(), ...)
{
    ## Initial argument matching and testing:
    type <- match.arg(type)
    stopifnot(is.numeric(parm))
    parm <- as.integer(round(parm))
    of <- attr(x, "original.fit")
    par <- coef(of)
    ml <- of$logLik
    ## take the signed sqrt of nll and quad:
    ## if(type == "linear") {
    ##     sgn.sqrt <- function(par, mle, logLik)
    ##         (2 * (par > mle) - 1) * sqrt(-logLik)
    ##     mle <- coef(attr(x, "original.fit"))
    ##     for(i in parm) {
    ##         x[[i]]$logLik <- sgn.sqrt(x[[i]][1], mle[i], x[[i]]$logLik)
    ##         if(!is.null(x[[i]]$quad))
    ##             x[[i]]$quad <- sgn.sqrt(x[[i]][1], mle[i], x[[i]]$quad)
    ##     }
    ##     ylab <- "Signed log-likelihood root"
    ## }
    ## else
    ##     ylab <- "Relative log-likelihood"
    ylab <- "Gradient"
    if(ask) {
        oask <- devAskNewPage(TRUE)
        on.exit(devAskNewPage(oask))
    }
    ## actual plotting:
    for(i in parm) {
        z <- x[[i]]
        plot(z[1:2], type = "l", ylab=ylab, ...)
        if(!is.null(z$quad)) lines(z[[1]], z[[3]], lty = 2)
        if(plot.mle && type == "quadratic")
            ## abline(v = par[i])
            abline(v = attr(x, "mle")[i])
        ## abline(v = par[names(x)[i]])
    }
    return(invisible())
}
ordinal/R/clm.slice.R0000644000176200001440000001603014533321514014100 0ustar liggesusers#############################################################################
## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen
##
## This file is part of the ordinal package for R (*ordinal*)
##
## *ordinal* is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 2 of the License, or
## (at your option) any later version.
##
## *ordinal* is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## A copy of the GNU General Public License is available at
## <https://www.r-project.org/Licenses/> and/or
## <http://www.gnu.org/licenses/>.
#############################################################################
## This file contains:
## Methods to compute and plot likelihood-slices for clm objects.

slice <- function(object, ...)
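### Usage sketch (assumed): for a clm fit fm,
###   sl <- slice(fm)  # dispatches to slice.clm below
###   plot(sl)         # dispatches to plot.slice.clm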
{ UseMethod("slice") } slice.clm <- function(object, parm = seq_along(par), lambda = 3, grid = 1e2, quad.approx = TRUE, ...) { ## argument matching and testing: stopifnot(is.numeric(lambda) && lambda > 0) stopifnot(is.numeric(grid) && grid >= 1) grid <- as.integer(round(grid)) par <- coef(object, na.rm=TRUE) par.names <- names(par) npar <- length(par) stopifnot(length(parm) == length(unique(parm))) if(is.character(parm)) parm <- match(parm, par.names, nomatch = 0) ### disallow character argument due to ambiguity? if(!all(parm %in% seq_along(par))) stop("invalid 'parm' argument") stopifnot(length(parm) > 0) parm <- as.integer(round(parm)) ## parm is an integer vector indexing non-aliased coef. ml <- object$logLik parm.names <- par.names[parm] ## get environment corresponding to object: rho <- get_clmRho(object) ## rho <- update(object, doFit = FALSE) names(par) <- NULL rho$par <- par ## set rho$par to mle stopifnot(isTRUE(all.equal(rho$clm.nll(rho), -object$logLik))) ## generate sequence of parameters at which to compute the ## log-likelihood: curv <- sqrt(1/diag(object$Hessian)) ## curvature in nll wrt. par par.range <- par + curv %o% c(-lambda, lambda) ## par.seq - list of length npar with a sequence of values for each ## parameter : par.seq <- lapply(parm, function(ind) { seq(par.range[ind, 1], par.range[ind, 2], length = grid) }) ## compute relative logLik for all par.seq for each par: logLik <- lapply(seq_along(parm), function(i) { # for each par rho$par <- par ## reset par values to MLE sapply(par.seq[[ i ]], function(par.val) { # for each par.seq value rho$par[ parm[i] ] <- par.val -rho$clm.nll(rho) - ml ## relative logLik }) }) ## collect parameter sequences and relative logLik in a list of ## data.frames: res <- lapply(seq_along(parm), function(i) { structure(data.frame(par.seq[[ i ]], logLik[[ i ]]), names = c(parm.names[i], "logLik")) }) ## set attributes: names(res) <- parm.names attr(res, "original.fit") <- object attr(res, "mle") <- par[parm] class(res) <- "slice.clm" if(!quad.approx) return(res) ## compute quadratic approx to *positive* logLik: Quad <- function(par, mle, curv) -((mle - par)^2 / curv^2 / 2) for(i in seq_along(parm)) res[[ i ]]$quad <- Quad(par.seq[[ i ]], par[ parm[i] ], curv[ parm[i] ]) return(res) } plot.slice.clm <- function(x, parm = seq_along(x), type = c("quadratic", "linear"), plot.mle = TRUE, ask = prod(par("mfcol")) < length(parm) && dev.interactive(), ...) { ## Initiala argument matching and testing: type <- match.arg(type) stopifnot(is.numeric(parm)) parm <- as.integer(round(parm)) of <- attr(x, "original.fit") par <- coef(of) ml <- of$logLik ## take the signed sqrt of nll and quad: if(type == "linear") { sgn.sqrt <- function(par, mle, logLik) (2 * (par > mle) - 1) * sqrt(-logLik) mle <- coef(attr(x, "original.fit")) for(i in parm) { x[[i]]$logLik <- sgn.sqrt(x[[i]][1], mle[i], x[[i]]$logLik) if(!is.null(x[[i]]$quad)) x[[i]]$quad <- sgn.sqrt(x[[i]][1], mle[i], x[[i]]$quad) } ylab <- "Signed log-likelihood root" } else ylab <- "Relative log-likelihood" if(ask) { oask <- devAskNewPage(TRUE) on.exit(devAskNewPage(oask)) } ## actual plotting: for(i in parm) { z <- x[[i]] plot(z[1:2], type = "l", ylab=ylab, ...) if(!is.null(z$quad)) lines(z[[1]], z[[3]], lty = 2) if(plot.mle && type == "quadratic") ## abline(v = par[i]) abline(v = attr(x, "mle")[i]) ## abline(v = par[names(x)[i]]) } return(invisible()) } ## slice.clm <- ## function(object, parm = seq_along(par), lambda = 3, grid = 1e2, ## quad.approx = TRUE, ...) 
## { ## ## argument matching and testing: ## stopifnot(is.numeric(lambda) && lambda > 0) ## stopifnot(is.numeric(grid) && grid >= 1) ## grid <- as.integer(grid) ## par <- coef(object) ## par.names <- names(par) ## npar <- length(par) ## stopifnot(length(parm) == length(unique(parm))) ## if(is.character(parm)) ## parm <- match(parm, par.names, nomatch = 0) ## if(!all(parm %in% seq_along(par))) ## stop("invalid 'parm' argument") ## stopifnot(length(parm) > 0) ## parm <- as.integer(parm) ## ml <- object$logLik ## parm.names <- par.names[parm] ## ## ## get environment corresponding to object: ## rho <- update(object, doFit = FALSE) ## names(par) <- NULL ## rho$par <- par ## set rho$par to mle ## stopifnot(isTRUE(all.equal(rho$clm.nll(rho), -object$logLik))) ## ## ## generate sequence of parameters at which to compute the ## ## log-likelihood: ## curv <- sqrt(1/diag(object$Hess)) ## curvature in nll wrt. par ## par.range <- par + curv %o% c(-lambda, lambda) ## ## par.seq - list of length npar: ## par.seq <- sapply(parm, function(ind) { ## seq(par.range[ind, 1], par.range[ind, 2], length = grid) }, ## simplify = FALSE) ## ## compute relative logLik for all par.seq for each par: ## logLik <- lapply(seq_along(parm), function(i) { # for each par ## rho$par <- par ## reset par values to MLE ## sapply(par.seq[[ i ]], function(par.val) { # for each val ## rho$par[ parm[i] ] <- par.val ## -rho$clm.nll(rho) - ml ## relative logLik ## }) ## }) ## ## ## collect results in a list of data.frames: ## res <- lapply(seq_along(parm), function(i) { ## structure(data.frame(par.seq[[ i ]], logLik[[ i ]]), ## names = c(parm.names[i], "logLik")) ## }) ## ## ## set attributes: ## names(res) <- parm.names ## attr(res, "original.fit") <- object ## class(res) <- "slice.clm" ## ## if(!quad.approx) return(res) ## ## compute quadratic approx to *positive* logLik: ## Quad <- function(par, mle, curv) ## -((mle - par)^2 / curv^2 / 2) ## for(i in seq_along(parm)) ## res[[ i ]]$quad <- ## Quad(par.seq[[ i ]], par[ parm[i] ], curv[ parm[i] ]) ## ## return(res) ## } ordinal/R/clm.start.R0000644000176200001440000001175714533321514014151 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Functions to compute starting values for CLMs in clm(). 
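## Illustration (not executed by the package): for a response with 4
## levels (ntheta = 3) the default "flexible" threshold starts used by
## start.threshold() below are the logit-scale quantiles at the
## equi-spaced probabilities (1:3)/4:
##   qlogis((1:3) / 4)  # -1.0986  0.0000  1.0986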
set.start <- function(rho, start=NULL, get.start=TRUE, threshold, link, frames) { ## set starting values for the parameters: nScol <- if(is.null(frames[["S"]])) 0 else ncol(frames[["S"]]) # no cols in S nSpar <- pmax(0, nScol - 1) # no Scale parameters if(get.start) { start <- ## not 'starting' scale effects: clm.start(y.levels=frames$y.levels, threshold=threshold, X=frames$X, NOM=frames$NOM, has.intercept=TRUE) if(nSpar > 0 || # NCOL(frames[["S"]]) > 1 link == "cauchit" || length(rho$lambda)) { ### NOTE: only special start if nSpar > 0 (no reason for ### special start if scale is only offset and no predictors). ### NOTE: start cauchit models at the probit estimates if start is not ### supplied: ### NOTE: start models with lambda at model with probit link rho$par <- start if(link %in% c("Aranda-Ordaz", "log-gamma", "cauchit")) { setLinks(rho, link="probit") } else { setLinks(rho, link) } tempk <- rho$k rho$k <- 0 ## increased gradTol and relTol: fit <- try(clm_fit_NR(rho, control=list(gradTol=1e-3, relTol=1e-3)), silent=TRUE) if(inherits(fit, "try-error")) stop("Failed to find suitable starting values: please supply some", call.=FALSE) start <- c(fit$par, rep(0, nSpar)) if(length(rho$lambda) > 0) start <- c(start, rho$lambda) attr(start, "start.iter") <- fit$niter rho$k <- tempk setLinks(rho, link) # reset link in rho } } ## test start: stopifnot(is.numeric(start)) length.start <- ncol(rho$B1) + nSpar + length(rho$lambda) if(length(start) != length.start) stop(gettextf("length of start is %d should equal %d", length(start), length.start), call.=FALSE) return(start) } start.threshold <- function(y.levels, threshold = c("flexible", "symmetric", "symmetric2", "equidistant")) ### args: ### y.levels - levels of the model response, at least of length two ### threshold - threshold structure, character. 
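### Example (illustration only): with 5 response levels the unrestricted
### starts are qlogis((1:4)/5) = c(-1.39, -0.41, 0.41, 1.39); "equidistant"
### then reduces these to c(start[1], mean(diff(start))) ~ c(-1.39, 0.92).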
{ ## match and test arguments: threshold <- match.arg(threshold) ny.levels <- length(y.levels) ntheta <- ny.levels - 1L if(threshold %in% c("symmetric", "symmetric2", "equidistant") && ny.levels < 3) stop(gettextf("symmetric and equidistant thresholds are only meaningful for responses with 3 or more levels")) ## default starting values: start <- qlogis((1:ntheta) / (ntheta + 1) ) # just a guess ## adjusting for threshold functions: if(threshold == "symmetric" && ntheta %% 2) { ## ntheta odd >= 3 nalpha <- (ntheta + 1) / 2 start <- c(start[nalpha], diff(start[nalpha:ntheta])) ## works for ## ntheta >= 1 } if(threshold == "symmetric" && !ntheta %% 2) {## ntheta even >= 4 nalpha <- (ntheta + 2) / 2 start <- c(start[c(nalpha - 1, nalpha)], diff(start[nalpha:ntheta])) ## works for ntheta >= 2 } if(threshold == "symmetric2" && ntheta %% 2) { ## ntheta odd >= 3 nalpha <- (ntheta + 3) / 2 start <- start[nalpha:ntheta] ## works for ntheta >= 3 } if(threshold == "symmetric2" && !ntheta %% 2) {## ntheta even >= 4 nalpha <- (ntheta + 2) / 2 start <- start[nalpha:ntheta] ## works for ntheta >= 2 } if(threshold == "equidistant") start <- c(start[1], mean(diff(start))) ## return starting values for the threshold parameters: return(as.vector(start)) } start.beta <- function(X, has.intercept = TRUE) return(rep(0, ncol(X) - has.intercept)) ## clm.start <- function(y.levels, threshold, X, has.intercept = TRUE) ## return(c(start.threshold(y.levels, threshold), ## start.beta(X, has.intercept))) clm.start <- function(y.levels, threshold, X, NOM=NULL, S=NULL, has.intercept=TRUE) { st <- start.threshold(y.levels, threshold) if(!is.null(NOM) && ncol(NOM) > 1) st <- c(st, rep(rep(0, length(st)), ncol(NOM)-1)) start <- c(st, start.beta(X, has.intercept)) if(!is.null(S) && ncol(S) > 1) start <- c(start, rep(0, ncol(S) - 1)) start } ordinal/R/clmm2.R0000644000176200001440000010215214533321514013242 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## The main clmm2 function and some related auxiliary functions. clmm2.control <- function(method = c("ucminf", "nlminb", "model.frame"), ..., trace = 0, maxIter = 50, gradTol = 1e-4, maxLineIter = 50, innerCtrl = c("warnOnly", "noWarn", "giveError")) { method <- match.arg(method) innerCtrl <- match.arg(innerCtrl) ctrl <- list(trace=ifelse(trace < 0, 1, 0), maxIter=maxIter, gradTol=gradTol, maxLineIter=maxLineIter, innerCtrl=innerCtrl) optCtrl <- list(trace = abs(trace), ...) 
if(!is.numeric(unlist(ctrl[-5]))) stop("maxIter, gradTol, maxLineIter and trace should all be numeric") if(any(ctrl[-c(1, 5)] <= 0)) stop("maxIter, gradTol and maxLineIter have to be > 0") if(method == "ucminf" && !"grtol" %in% names(optCtrl)) optCtrl$grtol <- 1e-5 if(method == "ucminf" && !"grad" %in% names(optCtrl)) optCtrl$grad <- "central" list(method = method, ctrl = ctrl, optCtrl = optCtrl) } .negLogLikBase <- function(rho) { ### Update stDev, sigma, eta1Fix, and eta2Fix given new par: with(rho, { if(estimLambda > 0) lambda <- par[nxi + p + k + 1:estimLambda] if(estimStDev) stDev <- exp(par[p+nxi+k+estimLambda+ 1:s]) sigma <- if(k > 0) expSoffset * exp(drop(Z %*% par[nxi+p + 1:k])) else expSoffset eta1Fix <- drop(B1 %*% par[1:(nxi + p)]) eta2Fix <- drop(B2 %*% par[1:(nxi + p)]) }) return(invisible()) } .negLogLikMfast <- function(rho) { ## negative log-likelihood fit <- with(rho, { .C("nll", as.double(u), length(u), as.integer(grFac), as.double(stDev), as.double(o1), as.double(o2), length(o1), eta1 = as.double(eta1), eta2 = as.double(eta2), as.double(eta1Fix), as.double(eta2Fix), as.double(sigma), pr = as.double(pr), as.double(weights), as.double(lambda), as.integer(linkInt), nll = double(1) )[c("eta1", "eta2", "pr", "nll")] }) rho$eta1 <- fit$eta1 rho$eta2 <- fit$eta2 rho$pr <- fit$pr fit$nll } update.u2.v3 <- function(rho) { ### third version: C-implementation of NR-algorithm. .negLogLikBase(rho) ## update: par, stDev, eta1Fix, eta2Fix eta2Fix, sigma fit <- with(rho, .C("NRalgv3", as.integer(ctrl$trace), as.integer(ctrl$maxIter), as.double(ctrl$gradTol), as.integer(ctrl$maxLineIter), as.integer(grFac), as.double(stDev), as.double(o1), as.double(o2), as.double(eta1Fix), as.double(eta2Fix), as.double(sigma), as.integer(linkInt), as.double(weights), u = as.double(uStart), pr = as.double(pr), funValue = double(1), gradValues = as.double(uStart), hessValues = as.double(rep(1, length(uStart))), length(pr), length(uStart), maxGrad = double(1), conv = 0L, as.double(lambda), Niter = as.integer(Niter) )[c("u", "funValue", "gradValues", "hessValues", "maxGrad", "conv", "Niter")] ) ## Get message: message <- switch(as.character(fit$conv), "1" = "max|gradient| < tol, so current iterate is probably solution", "0" = "Non finite negative log-likelihood", "-1" = "iteration limit reached when updating the random effects", "-2" = "step factor reduced below minimum when updating the random effects") ## Check for convergence and report warning/error: if(rho$ctrl$trace > 0 && fit$conv == 1) cat("\nOptimizer converged! ", "max|grad|:", fit$maxGrad, message, fill = TRUE) if(fit$conv != 1 && rho$ctrl$innerCtrl == "warnOnly") warning(message, "\n at iteration ", rho$Niter) else if(fit$conv != 1 && rho$ctrl$innerCtrl == "giveError") stop(message, "\n at iteration ", rho$Niter) ## Store values and return: rho$Niter <- fit$Niter rho$u <- fit$u rho$D <- fit$hessValue rho$gradient <- fit$gradValue if(!is.finite(rho$negLogLik <- fit$funValue)) return(FALSE) return(TRUE) } clmm2 <- function(location, scale, nominal, random, data, weights, start, subset, na.action, contrasts, Hess = FALSE, model = TRUE, sdFixed, link = c("logistic", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), lambda, doFit = TRUE, control, nAGQ = 1, threshold = c("flexible", "symmetric", "equidistant"), ...) ## Handle if model = FALSE ### Marginal fitted values? (pr | u = 0) or (pr | u = u.tilde) ? ### How can we (should we?) get u.tilde and var(u.tilde) with GHQ? 
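### Usage sketch (cf. the examples in ?clmm2; 'wine' ships with ordinal):
### fm <- clmm2(rating ~ temp + contact, random = judge, data = wine,
###             Hess = TRUE, nAGQ = 10)
### summary(fm)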
### Make safeStart function if !is.finite(negLogLik) ### Write test suite for doFit argument { R <- match.call(expand.dots = FALSE) Call <- match.call() if(missing(random)) { Call[[1]] <- as.name("clm2") return(eval.parent(Call)) } if(missing(lambda)) lambda <- NULL if(missing(contrasts)) contrasts <- NULL if(missing(control)) control <- clmm2.control(...) if(!setequal(names(control), c("method", "ctrl", "optCtrl"))) stop("specify 'control' via clmm2.control()") if (missing(data)) data <- environment(location) if (is.matrix(eval.parent(R$data))) R$data <- as.data.frame(data) ### Collect all variables in a single formula and evaluate to handle ### missing values correctly: m <- match(c("location", "scale", "nominal"), names(R), 0) F <- lapply(as.list(R[m]), eval.parent) ## evaluate in parent varNames <- unique(unlist(lapply(F, all.vars))) longFormula <- eval(parse(text = paste("~", paste(varNames, collapse = "+")))[1]) m <- match(c("location", "data", "subset", "weights", "random", "na.action"), names(R), 0) R <- R[c(1, m)] R$location <- longFormula R$drop.unused.levels <- TRUE R[[1]] <- as.name("model.frame") names(R)[names(R) == "location"] <- "formula" R <- eval.parent(R) nonNA <- rownames(R) ### Append nonNA index to Call$subset to get the right design matrices ### from clm2: Call$subset <- if(is.null(Call$subset)) nonNA else c(paste(deparse(Call$subset), "&"), nonNA) Call$start <- if(is.null(Call$start) || !is.null(Call$sdFixed)) Call$start else start[-length(start)] Call$random <- Call$control <- Call$nAGQ <- Call$sdFixed <- Call$innerCtrl <- NULL Call$method <- control$method Call$doFit <- Call$Hess <- FALSE Call[[1]] <- as.name("clm2") rhoM <- eval.parent(Call) if(control$method == "model.frame") return(rhoM) rhoM$call <- match.call() rhoM$randomName <- deparse(rhoM$call$random) ### Set grouping factor and stDev parameter: rhoM$grFac <- R[,"(random)"] if(!missing(sdFixed) && !is.null(sdFixed)) { stopifnot(length(sdFixed) == 1 && sdFixed > 0) rhoM$estimStDev <- FALSE rhoM$stDev <- sdFixed } else rhoM$estimStDev <- TRUE with(rhoM, { r <- nlevels(grFac) ## no. random effects grFac <- as.integer(unclass(grFac)) if(r <= 2) stop("Grouping factor must have 3 or more levels") s <- ifelse(estimStDev, 1L, 0L) ## no. 
variance parameters Niter <- 0L }) ### set starting values: if(missing(start)) { suppressWarnings(fitCLM(rhoM)) if(rhoM$estimStDev) rhoM$start <- rhoM$par <- c(rhoM$par, log(1)) else rhoM$start <- rhoM$par } else rhoM$start <- rhoM$par <- start rhoM$uStart <- rhoM$u <- rep(0, rhoM$r) ### Test starting values: if(length(rhoM$start) != with(rhoM, nxi + p + k + estimLambda + estimStDev)) stop("'start' is ", length(rhoM$start), " long, but should be ", with(rhoM, nxi + p + k + estimLambda + estimStDev), " long") if(rhoM$ncolXX == 0) { if(!all(diff(c(rhoM$tJac %*% rhoM$start[1:rhoM$nalpha])) > 0)) stop("Threshold starting values are not of increasing size") } ### Change the lower limit if lambda is estimated with the ### Aranda-Ordaz link and sdFixed is not supplied: if(rhoM$estimLambda > 0 && rhoM$link == "Aranda-Ordaz" && is.null(rhoM$call$sdFixed)) rhoM$limitLow <- c(rep(-Inf, length(rhoM$par)-2), 1e-5, -Inf) ### This should hardly ever be the case: .negLogLikBase(rhoM) ## set lambda, stDev, sigma, eta1Fix and eta2Fix if(!is.finite(.negLogLikMfast(rhoM))) stop("Non-finite integrand at starting values") rhoM$ctrl <- control$ctrl rhoM$optCtrl <- control$optCtrl if(rhoM$method == "nlminb") { m <- match(names(rhoM$optCtrl), c("grad","grtol"), 0) rhoM$optCtrl <- rhoM$optCtrl[!m] } ### Match doFit: if(is.logical(doFit) || is.numeric(doFit)) { if(doFit) doFit <- "C" else doFit <- "no" } else if(!is.character(doFit) || !doFit %in% c("no", "R", "C")) stop("argument 'doFit' not recognized. 'doFit' should be\n numeric, logical or one of c('no', 'R', 'C')") ### Set ObjFun parameters: ObjFun <- getNLA2 ## same for "R" and "C" rhoM$updateU <- if(doFit == "R") update.u2 else update.u2.v3 rhoM$nAGQ <- as.integer(nAGQ) if(rhoM$nAGQ >= 2) { ghq <- gauss.hermite(rhoM$nAGQ) rhoM$ghqns <- ghq$nodes rhoM$ghqws <- ghq$weights if(doFit == "R") { ObjFun <- getNAGQinR rhoM$PRnn <- array(0, dim=c(rhoM$n, rhoM$nAGQ)) rhoM$PRrn <- array(0, dim=c(rhoM$r, rhoM$nAGQ)) rhoM$ghqws <- ghq$weights * exp(rhoM$ghqns^2) } else ObjFun <- getNAGQinC } if(rhoM$nAGQ <= -1) { ghq <- gauss.hermite(abs(rhoM$nAGQ)) rhoM$ghqns <- ghq$nodes rhoM$ghqws <- ghq$weights * exp((ghq$nodes^2)/2) if(doFit == "R"){ ObjFun <- getNGHQinR } else { ObjFun <- getNGHQinC rhoM$ghqws <- log(ghq$weights) + (ghq$nodes^2)/2 } } stopifnot(rhoM$nAGQ != 0) ## test needed? 
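### Convention assumed throughout: nAGQ = 1 gives the Laplace
### approximation, nAGQ >= 2 adaptive Gauss-Hermite quadrature with nAGQ
### nodes, and nAGQ <= -1 standard (non-adaptive) Gauss-Hermite with
### |nAGQ| nodes; nAGQ = 0 is invalid, hence the test above.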
### Fit the model: if(!doFit %in% c("C", "R")) return(rhoM) if(rhoM$nAGQ > -1) rhoM$updateU(rhoM) # Try updating the random effects rhoM$optRes <- switch(rhoM$method, "ucminf" = ucminf(rhoM$start, function(x) ObjFun(rhoM, x), control=rhoM$optCtrl), "nlminb" = nlminb(rhoM$start, function(x) ObjFun(rhoM, x), control=rhoM$optCtrl, lower = rhoM$limitLow, upper = rhoM$limitUp)) rhoM$par <- rhoM$optRes[[1]] if(Hess) { if(rhoM$link == "Aranda-Ordaz" && rhoM$estimLambda > 0 && rhoM$lambda < 1e-3) message("Cannot get Hessian because lambda = ",rhoM$lambda ," is too close to boundary.\n", " Fit model with link == 'logistic' to get Hessian") else { rhoM$Hessian <- myhess(function(x) ObjFun(rhoM, x), rhoM$par) rhoM$par <- rhoM$optRes[[1]] } } .negLogLikMfast(rhoM) ## update pr ## if(rhoM$nAGQ > -1) rhoM$updateU(rhoM) # Makes sure ranef's are evaluated at the optimum ### Post processing: res <- finalizeRhoM(rhoM) res$call <- match.call() res$na.action <- attr(R, "na.action") res$contrasts <- contrasts class(res) <- c("clmm2", "clm2") res } getNLA2 <- function(rho, par) { ### negative log-likelihood by the Laplace approximation ### (with update.u2 in C or R): if(!missing(par)) rho$par <- par if(!rho$updateU(rho)) return(Inf) if(any(rho$D < 0)) return(Inf) ## logDetD <- sum(log(rho$D/(2*pi))) logDetD <- sum(log(rho$D)) - rho$r * log(2*pi) rho$negLogLik + logDetD / 2 } getNAGQinR <- function(rho, par) { ### negative log-likelihood by adaptive Gauss-Hermite quadrature ### implemented in R: if(!missing(par)) rho$par <- par if(!rho$updateU(rho)) return(Inf) if(any(rho$D < 0)) return(Inf) with(rho, { K <- sqrt(2/D) agqws <- K %*% t(ghqws) agqns <- apply(K %*% t(ghqns), 2, function(x) x + u) ranNew <- apply(agqns, 2, function(x) x[grFac] * stDev) eta1Tmp <- (eta1Fix + o1 - ranNew) / sigma eta2Tmp <- (eta2Fix + o2 - ranNew) / sigma if(nlambda) ## PRnn <- (pfun(eta1Tmp, lambda) - pfun(eta2Tmp, lambda))^weights ## This is likely a computationally more safe solution: PRnn <- exp(weights * log(pfun(eta1Tmp, lambda) - pfun(eta2Tmp, lambda))) else ## PRnn <- (pfun(eta1Tmp) - pfun(eta2Tmp))^weights PRnn <- exp(weights * log(pfun(eta1Tmp) - pfun(eta2Tmp))) ### OPTION: The fitted values could be evaluated with getFittedC for ### better precision. for(i in 1:r) ## PRrn[i,] <- apply(PRnn[grFac == i, ], 2, prod) PRrn[i,] <- apply(PRnn[grFac == i, ,drop = FALSE], 2, prod) PRrn <- PRrn * agqws * dnorm(x=agqns, mean=0, sd=1) ### OPTION: Could this be optimized by essentially computing dnorm 'by hand'? 
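### (Sketch of that idea: dnorm(x) = exp(-x^2/2) / sqrt(2*pi), so the
### product agqws * dnorm(agqns) could be assembled on the log scale as
### exp(log(agqws) - agqns^2/2 - log(2*pi)/2) to reduce underflow.)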
    })
    -sum(log(rowSums(rho$PRrn)))
}

## tmpAGQ(rho)
## tmpAGQ: exploratory code (apparently not called elsewhere) probing the
## underflow behaviour of the AGQ computations:
tmpAGQ <- function(rho, par) {
    if(!missing(par))
        rho$par <- par
    with(rho, {
        ls()
        stDev <- exp(ST[[1]][1, 1])
        nlambda <- 0
        K <- sqrt(2/D)
        agqws <- K %*% t(ghqws)
        agqns <- apply(K %*% t(ghqns), 2, function(x) x + u)
        grFac <- unclass(grFac)
        ranNew <- apply(agqns, 2, function(x) x[grFac] * stDev)
        eta1Tmp <- (eta1Fix + o1 - ranNew) / sigma
        eta2Tmp <- (eta2Fix + o2 - ranNew) / sigma
        if(nlambda)
            PRnn <- exp(weights * log(pfun(eta1Tmp, lambda) -
                                      pfun(eta2Tmp, lambda)))
        else
            PRnn <- exp(wts * log(pfun(eta1Tmp) - pfun(eta2Tmp)))
        dim(eta1Tmp)
        exp(wts[IND] * log(pfun(eta1Tmp[IND, ]) - pfun(eta2Tmp[IND, ])))
        PRrn <- do.call(rbind, lapply(1:dims$q, function(i) {
            apply(PRnn[grFac == i, ,drop = FALSE], 2, prod) }))
        head(PRrn)
        PRrn <- do.call(rbind, lapply(1:dims$q, function(i) {
            apply(PRnn[grFac == i, ,drop = FALSE], 2,
                  function(x) sum(log(x))) }))
        head(PRrn)
        ## Could we do something like
        PRnn <- wts * log(pfun(eta1Tmp) - pfun(eta2Tmp))
        PRrn <- do.call(rbind, lapply(1:dims$q, function(i) {
            apply(PRnn[grFac == i, ,drop = FALSE], 2,
                  function(x) sum(x)) }))
        head(PRrn, 20)
        ## to avoid first exp()ing and then log()ing?
        head(exp(PRrn), 20)
        range(PRrn)
        exp(range(PRrn))
        out <- PRrn + log(agqws) + log(dnorm(x=agqns, mean=0, sd=1))
        log(2 * 3)
        log(2) + log(3)
        PRnn[grFac == 12, , drop=FALSE]
        IND <- which(grFac == 12)
        cbind(IND, wts[IND], PRnn[IND, ])
        dim(PRrn)
        ## There seems to be underflow already in the computations
        ## in PRnn, which propagates to PRrn
        PRrn <- PRrn * agqws * dnorm(x=agqns, mean=0, sd=1)
    })
    -sum(log(rowSums(rho$PRrn)))
}

getNAGQinC <- function(rho, par) {
### negative log-likelihood by adaptive Gauss-Hermite quadrature
### implemented in C:
    if(!missing(par))
        rho$par <- par
    if(!rho$updateU(rho)) return(Inf)
    if(any(rho$D < 0)) return(Inf)
    with(rho, {
        .C("getNAGQ",
           nll = double(1), ## nll
           as.integer(grFac), ## grFac
           as.double(stDev), ## stDev
           as.double(eta1Fix),
           as.double(eta2Fix),
           as.double(o1),
           as.double(o2),
           as.double(sigma), ## Sigma
           as.double(weights),
           length(sigma), ## nx - no. obs
           length(uStart), ## nu - no. re
           as.double(ghqns),
           as.double(log(ghqws)), ## lghqws
           as.double(ghqns^2), ## ghqns2
           as.double(u),
           as.double(D),
           as.integer(abs(nAGQ)),
           as.integer(linkInt),
           as.double(lambda))$nll
    })
}

getNGHQinR <- function(rho, par) {
### negative log-likelihood by standard Gauss-Hermite quadrature
### implemented in R:
    if(!missing(par))
        rho$par <- par
    .negLogLikBase(rho) ## Update lambda, stDev, sigma and eta*Fix
    with(rho, {
        ns <- ghqns * stDev
        SS <- numeric(r) ## summed likelihood
        for(i in 1:r) {
            ind <- grFac == i
            eta1Fi <- eta1Fix[ind]
            eta2Fi <- eta2Fix[ind]
            o1i <- o1[ind]
            o2i <- o2[ind]
            si <- sigma[ind]
            wt <- weights[ind]
            for(h in 1:abs(nAGQ)) {
                eta1s <- (eta1Fi + o1i - ns[h]) / si
                eta2s <- (eta2Fi + o2i - ns[h]) / si
                ## SS[i] <- exp(sum(wt * log(pfun(eta1s) - pfun(eta2s)))) *
                ##     ghqws[h] * exp(ghqns[h]^2) * dnorm(x=ghqns[h]) + SS[i]
                SS[i] <- exp(sum(wt * log(pfun(eta1s) - pfun(eta2s)))) *
                    ghqws[h] + SS[i]
### OPTION: The fitted values could be evaluated with getFittedC for
### better precision.
} } -sum(log(SS)) + r * log(2*pi)/2 }) } getNGHQinC <- function(rho, par) { ### negative log-likelihood by standard Gauss-Hermite quadrature ### implemented in C: if(!missing(par)) rho$par <- par .negLogLikBase(rho) ## Update lambda, stDev, sigma and eta*Fix with(rho, { .C("getNGHQ_C", nll = double(1), as.integer(grFac), as.double(stDev), as.double(eta1Fix), as.double(eta2Fix), as.double(o1), as.double(o2), as.double(sigma), as.double(weights), length(sigma), length(uStart), as.double(ghqns), as.double(ghqws), as.integer(abs(nAGQ)), as.integer(linkInt), as.double(ghqns * stDev), as.double(lambda))$nll }) } finalizeRhoM <- function(rhoM) { if(rhoM$method == "ucminf") { if(rhoM$optRes$info[1] > rhoM$optCtrl[["grtol"]]) warning("clmm2 may not have converged:\n optimizer 'ucminf' terminated with max|gradient|: ", rhoM$optRes$info[1], call.=FALSE) rhoM$convergence <- ifelse(rhoM$optRes$info[1] > rhoM$optCtrl[["grtol"]], FALSE, TRUE) } if(rhoM$method == "nlminb") { rhoM$convergence <- ifelse(rhoM$optRes$convergence == 0, TRUE, FALSE) if(!rhoM$convergence) warning("clmm2 may not have converged:\n optimizer 'nlminb' terminated with message: ", rhoM$optRes$message, call.=FALSE) } if(rhoM$ctrl$gradTol < max(abs(rhoM$gradient))) warning("Inner loop did not converge at termination:\n max|gradient| = ", max(abs(rhoM$gradient))) with(rhoM, { if(nxi > 0) { xi <- par[1:nxi] names(xi) <- xiNames thetaNames <- paste(lev[-length(lev)], lev[-1], sep="|") Alpha <- Theta <- matrix(par[1:nxi], nrow=ncolXX, byrow=TRUE) Theta <- t(apply(Theta, 1, function(x) c(tJac %*% x))) if(ncolXX > 1){ dimnames(Theta) <- list(dnXX[[2]], thetaNames) dimnames(Alpha) <- list(dnXX[[2]], alphaNames) } else { Theta <- c(Theta) Alpha <- c(Alpha) names(Theta) <- thetaNames names(Alpha) <- alphaNames } coefficients <- xi } else coefficients <- numeric(0) if(p > 0) { beta <- par[nxi + 1:p] names(beta) <- dnX[[2]] coefficients <- c(coefficients, beta) } if(k > 0) { zeta <- par[nxi+p + 1:k] names(zeta) <- dnZ[[2]] coefficients <- c(coefficients, zeta) } if(estimLambda > 0) { names(lambda) <- "lambda" coefficients <- c(coefficients, lambda) } if(s > 0) { stDev <- exp(par[nxi+p+k + estimLambda + 1:s]) coefficients <- c(coefficients, stDev) } names(stDev) <- randomName if(exists("Hessian", inherits=FALSE)) dimnames(Hessian) <- list(names(coefficients), names(coefficients)) edf <- p + nxi + k + estimLambda + s nobs <- sum(weights) fitted.values <- pr df.residual = nobs - edf ranef <- u * stDev condVar <- 1/D * stDev^2 logLik <- -optRes[[2]] }) res <- as.list(rhoM) keepNames <- c("ranef", "df.residual", "fitted.values", "edf", "start", "stDev", "beta", "coefficients", "zeta", "Alpha", "Theta", "xi", "lambda", "convergence", "Hessian", "gradient", "optRes", "logLik", "Niter", "grFac", "call", "scale", "location", "nominal", "method", "y", "lev", "nobs", "threshold", "estimLambda", "link", "nAGQ", "condVar", "contrasts", "na.action") m <- match(keepNames, names(res), 0) res <- res[m] res } anova.clmm2 <- function (object, ..., test = c("Chisq", "none")) { anova.clm2(object, ..., test = c("Chisq", "none")) } print.clmm2 <- function(x, ...) 
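### Prints which approximation was used (Laplace, adaptive or standard
### Gauss-Hermite), the random-effects variance, the coefficient groups,
### thresholds and the log-likelihood/AIC.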
{ if(x$nAGQ >= 2) cat(paste("Cumulative Link Mixed Model fitted with the adaptive", "Gauss-Hermite \nquadrature approximation with", x$nAGQ ,"quadrature points"), "\n\n") else if(x$nAGQ <= -1) cat(paste("Cumulative Link Mixed Model fitted with the", "Gauss-Hermite \nquadrature approximation with", abs(x$nAGQ) ,"quadrature points"), "\n\n") else cat("Cumulative Link Mixed Model fitted with the Laplace approximation\n", fill=TRUE) if(!is.null(cl <- x$call)) { cat("Call:\n") dput(cl, control=NULL) } if(length(x$stDev)) { cat("\nRandom effects:\n") varMat <- matrix(c(x$stDev^2, x$stDev), nrow = length(x$stDev), ncol=2) rownames(varMat) <- names(x$stDev) colnames(varMat) <- c("Var", "Std.Dev") print(varMat, ...) } else { cat("\nNo random effects\n") } if(length(x$beta)) { cat("\nLocation coefficients:\n") print(x$beta, ...) } else { cat("\nNo location coefficients\n") } if(length(x$zeta)) { cat("\nScale coefficients:\n") print(x$zeta, ...) } else { cat("\nNo Scale coefficients\n") } if(x$estimLambda > 0) { cat("\nLink coefficient:\n") print(x$lambda) } if(length(x$xi) > 0) { cat("\nThreshold coefficients:\n") print(x$Alpha, ...) if(x$threshold != "flexible") { cat("\nThresholds:\n") print(x$Theta, ...) } } cat("\nlog-likelihood:", format(x$logLik, nsmall=2), "\n") cat("AIC:", format(-2*x$logLik + 2*x$edf, nsmall=2), "\n") if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") invisible(x) } vcov.clmm2 <- function(object, ...) { if(is.null(object$Hessian)) { stop("Model needs to be fitted with Hess = TRUE") } dn <- names(object$coefficients) structure(solve(object$Hessian), dimnames = list(dn, dn)) } summary.clmm2 <- function(object, digits = max(3, .Options$digits - 3), correlation = FALSE, ...) { estimStDev <- !("sdFixed" %in% names(as.list(object$call))) edf <- object$edf coef <- with(object, matrix(0, edf-estimStDev, 4, dimnames = list(names(coefficients[seq_len(edf-estimStDev)]), c("Estimate", "Std. Error", "z value", "Pr(>|z|)")))) coef[, 1] <- object$coefficients[seq_len(edf-estimStDev)] if(is.null(object$Hessian)) { stop("Model needs to be fitted with Hess = TRUE") } vc <- try(vcov(object), silent = TRUE) if(inherits(vc, "try-error")) { warning("Variance-covariance matrix of the parameters is not defined") coef[, 2:4] <- NaN if(correlation) warning("Correlation matrix is unavailable") object$condHess <- NaN } else { sd <- sqrt(diag(vc)) coef[, 2] <- sd[seq_len(edf - estimStDev)] object$condHess <- with(eigen(object$Hessian, only.values = TRUE), abs(max(values) / min(values))) coef[, 3] <- coef[, 1]/coef[, 2] coef[, 4] <- 2*pnorm(abs(coef[, 3]), lower.tail=FALSE) if(correlation) object$correlation <- (vc/sd)/rep(sd, rep(object$edf, object$edf)) } object$coefficients <- coef object$digits <- digits varMat <- matrix(c(object$stDev^2, object$stDev), nrow = length(object$stDev), ncol=2) rownames(varMat) <- names(object$stDev) colnames(varMat) <- c("Var", "Std.Dev") object$varMat <- varMat class(object) <- "summary.clmm2" object } print.summary.clmm2 <- function(x, digits = x$digits, signif.stars = getOption("show.signif.stars"), ...) 
{ if(x$nAGQ >=2) cat(paste("Cumulative Link Mixed Model fitted with the adaptive", "Gauss-Hermite \nquadrature approximation with", x$nAGQ ,"quadrature points\n\n")) else if(x$nAGQ <= -1) cat(paste("Cumulative Link Mixed Model fitted with the", "Gauss-Hermite \nquadrature approximation with", abs(x$nAGQ) ,"quadrature points"), "\n\n") else cat("Cumulative Link Mixed Model fitted with the Laplace approximation\n", fill=TRUE) if(!is.null(cl <- x$call)) { cat("Call:\n") dput(cl, control=NULL) } if(length(x$stDev)) { cat("\nRandom effects:\n") print(x$varMat, ...) } else { cat("\nNo random effects\n") } ### OPTION: Should the number of obs. and the number of groups be ### displayed as in clmm? coef <- format(round(x$coefficients, digits=digits)) coef[,4] <- format.pval(x$coefficients[, 4]) p <- length(x$beta); nxi <- length(x$xi) k <- length(x$zeta); u <- x$estimLambda if(p > 0) { cat("\nLocation coefficients:\n") print(coef[nxi + 1:p, , drop=FALSE], quote = FALSE, ...) } else { cat("\nNo location coefficients\n") } if(k > 0) { cat("\nScale coefficients:\n") print(coef[(nxi+p+1):(nxi+p+k), , drop=FALSE], quote = FALSE, ...) } else { cat("\nNo scale coefficients\n") } if(x$estimLambda > 0) { cat("\nLink coefficients:\n") print(coef[(nxi+p+k+1):(nxi+p+k+u), , drop=FALSE], quote = FALSE, ...) } if(nxi > 0) { cat("\nThreshold coefficients:\n") print(coef[1:nxi, -4, drop=FALSE], quote = FALSE, ...) } cat("\nlog-likelihood:", format(x$logLik, nsmall=2), "\n") cat("AIC:", format(-2*x$logLik + 2*x$edf, nsmall=2), "\n") cat("Condition number of Hessian:", format(x$condHess, nsmall=2), "\n") if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") if(!is.null(correl <- x$correlation)) { cat("\nCorrelation of Coefficients:\n") ll <- lower.tri(correl) correl[ll] <- format(round(correl[ll], digits)) correl[!ll] <- "" print(correl[-1, -ncol(correl)], quote = FALSE, ...) } invisible(x) } ## ranef.clmm2 <- function(x) { ## x$ranef ## } ## Trace <- function(iter, stepFactor, val, maxGrad, par, first=FALSE) { ## t1 <- sprintf(" %3d: %-5e: %.3f: %1.3e: ", ## iter, stepFactor, val, maxGrad) ## t2 <- formatC(par) ## if(first) ## cat("iter: step factor: Value: max|grad|: Parameters:\n") ## cat(t1, t2, "\n") ## } gauss.hermite <- function (n) { n <- as.integer(n) if (n < 0) stop("need non-negative number of nodes") if (n == 0) return(list(nodes = numeric(0), weights = numeric(0))) i <- 1:n i1 <- i[-n] muzero <- sqrt(pi) a <- rep(0, n) b <- sqrt(i1/2) A <- rep(0, n * n) A[(n + 1) * (i1 - 1) + 2] <- b A[(n + 1) * i1] <- b dim(A) <- c(n, n) vd <- eigen(A, symmetric = TRUE) w <- rev(as.vector(vd$vectors[1, ])) w <- muzero * w^2 x <- rev(vd$values) list(nodes = x, weights = w) } profile.clmm2 <- function(fitted, alpha = 0.01, range, nSteps = 20, trace = 1, ...) 
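### Usage sketch (assumed): pr <- profile(fm, alpha = 0.01, nSteps = 20)
### for a clmm2 fit fm, then confint(pr) and plot(pr); the profile is
### computed by refitting with sdFixed over a grid of stDev values.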
{ estimStDev <- !("sdFixed" %in% names(as.list(fitted$call))) if(!estimStDev) ## || is.null(fitted$Hessian)) fitted <- update(fitted, Hess = TRUE, sdFixed = NULL) MLogLik <- fitted$logLik MLstDev <- fitted$stDev if(missing(range) && is.null(fitted$Hessian)) stop("'range' should be specified or model fitted with 'Hess = TRUE'") if(missing(range) && !is.null(fitted$Hessian)) { range <- log(fitted$stDev) + qnorm(1 - alpha/2) * c(-1, 1) * sqrt(vcov(fitted)[fitted$edf, fitted$edf]) range <- exp(range) pct <- paste(round(100*c(alpha/2, 1-alpha/2), 1), "%") ci <- array(NA, dim = c(1, 2), dimnames = list("stDev", pct)) ci[] <- range } stopifnot(all(range > 0)) logLik <- numeric(nSteps) stDevSeq <- seq(min(range), max(range), length.out = nSteps) if(trace) message("Now profiling stDev with ", nSteps, " steps: i =") if(trace) cat(1, "") rho <- update(fitted, Hess = FALSE, sdFixed = min(range)) logLik[1] <- rho$logLik start <- as.vector(rho$coefficients) for(i in 2:nSteps){ if(trace) cat(i, "") rho <- update(rho, sdFixed = stDevSeq[i], start = start) logLik[i] <- rho$logLik start <- as.vector(rho$coefficients) } if(trace) cat("\n") if(any(logLik > fitted$logLik)) warning("Profiling found a better optimum,", " so original fit had not converged") sgn <- 2*(stDevSeq > MLstDev) -1 Lroot <- sgn * sqrt(2) * sqrt(-logLik + MLogLik) res <- data.frame("Lroot" = c(0, Lroot), "stDev" = c(MLstDev, stDevSeq)) res <- res[order(res[,1]),] if(!all(diff(res[,2]) > 0)) warning("likelihood is not monotonically decreasing from maximum,\n", " so profile may be unreliable for stDev") val <- structure(list(stDev = res), original.fit = fitted) if(exists("ci", inherits=FALSE)) attr(val, "WaldCI") <- ci class(val) <- c("profile.clmm2", "profile") val } confint.profile.clmm2 <- function(object, parm = seq_along(Pnames), level = 0.95, ...) { Pnames <- names(object) confint.profile.clm2(object, parm = parm, level = level, ...) } plot.profile.clmm2 <- function(x, parm = seq_along(Pnames), level = c(0.95, 0.99), Log = FALSE, relative = TRUE, fig = TRUE, n = 1e3, ..., ylim = NULL) { Pnames <- names(x) plot.profile.clm2(x, parm = parm, level = level, Log = Log, relative = relative, fig = fig, n = n, ..., ylim = ylim) } update.clmm2 <- function(object, formula., location, scale, nominal, ..., evaluate = TRUE) { call <- object$call if (is.null(call)) stop("need an object with call component") extras <- match.call(expand.dots = FALSE)$... 
if (!missing(location)) call$location <- update.formula(formula(attr(object$location, "terms")), location) if (!missing(scale)) call$scale <- if(!is.null(object$scale)) update.formula(formula(attr(object$scale, "terms")), scale) else scale if (!missing(nominal)) call$nominal <- if(!is.null(object$nominal)) update.formula(formula(attr(object$nominal, "terms")), nominal) else nominal if (length(extras)) { existing <- !is.na(match(names(extras), names(call))) for (a in names(extras)[existing]) call[[a]] <- extras[[a]] if (any(!existing)) { call <- c(as.list(call), extras[!existing]) call <- as.call(call) } } if (evaluate) eval(call, parent.frame()) else call } ordinal/R/clmm.formula.R0000644000176200001440000001012014533321514014615 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Functions to process lmer-style mixed-model formulae. These ## functions are borrowed from the lme4 package but have later been ## modified. 
findbars <- function(term) ### Return the pairs of expressions that are separated by vertical bars { if (is.name(term) || !is.language(term)) return(NULL) if (term[[1]] == as.name("(")) return(findbars(term[[2]])) if (!is.call(term)) stop("term must be of class call") if (term[[1]] == as.name('|')) return(term) if (length(term) == 2) return(findbars(term[[2]])) c(findbars(term[[2]]), findbars(term[[3]])) } nobars <- function(term) ### term - usually the third element of a formula object: formula[[3]] ### returns a list of terms ### Return the formula omitting the pairs of expressions that are ### separated by vertical bars { if (!('|' %in% all.names(term))) return(term) if (is.call(term) && term[[1]] == as.name('|')) return(NULL) if (length(term) == 2) { nb <- nobars(term[[2]]) if (is.null(nb)) return(NULL) term[[2]] <- nb return(term) } nb2 <- nobars(term[[2]]) nb3 <- nobars(term[[3]]) if (is.null(nb2)) return(nb3) if (is.null(nb3)) return(nb2) term[[2]] <- nb2 term[[3]] <- nb3 term } subbars <- function(term) ### Substitute the '+' function for the '|' function { if (is.name(term) || !is.language(term)) return(term) if (length(term) == 2) { term[[2]] <- subbars(term[[2]]) return(term) } stopifnot(length(term) >= 3) if (is.call(term) && term[[1]] == as.name('|')) term[[1]] <- as.name('+') for (j in 2:length(term)) term[[j]] <- subbars(term[[j]]) term } subnms <- function(term, nlist) ### Substitute any names from nlist in term with 1 { if (!is.language(term)) return(term) if (is.name(term)) { if (any(unlist(lapply(nlist, get("=="), term)))) return(1) return(term) } stopifnot(length(term) >= 2) for (j in 2:length(term)) term[[j]] <- subnms(term[[j]], nlist) term } slashTerms <- function(x) ### Return the list of '/'-separated terms in an expression that ### contains slashes { if (!("/" %in% all.names(x))) return(x) if (x[[1]] != as.name("/")) stop("unparseable formula for grouping factor") list(slashTerms(x[[2]]), slashTerms(x[[3]])) } makeInteraction <- function(x) ### from a list of length 2 return recursive interaction terms { if (length(x) < 2) return(x) trm1 <- makeInteraction(x[[1]]) trm11 <- if(is.list(trm1)) trm1[[1]] else trm1 list(substitute(foo:bar, list(foo=x[[2]], bar = trm11)), trm1) } expandSlash <- function(bb) ### expand any slashes in the grouping factors returned by findbars { if (!is.list(bb)) return(expandSlash(list(bb))) ## I really do mean lapply(unlist(... - unlist returns a ## flattened list in this case unlist(lapply(bb, function(x) { if (length(x) > 2 && is.list(trms <- slashTerms(x[[3]]))) return(lapply(unlist(makeInteraction(trms)), function(trm) substitute(foo|bar, list(foo = x[[2]], bar = trm)))) x })) } ordinal/R/clm.anova.R0000644000176200001440000002723114533321514014112 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## .
############################################################################# # clm.anova.R single_anova <- function(object, type = c("III", "II", "I", "3", "2", "1", "marginal", "2b")) { type <- type[1L] if(!is.character(type)) type <- as.character(type) type <- match.arg(type) if(type %in% c("I", "II", "III")) type <- as.character(as.integer(as.roman(type))) if(any(is.na(vcov(object)))) stop("anova table not available with non-finite values in vcov(object)") # Get list of contrast matrices (L) - one for each model term: L_list <- if(type == "1") { get_contrasts_type1(object) } else if(type == "2") { get_contrasts_type2_unfolded(object) } else if(type == "2b") { get_contrasts_type2(object) } else if(type == "3") { get_contrasts_type3(object) } else if(type == "marginal") { get_contrasts_marginal(object) } else { stop("'type' not recognized") } # Add cols to L for alpha, zeta and lambda params: L_list <- adjust_contrast_for_param(object, L_list) # Get Wald chi-square test for each term and collect in table: table <- rbindall(lapply(L_list, function(L) contestMD(object, L))) # Format ANOVA table and return: if(length(nm <- setdiff(names(L_list), rownames(table)))) { tab <- array(NA_real_, dim=c(length(nm), ncol(table)), dimnames = list(nm, colnames(table))) table <- rbind(table, tab)[names(L_list), ] } # Format 'type': type <- if(type == "marginal") { "Marginal" } else if(grepl("b|c", type)) { alph <- gsub("[0-9]", "", type) paste0("Type ", as.roman(as.integer(gsub("b|c", "", type))), alph) } else paste("Type", as.roman(as.integer(type))) attr(table, "heading") <- paste(type, "Analysis of Deviance Table with Wald chi-square tests\n") attr(table, "hypotheses") <- L_list class(table) <- c("anova", "data.frame") table } adjust_contrast_for_param <- function(model, L) { nalpha <- length(model$alpha) nzeta <- if(is.null(model$zeta)) 0L else length(model$zeta) nlambda <- if(is.null(model$lambda)) 0L else length(model$lambda) nextra <- nzeta + nlambda # pre and post add extra cols to L: add <- function(L) { pre <- array(0, dim=c(nrow(L), nalpha)) post <- array(0, dim=c(nrow(L), nextra)) cbind(pre, L[, -1L, drop=FALSE], post) } if(!is.list(L)) add(L) else lapply(L, add) } model_matrix <- function(object, ...) { if(!inherits(object, "clm")) return(model.matrix(object, ...)) X <- model.matrix(object)$X if(!any(object$aliased$beta)) return(X) remove <- c(FALSE, object$aliased$beta) newX <- X[, !remove, drop=FALSE] attr(newX, "assign") <- attr(X, "assign")[!remove] contr <- attr(X, "contrasts") if(!is.null(contr)) attr(newX, "contrasts") <- contr newX } contestMD <- function(model, L, rhs=0, eps=sqrt(.Machine$double.eps), ...)
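### Compute a multi-degree-of-freedom Wald chi-square test of the
### hypothesis L beta = rhs: Var(L beta) is eigen-decomposed and the
### statistic is accumulated over the q positive eigenvalues, so
### rank-deficient contrasts are handled gracefully (df = q).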
{ mk_Qtable <- function(Qvalue, df) { pvalue <- pchisq(q=Qvalue, df=df, lower.tail=FALSE) data.frame("Df"=df, "Chisq"=Qvalue, "Pr(>Chisq)"=pvalue, check.names = FALSE) } if(!is.matrix(L)) L <- matrix(L, ncol=length(L)) stopifnot(is.matrix(L), is.numeric(L), ncol(L) == length(coef(model, na.rm=TRUE))) if(length(rhs) == 1L) rhs <- rep(rhs, nrow(L)) stopifnot(is.numeric(rhs), length(rhs) == nrow(L)) if(nrow(L) == 0L) { # May happen if there are no fixed effects x <- numeric(0L) return(mk_Qtable(x, x)) } if(any(is.na(L))) return(mk_Qtable(NA_real_, NA_real_)) beta <- coef(model, na.rm=TRUE) vcov_beta <- vcov(model) # Adjust beta for rhs: if(!all(rhs == 0)) beta <- beta - drop(MASS::ginv(L) %*% rhs) # Compute Var(L beta) and eigen-decompose: VLbeta <- L %*% vcov_beta %*% t(L) # Var(contrast) = Var(Lbeta) eig_VLbeta <- eigen(VLbeta) P <- eig_VLbeta$vectors d <- eig_VLbeta$values tol <- max(eps * d[1], 0) pos <- d > tol q <- sum(pos) # rank(VLbeta) if(q < nrow(L) && !all(rhs == 0)) warning("Contrast is rank deficient and test may be affected") if(q <= 0) { # shouldn't happen if L is a proper contrast x <- numeric(0L) return(mk_Qtable(x, x)) } PtL <- crossprod(P, L)[1:q, ] # Compute t-squared values and Q-value: t2 <- drop(PtL %*% beta)^2 / d[1:q] Qvalue <- sum(t2) mk_Qtable(Qvalue, df=q) } ############################################## ######## get_contrasts_type3 ############################################## get_contrasts_type3 <- function(model, which=NULL) { term_names <- attr(terms(model), "term.labels") # Extract original design matrix: Xorig <- model_matrix(model) # Assumes Xorig is full (column) rank if(is.null(which)) { which <- term_names # If model has at most one term return Type I contrasts: if(ncol(Xorig) <= 1L || length(term_names) <= 1L) return(get_contrasts_type1(model)) } else stopifnot(is.character(which), all(which %in% term_names)) # Extract contrast coding in Xorig: codings <- unlist(attr(Xorig, "contrast")) # If only treatment contrasts are used we can just return the type 3 # contrasts for contr.treatment coding: if(length(codings) > 0 && all(is.character(codings)) && all(codings %in% c("contr.treatment"))) return(extract_contrasts_type3(model, X=Xorig)) # otherwise we need to map the type III contrasts to whatever contrast # coding was used: X <- get_model_matrix(model, type="remake", contrasts="contr.treatment") # Ensure that X is full (column) rank: X <- ensure_full_rank(X, silent=TRUE, test.ans=FALSE) # Extract contrasts assuming contr.treatment coding: type3ctr <- extract_contrasts_type3(model, X=X) map <- zapsmall(ginv(X) %*% Xorig) # Maps between contrast codings rownames(map) <- colnames(X) lapply(type3ctr[which], function(L) L %*% map) } ############################################## ######## get_contrasts_type1 ############################################## get_contrasts_type1 <- function(model) { terms <- terms(model) X <- model_matrix(model) nalpha <- length(model$alpha) p <- ncol(X) if(p == 0L) return(list(matrix(numeric(0L), nrow=0L))) # no fixef if(p == 1L && attr(terms, "intercept")) # intercept-only model return(list(matrix(numeric(0L), ncol=nalpha))) # Compute 'normalized' doolittle factorization of XtX: L <- if(p == 1L) matrix(1L) else t(doolittle(crossprod(X))$L) dimnames(L) <- list(colnames(X), colnames(X)) # Determine which rows of L belong to which term: ind.list <- term2colX(terms, X)[attr(terms, "term.labels")] lapply(ind.list, function(rows) L[rows, , drop=FALSE]) } ############################################## ######## 
get_contrasts_type2_unfolded ############################################## get_contrasts_type2_unfolded <- function(model, which=NULL) { # Computes the 'genuine type II contrast' for all terms that are # contained in other terms. For all terms which are not contained in other # terms, the simple marginal contrast is computed. X <- model_matrix(model) Terms <- terms(model) term_names <- attr(Terms, "term.labels") if(is.null(which)) { which <- term_names # If model has at most one term return Type I contrasts: if(ncol(X) <= 1L || length(term_names) <= 1L) return(get_contrasts_type1(model)) } else stopifnot(is.character(which), all(which %in% term_names)) is_contained <- containment(model) do_marginal <- names(is_contained)[sapply(is_contained, length) == 0L] do_type2 <- setdiff(term_names, do_marginal) if(!length(do_marginal)) list() else Llist <- get_contrasts_marginal(model, which=do_marginal) if(length(do_type2)) Llist <- c(Llist, get_contrasts_type2(model, which=do_type2)) Llist[term_names] } ############################################## ######## get_contrasts_type2 ############################################## get_contrasts_type2 <- function(model, which=NULL) { # Computes the type 2 contrasts - either for all terms or for those # included in 'which' (a chr vector naming model terms). # returns a list X <- model_matrix(model) nalpha <- length(model$alpha) terms <- terms(model) data_classes <- attr(terms(model), "dataClasses") if(is.null(asgn <- attr(X, "assign"))) stop("design matrix 'X' should have a non-null 'assign' attribute") term_names <- attr(terms, "term.labels") if(is.null(which)) { which <- term_names # If model has at most one term return Type I contrasts: if(ncol(X) <= 1L || length(term_names) <= 1L) return(get_contrasts_type1(model)) } else stopifnot(is.character(which), all(which %in% term_names)) which <- setNames(as.list(which), which) # Compute containment: is_contained <- containment(model) # Compute term assignment list: map from terms to columns in X has_intercept <- attr(terms, "intercept") > 0 col_terms <- if(has_intercept) c("(Intercept)", term_names)[asgn + 1] else term_names[asgn[asgn > 0]] if(!length(col_terms) == ncol(X)) # should never happen. stop("An error happened when computing Type II contrasts") term2colX <- split(seq_along(col_terms), col_terms)[unique(col_terms)] # Compute contrast for each term - return as named list: lapply(which, function(term) { # Reorder the cols in X to [, unrelated_to_term, term, contained_in_term] cols_term <- unlist(term2colX[c(term, is_contained[[term]])]) Xnew <- cbind(X[, -cols_term, drop=FALSE], X[, cols_term, drop=FALSE]) # Compute order of terms in Xnew: newXcol_terms <- c(col_terms[-cols_term], col_terms[cols_term]) # Compute Type I contrasts for the reordered X: Lc <- t(doolittle(crossprod(Xnew))$L) dimnames(Lc) <- list(colnames(Xnew), colnames(Xnew)) # Extract rows for term and get original order of columns: Lc[newXcol_terms == term, colnames(X), drop=FALSE] }) } ############################################## ######## get_contrasts_marginal ############################################## #' @importFrom stats model.matrix terms get_contrasts_marginal <- function(model, which=NULL) { # Computes marginal contrasts.
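# (for each term this is just the rows of an identity matrix that
# correspond to that term's columns in the design matrix X)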
# # No tests of conformity with coefficients are implemented # # returns a list X <- model_matrix(model) terms <- terms(model) term_names <- attr(terms, "term.labels") if(is.null(which)) { which <- term_names # If model has at most one term return Type I contrasts: if(ncol(X) <= 1L || length(term_names) <= 1L) return(get_contrasts_type1(model)) } else stopifnot(is.character(which), all(which %in% term_names)) # Compute map from terms to columns in X and contrasts matrix term2colX <- term2colX(terms, X) L <- structure(diag(ncol(X)), dimnames = list(colnames(X), colnames(X))) # Extract contrast for each term - return as named list: which <- setNames(as.list(which), which) lapply(which, function(term) { L[term2colX[[term]], , drop=FALSE] }) } ############################################## ######## rbindall ############################################## rbindall <- function(...) do.call(rbind, ...) cbindall <- function(...) do.call(cbind, ...) ordinal/R/clm.frames.R0000644000176200001440000002412214533321514014257 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## methods for computing, manipulating and extracting design matrices, ## weights, offsets, model.frames and things like that. ## ################################# ## ## call sequence ## clm() { ## get_clmFormulas() ## get_clm.mf() ## get_clmTerms() # optionally ## get_clmDesign() ## ## makeThresholds() ## drop.cols() ## ## clm.fit.default() ## get_clmInfoTab() ## } ## ## get_clmFormulas() { ## getFullForm() ## } ## ## get_clm.mf() { ## model.frame() ## } ## ## get_clmTerms() { ## get_clm.mf() ## } ## ## get_clmDesign() { ## checkContrasts() ## get_clmDM() ## for formula, scale, nominal ## getWeights() ## get_clmY() ## } ## ## get_clmDM() { ## model.matrix() ## getContrasts() ## getOffset() ## } get_clmTerms <- function(mc, formulas, call.envir=parent.frame(2L)) ### Compute terms objects for each of the formulas. { ## We need this approach in order to get the predvars and ## dataClasses attributes of the terms objects. nms <- c("formula", "scale", "nominal") keep <- match(nms, names(formulas), nomatch=0) lapply(formulas[keep], function(form) { terms(get_clm.mf(mc, form, attr(formulas, "envir"), call.envir)) }) } get_clmDesign <- function(fullmf, terms.list, contrasts) { ### Compute (y, X, weights, off, S, NOM etc.) for a clm object. ### clm-internal+external ### ### terms.list: list of terms.objects. 
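### contrasts: list of contrasts specified for terms in the model.
### Returns a list holding the response information (y, y.levels), the
### design matrices (X and, if specified, S and NOM), weights, offsets
### and the associated terms, contrasts and xlevels.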
stopifnot(all(sapply(terms.list, inherits, "terms"))) ## Check that contrasts are specified only for terms in the model: checkContrasts(terms=attr(fullmf, "terms"), contrasts=contrasts) ## Extract X (design matrix for location effects) + terms, offset: res <- get_clmDM(fullmf, terms.list[["formula"]], contrasts, type="formula") res$terms <- terms.list[["formula"]] res$contrasts <- attr(res$X, "contrasts") res$xlevels <- .getXlevels(res$terms, fullmf) res$na.action <- attr(fullmf, "na.action") ## Extract weights: res$weights <- getWeights(fullmf) ## Extract model response: res <- c(get_clmY(fullmf, res$weights), res) ## Extract S (design matrix for the scale effects): if(!is.null(terms.list$scale)) { ans <- get_clmDM(fullmf, terms.list[["scale"]], contrasts, type="scale") res$S <- ans$X res$S.terms <- terms.list[["scale"]] res$S.off <- ans$offset res$S.contrasts <- attr(res$S, "contrasts") res$S.xlevels <- .getXlevels(res$S.terms, fullmf) if(attr(res$S.terms, "response") != 0) stop("response not allowed in 'scale'", call.=FALSE) } ## Extract NOM (design matrix for the nominal effects): if(!is.null(terms.list$nominal)) { ans <- get_clmDM(fullmf, terms.list[["nominal"]], contrasts, type="nominal") res$NOM <- ans$X res$nom.terms <- terms.list[["nominal"]] res$nom.contrasts <- attr(res$NOM, "contrasts") res$nom.xlevels <- .getXlevels(res$nom.terms, fullmf) if(attr(res$nom.terms, "response") != 0) stop("response not allowed in 'nominal'", call.=FALSE) if(!is.null(attr(res$nom.terms, "offset"))) stop("offset not allowed in 'nominal'", call.=FALSE) } ## Return results (list of design matrices etc.): res ### NOTE: X, S and NOM are with dimnames and intercepts are ### guaranteed. They may be column rank deficient. } get_clmDM <- function(fullmf, terms, contrasts, check.intercept=TRUE, type="formula", get.offset=TRUE) ### Get DM (=Design Matrix): { X <- model.matrix(terms, data=fullmf, contrasts.arg=getContrasts(terms, contrasts)) ## Test for intercept in X(?): Xint <- match("(Intercept)", colnames(X), nomatch = 0) if(check.intercept && Xint <= 0) { X <- cbind("(Intercept)" = rep(1, nrow(X)), X) warning(gettextf("an intercept is needed and assumed in '%s'", type), call.=FALSE) } ## Intercept in X is guaranteed. 
res <- list(X=X) if(get.offset) res$offset <- getOffset(fullmf, terms) res } get_clm.mf <- function(mc, formula, form.envir, call.envir=parent.frame(2L)) ### clm-internal ### Extract the model.frame from formula ### mc - matched call containing: data, subset, weights, na.action { ## Extract the full model.frame(mf): m <- match(c("data", "subset", "weights", "na.action"), names(mc), 0) mfcall <- mc[c(1, m)] mfcall$formula <- formula mfcall$drop.unused.levels <- TRUE mfcall[[1]] <- as.name("model.frame") if(is.null(mfcall$data)) mfcall$data <- form.envir eval(mfcall, envir=call.envir) } get_clmY <- function(fullmf, weights) { y <- model.response(fullmf, "any") ## any storage mode if(is.null(y)) stop("'formula' needs a response", call.=FALSE) if(!is.factor(y)) stop("response in 'formula' needs to be a factor", call.=FALSE) ## y.levels are the levels of y with positive weights y.levels <- levels(droplevels(y[weights > 0])) ## check that y has at least two levels: if(length(y.levels) == 1L) stop(gettextf("response has only 1 level ('%s'); expecting at least two levels", y.levels), call.=FALSE) if(!length(y.levels)) stop("response should be a factor with at least two levels") ## return: list(y=y, y.levels=y.levels) } get_clmFormulas <- function(mc, envir=parent.frame(2L)) ### clm-internal ### Extracts and returns a list of formulas needed for further processing. ### mc: matched call ### envir: environment in which mc is to be evaluated { ## Collect all variables in a full formula: ## evaluate the formulae in the environment in which clm was called ## (parent.frame(2)) to get them evaluated properly: forms <- list(eval(mc$formula, envir=envir)) if(!is.null(mc$scale)) forms$scale <- eval(mc$scale, envir=envir) if(!is.null(mc$nominal)) forms$nominal <- eval(mc$nominal, envir=envir) ## get the environment of the formula. If this does not have an ## environment (it could be a character), then use the parent frame. form.envir <- if(!is.null(env <- environment(forms[[1]]))) env else envir ## ensure formula, scale and nominal are formulas: forms[] <- lapply(forms, function(x) { # 'is.character(.)' for scale = "~ ..." tryCatch(formula(if(is.character(x)) x else Deparse(x), env = form.envir), error = function(e)e) }) if(any(vapply(forms, inherits, FUN.VALUE=logical(1), what="error"))) stop("unable to interpret 'formula', 'scale' or 'nominal'") ## collect all variables in a full formula: forms$fullForm <- do.call("getFullForm", forms) ### OPTION: do we actually need to set this name? names(forms)[1] <- "formula" ## set environment of 'fullForm' to the environment of 'formula': attr(forms, "envir") <- environment(forms$fullForm) <- form.envir ## return: forms } get_clmRho <- function(object, ...) { UseMethod("get_clmRho") } get_clmRho.default <- function(object, terms.list, contrasts, link, threshold, parent=parent.frame(), start=NULL, control=clm.control(), ...) ### .default method(?) ### object: model.frame (fullmf) with all variables present ### terms.list: list of terms.objects for each of the formulas in the ### clm object.
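### Returns an environment (rho) with design matrices, starting values
### and link functions set, ready for the fitting algorithm.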
{ ## Get design matrices etc: design <- get_clmDesign(fullmf=object, terms.list=terms.list, contrasts=contrasts) ## Get threshold information: design <- c(design, makeThresholds(design$y.levels, threshold)) ## Drop columns for aliased coefs: design <- drop.cols(design, drop.scale=FALSE, silent=TRUE) ## Set envir rho with variables: B1, B2, o1, o2, weights, fitted: rho <- with(design, { clm.newRho(parent.frame(), y=y, X=X, NOM=design$NOM, S=design$S, weights=weights, offset=offset, S.offset=design$S.off, tJac=tJac, control=control) }) ## Set and check starting values for the parameters: start <- set.start(rho, start=start, get.start=is.null(start), threshold=threshold, link=link, frames=design) rho$par <- as.vector(start) ## remove attributes ## Set pfun, dfun and gfun in rho: setLinks(rho, link) ## Return: rho } get_clmRho.clm <- function(object, parent=parent.frame(), ...) { ### Safely generate the model environment from a model object. o <- object get_clmRho.default(object=model.frame(o), terms.list=terms(o, "all"), contrasts=o$contrasts, start=c(o$start), link=o$link, threshold=o$threshold, parent=parent, control=o$control, ...) } ## get_mfcall <- function(mc, envir=parent.frame(2)) { ## m <- match(c("data", "subset", "weights", "na.action"), ## names(mc), 0) ## mf <- mc[c(1, m)] ## ## mf$formula <- fullForm ## mf$drop.unused.levels <- TRUE ## mf[[1]] <- as.name("model.frame") ## ## if(is.null(mf$data)) mf$data <- form.envir ## list(mfcall=mf, envir=parent.frame(2)) ## } ordinal/R/control.R0000644000176200001440000000703014533321514013707 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Functions that set control parameters for clm() and clmm(). 
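## Illustrative use (sketch; 'wine' is the dataset shipped with the
## package): pass the result via the 'control' argument of the fitting
## function, e.g.
## clm(rating ~ temp, data = wine, control = clm.control(trace = 1))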
clm.control <- function(method = c("Newton", "model.frame", "design", "ucminf", "nlminb", "optim"), sign.location = c("negative", "positive"), sign.nominal = c("positive", "negative"), ..., trace = 0L, maxIter = 100L, gradTol = 1e-6, maxLineIter = 15L, relTol = 1e-6, tol = sqrt(.Machine$double.eps), maxModIter = 5L, convergence=c("warn", "silent", "stop", "message")) { method <- match.arg(method) convergence <- match.arg(convergence) sign.location <- match.arg(sign.location) sign.nominal <- match.arg(sign.nominal) if(!all(is.numeric(c(maxIter, gradTol, maxLineIter, relTol, tol, maxModIter)))) stop("maxIter, gradTol, relTol, tol, maxModIter and maxLineIter should all be numeric") ctrl <- list(method = method, sign.location = sign.location, sign.nominal = sign.nominal, convergence = convergence, trace = as.integer(trace), maxIter = as.integer(maxIter), gradTol = as.numeric(gradTol), relTol = as.numeric(relTol), tol = as.numeric(tol), maxLineIter = as.integer(maxLineIter), maxModIter = as.integer(maxModIter)) if(method %in% c("ucminf", "nlminb", "optim")) ctrl$ctrl <- list(trace = as.integer(abs(trace)), ...) return(ctrl) } clmm.control <- function(method = c("nlminb", "ucminf", "model.frame"), ..., trace = 0, maxIter = 50, gradTol = 1e-4, maxLineIter = 50, useMatrix = FALSE, innerCtrl = c("warnOnly", "noWarn", "giveError"), checkRanef = c("warn", "error", "message")) { method <- match.arg(method) innerCtrl <- match.arg(innerCtrl) checkRanef <- match.arg(checkRanef) useMatrix <- as.logical(useMatrix) stopifnot(is.logical(useMatrix)) ctrl <- list(trace=if(trace < 0) 1 else 0, maxIter=maxIter, gradTol=gradTol, maxLineIter=maxLineIter, innerCtrl=innerCtrl) optCtrl <- list(trace = abs(trace), ...) if(!is.numeric(unlist(ctrl[-5]))) stop("maxIter, gradTol, maxLineIter and trace should all be numeric") if(any(ctrl[-c(1, 5)] <= 0)) stop("maxIter, gradTol and maxLineIter have to be > 0") if(method == "ucminf" && !"grtol" %in% names(optCtrl)) optCtrl$grtol <- 1e-5 if(method == "ucminf" && !"grad" %in% names(optCtrl)) optCtrl$grad <- "central" namedList(method, useMatrix, ctrl, optCtrl, checkRanef) } ordinal/R/clmm.start.R0000644000176200001440000000254014533321514014314 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Functions to compute starting values for clmm()s. 
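## Strategy used below: fit the corresponding fixed-effects model with
## clm.fit() to obtain starting values for the thresholds and the
## regression parameters, and start the random-effect variance
## parameters at zero.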
clmm.start <- function(frames, link, threshold) { ## get starting values from clm: fit <- with(frames, clm.fit(y=y, X=X, weights=wts, offset=off, link=link, threshold=threshold)) ## initialize variance parameters to zero: start <- c(fit$par, rep(0, length(frames$grList))) return(start) } ordinal/R/clm.predict.R0000644000176200001440000003402514533321514014437 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## The predict method for clm objects. predict.clm <- function(object, newdata, se.fit = FALSE, interval = FALSE, level = 0.95, type = c("prob", "class", "cum.prob", "linear.predictor"), na.action = na.pass, ...) ### result - a list of predictions (fit) ### OPTION: restore names of the fitted values ### ### Assumes object has terms, xlevels, contrasts, tJac { ## match and test arguments: type <- match.arg(type) se.fit <- as.logical(se.fit)[1] interval <- as.logical(interval)[1] stopifnot(length(level) == 1 && is.numeric(level) && level < 1 && level > 0) if(type == "class" && (se.fit || interval)) { warning("se.fit and interval set to FALSE for type = 'class'") se.fit <- interval <- FALSE } cov <- if(se.fit || interval) vcov(object) else NULL ### Get newdata object; fill in response if missing and always for ### type=="class": has.response <- TRUE if(type == "class" && missing(newdata)) ## newdata <- update(object, method="model.frame")$mf newdata <- model.frame(object) ## newdata supplied or type=="class": has.newdata <- !(missing(newdata) || is.null(newdata)) if(has.newdata || type=="class") { if(has.newdata && sum(unlist(object$aliased)) > 0) warning("predictions from column rank-deficient fit may be misleading") newdata <- as.data.frame(newdata) ## Test if response is in newdata: resp <- response.name(object$terms) ## remove response from newdata if type == "class" if(type == "class") newdata <- newdata[!names(newdata) %in% resp] has.response <- resp %in% names(newdata) ## FALSE for type == "class" if(!has.response) { ## fill in response variable in newdata if missing: ylev <- object$y.levels nlev <- length(ylev) nnd <- nrow(newdata) newdata <- cbind(newdata[rep(1:nnd, each=nlev) , , drop=FALSE], factor(rep(ylev, nnd), levels=ylev, ordered=TRUE)) names(newdata)[ncol(newdata)] <- resp } ### Set model matrices: if(is.null(attr(object$terms, "predvars"))) warning(paste0("terms object does not have a predvars attribute: ", "predictions may be misleading")) mf <- model.frame(object$terms, newdata, na.action=na.action, xlev=object$xlevels) ## model.frame will warn, but here we also throw an error: if(nrow(mf) != nrow(newdata)) stop("length of variable(s) found do not match nrow(newdata)") ## check that variables are of the right type: if (!is.null(cl <- 
attr(object$terms, "dataClasses"))) .checkMFClasses(cl, mf) ## make model.matrix: X <- model.matrix(object$terms, mf, contrasts = object$contrasts) Xint <- match("(Intercept)", colnames(X), nomatch = 0L) n <- nrow(X) if(Xint <= 0) X <- cbind("(Intercept)" = rep(1, n), X) # if(object$control$sign.location == "negative") NOM[, -1] <- -NOM[, -1] ## drop aliased columns: if(sum(object$aliased$beta) > 0) X <- X[, !c(FALSE, object$aliased$beta), drop=FALSE] ## handle offset (from predict.lm): ### NOTE: Could factor the offset handling out in its own function for ### code clarity: offset <- rep(0, nrow(X)) if(!is.null(off.num <- attr(object$terms, "offset"))) for(i in off.num) offset <- offset + eval(attr(object$terms, "variables")[[i + 1]], newdata) y <- model.response(mf) if(any(!levels(y) %in% object$y.levels)) stop(gettextf("response factor '%s' has new levels", response.name(object$terms))) ### make NOMINAL model.matrix: if(is.nom <- !is.null(object$nom.terms)) { ## allows NAs to pass through to fit, se.fit, lwr and upr: nom.mf <- model.frame(object$nom.terms, newdata, na.action=na.action, xlev=object$nom.xlevels) ## model.frame will warn, but here we also throw an error: if(nrow(nom.mf) != nrow(newdata)) stop("length of variable(s) found do not match nrow(newdata)") if (!is.null(cl <- attr(object$nom.terms, "dataClasses"))) .checkMFClasses(cl, nom.mf) NOM <- model.matrix(object$nom.terms, nom.mf, contrasts=object$nom.contrasts) NOMint <- match("(Intercept)", colnames(NOM), nomatch = 0L) if(NOMint <= 0) NOM <- cbind("(Intercept)" = rep(1, n), NOM) # if(object$control$sign.nominal == "negative") NOM[, -1] <- -NOM[, -1] alias <- t(matrix(object$aliased$alpha, nrow=length(object$y.levels) - 1))[,1] if(sum(alias) > 0) NOM <- NOM[, !c(FALSE, alias), drop=FALSE] } ### make SCALE model.matrix: if(is.scale <- !is.null(object$S.terms)) { ## allows NAs to pass through to fit, se.fit, lwr and upr: S.mf <- model.frame(object$S.terms, newdata, na.action=na.action, xlev=object$S.xlevels) ## model.frame will warn, but here we also throw an error: if(nrow(S.mf) != nrow(newdata)) stop("length of variable(s) found do not match nrow(newdata)") if (!is.null(cl <- attr(object$S.terms, "dataClasses"))) .checkMFClasses(cl, S.mf) S <- model.matrix(object$S.terms, S.mf, contrasts=object$S.contrasts) Sint <- match("(Intercept)", colnames(S), nomatch = 0L) if(Sint <= 0) S <- cbind("(Intercept)" = rep(1, n), S) if(sum(object$aliased$zeta) > 0) S <- S[, !c(FALSE, object$aliased$zeta), drop=FALSE] Soff <- rep(0, nrow(S)) if(!is.null(off.num <- attr(object$S.terms, "offset"))) for(i in off.num) Soff <- Soff + eval(attr(object$S.terms, "variables")[[i + 1]], newdata) } ### Construct model environment: tJac <- object$tJac dimnames(tJac) <- NULL env <- clm.newRho(parent.frame(), y=y, X=X, NOM=if(is.nom) NOM else NULL, S=if(is.scale) S else NULL, weights=rep(1, n), offset=offset, S.offset=if(is.scale) Soff else rep(0, n), tJac=tJac, control=object$control) setLinks(env, link=object$link) } ## end !missing(newdata) or type == "class" else { env <- get_clmRho.clm(object) ## env <- update(object, doFit=FALSE) } env$par <- as.vector(coef(object)) env$par <- env$par[!is.na(env$par)] ### OPTION: Are there better ways to handle NAs in coef? 
## if(length(env$par) != ncol(env$B1)) ## stop(gettextf("design matrix has %d columns, but expecting %d (number of parameters)", ## ncol(env$B1), length(env$par))) ## Get predictions: pred <- switch(type, "prob" = prob.predict.clm(env=env, cov=cov, se.fit=se.fit, interval=interval, level=level), "class" = prob.predict.clm(env=env, cov=cov, se.fit=se.fit, interval=interval, level=level), "cum.prob" = cum.prob.predict.clm(env=env, cov=cov, se.fit=se.fit, interval=interval, level=level), "linear.predictor" = lin.pred.predict.clm(env=env, cov=cov, se.fit=se.fit, interval=interval, level=level) ##, ## "eta" = eta.pred.predict.clm(env=env, cov=cov, ## se.fit=se.fit, interval=interval, level=level) ) ### Arrange predictions in matrices if response is missing from ### newdata arg or type=="class": if(!has.response || type == "class") { pred <- lapply(pred, function(x) { x <- matrix(unlist(x), ncol=nlev, byrow=TRUE) dimnames(x) <- list(1:nrow(x), ylev) x }) ## if(type == "eta") ## pred <- lapply(pred, function(x) { ## x <- x[, -nlev, drop=FALSE] ## colnames(x) <- names(object$alpha) ## }) if(type == "class") pred <- lapply(pred, function(x) { factor(max.col(x), levels=seq_along(ylev), labels=ylev) }) } ### Filter missing values (if relevant): if(missing(newdata) && !is.null(object$na.action)) pred <- lapply(pred, function(x) napredict(object$na.action, x)) return(pred) } prob.predict.clm <- function(env, cov, se.fit=FALSE, interval=FALSE, level=0.95) ### Works for linear and scale models: ### env - model environment with par set. ### cov - vcov for the parameters { ## evaluate nll and grad to set dpi.psi in env: clm.nll(env) pred <- list(fit = as.vector(env$fitted)) if(se.fit || interval) { se.pr <- get.se(env, cov, type="prob") if(se.fit) pred$se.fit <- se.pr if(interval) { pred.logit <- qlogis(pred$fit) ## se.logit <- dlogis(pred$fit) * se.pr se.logit <- se.pr / (pred$fit * (1 - pred$fit)) a <- (1 - level)/2 pred$lwr <- plogis(pred.logit + qnorm(a) * se.logit) pred$upr <- plogis(pred.logit - qnorm(a) * se.logit) } } return(pred) } eta.pred.predict.clm <- function(env, cov, se.fit=FALSE, interval=FALSE, level=0.95) { ## clm.nll(env) pred <- list(eta = c(with(env, B1 %*% par[1:n.psi]))) if(se.fit || interval) { se <- get.se(env, cov, type="lp") if(se.fit) { pred$se.eta <- se[[1]] } if(interval) { a <- (1 - level)/2 pred$lwr1 <- env$eta1 + qnorm(a) * se[[1]] pred$upr1 <- env$eta1 - qnorm(a) * se[[1]] } } pred } lin.pred.predict.clm <- function(env, cov, se.fit=FALSE, interval=FALSE, level=0.95) ### get predictions on the scale of the linear predictor { ## evaluate nll and grad to set dpi.psi in env: clm.nll(env) pred <- list(eta1=env$eta1, eta2=env$eta2) if(se.fit || interval) { se <- get.se(env, cov, type="lp") if(se.fit) { pred$se.eta1 <- se[[1]] pred$se.eta2 <- se[[2]] } if(interval) { a <- (1 - level)/2 pred$lwr1 <- env$eta1 + qnorm(a) * se[[1]] pred$lwr2 <- env$eta2 + qnorm(a) * se[[2]] pred$upr1 <- env$eta1 - qnorm(a) * se[[1]] pred$upr2 <- env$eta2 - qnorm(a) * se[[2]] } } return(pred) ## list with predictions. 
} cum.prob.predict.clm <- function(env, cov, se.fit=FALSE, interval=FALSE, level=0.95) { ## evaluate nll and grad to set dpi.psi in env: clm.nll(env) pred <- list(cprob1=env$pfun(env$eta1), cprob2=env$pfun(env$eta2)) if(se.fit || interval) { se <- get.se(env, cov, type="gamma") if(se.fit) { pred$se.cprob1 <- se[[1]] pred$se.cprob2 <- se[[2]] } if(interval) { a <- (1 - level)/2 pred$lwr1 <- pred$cprob1 + qnorm(a) * se[[1]] pred$lwr2 <- pred$cprob2 + qnorm(a) * se[[2]] pred$upr1 <- pred$cprob1 - qnorm(a) * se[[1]] pred$upr2 <- pred$cprob2 - qnorm(a) * se[[2]] } } return(pred) } get.se <- function(rho, cov, type=c("lp", "gamma", "prob")) { ### Computes standard errors of predicted probabilities (prob), ### cumulative probabilities (gamma) or values of the linear ### predictor (lp) for linear (k<=0) or location-scale models ### (k>0). rho$xcovtx <- function(x, chol.cov) { ## Compute 'diag(x %*% cov %*% t(x))' diag(x %*% crossprod(chol.cov) %*% t(x)) ## colSums(tcrossprod(chol.cov, x)^2) } rho$type <- match.arg(type) ind <- seq_len(rho$n.psi + rho$k) rho$chol.cov <- try(chol(cov[ind, ind]), silent=TRUE) if(inherits(rho$chol.cov, "try-error")) stop(gettext("VarCov matrix of model parameters is not positive definite:\n cannot compute standard errors of predictions"), call.=FALSE) clm.nll(rho) ## just to be safe with(rho, { ### First compute d[eta, gamma, prob] / d par; then compute variance ### covariance matrix of the observations and extract SEs as the ### square root of the diagonal elements: if(type %in% c("lp", "gamma")) { D1 <- B1 D2 <- B2 if(k > 0) { D1 <- cbind(D1/sigma, -S*eta1) D2 <- cbind(D2/sigma, -S*eta2) } if(type == "gamma") { p1 <- if(!nlambda) dfun(eta1) else dfun(eta1, lambda) p2 <- if(!nlambda) dfun(eta2) else dfun(eta2, lambda) D1 <- D1*p1 D2 <- D2*p2 } se <- list(se1=sqrt(xcovtx(D1, chol.cov)), se2=sqrt(xcovtx(D2, chol.cov))) } if(type == "prob") { p1 <- if(!nlambda) dfun(eta1) else dfun(eta1, lambda) p2 <- if(!nlambda) dfun(eta2) else dfun(eta2, lambda) C2 <- if(k <= 0) B1*p1 - B2*p2 else cbind(B1*p1/sigma - B2*p2/sigma, -(eta1 * p1 - eta2 * p2) * S) se <- sqrt(xcovtx(C2, chol.cov)) } }) rho$se } ordinal/R/clm.nominal_test.R0000644000176200001440000001704214533321514015501 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Implementation of of nominal_test.clm() and scale_test.clm() for ## automatic testing of nominal and scale effects in clm()s. These ## functions work in a fashion similar to add1(). nominal_test <- function(object, ...) { UseMethod("nominal_test") } scale_test <- function(object, ...) { UseMethod("scale_test") } nominal_test.clm <- function(object, scope, trace=FALSE, ...) 
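### Illustrative use (sketch; 'fm' is an assumed clm fit):
### nominal_test(fm) # add1-type table of likelihood ratio tests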
### Test nominal effects for all (or selected) terms in location ### and scale formulas. { ## get scope: vector of terms names which to add to nominal: termsnm <- attr(object$terms, "term.labels") if(!is.null(object$S.terms)) termsnm <- union(termsnm, attr(object$S.terms, "term.labels")) if(!missing(scope) && !is.null(scope)) { if(!is.character(scope)) scope <- attr(terms(update.formula(object, scope)), "term.labels") if(!all(match(scope, termsnm, 0L) > 0L)) stop("scope is not a subset of term labels") } else { scope <- termsnm } if(!is.null(object$nom.terms)) { scope <- scope[!scope %in% attr(object$nom.terms, "term.labels")] } if(!length(scope)) message("\nno additional terms to add to nominal\n") env <- environment(formula(object)) ## get list of (updated) nominal formulas: nomforms <- if(!is.null(object$call$nominal)) lapply(scope, function(tm) { update.formula(old=formula(object$nom.terms), new=as.formula(paste("~. + ", tm))) }) else lapply(scope, function(tm) { as.formula(paste("~", tm), env=env) }) ns <- length(scope) ## results matrix: ans <- matrix(nrow = ns + 1L, ncol = 3L, dimnames = list(c("", scope), c("df", "logLik", "AIC"))) ans[1L, ] <- c(object$edf, object$logLik, AIC(object)) n0 <- nobs(object) ## for all terms in scope: i <- 1 for(i in seq(ns)) { if(trace) { cat("trying +", scope[i], "\n", sep = " ") utils::flush.console() } ## update and fit model with nominal effect added: nfit <- try(update(object, nominal=nomforms[[i]], convergence="silent"), silent=TRUE) ## model may not be identifiable or converge: if(!inherits(nfit, "try-error") && ### NOTE: non-negative convergence codes indicate that the likelihood ### is correctly determined: nfit$convergence$code >= 0) { ans[i + 1L, ] <- c(nfit$edf, nfit$logLik, AIC(nfit)) nnew <- nobs(nfit) if(all(is.finite(c(n0, nnew))) && nnew != n0) stop("number of rows in use has changed: remove missing values?") } } dfs <- ans[, 1L] - ans[1L, 1L] dfs[1L] <- NA aod <- data.frame(Df = dfs, logLik = ans[, 2L], AIC = ans[, 3L]) rownames(aod) <- rownames(ans) ## compute likelihood ratio statistic and p-values: LR <- 2*(ans[, 2L] - ans[1L, 2L]) LR[1L] <- NA nas <- !is.na(LR) P <- LR P[nas] <- pchisq(LR[nas], dfs[nas], lower.tail = FALSE) aod[, c("LRT", "Pr(>Chi)")] <- list(LR, P) head <- c("Tests of nominal effects", paste("\nformula:", Deparse(formula(object$terms)))) if(!is.null(object$call$scale)) head <- c(head, paste("scale: ", Deparse(formula(object$S.terms)))) if(!is.null(object$call$nominal)) head <- c(head, paste("nominal:", Deparse(formula(object$nom.terms)))) class(aod) <- c("anova", "data.frame") attr(aod, "heading") <- head aod } scale_test.clm <- function(object, scope, trace=FALSE, ...) 
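### Illustrative use (sketch; 'fm' is an assumed clm fit):
### scale_test(fm) # add1-type table of likelihood ratio tests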
### Test scale effects for all (or selected) terms in formula { ## get scope: vector of terms names which to add to scale: termsnm <- attr(object$terms, "term.labels") if(!missing(scope) && !is.null(scope)) { if(!is.character(scope)) scope <- attr(terms(update.formula(object, scope)), "term.labels") if(!all(match(scope, termsnm, 0L) > 0L)) stop("scope is not a subset of term labels") } else { scope <- termsnm } ## if(!is.null(object$nom.terms)) { ## scope <- scope[!scope %in% attr(object$nom.terms, ## "term.labels")] ## } if(!is.null(object$S.terms)) { scope <- scope[!scope %in% attr(object$S.terms, "term.labels")] } if(!length(scope)) message("\nno relevant terms to add to scale\n") env <- environment(formula(object)) ## get list of (updated) scale formulas: scaleforms <- if(!is.null(object$call$scale)) lapply(scope, function(tm) { update.formula(old=formula(object$S.terms), new=as.formula(paste("~. + ", tm))) }) else lapply(scope, function(tm) as.formula(paste("~", tm), env=env)) ns <- length(scope) ## results matrix: ans <- matrix(nrow = ns + 1L, ncol = 3L, dimnames = list(c("", scope), c("df", "logLik", "AIC"))) ans[1L, ] <- c(object$edf, object$logLik, AIC(object)) n0 <- nobs(object) ## for all terms in scope: for(i in seq(ns)) { if(trace) { cat("trying +", scope[i], "\n", sep = " ") utils::flush.console() } ## update and fit model with scale effect added: nfit <- try(update(object, scale=scaleforms[[i]]), silent=TRUE) ## model may not be identifiable or converge: if(!inherits(nfit, "try-error") && nfit$convergence$code >= 0) { ans[i + 1L, ] <- c(nfit$edf, nfit$logLik, AIC(nfit)) nnew <- nobs(nfit) if (all(is.finite(c(n0, nnew))) && nnew != n0) stop("number of rows in use has changed: remove missing values?") } } dfs <- ans[, 1L] - ans[1L, 1L] dfs[1L] <- NA aod <- data.frame(Df = dfs, logLik = ans[, 2L], AIC = ans[, 3L]) rownames(aod) <- rownames(ans) ## compute likelihood ratio statistic and p-values: LR <- 2*(ans[, 2L] - ans[1L, 2L]) LR[1L] <- NA nas <- !is.na(LR) P <- LR P[nas] <- pchisq(LR[nas], dfs[nas], lower.tail = FALSE) aod[, c("LRT", "Pr(>Chi)")] <- list(LR, P) head <- c("Tests of scale effects", paste("\nformula:", Deparse(formula(object$terms)))) if(!is.null(object$call$scale)) head <- c(head, paste("scale: ", Deparse(formula(object$S.terms)))) if(!is.null(object$call$nominal)) head <- c(head, paste("nominal:", Deparse(formula(object$nom.terms)))) class(aod) <- c("anova", "data.frame") attr(aod, "heading") <- head aod } ordinal/R/clmm2.utils.R0000755000176200001440000003276414533321514014417 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Utility functions for fitting CLMMs with clmm2(). 
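## Overview (sketch): the .negLogLikM*() functions evaluate the joint
## negative log-likelihood of the observations and the random effects
## u at the current parameters, while update.u2() and update.u2.v2()
## locate the conditional modes of u by Newton-Raphson iterations,
## which is the basis of the Laplace approximation used by clmm2().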
### OPTION: Could make use of getFittedC throughout this file... .negLogLikMfastR <- function(rho) { ## negative log-likelihood ### .negLogLikMfast in R with(rho, { o21 <- u[grFac] * stDev o11 <- o1 - o21 o21 <- o2 - o21 eta1 <- (eta1Fix + o11)/sigma eta2 <- (eta2Fix + o21)/sigma pr <- if(nlambda) pfun(eta1, lambda) - pfun(eta2, lambda) else pfun(eta1) - pfun(eta2) if(any(is.na(pr)) || any(pr <= 0)) nll <- Inf else nll <- -sum(weights * log(pr)) - sum(dnorm(x=u, mean=0, sd=1, log=TRUE)) nll }) } .negLogLikM <- function(rho) { ## negative log-likelihood with(rho, { if(estimStDev) stDev <- exp(par[p+nxi+k+estimLambda+ 1:s]) o21 <- u[grFac] * stDev o11 <- o1 - o21 o21 <- o2 - o21 if(estimLambda > 0) lambda <- par[nxi + p + k + 1:estimLambda] sigma <- if(k > 0) expSoffset * exp(drop(Z %*% par[nxi+p + 1:k])) else expSoffset eta1Fix <- drop(B1 %*% par[1:(nxi + p)]) eta2Fix <- drop(B2 %*% par[1:(nxi + p)]) eta1 <- (eta1Fix + o11)/sigma eta2 <- (eta2Fix + o21)/sigma pr <- if(nlambda) pfun(eta1, lambda) - pfun(eta2, lambda) else pfun(eta1) - pfun(eta2) if(any(is.na(pr)) || any(pr <= 0)) nll <- Inf else nll <- -sum(weights * log(pr)) - sum(dnorm(x=u, mean=0, sd=1, log=TRUE)) nll }) } .gradM <- function(rho) { ## gradient of the negative log-likelihood with(rho, { if(nlambda) { p1 <- dfun(eta1, lambda) p2 <- dfun(eta2, lambda) } else { p1 <- dfun(eta1) p2 <- dfun(eta2) } wtprSig <- weights/pr/sigma .C("gradC", as.double(stDev), as.double(p1), as.double(p2), as.double(wtprSig), as.integer(grFac), length(wtprSig), u = as.double(u), length(u))$u ## tapply(stDev * wtprSig * (p1 - p2), grFac, sum) + u }) } .gradC <- function(rho) { tmp <- with(rho, { .C("grad_C", as.double(stDev), p1 = double(length(pr)), p2 = double(length(pr)), as.double(pr), as.double(weights), as.double(sigma), wtprSig = double(length(pr)), as.double(eta1), as.double(eta2), gradValues = double(length(u)), as.double(u), as.integer(grFac), length(pr), length(u), as.double(lambda), as.integer(linkInt))[c("p1", "p2", "wtprSig", "gradValues")] }) rho$wtprSig <- tmp$wtprSig rho$p1 <- tmp$p1 rho$p2 <- tmp$p2 tmp$gradValues } .hessC <- function(rho) { with(rho, { .C("hess", as.double(stDev), as.double(p1), as.double(p2), as.double(pr), as.double(wtprSig), as.double(eta1), as.double(eta2), as.integer(linkInt), as.integer(grFac), length(pr), hessValues = double(length(u)), as.double(lambda), length(u))$hessValues }) } .hessianM <- function(rho) ## hessian of the negative log-likelihood with(rho,{ if(nlambda) { g1 <- gfun(eta1, lambda) g2 <- gfun(eta2, lambda) } else { g1 <- gfun(eta1) g2 <- gfun(eta2) } .C("hessC", as.double(stDev), as.double(p1), as.double(p2), as.double(pr), as.double(g1), as.double(g2), as.double(wtprSig), as.integer(grFac), length(pr), z = double(length(u)), length(u))$z ## tapply(((p1 - p2)^2 / pr - g1 + g2) * wtprSig, grFac, sum) * ## stDev^2 + 1 }) update.u2.v2 <- function(rho) { ### second version: C-implementation of NR-algorithm. 
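### Updates rho$u to the conditional modes of the random effects given
### the current parameter values via the compiled "NRalg" routine;
### returns TRUE on success and FALSE if the negative log-likelihood
### is not finite.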
.negLogLikBase(rho) ## update: par, stDev, eta1Fix, eta2Fix eta2Fix, sigma fit <- with(rho, .C("NRalg", as.integer(ctrl$trace), as.integer(ctrl$maxIter), as.double(ctrl$gradTol), as.integer(ctrl$maxLineIter), as.integer(grFac), as.double(stDev), as.double(o1), as.double(o2), as.double(eta1Fix), as.double(eta2Fix), as.double(eta1), as.double(eta2), as.double(sigma), as.integer(linkInt), as.double(weights), u = as.double(uStart), pr = as.double(pr), funValue = as.double(nll), gradValues = as.double(uStart), hessValues = as.double(uStart), length(pr), length(uStart), maxGrad = double(1), conv = 0L, double(length(pr)), # p1 double(length(pr)), # p2 double(length(pr)), # wtprSig as.double(lambda), Niter = as.integer(Niter) )[c("u", "pr", "funValue", "gradValues", "hessValues", "maxGrad", "conv", "Niter")] ) ## Get message: message <- switch(as.character(fit$conv), "1" = "max|gradient| < tol, so current iterate is probably solution", "0" = "Non finite negative log-likelihood", "-1" = "iteration limit reached when updating the random effects", "-2" = "step factor reduced below minimum when updating the random effects") ## check for convergence and report warning/error: if(rho$ctrl$trace > 0 && fit$conv == 1) cat("\nOptimizer converged! ", "max|grad|:", fit$maxGrad, message, fill = TRUE) if(fit$conv != 1 && rho$ctrl$innerCtrl == "warnOnly") warning(message, "\n at iteration ", rho$Niter) else if(fit$conv != 1 && rho$ctrl$innerCtrl == "giveError") stop(message, "\n at iteration ", rho$Niter) ## Store values and return: rho$Niter <- fit$Niter rho$u <- fit$u rho$D <- fit$hessValue rho$gradient <- fit$gradValue if(!is.finite(rho$negLogLik <- fit$funValue)) return(FALSE) return(TRUE) } update.u2 <- function(rho) { stepFactor <- 1 innerIter <- 0 rho$u <- rho$uStart rho$negLogLik <- .negLogLikM(rho) if(!is.finite(rho$negLogLik)) return(FALSE) rho$gradient <- .gradC(rho) maxGrad <- max(abs(rho$gradient)) conv <- -1 ## Convergence flag message <- "iteration limit reached when updating the random effects" if(rho$ctrl$trace > 0) Trace(iter=0, stepFactor, rho$negLogLik, maxGrad, rho$u, first=TRUE) ## Newton-Raphson algorithm: for(i in 1:rho$ctrl$maxIter) { if(maxGrad < rho$ctrl$gradTol) { message <- "max|gradient| < tol, so current iterate is probably solution" if(rho$ctrl$trace > 0) cat("\nOptimizer converged! ", "max|grad|:", maxGrad, message, fill = TRUE) conv <- 0 break } rho$D <- .hessC(rho) ## rho$D <- .hessianM(rho) step <- rho$gradient / rho$D rho$u <- rho$u - stepFactor * step negLogLikTry <- .negLogLikMfast(rho) lineIter <- 0 ## simple line search, i.e. 
step halving: while(negLogLikTry > rho$negLogLik) { stepFactor <- stepFactor/2 rho$u <- rho$u + stepFactor * step negLogLikTry <- .negLogLikMfast(rho) lineIter <- lineIter + 1 if(rho$ctrl$trace > 0) Trace(i+innerIter, stepFactor, rho$negLogLik, maxGrad, rho$u, first=FALSE) if(lineIter > rho$ctrl$maxLineIter){ message <- "step factor reduced below minimum when updating the random effects" conv <- 1 break } innerIter <- innerIter + 1 } rho$negLogLik <- negLogLikTry rho$gradient <- .gradC(rho) maxGrad <- max(abs(rho$gradient)) if(rho$ctrl$trace > 0) Trace(i+innerIter, stepFactor, rho$negLogLik, maxGrad, rho$u, first=FALSE) stepFactor <- min(1, 2 * stepFactor) } if(conv != 0 && rho$ctrl$innerCtrl == "warnOnly") { warning(message, "\n at iteration ", rho$Niter) utils::flush.console() } else if(conv != 0 && rho$ctrl$innerCtrl == "giveError") stop(message, "\n at iteration ", rho$Niter) rho$Niter <- rho$Niter + i rho$D <- .hessC(rho) if(!is.finite(rho$negLogLik)) return(FALSE) else return(TRUE) } .hessMinC <- function(rho) { with(rho,{ if(nlambda) { g1 <- gfun(eta1, lambda) g2 <- gfun(eta2, lambda) } else { g1 <- gfun(eta1) g2 <- gfun(eta2) } .C("hessC", as.double(stDev), as.double(p1), as.double(p2), as.double(pr), as.double(g1), as.double(g2), as.double(wtprSig), as.integer(grFac), length(pr), z = double(length(u)), length(u))$z }) } .gradMinC <- function(stDev, p1, p2, wtprSig, grFac, u) .C("gradC", as.double(stDev), as.double(p1), as.double(p2), as.double(wtprSig), as.integer(unclass(grFac)), as.integer(length(wtprSig)), u = as.double(u), as.integer(length(u)))$u .gradMinC <- function(rho) { with(rho, { if(nlambda) { p1 <- dfun(eta1, lambda) p2 <- dfun(eta2, lambda) } else { p1 <- dfun(eta1) p2 <- dfun(eta2) } wtprSig <- weights/pr/sigma .C("gradC", as.double(stDev), as.double(p1), as.double(p2), as.double(wtprSig), as.integer(grFac), length(wtprSig), u = as.double(u), length(u))$u }) } grFacSumC <- function(x, grFac, u) .C("grFacSum_C", as.double(x), as.integer(grFac), as.integer(length(x)), u = as.double(u), as.integer(length(u)))$u grFacSum <- function(x, grFac, n.x, u, n.u) { ## i, j, z z <- 0 for (i in 1:n.u) { for (j in 1:n.x) if(grFac[j] == i) z <- z + x[j] u[i] <- z + u[i] z <- 0 } u } getNAGQ2 <- function(rho, par) { ### Not in use if(!missing(par)) rho$par <- par if(!update.u2(rho)) return(Inf) if(any(rho$D < 0)) return(Inf) with(rho, { K <- sqrt(2/D) agqws <- K %*% t(ghqws) agqns <- apply(K %*% t(ghqns), 2, function(x) x + u) ranNew <- apply(agqns, 2, function(x) x[grFac] * stDev) eta1Tmp <- (eta1Fix + o1 - ranNew) / sigma eta2Tmp <- (eta2Fix + o2 - ranNew) / sigma if(nlambda) ## PRnn <- (pfun(eta1Tmp, lambda) - pfun(eta2Tmp, lambda))^weights ## This is likely a computationally safer solution: PRnn <- exp(weights * log(pfun(eta1Tmp, lambda) - pfun(eta2Tmp, lambda))) else ## PRnn <- (pfun(eta1Tmp) - pfun(eta2Tmp))^weights PRnn <- exp(weights * log(pfun(eta1Tmp) - pfun(eta2Tmp))) for(i in 1:r) ## PRrn[i,] <- apply(PRnn[grFac == i, ], 2, prod) PRrn[i,] <- apply(PRnn[grFac == i, ,drop = FALSE], 2, prod) PRrn <- PRrn * agqws * dnorm(x=agqns, mean=0, sd=1) ### OPTION: Could this be optimized by essentially computing dnorm 'by hand'?
}) -sum(log(rowSums(rho$PRrn))) } getNGHQ <- function(rho, par) { ### Not in use if(!missing(par)) rho$par <- par .negLogLikM(rho) ## Update lambda, stDev, sigma and eta*Fix with(rho, { eta1Tmp <- (eta1Fix + o1 - ranNew * stDev) / sigma eta2Tmp <- (eta2Fix + o2 - ranNew * stDev) / sigma if(nlambda) PRnn <- exp(weights * log(pfun(eta1Tmp, lambda) - pfun(eta2Tmp, lambda))) else PRnn <- exp(weights * log(pfun(eta1Tmp) - pfun(eta2Tmp))) for(i in 1:r) PRrn[i,] <- apply(PRnn[grFac == i, ,drop = FALSE], 2, prod) PRrn <- PRrn * agqws * dnorm(x=agqns, mean=0, sd=1) }) -sum(log(rowSums(rho$PRrn))) } ordinal/R/clm.Thetamat.R0000644000176200001440000001223514533321514014553 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Functions (getThetamat) to compute a table of threshold ## coefficients from model fits (clm()s) with nominal effects. getThetamat <- function(terms, alpha, assign, contrasts, tJac, xlevels, sign.nominal) ### Compute matrix of thresholds for all combinations of levels of ### factors in the nominal formula. ### ### Input: ### terms: nominal terms object ### alpha: vector of threshold parameters ### assign: attr(NOM, "assign"), where NOM is the design matrix for ### the nominal effects ### contrasts: list of contrasts for the nominal effects ### tJac: threshold Jacobian with appropriate dimnames. ### xlevels: names of levels of factors among the nominal effects. ### sign.nominal: "positive" or "negative" ### ### Output: ### Theta: data.frame of thresholds ### mf.basic: if nrow(Theta) > 1 a data.frame with factors in columns ### and all combinations of the factor levels in rows. { ## Make matrix of thresholds; Theta: Theta <- matrix(alpha, ncol=ncol(tJac), byrow=TRUE) ## Matrix with variables-by-terms: factor.table <- attr(terms, "factors") all.varnm <- rownames(factor.table) ### NOTE: need to index with all.varnm not to include (weights) and ### possibly other stuff. var.classes <- attr(terms, "dataClasses")[all.varnm] numeric.var <- which(var.classes != "factor") ### NOTE: Logical variables are treated as numeric variables. 
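### (illustration with hypothetical variables: for nominal = ~ contact + temp
### with 'contact' a factor and 'temp' numeric or logical, the rows of Theta
### belonging to 'temp' terms are dropped below, so only the intercept and
### 'contact' rows are kept)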
numeric.terms <- factor.terms <- numeric(0) if(length(factor.table)) { ## Terms associated with numerical variables: numeric.terms <- which(colSums(factor.table[numeric.var, , drop=FALSE]) > 0) ## Terms only involving factor variables: factor.terms <- which(colSums(factor.table[numeric.var, , drop=FALSE]) == 0) } ## Remove rows in Theta for numeric variables: if(length(numeric.terms)) { ### NOTE: ncol(NOM) == length(asgn) == nrow(Theta) ### length(attr(terms, "term.labels")) == ncol(factor.table) ### NOTE: length(var.classes) == nrow(factor.table) numeric.rows <- which(assign %in% numeric.terms) Theta <- Theta[-numeric.rows, , drop=FALSE] ## Drop terms so the design matrix, X for the factors does not ## include numeric variables: if(length(factor.terms)) terms <- drop.terms(terms, dropx=numeric.terms, keep.response=FALSE) } ## if some nominal effects are factors: if(length(factor.terms)) { ## get xlevels for factors, not ordered (factors) factor.var <- which(var.classes == "factor") factor.varnm <- names(var.classes)[factor.var] xlev <- xlevels[factor.varnm] ## minimal complete model frame: mf.basic <- do.call(expand.grid, xlev) ## minimal complete design matrix: X <- model.matrix(terms, data=mf.basic, contrasts=contrasts[factor.varnm]) ### NOTE: get_clmDesign adds an intercept if its not there, so we need ### to do that as well here. Otherwise 'X[, keep, drop=FALSE]' will ### fail: if(!"(Intercept)" %in% colnames(X)) X <- cbind("(Intercept)" = rep(1, nrow(X)), X) if(sign.nominal == "negative") X[, -1] <- -X[, -1] ### NOTE: There are no contrasts for numerical variables, but there ### may be for ordered factors. ## From threshold parameters to thresholds: ### NOTE: some rows of Theta may contain NAs due to rank deficiency of ### the NOM design matrix. keep <- apply(Theta, 1, function(x) sum(is.na(x)) == 0) ## Theta <- apply(Theta, 2, function(th) X %*% th) tmp <- lapply(1:ncol(Theta), function(i) { X[, keep, drop=FALSE] %*% Theta[keep, i] }) Theta <- do.call(cbind, tmp) } ## Adjust each row in Theta for threshold functions: tmp <- lapply(seq_len(nrow(Theta)), function(i) c(tJac %*% Theta[i, ])) Theta <- do.call(rbind, tmp) ### NOTE: apply returns a vector and not a matrix when ncol(Theta) == ### 1, so we need to avoid it here. ## Theta <- t(apply(Theta, 1, function(th) tJac %*% th)) colnames(Theta) <- rownames(tJac) res <- list(Theta = as.data.frame(Theta)) ## add factor information if any: if(NROW(Theta) > 1) res$mf.basic <- mf.basic ## return: res } ordinal/R/clm.profile.R0000644000176200001440000007336714533321514014461 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## profile and confint methods for clm objects. 
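## A minimal usage sketch (the 'wine' data ships with the package; any clm()
## fit without a flexible link should work the same way):
##   fm <- clm(rating ~ temp + contact, data = wine)
##   pr <- profile(fm, alpha = 0.005)
##   confint(pr, level = 0.95)  ## profile-likelihood intervals
##   plot(pr)                   ## inspect the profile shapes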
profile.clm <- function(fitted, which.beta = seq_len(nbeta), which.zeta = seq_len(nzeta), alpha = 0.001, max.steps = 50, nsteps = 8, trace = FALSE, step.warn = 5, control = list(), ...) { ### match and tests arguments and dispatch to .zeta and .beta ### functions for the actual profiling. ### which.[beta, zeta] - numeric or character vectors. ### Works for models with nominal and scale effects and for any number ### of aliased coefs. ## match and test arguments: if(fitted$link %in% c("Aranda-Ordaz", "log-gamma")) stop("Profiling not implemented for models with flexible link function") if(any(is.na(diag(vcov(fitted))))) stop("Cannot get profile when vcov(fitted) contains NAs", call.=FALSE) stopifnot(is.numeric(alpha) && length(alpha) == 1 && alpha > 0 && alpha < 1) stopifnot(round(max.steps) > round(nsteps)) stopifnot(round(nsteps) > round(step.warn)) stopifnot(round(nsteps) > 0 && round(step.warn) >= 0) max.steps <- round(max.steps) nsteps <- round(nsteps) step.warn <- round(step.warn) trace <- as.logical(trace)[1] ### BETA: beta.names <- names(fitted$beta) ## possible beta nbeta <- length(fitted$beta) if(is.character(which.beta)) which.beta <- match(which.beta, beta.names, nomatch = 0) ## which.beta is a numeric vector if(!all(which.beta %in% seq_len(nbeta))) stop("invalid 'parm' argument") ### ZETA: zeta.names <- names(fitted$zeta) ## possible zeta nzeta <- length(fitted$zeta) if(is.character(which.zeta)) which.zeta <- match(which.zeta, zeta.names, nomatch = 0) ## which.zeta is a numeric vector if(!all(which.zeta %in% seq_len(nzeta))) stop("invalid 'parm' argument") ## the actual profiling for beta and zeta par: prof.beta <- if(nbeta) profile.clm.beta(fitted, which.beta, alpha, max.steps, nsteps, trace, step.warn, control, ...) else NULL prof.zeta <- if(nzeta) profile.clm.zeta(fitted, which.zeta, alpha, max.steps, nsteps, trace, step.warn, control, ...) else NULL ## collect and return results: val <- structure(c(prof.beta, prof.zeta), original.fit = fitted) class(val) <- c("profile.clm") return(val) } profile.clm.beta <- function(fitted, which.beta, alpha = 0.001, max.steps = 50, nsteps = 8, trace = FALSE, step.warn = 5, control = list(), ...) ### which.beta is assumed to be a numeric vector { lroot.max <- qnorm(1 - alpha/2) delta = lroot.max/nsteps nbeta <- length(fitted$beta) beta.names <- names(fitted$beta) nalpha <- length(fitted$alpha) orig.par <- c(fitted$alpha, fitted$beta) if(!is.null(zeta <- fitted$zeta)) { names(zeta) <- paste("sca", names(fitted$zeta), sep=".") orig.par <- c(orig.par, zeta) } if(!is.null(lambda <- fitted$lambda)) { orig.par <- c(orig.par, lambda) } ### NOTE: we need to update zeta.names to make names(orig.par) ### unique. This is needed to correctly construct the resulting ### par.vals matrix and to extract from it again. std.err <- coef(summary(fitted))[nalpha + 1:nbeta, "Std. 
Error"] if(any(is.na(std.err))) stop("Cannot profile model where standard errors are NA", call.=FALSE) ## results list: prof.list <- vector("list", length = length(which.beta)) names(prof.list) <- beta.names[which.beta] ## get model matrices and model environment: ### NOTE: Fixing the fragile update approach: ## mf <- update(fitted, method = "model.frame") ## Need to subset by wts to make nrow(X) == nrow(B1) ## X <- with(mf, X[wts > 0, , drop=FALSE]) ## containing alias cols wts <- getWeights(model.frame(fitted)) X <- model.matrix(fitted)$X[wts > 0, , drop=FALSE] if(fitted$control$sign.location == "positive") X <- -X rho <- get_clmRho(fitted) ## rho <- update(fitted, doFit = FALSE) orig <- as.list(rho)[c("B1", "B2", "o1", "o2")] rho$n.psi <- rho$n.psi - 1 ## needed for models with scale nalpha.clean <- sum(!fitted$aliased$alpha) par.clean <- orig.par[!is.na(orig.par)] ## which of which.beta are NA: alias.wb <- fitted$aliased$beta[which.beta] ## For each which.beta move up or down, fit the model and store the ## signed likelihood root statistic and parameter values: for(wb in which.beta) { if(alias.wb[wb == which.beta]) next ## ignore aliased coef rem <- nalpha.clean + (which.beta - cumsum(alias.wb))[wb == which.beta] par.wb <- matrix(coef(fitted), nrow = 1) ## MLE wb.name <- beta.names[wb] lroot.wb <- 0 ## lroot at MLE ## set variables in fitting environment: rho$B1 <- orig$B1[, -rem, drop=FALSE] rho$B2 <- orig$B2[, -rem, drop=FALSE] for(direction in c(-1, 1)) { ## move down or up if(trace) { message("\nParameter: ", wb.name, c(" down", " up")[(direction + 1)/2 + 1]) utils::flush.console() } ## reset starting values: rho$par <- par.clean[-rem] for(step in seq_len(max.steps)) { ## increment beta.i, offset and refit model without wb parameter: beta.i <- fitted$beta[wb] + direction * step * delta * std.err[wb] new.off <- X[, 1+wb, drop=TRUE] * beta.i rho$o1 <- orig$o1 - new.off rho$o2 <- orig$o2 - new.off fit <- clm_fit_NR(rho, control) ## save likelihood root statistic: lroot <- -direction * sqrt(2*(fitted$logLik - fit$logLik)) ## save lroot and pararameter values: lroot.wb <- c(lroot.wb, lroot) temp.par <- orig.par temp.par[names(fit$par)] <- fit$par temp.par[wb.name] <- beta.i par.wb <- rbind(par.wb, temp.par) ## break for loop if profile is far enough: if(abs(lroot) > lroot.max) break } ## end 'step in seq_len(max.steps)' ## test that lroot.max is reached and enough steps are taken: if(abs(lroot) < lroot.max) warning("profile may be unreliable for ", wb.name, " because lroot.max was not reached for ", wb, c(" down", " up")[(direction + 1)/2 + 1]) if(step <= step.warn) warning("profile may be unreliable for ", wb.name, " because only ", step, "\n steps were taken ", c("down", "up")[(direction + 1)/2 + 1]) } ## end 'direction in c(-1, 1)' ## order lroot and par values and collect in a data.frame: lroot.order <- order(lroot.wb, decreasing = TRUE) prof.list[[wb.name]] <- structure(data.frame(lroot.wb[lroot.order]), names = "lroot") prof.list[[wb.name]]$par.vals <- par.wb[lroot.order, ] if(!all(diff(par.wb[lroot.order, wb.name]) > 0)) warning("likelihood is not monotonically decreasing from maximum,\n", " so profile may be unreliable for ", wb.name) } ## end 'wb in which.beta' prof.list } profile.clm.zeta <- function(fitted, which.zeta, alpha = 0.001, max.steps = 50, nsteps = 8, trace = FALSE, step.warn = 5, control = list(), ...) 
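### (structurally parallel to profile.clm.beta above, except that the fixed
### coefficient enters through the scale model: Soff is multiplied by
### exp(S[, wz] * zeta.i) below rather than entering o1/o2 as an offset)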
### which.zeta is assumed to be a numeric vector { lroot.max <- qnorm(1 - alpha/2) delta = lroot.max/nsteps nzeta <- length(fitted$zeta) nbeta <- length(fitted$beta) zeta <- fitted$zeta names(zeta) <- zeta.names <- paste("sca", names(fitted$zeta), sep=".") ### NOTE: we need to update zeta.names to make names(orig.par) ### unique. This is needed to correctly construct the resulting ### par.vals matrix and to extract from it again. orig.par <- c(fitted$alpha, fitted$beta, zeta) nalpha <- length(fitted$alpha) std.err <- coef(summary(fitted))[nalpha+nbeta+1:nzeta, "Std. Error"] if(any(is.na(std.err))) stop("Cannot profile model where standard errors are NA", call.=FALSE) ## results list: prof.list <- vector("list", length = length(which.zeta)) names(prof.list) <- names(zeta)[which.zeta] ## get model environment: rho <- get_clmRho(fitted) ## rho <- update(fitted, doFit = FALSE) S <- rho$S ## S without intercept Soff <- rho$Soff rho$k <- max(0, rho$k - 1) ab <- c(fitted$alpha, fitted$beta) ab.clean <- ab[!is.na(ab)] zeta.clean <- zeta[!fitted$aliased$zeta] ## which of which.zeta are NA: alias.wz <- fitted$aliased$zeta[which.zeta] ## For each which.zeta move up or down, fit the model and store the ## signed likelihood root statistic and parameter values: for(wz in which.zeta) { if(alias.wz[wz]) next ## ignore aliased coef ## rem: which columns of S to remove rem <- (which.zeta - cumsum(alias.wz))[wz] par.wz <- matrix(coef(fitted), nrow = 1) ## MLE wz.name <- zeta.names[wz] lroot.wz <- 0 ## lroot at MLE ## set variables in fitting environment: rho$S <- S[, -rem, drop=FALSE] for(direction in c(-1, 1)) { ## move down or up if(trace) { message("\nParameter: ", wz.name, c(" down", " up")[(direction + 1)/2 + 1]) utils::flush.console() } ## reset starting values: rho$par <- c(ab.clean, zeta.clean[-rem]) ## rho$par <- coef(fitted, na.rm = TRUE)[-rem] for(step in seq_len(max.steps)) { ## increment zeta.i, offset and refit model without wz parameter: zeta.i <- zeta[wz] + direction * step * delta * std.err[wz] rho$Soff <- rho$sigma <- Soff * exp(S[, wz, drop=TRUE] * zeta.i) ### NOTE: Need to update sigma in addition to Soff since otherwise ### sigma isn't updated when k=0 (single scale par) fit <- clm_fit_NR(rho, control) ## save likelihood root statistic: lroot <- -direction * sqrt(2*(fitted$logLik - fit$logLik)) ## save lroot and pararameter values: lroot.wz <- c(lroot.wz, lroot) temp.par <- orig.par temp.par[names(fit$par)] <- fit$par temp.par[wz.name] <- zeta.i par.wz <- rbind(par.wz, temp.par) ## break for loop if profile is far enough: if(abs(lroot) > lroot.max) break } ## end 'step in seq_len(max.steps)' ## test that lroot.max is reached and enough steps are taken: if(abs(lroot) < lroot.max) warning("profile may be unreliable for ", wz.name, " because qnorm(1 - alpha/2) was not reached when profiling ", c(" down", " up")[(direction + 1)/2 + 1]) if(step <= step.warn) warning("profile may be unreliable for ", wz.name, " because only ", step, "\n steps were taken ", c("down", "up")[(direction + 1)/2 + 1]) } ## end 'direction in c(-1, 1)' ## order lroot and par values and collect in a data.frame: ## lroot.order <- order(lroot.wz, decreasing = TRUE) lroot.order <- order(par.wz[, wz.name], decreasing = FALSE) ### NOTE: Need to change how values are ordered here. We should order ### with par.wz[, wz.name] instead of lroot.wz since if lroot.wz is ### flat, the order may be incorrect. 
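### (a nearly flat profile can give tied or non-monotone lroot.wz values,
### whereas par.wz[, wz.name] is monotone by construction, which keeps the
### spline/approx input in confint.profile.clm properly ordered)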
prof.list[[wz.name]] <- structure(data.frame(lroot.wz[lroot.order]), names = "lroot") prof.list[[wz.name]]$par.vals <- par.wz[lroot.order, ] if(!all(diff(lroot.wz[lroot.order]) <= sqrt(.Machine$double.eps))) warning("likelihood is not monotonically decreasing from maximum,\n", " so profile may be unreliable for ", wz.name) } ## end 'wz in which.zeta' prof.list } ## profile.sclm <- ## using clm.fit.env() ## function(fitted, which.beta = seq_len(nbeta), alpha = 0.001, ## max.steps = 50, nsteps = 8, trace = FALSE, ## step.warn = 5, control = list(), ...) ## ### NOTE: seq_len(nbeta) works for nbeta = 0: numeric(0), while ## ### 1:nbeta gives c(1, 0). ## ## ### This is almost a copy of profile.clm2, which use clm.fit rather ## ### than clm.fit.env. The current implementation is the fastest, but ## ### possibly less readable. ## { ## ## match and test arguments: ## stopifnot(is.numeric(alpha) && length(alpha) == 1 && ## alpha > 0 && alpha < 1) ## stopifnot(round(max.steps) > round(nsteps)) ## stopifnot(round(nsteps) > round(step.warn)) ## stopifnot(round(nsteps) > 0 && round(step.warn) >= 0) ## max.steps <- round(max.steps) ## nsteps <- round(nsteps) ## step.warn <- round(step.warn) ## trace <- as.logical(trace)[1] ## ## possible parameters on which to profile (including aliased coef): ## beta.names <- names(fitted$beta) ## nbeta <- length(fitted$beta) ## if(is.character(which.beta)) ## which.beta <- match(which.beta, beta.names, nomatch = 0) ## ## which.beta is a numeric vector ## if(!all(which.beta %in% seq_len(nbeta))) ## stop("invalid 'parm' argument") ## stopifnot(length(which.beta) > 0) ## std.err <- coef(summary(fitted))[-(1:length(fitted$alpha)), ## "Std. Error"] ## ## profile limit: ## lroot.max <- qnorm(1 - alpha/2) ## ## profile step length: ## delta <- lroot.max / nsteps ## ## results list: ## prof.list <- vector("list", length = length(which.beta)) ## names(prof.list) <- beta.names[which.beta] ## ## get model.frame: ## X <- update(fitted, method = "model.frame")$X ## containing alias cols ## rho <- update(fitted, doFit = FALSE) ## orig <- as.list(rho)[c("B1", "B2", "o1", "o2")] ## rho$n.psi <- rho$n.psi - 1 ## nalpha.clean <- sum(!fitted$aliased$alpha) ## ## which of which.beta are NA: ## alias.wb <- fitted$aliased$beta[which.beta] ## ## For each which.beta move up or down, fit the model and store the ## ## signed likelihood root statistic and parameter values: ## for(wb in which.beta) { ## if(alias.wb[wb]) next ## ignore aliased coef ## rem <- nalpha.clean + (which.beta - cumsum(alias.wb))[wb] ## par.wb <- matrix(coef(fitted), nrow = 1) ## MLE ## wb.name <- beta.names[wb] ## lroot.wb <- 0 ## lroot at MLE ## ## set variables in fitting environment: ## rho$B1 <- orig$B1[, -rem, drop=FALSE] ## rho$B2 <- orig$B2[, -rem, drop=FALSE] ## for(direction in c(-1, 1)) { ## move down or up ## if(trace) { ## message("\nParameter: ", wb.name, ## c(" down", " up")[(direction + 1)/2 + 1]) ## utils::flush.console() ## } ## ## reset starting values: ## rho$par <- coef(fitted, na.rm = TRUE)[-rem] ## ## rho$par <- orig.par[-wb.name] ## for(step in seq_len(max.steps)) { ## ## increment beta.i, offset and refit model without wb parameter: ## beta.i <- fitted$beta[wb] + ## direction * step * delta * std.err[wb] ## new.off <- X[, 1+wb, drop=TRUE] * beta.i ## rho$o1 <- orig$o1 - new.off ## rho$o2 <- orig$o2 - new.off ## fit <- clm.fit.env(rho, control) ## ## save likelihood root statistic: ## lroot <- -direction * sqrt(2*(fitted$logLik - fit$logLik)) ## ## save lroot and pararameter values: ## lroot.wb <- 
c(lroot.wb, lroot) ## temp.par <- coef(fitted) ## temp.par[names(fit$par)] <- fit$par ## temp.par[wb.name] <- beta.i ## par.wb <- rbind(par.wb, temp.par) ## ## break for loop if profile is far enough: ## if(abs(lroot) > lroot.max) break ## } ## end 'step in seq_len(max.steps)' ## ## test that lroot.max is reached and enough steps are taken: ## if(abs(lroot) < lroot.max) ## warning("profile may be unreliable for ", wb.name, ## " because lroot.max was not reached for ", ## wb, c(" down", " up")[(direction + 1)/2 + 1]) ## if(step <= step.warn) ## warning("profile may be unreliable for ", wb.name, ## " because only ", step, "\n steps were taken ", ## c("down", "up")[(direction + 1)/2 + 1]) ## } ## end 'direction in c(-1, 1)' ## ## order lroot and par. values and collect in a data.frame: ## lroot.order <- order(lroot.wb, decreasing = TRUE) ## prof.list[[wb.name]] <- ## structure(data.frame(lroot.wb[lroot.order]), names = "lroot") ## prof.list[[wb.name]]$par.vals <- par.wb[lroot.order, ] ## ## if(!all(diff(par.wb[lroot.order, wb.name]) > 0)) ## warning("likelihood is not monotonically decreasing from maximum,\n", ## " so profile may be unreliable for ", wb.name) ## } ## end 'wb in which.beta' ## val <- structure(prof.list, original.fit = fitted) ## class(val) <- c("profile.clm") ## return(val) ## } format.perc <- function(probs, digits) ### function lifted from stats:::format.perc to avoid using ':::' paste(format(100 * probs, trim = TRUE, scientific = FALSE, digits = digits), "%") confint.clm <- function(object, parm, level = 0.95, type = c("profile", "Wald"), trace = FALSE, ...) ### parm argument is ignored - use confint.profile for finer control. { ## match and test arguments type <- match.arg(type) if(object$link %in% c("Aranda-Ordaz", "log-gamma") && type == "profile") { message(paste("Profile intervals not available for models with flexible", "link function:\n reporting Wald intervals instead")) type <- "Wald" } stopifnot(is.numeric(level) && length(level) == 1 && level > 0 && level < 1) trace <- as.logical(trace)[1] if(!(missing(parm) || is.null(parm))) message("argument 'parm' ignored") ## Wald CI: if(type == "Wald") { a <- (1 - level)/2 a <- c(a, 1 - a) pct <- format.perc(a, 3) fac <- qnorm(a) coefs <- coef(object) ses <- coef(summary(object))[, 2] ci <- array(NA, dim = c(length(coefs), 2L), dimnames = list(names(coefs), pct)) ci[] <- coefs + ses %o% fac return(ci) } ## profile likelhood CI: if(trace) { message("Wait for profiling to be done...") utils::flush.console() } ## get profile: object <- profile(object, alpha = (1 - level)/4, trace = trace, ...) ## get and return CIs: confint(object, level = level, ...) } ## confint.clm <- ## function(object, parm = seq_len(npar), level = 0.95, ## type = c("profile", "Wald"), trace = FALSE, ...) ## ### parm: a 2-list with beta and zeta? ## ### or args which.beta, which.zeta while parm is redundant? ## ## ### make waldci.clm(object, which.alpha, which.beta, which.zeta, level ## ### = 0.95) ?? 
## { ## ## match and test arguments ## type <- match.arg(type) ## stopifnot(is.numeric(level) && length(level) == 1 && ## level > 0 && level < 1) ## trace <- as.logical(trace)[1] ## mle <- object$beta ## if(!is.null(zeta <- object$zeta)) { ## names(zeta) <- paste("sca", names(zeta), sep=".") ## mle <- c(mle, zeta) ## } ## npar <- length(mle) ## beta.names <- names(mle) ## if(is.character(parm)) stop("parm should be numeric") ## ## parm <- match(parm, names(c(object$beta, object$zeta))), nomatch = 0) ## if(!all(parm %in% seq_len(npar))) stop("invalid 'parm' argument") ## stopifnot(length(parm) > 0) ## ## Wald CI: ## if(type == "Wald") ## return(waldci.clm(object, parm, level)) ## ## return(confint.default(object = object, parm = beta.names[parm], ## ## level = level)) ## ## profile likelhood CI: ## if(trace) { ## message("Waiting for profiling to be done...") ## utils::flush.console() ## } ## ## get profile: ## ### Edit these calls: ## object <- profile(object, which.beta = beta.names[parm], ## alpha = (1 - level)/4, trace = trace, ...) ## ## get and return CIs: ## confint(object, parm = beta.names[parm], level = level, ...) ## } confint.profile.clm <- function(object, parm = seq_len(nprofiles), level = 0.95, ...) ### parm index elements of object (the list of profiles) ### each par.vals matrix of each profile will have ### sum(!unlist(of$aliased)) columns. { ## match and test arguments: stopifnot(is.numeric(level) && length(level) == 1 && level > 0 && level < 1) of <- attr(object, "original.fit") prof.names <- names(object) nprofiles <- length(prof.names) if(is.character(parm)) ### Allow character here? parm <- match(parm, prof.names, nomatch = 0) if(!all(parm %in% seq_len(nprofiles))) stop("invalid 'parm' argument") stopifnot(length(parm) > 0) ## prepare CI: a <- (1-level)/2 a <- c(a, 1-a) pct <- paste(round(100*a, 1), "%") ci <- array(NA, dim = c(length(parm), 2), dimnames = list(prof.names[parm], pct)) cutoff <- qnorm(a) ## compute CI from spline interpolation of the likelihood profile: for(pr.name in prof.names[parm]) { if(is.null(pro <- object[[ pr.name ]])) next sp <- spline(x = pro[, "par.vals"][, pr.name], y = pro[, 1]) ## OBS ci[pr.name, ] <- approx(sp$y, sp$x, xout = rev(cutoff))$y } ## do not drop(ci) because rownames are lost for single coef cases: return(ci) } plot.profile.clm <- function(x, which.par = seq_len(nprofiles), level = c(0.95, 0.99), Log = FALSE, relative = TRUE, root = FALSE, fig = TRUE, approx = root, n = 1e3, ask = prod(par("mfcol")) < length(which.par) && dev.interactive(), ..., ylim = NULL) { ## match and test arguments: stopifnot(is.numeric(level) && all(level > 0) && all(level < 1)) stopifnot(n == round(n) && n > 0) Log <- as.logical(Log)[1] relative <- as.logical(relative)[1] root <- as.logical(root)[1] fig <- as.logical(fig)[1] approx <- as.logical(approx)[1] of <- attr(x, "original.fit") mle <- of$beta if(!is.null(zeta <- of$zeta)) { names(zeta) <- paste("sca", names(zeta), sep=".") mle <- c(mle, zeta) } prof.names <- names(x) nprofiles <- length(prof.names) if(is.character(which.par)) which.par <- match(which.par, prof.names, nomatch = 0) if(!all(which.par %in% seq_len(nprofiles))) stop("invalid 'which.par' argument") stopifnot(length(which.par) > 0) ML <- of$logLik ## prepare return value: which.names <- prof.names[which.par] spline.list <- vector("list", length(which.par)) names(spline.list) <- which.names if(approx) { std.err <- coef(summary(of))[-(1:length(of$alpha)), 2] names(std.err) <- names(mle) } ## aks before "over writing" the plot? 
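## (devAskNewPage(TRUE) makes the device prompt before each new page; the
## previous setting is saved in 'oask' and restored via on.exit below, so
## the device is not left in the prompting state)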
if(ask) { oask <- devAskNewPage(TRUE) on.exit(devAskNewPage(oask)) } ## for each pm make the appropriate plot: for(pr.name in prof.names[which.par]) { ## confidence limits: lim <- sapply(level, function(x) exp(-qchisq(x, df=1)/2) ) if(is.null(pro <- x[[ pr.name ]])) next sp <- spline(x=pro[, "par.vals"][, pr.name], y=pro[, 1], n=n) if(approx) y.approx <- (mle[pr.name] - sp$x) / std.err[pr.name] if(root) { ylab <- "profile trace" lim <- c(-1, 1) %o% sqrt(-2 * log(lim)) sp$y <- -sp$y if(approx) y.approx <- -y.approx } else { ## !root: sp$y <- -sp$y^2/2 if(approx) y.approx <- -y.approx^2/2 if(relative && !Log) { sp$y <- exp(sp$y) if(approx) y.approx <- exp(y.approx) ylab <- "Relative profile likelihood" if(missing(ylim)) ylim <- c(0, 1) } if(relative && Log) { ylab <- "Relative profile log-likelihood" lim <- log(lim) } if(!relative && Log) { sp$y <- sp$y + ML if(approx) y.approx <- y.approx + ML ylab <- "Profile log-likelihood" lim <- ML + log(lim) } if(!relative && !Log) { sp$y <- exp(sp$y + ML) if(approx) y.approx <- exp(y.approx + ML) ylab <- "Profile likelihood" lim <- exp(ML + log(lim)) } } spline.list[[ pr.name ]] <- sp if(fig) { ## do the plotting: plot(sp$x, sp$y, type = "l", ylim = ylim, xlab = pr.name, ylab = ylab, ...) abline(h = lim) if(approx) lines(sp$x, y.approx, lty = 2) if(root) points(mle[pr.name], 0, pch = 3) } } attr(spline.list, "limits") <- lim invisible(spline.list) } profileAlt.clm <- ## using clm.fit() function(fitted, which.beta = seq_len(nbeta), alpha = 0.01, max.steps = 50, nsteps = 8, trace = FALSE, step.warn = 5, control = list(), ...) ### NOTE: seq_len(nbeta) works for nbeta = 0: numeric(0), while ### 1:nbeta gives c(1, 0). ### args: ### alpha - The likelihood is profiled in the 100*(1-alpha)% ### confidence region as determined by the profile likelihood ### max.steps - the maximum number of profile steps in each direction ### nsteps - the approximate no. steps determined by the quadratic ### approximation to the log-likelihood function ### trace - if trace > 0 information of progress is printed ### step.warn - a warning is issued if the profile in each direction ### contains less than step.warn steps (due to lack of precision). { ## match and test arguments: stopifnot(is.numeric(alpha) && length(alpha) == 1 && alpha > 0 && alpha < 1) stopifnot(round(max.steps) > round(nsteps)) stopifnot(round(nsteps) > round(step.warn)) stopifnot(round(nsteps) > 0 && round(step.warn) >= 0) max.steps <- round(max.steps) nsteps <- round(nsteps) step.warn <- round(step.warn) trace <- as.logical(trace)[1] beta.names <- names(fitted$beta) nbeta <- length(fitted$beta) if(is.character(which.beta)) which.beta <- match(which.beta, beta.names, nomatch = 0) if(!all(which.beta %in% seq_len(nbeta))) stop("invalid 'parm' argument") stopifnot(length(which.beta) > 0) ## Extract various things from the original fit: orig.par <- coef(fitted) ## c(alpha, beta) beta0 <- fitted$beta ## regression coef. nalpha <- length(fitted$alpha) ## no. threshold coef. nbeta <- length(beta0) beta.names <- names(beta0) orig.logLik <- fitted$logLik std.err <- coef(summary(fitted))[-(1:nalpha), "Std. 
Error"] link <- fitted$link threshold <- fitted$threshold ## profile limit: lroot.max <- qnorm(1 - alpha/2) ## profile step length: delta <- lroot.max / nsteps ## results list: prof.list <- vector("list", length = length(which.beta)) names(prof.list) <- beta.names[which.beta] ## get model.frame: ### NOTE: Attempting the following fix for a safer extraction of ### model-design-objects: ## mf <- update(fitted, method = "model.frame") contr <- c(fitted$contrasts, fitted$S.contrasts, fitted$nom.contrasts) mf <- get_clmDesign(fitted$model, fitted$terms.list, contr) y <- mf$y X <- mf$X wts <- mf$wts orig.off <- mf$off ## For each which.beta move up or down, fit the model and store the ## signed likelihood root statistic and parameter values: for(wb in which.beta) { par.wb <- matrix(orig.par, nrow = 1) ## MLE wb.name <- beta.names[wb] lroot.wb <- 0 ## lroot at MLE X.wb <- X[, -(1+wb), drop=FALSE] for(direction in c(-1, 1)) { ## move down or up if(trace) { message("\nParameter: ", wb.name, c(" down", " up")[(direction + 1)/2 + 1]) utils::flush.console() } ## (re)set starting values: start <- orig.par[-(nalpha + wb)] for(step in seq_len(max.steps)) { ## increment offset and refit model without wb parameter: beta.i <- beta0[wb] + direction * step * delta * std.err[wb] new.off <- orig.off + X[, 1+wb, drop=TRUE] * beta.i fit <- clm.fit(y=y, X=X.wb, weights=wts, offset=new.off, control=control, start=start, link=link, threshold=threshold) ## save likelihood root statistic: lroot <- -direction * sqrt(2*(fitted$logLik - fit$logLik)) ## save lroot and pararameter values: lroot.wb <- c(lroot.wb, lroot) temp.par <- orig.par temp.par[names(fit$par)] <- fit$par temp.par[wb.name] <- beta.i par.wb <- rbind(par.wb, temp.par) ## update starting values: start <- fit$par ## break for loop if profile is far enough: if(abs(lroot) > lroot.max) break } ## end 'step in seq_len(max.steps)' ## test that lroot.max is reached and enough steps are taken: if(abs(lroot) < lroot.max) warning("profile may be unreliable for ", wb.name, " because lroot.max was not reached for ", wb, c(" down", " up")[(direction + 1)/2 + 1]) if(step <= step.warn) warning("profile may be unreliable for ", wb.name, " because only ", step, "\n steps were taken ", c("down", "up")[(direction + 1)/2 + 1]) } ## end 'direction in c(-1, 1)' ## order lroot and par. values and collect in a data.frame: lroot.order <- order(lroot.wb, decreasing = TRUE) prof.list[[wb.name]] <- structure(data.frame(lroot.wb[lroot.order]), names = "lroot") prof.list[[wb.name]]$par.vals <- par.wb[lroot.order, ] if(!all(diff(par.wb[lroot.order, wb.name]) > 0)) warning("likelihood is not monotonically decreasing from maximum,\n", " so profile may be unreliable for ", wb.name) } ## end 'wb in which.beta' val <- structure(prof.list, original.fit = fitted) class(val) <- c("profile.clm") return(val) } ordinal/R/clmm.methods.R0000644000176200001440000002434314533321514014627 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. 
## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Implementation of various methods for clmm objects. formatVC <- function(varc, digits = max(3, getOption("digits") - 2)) ### "format()" the 'VarCorr' matrix of the random effects -- for ### show()ing ### Borrowed from lme4/R/lmer.R with minor modifications. { recorr <- lapply(varc, attr, "correlation") reStdDev <- lapply(varc, attr, "stddev") reLens <- unlist(lapply(reStdDev, length)) nr <- sum(reLens) reMat <- array('', c(nr, 4), list(rep.int('', nr), c("Groups", "Name", "Variance", "Std.Dev."))) reMat[1+cumsum(reLens)-reLens, 1] <- names(reLens) reMat[,2] <- unlist(lapply(varc, colnames)) reMat[,3] <- format(unlist(reStdDev)^2, digits = digits) reMat[,4] <- format(unlist(reStdDev), digits = digits) if(any(reLens > 1)) { maxlen <- max(reLens) corr <- do.call("rbind", lapply(recorr, function(x, maxlen) { if(is.null(x)) return("") x <- as(x, "matrix") cc <- format(round(x, 3), nsmall = 3) cc[!lower.tri(cc)] <- "" nr <- dim(cc)[1] if (nr >= maxlen) return(cc) cbind(cc, matrix("", nr, maxlen-nr)) }, maxlen)) colnames(corr) <- c("Corr", rep.int("", maxlen - 1)) cbind(reMat, corr) } else reMat } varcov <- function(object, format=FALSE, digits=max(3, getOption("digits") - 2), ...) ### VarCorr method for model environments - should be the same for ### fitted model objects. { ## Compute variance-covariance matrices of the random effects. res <- lapply(object$ST, function(st) { ## Variance-covariance matrix for the random effects: VC <- tcrossprod(st) ## Standard deviations: stddev <- sqrt(diag(VC)) corr <- t(VC / stddev)/stddev attr(VC, "stddev") <- stddev ## correlation: if(NCOL(st) > 1) { diag(corr) <- 1 attr(VC, "correlation") <- corr } VC }) names(res) <- names(object$dims$nlev.re) if(format) noquote(formatVC(res, digits=digits)) else res } # VarCorr <- function(x, ...) UseMethod("VarCorr") VarCorr.clmm <- function(x, ...) varcov(x, ...) print.clmm <- function(x, digits = max(3, getOption("digits") - 3), ...) { if(x$nAGQ >= 2) cat(paste("Cumulative Link Mixed Model fitted with the adaptive", "Gauss-Hermite \nquadrature approximation with", x$nAGQ ,"quadrature points"), "\n\n") else if(x$nAGQ <= -1) cat(paste("Cumulative Link Mixed Model fitted with the", "Gauss-Hermite \nquadrature approximation with", abs(x$nAGQ) ,"quadrature points"), "\n\n") else cat("Cumulative Link Mixed Model fitted with the Laplace approximation\n", fill=TRUE) cat("formula:", deparse(x$formula), fill=TRUE) if(!is.null(data.name <- x$call$data)) cat("data: ", deparse(data.name), fill=TRUE) if(!is.null(x$call$subset)) cat("subset: ", deparse(x$call$subset), fill=TRUE) cat("\n") print(x$info, row.names=FALSE, right=FALSE) cat("\nRandom effects:\n") print(formatVC(varcov(x), digits=digits), quote=FALSE, ...) nlev.char <- paste(names(x$dims$nlev.gf), " ", x$dims$nlev.gf, sep="", collapse=", ") cat("Number of groups: ", nlev.char, "\n") if(length(x$beta)) { cat("\nCoefficients:\n") print(x$beta, digits=digits, ...) } else { cat("\nNo Coefficients\n") } if(length(x$alpha) > 0) { cat("\nThresholds:\n") print(x$alpha, digits=digits, ...) 
} if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") return(invisible(x)) } vcov.clmm <- function(object, ...) vcov.clm(object, method="Cholesky") summary.clmm <- function(object, correlation = FALSE, ...) { if(is.null(object$Hessian)) stop("Model needs to be fitted with Hess = TRUE") nfepar <- object$dims$nfepar coef <- matrix(0, nfepar, 4, dimnames = list(names(object$coefficients[1:nfepar]), c("Estimate", "Std. Error", "z value", "Pr(>|z|)"))) coef[, 1] <- object$coefficients[1:nfepar] vc <- try(vcov(object), silent = TRUE) if(inherits(vc, "try-error")) { warning("Variance-covariance matrix of the parameters is not defined") coef[, 2:4] <- NaN if(correlation) warning("Correlation matrix is unavailable") object$condHess <- NaN } else { coef[, 2] <- sd <- sqrt(diag(vc)[1:nfepar]) ## Cond is Inf if Hessian contains NaNs: object$condHess <- if(any(is.na(object$Hessian))) Inf else with(eigen(object$Hessian, only.values = TRUE), abs(max(values) / min(values))) coef[, 3] <- coef[, 1]/coef[, 2] coef[, 4] <- 2 * pnorm(abs(coef[, 3]), lower.tail=FALSE) if(correlation) ## { ## sd <- sqrt(diag(vc)) object$correlation <- cov2cor(vc) ## (vc / sd) / rep(sd, rep(object$edf, object$edf)) } object$info$cond.H <- formatC(object$condHess, digits=1, format="e") object$coefficients <- coef class(object) <- "summary.clmm" return(object) } print.summary.clmm <- function(x, digits = max(3, getOption("digits") - 3), signif.stars = getOption("show.signif.stars"), ...) { if(x$nAGQ >= 2) cat(paste("Cumulative Link Mixed Model fitted with the adaptive", "Gauss-Hermite \nquadrature approximation with", x$nAGQ ,"quadrature points"), "\n\n") else if(x$nAGQ <= -1) cat(paste("Cumulative Link Mixed Model fitted with the", "Gauss-Hermite \nquadrature approximation with", abs(x$nAGQ) ,"quadrature points"), "\n\n") else cat("Cumulative Link Mixed Model fitted with the Laplace approximation\n", fill=TRUE) cat("formula:", deparse(x$formula), fill=TRUE) if(!is.null(data.name <- x$call$data)) cat("data: ", deparse(data.name), fill=TRUE) if(!is.null(x$call$subset)) cat("subset: ", deparse(x$call$subset), fill=TRUE) cat("\n") print(x$info, row.names=FALSE, right=FALSE) cat("\nRandom effects:\n") print(formatVC(varcov(x), digits=digits), quote=FALSE, ...) nlev.char <- paste(names(x$dims$nlev.gf), " ", x$dims$nlev.gf, sep="", collapse=", ") cat("Number of groups: ", nlev.char, "\n") nbeta <- length(x$beta) nalpha <- length(x$alpha) if(nbeta > 0) { cat("\nCoefficients:\n") printCoefmat(x$coefficients[nalpha + 1:nbeta, , drop=FALSE], digits=digits, signif.stars=signif.stars, has.Pvalue=TRUE, ...) } else { cat("\nNo Coefficients\n") } if(nalpha > 0) { ## always true cat("\nThreshold coefficients:\n") printCoefmat(x$coefficients[seq_len(nalpha), -4, drop=FALSE], digits=digits, has.Pvalue=FALSE, signif.stars=FALSE, ...) } if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") if(!is.null(correl <- x$correlation)) { cat("\nCorrelation of Coefficients:\n") ll <- lower.tri(correl) correl[ll] <- format(round(correl[ll], digits)) correl[!ll] <- "" print(correl[-1, -ncol(correl)], quote = FALSE, ...) } return(invisible(x)) } logLik.clmm <- function(object, ...) structure(object$logLik, df = object$edf, class = "logLik") extractAIC.clmm <- function(fit, scale = 0, k = 2, ...) { edf <- fit$edf c(edf, -2*fit$logLik + k * edf) } nobs.clmm <- function(object, ...) object$dims$nobs edf.clmm <- function(object, ...) object$dims$edf ## anova.clmm <- function(object, ...) ## anova.clm(object, ...) 
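## A minimal usage sketch for the clmm methods above (the 'wine' data ships
## with the package; otherwise names are illustrative):
##   fmm <- clmm(rating ~ temp + contact + (1|judge), data = wine)
##   VarCorr(fmm)  ## list of random-effects variance-covariance matrices
##   summary(fmm)  ## Wald table; needs the Hessian, cf. the Hess = TRUE
##                 ## requirement checked in summary.clmm above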
anova.clmm <- function(object, ...) { ### This essentially calls anova.clm(object, ...), but the names of ### the models were not displayed correctly in the printed output ### unless the following dodge is enforced. mc <- match.call() args <- as.list(mc) Call <- as.call(c(list(quote(anova.clm)), args[-1])) ff <- environment(formula(object)) pf <- parent.frame() ## save parent frame in case we need it sf <- sys.frames()[[1]] ff2 <- environment(object) res <- tryCatch(eval(Call, envir=pf), error=function(e) { tryCatch(eval(Call, envir=ff), error=function(e) { tryCatch(eval(Call, envir=ff2), error=function(e) { tryCatch(eval(Call, envir=sf), error=function(e) { "error" })})})}) if((is.character(res) && res == "error")) stop("Unable to evaluate models.") res } logLik.clmm <- function(object, ...) structure(object$logLik, df = object$edf, class = "logLik") extractAIC.clmm <- function(fit, scale = 0, k = 2, ...) { edf <- fit$edf c(edf, -2*fit$logLik + k * edf) } model.matrix.clmm <- function(object, type = c("design", "B"), ...) { type <- match.arg(type) mf <- try(model.frame(object), silent=TRUE) if(inherits(mf, "try-error")) stop("Cannot extract model.matrix: refit model with 'model=TRUE'?") if(type == "design") { Terms <- terms(object) ans <- model.matrix(Terms, mf) } else { ## if type == "B": stop("type = 'B' not yet implemented") } return(ans) } ordinal/R/AO.R0000644000176200001440000000475014533321514012534 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## [pdg]AO functions for the Aranda-Ordaz distribution. Here gAO is ## the gradient of the density function, dAO. The AO distribution is ## used as a flexible link function in clm2() and clmm2(). pAOR <- function(q, lambda, lower.tail = TRUE) { if(lambda < 1e-6) stop("'lambda' has to be positive. lambda = ", lambda, " was supplied") p <- 1 - (lambda * exp(q) + 1)^(-1/lambda) if(!lower.tail) 1 - p else p } pAO <- function(q, lambda, lower.tail = TRUE) .C("pAO_C", q = as.double(q), length(q), as.double(lambda[1]), as.integer(lower.tail), NAOK = TRUE)$q dAOR <- function(eta, lambda, log = FALSE) { ### exp(eta) * (lambda * exp(eta) + 1)^(-1-1/lambda) stopifnot(length(lambda) == 1 && length(log) == 1) if(lambda < 1e-6) stop("'lambda' has to be positive. 
lambda = ", lambda, " was supplied") log.d <- eta - (1 + 1/lambda) * log(lambda * exp(eta) + 1) if(!log) exp(log.d) else log.d } dAO <- function(eta, lambda, log = FALSE) { stopifnot(length(lambda) == 1 && length(log) == 1) .C("dAO_C", eta = as.double(eta), length(eta), as.double(lambda), as.integer(log), NAOK = TRUE)$eta } gAOR <- function(eta, lambda) { stopifnot(length(lambda) == 1) lex <- lambda * exp(eta) dAO(eta, lambda) * (1 - (1 + 1/lambda) * lex/(1 + lex)) } gAO <- function(eta, lambda) { stopifnot(length(lambda) == 1) .C("gAO_C", eta = as.double(eta), length(eta), as.double(lambda[1]), NAOK = TRUE)$eta } ordinal/R/clm2.R0000644000176200001440000015523214533321514013074 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## An alternate (and older) implementation of CLMs in clm2(). The new ## and recommended implementation is available in clm(), cf. ./R/clm.R clm2.control <- function(method = c("ucminf", "Newton", "nlminb", "optim", "model.frame"), ..., convTol = 1e-4, trace = 0, maxIter = 100, gradTol = 1e-5, maxLineIter = 10) { method <- match.arg(method) ctrl <- if(method == "Newton") list(convTol=convTol, trace=trace, maxIter=maxIter, gradTol=gradTol, maxLineIter=maxLineIter) else list(trace = abs(trace), ...) if(!all(is.numeric(c(maxIter, gradTol, maxLineIter, convTol)))) stop("maxIter, gradTol, maxLineIter, convTol should all be numeric") if(convTol <= 0) stop("convTol should be > 0") if(method == "ucminf" && !"grtol" %in% names(ctrl)) ctrl$grtol <- gradTol ## if(method == "ucminf" && convTol > ctrl$grtol) ## stop("convTol should be <= grtol/gradTol") ## if(method == "Newton" && convTol > gradTol) ## stop("convTol should be <= gradTol") list(method = method, convTol = convTol, ctrl = ctrl) } newRho <- function(parent, XX, X, Z, y, weights, Loffset, Soffset, ## OK link, lambda, theta, threshold, Hess, control) ### OPTION: Could we remove the theta argument? 
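### (the 'theta' argument appears unused in the body below, which is
### presumably what the OPTION note above is getting at)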
{ rho <- new.env(parent = parent) rho$X <- X rho$dnX <- dimnames(X) dimnames(rho$X) <- NULL rho$Z <- Z rho$dnZ <- dimnames(Z) dimnames(rho$Z) <- NULL rho$weights <- weights rho$Loffset <- Loffset rho$expSoffset <- rho$sigma <- exp(Soffset) rho$Hess <- ifelse(Hess, 1L, 0L) rho$method <- control$method rho$convTol <- control$convTol rho$ctrl <- control$ctrl rho$pfun <- switch(link, logistic = plogis, probit = pnorm, cloglog = function(x) pgumbel(x, max=FALSE), cauchit = pcauchy, loglog = pgumbel, "Aranda-Ordaz" = function(x, lambda) pAO(x, lambda), "log-gamma" = function(x, lambda) plgamma(x, lambda)) rho$dfun <- switch(link, logistic = dlogis, probit = dnorm, cloglog = function(x) dgumbel(x, max=FALSE), cauchit = dcauchy, loglog = dgumbel, "Aranda-Ordaz" = function(x, lambda) dAO(x, lambda), "log-gamma" = function(x, lambda) dlgamma(x, lambda)) rho$gfun <- switch(link, logistic = glogis, probit = function(x) -x * dnorm(x), cloglog = function(x) ggumbel(x, max=FALSE), cloglog = ggumbel, cauchit = gcauchy, "Aranda-Ordaz" = function(x, lambda) gAO(x, lambda), ## shouldn't happen "log-gamma" = function(x, lambda) glgamma(x, lambda) ) rho$link <- link rho$linkInt <- switch(link, logistic = 1L, probit = 2L, cloglog = 3L, loglog = 4L, cauchit = 5L, "Aranda-Ordaz" = 6L, "log-gamma" = 7L) rho$estimLambda <- ifelse(link %in% c("Aranda-Ordaz", "log-gamma") && is.null(lambda), 1L, 0L) rho$nlambda <- 0L rho$lambda <- if(!is.null(lambda)) lambda else 1 if(link %in% c("Aranda-Ordaz", "log-gamma")) rho$nlambda <- 1L if(rho$estimLambda > 0 & rho$link == "Aranda-Ordaz" & rho$method != "nlminb"){ message("Changing to nlminb optimizer to accommodate optimization with bounds") m <- match( names(rho$ctrl), "grtol", 0) rho$ctrl <- rho$ctrl[!m] rho$method <- "nlminb" } if(rho$method == "nlminb") { rho$limitUp <- Inf rho$limitLow <- -Inf } rho$n <- n <- length(y) rho$p <- ifelse(missing(X), 0, ncol(X)) rho$k <- ifelse(missing(Z), 0, ncol(Z)) rho$y <- y rho$threshold <- threshold rho$ncolXX <- ncol(XX) rho$dnXX <- dimnames(XX) rho$lev <- levels(y) rho$ntheta <- nlevels(y) - 1 rho$B2 <- 1 * (col(matrix(0, n, rho$ntheta + 1)) == c(unclass(y))) ### Setting elements of o[12] to [+-]Inf cause problems in ### getGnll and clmm-related functions because 1) 0*Inf = NaN, while ### 0*large.value = 0, so several computations have to be handled ### specially and 2) Inf-values are not by default allowed in .C calls ### and all specials would have to be handled separately. 
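### (e.g. 0 * Inf is NaN in R while 0 * 1e5 is 0, so the finite stand-in
### 'inf.value' used below keeps zero-weight terms from poisoning the
### likelihood and stays legal in the .C interface)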
## o1 <- B2[, rho$ntheta + 1, drop = TRUE] ## o1[o1 == 1] <- Inf ## rho$o1 <- o1 - rho$Loffset ## o2 <- B2[,1, drop = TRUE] ## o2[o2 == 1] <- -Inf ## rho$o2 <- o2 - rho$Loffset inf.value <- 1e5 rho$o1 <- c(inf.value * rho$B2[, rho$ntheta + 1]) - rho$Loffset rho$o2 <- c(-inf.value * rho$B2[,1]) - rho$Loffset rho$B1 <- rho$B2[,-(rho$ntheta + 1), drop = FALSE] rho$B2 <- rho$B2[,-1, drop = FALSE] makeThresholds2(rho, threshold) rho$B1 <- rho$B1 %*% rho$tJac rho$B2 <- rho$B2 %*% rho$tJac rho$xiNames <- rho$alphaNames rho$nxi <- rho$nalpha * rho$ncolXX if(rho$ncolXX > 1) { ## test actually not needed rho$xiNames <- paste(rep(rho$alphaNames, rho$ncolXX), ".", rep(colnames(XX), each=rho$nalpha), sep="") LL1 <- lapply(1:rho$ncolXX, function(x) rho$B1 * XX[,x]) rho$B1 <- do.call(cbind, LL1) LL2 <- lapply(1:rho$ncolXX, function(x) rho$B2 * XX[,x]) rho$B2 <- do.call(cbind, LL2) } if(rho$p > 0) { rho$B1 <- cbind(rho$B1, -X) rho$B2 <- cbind(rho$B2, -X) } dimnames(rho$B1) <- NULL dimnames(rho$B2) <- NULL return(rho) } # populates the rho environment setStart <- function(rho) ## Ok { ## set starting values in the rho environment ## try logistic/probit regression on 'middle' cut q1 <- max(1, rho$ntheta %/% 2) y1 <- (c(unclass(rho$y)) > q1) x <- cbind(Intercept = rep(1, rho$n), rho$X) fit <- switch(rho$link, "logistic"= glm.fit(x, y1, rho$weights, family = binomial(), offset = rho$Loffset), "probit" = glm.fit(x, y1, rho$weights, family = binomial("probit"), offset = rho$Loffset), ## this is deliberate, a better starting point "cloglog" = glm.fit(x, y1, rho$weights, family = binomial("probit"), offset = rho$Loffset), "loglog" = glm.fit(x, y1, rho$weights, family = binomial("probit"), offset = rho$Loffset), "cauchit" = glm.fit(x, y1, rho$weights, family = binomial("cauchit"), offset = rho$Loffset), "Aranda-Ordaz" = glm.fit(x, y1, rho$weights, family = binomial("probit"), offset = rho$Loffset), "log-gamma" = glm.fit(x, y1, rho$weights, family = binomial("probit"), offset = rho$Loffset)) if(!fit$converged) stop("attempt to find suitable starting values failed") coefs <- fit$coefficients if(any(is.na(coefs))) { warning("design appears to be rank-deficient, so dropping some coefs") keep <- !is.na(coefs) coefs <- coefs[keep] rho$X <- rho$X[, keep[-1], drop = FALSE] rho$dnX[[2]] <- rho$dnX[[2]][keep[-1]] rho$B1 <- rho$B1[, c(rep(TRUE, rho$nxi), keep[-1]), drop = FALSE] rho$B2 <- rho$B2[, c(rep(TRUE, rho$nxi), keep[-1]), drop = FALSE] rho$p <- ncol(rho$X) } ## Intercepts: spacing <- qlogis((1:rho$ntheta)/(rho$ntheta+1)) # just a guess if(rho$link != "logit") spacing <- spacing/1.7 ## if(rho$threshold == "flexible") # default alphas <- -coefs[1] + spacing - spacing[q1] if(rho$threshold == "symmetric" && rho$ntheta %% 2) ## ntheta odd alphas <- c(alphas[q1+1],cumsum(rep(spacing[q1+2], rho$nalpha-1))) if(rho$threshold == "symmetric" && !rho$ntheta %% 2) ## ntheta even alphas <- c(alphas[q1:(q1+1)], cumsum(rep(spacing[q1+1], rho$nalpha-2))) if(rho$threshold == "symmetric2" && rho$ntheta %% 2) ## ntheta odd alphas <- cumsum(rep(spacing[q1+2], rho$nalpha-1)) if(rho$threshold == "symmetric2" && !rho$ntheta %% 2) ## ntheta even alphas <- cumsum(rep(spacing[q1+1], rho$nalpha-2)) if(rho$threshold == "equidistant") alphas <- c(alphas[1], mean(diff(spacing))) ## initialize nominal effects to zero: if(rho$ncolXX > 1) { xi <- c(alphas, rep(rep(0, rho$nalpha), rho$ncolXX-1)) stopifnot(length(xi) == rho$nalpha * rho$ncolXX)} else xi <- alphas if(rho$estimLambda > 0){ rho$lambda <- 1 names(rho$lambda) <- "lambda" } start <- c(xi, 
coefs[-1], rep(0, rho$k), rep(1, rho$estimLambda)) names(start) <- NULL rho$start <- rho$par <- start } getPar <- function(rho) rho$par ## OK getNll <- function(rho, par) { ## ok if(!missing(par)) rho$par <- par with(rho, { if(estimLambda > 0) lambda <- par[nxi + p + k + 1:estimLambda] sigma <- if(k > 0) expSoffset * exp(drop(Z %*% par[nxi+p + 1:k])) else expSoffset eta1 <- (drop(B1 %*% par[1:(nxi + p)]) + o1)/sigma eta2 <- (drop(B2 %*% par[1:(nxi + p)]) + o2)/sigma pr <- if(nlambda) pfun(eta1, lambda) - pfun(eta2, lambda) else pfun(eta1) - pfun(eta2) if(all(is.finite(pr)) && all(pr > 0)) -sum(weights * log(pr)) else Inf }) } getGnll <- function(rho, par) { ## ok if(!missing(par)) rho$par <- par with(rho, { if(estimLambda > 0) lambda <- par[nxi + p + k + 1:estimLambda] sigma <- if(k > 0) expSoffset * exp(drop(Z %*% par[nxi+p + 1:k])) else expSoffset eta1 <- (drop(B1 %*% par[1:(nxi + p)]) + o1)/sigma eta2 <- (drop(B2 %*% par[1:(nxi + p)]) + o2)/sigma if(nlambda) { pr <- pfun(eta1, lambda) - pfun(eta2, lambda) p1 <- dfun(eta1, lambda) p2 <- dfun(eta2, lambda) } else { pr <- pfun(eta1) - pfun(eta2) p1 <- dfun(eta1) p2 <- dfun(eta2) } prSig <- pr * sigma ## eta1 * p1 is complicated because in theory eta1 contains ## Inf(-Inf) where p1 contains 0 and 0 * Inf = NaN... ## eta.p1 <- ifelse(p1 == 0, 0, eta1 * p1) ## eta.p2 <- ifelse(p2 == 0, 0, eta2 * p2) gradSigma <- ## if(k > 0) crossprod(Z, weights * (eta.p1 - eta.p2)/pr) if(k > 0) crossprod(Z, weights * (eta1 * p1 - eta2 * p2)/pr) else numeric(0) gradThetaBeta <- if(nxi > 0) -crossprod((B1*p1 - B2*p2), weights/prSig) else -crossprod((X * (p2 - p1)), weights/prSig) grad <- ## if (all(is.finite(pr)) && all(pr > 0)) ## c(gradThetaBeta, gradSigma) ## else rep(Inf, nxi + p + k) c(gradThetaBeta, gradSigma) }) if(rho$estimLambda > 0) c(rho$grad, grad.lambda(rho, rho$lambda, rho$link)) else rho$grad } getHnll <- function(rho, par) { ## ok if(!missing(par)) rho$par <- par with(rho, { eta1 <- drop(B1 %*% par[1:(nxi + p)]) + o1 eta2 <- drop(B2 %*% par[1:(nxi + p)]) + o2 pr <- pfun(eta1) - pfun(eta2) p1 <- dfun(eta1) p2 <- dfun(eta2) g1 <- gfun(eta1) g2 <- gfun(eta2) wtpr <- weights/pr dS.psi <- -crossprod(B1 * g1*wtpr, B1) + crossprod(B2 * g2*wtpr, B2) dpi.psi <- B1 * p1 - B2 * p2 ### dS.pi <- dpi.psi * wtpr/pr if (all(pr > 0)) dS.psi + crossprod(dpi.psi, (dpi.psi * wtpr/pr)) else array(NA, dim = c(nxi + p, nxi + p)) }) } .negLogLik <- function(rho) { ## negative log-likelihood ## OK with(rho, { eta1 <- drop(B1 %*% par[1:(nxi + p)]) + o1 eta2 <- drop(B2 %*% par[1:(nxi + p)]) + o2 pr <- pfun(eta1) - pfun(eta2) if (all(pr > 0)) -sum(weights * log(pr)) else Inf }) } .grad <- function(rho) { ## gradient of the negative log-likelihood ## OK with(rho, { p1 <- dfun(eta1) p2 <- dfun(eta2) wtpr <- weights/pr if (all(pr > 0)) -crossprod((B1 * p1 - B2 * p2), wtpr) else rep(NA, nalpha + p) }) } .hessian <- function(rho) { ## hessian of the negative log-likelihood ## OK with(rho, { dS.psi <- crossprod(B1 * gfun(eta1)*wtpr, B1) - crossprod(B2 * gfun(eta2)*wtpr, B2) dpi.psi <- B1 * p1 - B2 * p2 if (all(pr > 0)) -dS.psi + crossprod(dpi.psi, (dpi.psi * wtpr/pr)) else array(NA, dim = c(nxi+p, nxi+p)) }) } fitNR <- function(rho) ## OK { ctrl <- rho$ctrl stepFactor <- 1 innerIter <- 0 conv <- 1 ## Convergence flag message <- "iteration limit reached" rho$negLogLik <- .negLogLik(rho) if(rho$negLogLik == Inf) stop("Non-finite log-likelihood at starting value") rho$gradient <- .grad(rho) maxGrad <- max(abs(rho$gradient)) if(ctrl$trace > 0) Trace(iter=0, stepFactor, 
rho$negLogLik, maxGrad, rho$par, first=TRUE) ## Newton-Raphson algorithm: for(i in 1:ctrl$maxIter) { if(maxGrad < ctrl$gradTol) { message <- "max|gradient| < tol, so current iterate is probably solution" if(ctrl$trace > 0) cat("\nOptimizer converged! ", "max|grad|:", maxGrad, message, fill = TRUE) conv <- 0 break } rho$Hessian <- .hessian(rho) ## step <- .Call("La_dgesv", rho$Hessian, rho$gradient, .Machine$double.eps, ## PACKAGE = "base") ## solve H*step = g for 'step' step <- as.vector(solve(rho$Hessian, rho$gradient)) rho$par <- rho$par - stepFactor * step negLogLikTry <- .negLogLik(rho) lineIter <- 0 ## simple line search, i.e. step halving: while(negLogLikTry > rho$negLogLik) { stepFactor <- stepFactor/2 rho$par <- rho$par + stepFactor * step negLogLikTry <- .negLogLik(rho) lineIter <- lineIter + 1 if(ctrl$trace > 0) Trace(i+innerIter, stepFactor, rho$negLogLik, maxGrad, rho$par, first=FALSE) if(lineIter > ctrl$maxLineIter){ message <- "step factor reduced below minimum" conv <- 2 break } innerIter <- innerIter + 1 } rho$negLogLik <- negLogLikTry rho$gradient <- .grad(rho) maxGrad <- max(abs(rho$gradient)) if(ctrl$trace > 0) Trace(iter=i+innerIter, stepFactor, rho$negLogLik, maxGrad, rho$par, first=FALSE) stepFactor <- min(1, 2 * stepFactor) } if(conv > 0) if(ctrl$trace > 0) cat(message, fill = TRUE) ## Save info rho$optRes$niter <- c(outer = i, inner = innerIter) rho$logLik <- -rho$negLogLik rho$maxGradient <- maxGrad rho$gradient <- as.vector(rho$gradient) rho$Hessian <- .hessian(rho) rho$optRes$message <- message rho$optRes$convergence <- conv } fitCLM <- function(rho) { ## OK if(rho$method == "Newton") { if(rho$k != 0) stop("Newton scheme not implemented for models with scale") if(rho$ncolXX > 1) stop("Newton scheme not implemented for models with nominal effects") if(rho$link %in% c("Aranda-Ordaz", "log-gamma")) stop("Newton scheme not implemented for models with ", rho$link, " link function") fitNR(rho) return(invisible()) } optRes <- switch(rho$method, 
names(xi) <- xiNames thetaNames <- paste(lev[-length(lev)], lev[-1], sep="|") Alpha <- Theta <- matrix(par[1:nxi], nrow=ncolXX, byrow=TRUE) Theta <- t(apply(Theta, 1, function(x) c(tJac %*% x))) if(ncolXX > 1){ dimnames(Theta) <- list(dnXX[[2]], thetaNames) dimnames(Alpha) <- list(dnXX[[2]], alphaNames) } else { Theta <- c(Theta) Alpha <- c(Alpha) names(Theta) <- thetaNames names(Alpha) <- alphaNames } coefficients <- xi } else coefficients <- numeric(0) if(p > 0) { beta <- par[nxi + 1:p] names(beta) <- dnX[[2]] coefficients <- c(coefficients, beta) } if(k > 0) { zeta <- par[nxi+p + 1:k] names(zeta) <- dnZ[[2]] coefficients <- c(coefficients, zeta) } if(estimLambda > 0) { names(lambda) <- "lambda" coefficients <- c(coefficients, lambda) } names(gradient) <- names(coefficients) edf <- p + nxi + k + estimLambda nobs <- sum(weights) fitted.values <- pr df.residual <- nobs - edf if(exists("Hessian", inherits=FALSE)) { dimnames(Hessian) <- list(names(coefficients), names(coefficients)) } }) res <- as.list(rho) keepNames <- c("df.residual", "fitted.values", "edf", "start", "beta", "coefficients", "zeta", "Alpha", "Theta", "xi", "lambda", "convergence", "Hessian", "convTol", "gradient", "optRes", "logLik", "call", "scale", "location", "nominal", "method", "y", "lev", "nobs", "threshold", "estimLambda", "link", "contrasts", "na.action") m <- match(keepNames, names(res), 0) res <- res[m] res } clm2 <- ## OK function(location, scale, nominal, data, weights, start, subset, na.action, contrasts, Hess = TRUE, model = TRUE, link = c("logistic", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), lambda, doFit = TRUE, control, threshold = c("flexible", "symmetric", "equidistant"), ...) { L <- match.call(expand.dots = FALSE) if(missing(location)) stop("Model needs a specification of the location") if(missing(lambda)) lambda <- NULL if(missing(contrasts)) contrasts <- NULL link <- match.arg(link) if(!(link %in% c("Aranda-Ordaz", "log-gamma")) & !is.null(lambda)){ warning("lambda ignored with link ", link) lambda <- NULL } if(!is.null(lambda) & length(lambda) > 1) { lambda <- lambda[1] warning("lambda is ", length(lambda), " long. Only the first element ", lambda[1], " is used") } if(!is.null(lambda) & link == "Aranda-Ordaz") if(lambda < 1e-6) stop("lambda has to be positive and lambda < 1e-6 not allowed for numerical reasons. lambda = ", lambda, " was supplied.") if (missing(control)) control <- clm2.control(...) 
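## A hedged usage sketch for clm2() (not run; uses the package's wine data,
## assuming its ordered response 'rating' and factors 'temp' and 'contact'):
## fm <- clm2(rating ~ contact, scale = ~ temp, data = wine,
##            link = "logistic", Hess = TRUE)
## summary(fm)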
if(!setequal(names(control), c("method", "convTol", "ctrl"))) stop("specify 'control' via clm2.control()") if (missing(data)) L$data <- environment(location) if (is.matrix(eval.parent(L$data))) L$data <- as.data.frame(L$data) ### Collect variables in location, scale and nominal formulae in a ### single formula, evaluate the model.frame and get index of row ### names for the rows to keep in the individual model.frames: m <- match(c("location", "scale", "nominal"), names(L), 0) F <- lapply(as.list(L[m]), eval.parent) ## evaluate in parent ## frame to allow 'f <- formula(sureness ~ prod); clm2(f, ...)' varNames <- unique(unlist(lapply(F, all.vars))) longFormula <- eval(parse(text = paste("~", paste(varNames, collapse = "+")))[1]) m <- match(c("location", "data", "subset", "weights", "na.action"), names(L), 0) L0 <- L[c(1, m)] if(!missing(scale) || !missing(nominal)) L0$location <- longFormula L0$drop.unused.levels <- TRUE L0[[1]] <- as.name("model.frame") names(L0)[names(L0) == "location"] <- "formula" L0 <- eval.parent(L0) m <- match(c("location", "scale", "nominal", "data", "subset", "weights", "na.action"), names(L), 0) L <- L[c(1, m)] L$drop.unused.levels <- TRUE L[[1]] <- as.name("model.frame") S <- L ## L: Location, S: Scale L$scale <- L$nominal <- NULL names(L)[names(L) == "location"] <- "formula" L <- eval.parent(L) keep <- match(rownames(L0), rownames(L)) L <- L[keep, , drop = FALSE] TermsL <- attr(L, "terms") ### format response: y <- model.response(L) if(!is.factor(y)) stop("response needs to be a factor") ### format thresholds: threshold <- match.arg(threshold) ### format location: X <- model.matrix(TermsL, L, contrasts) Xint <- match("(Intercept)", colnames(X), nomatch = 0) if (Xint > 0) X <- X[, -Xint, drop = FALSE] else warning("an intercept is needed and assumed in the location") n <- nrow(X) if(is.null(wt <- model.weights(L))) wt <- rep(1, n) if(is.null(Loffset <- model.offset(L))) Loffset <- rep(0, n) ### Format nominal: if(!missing(nominal)) { Nom <- S Nom$location <- Nom$scale <- NULL names(Nom)[names(Nom) == "nominal"] <- "formula" Nom <- eval.parent(Nom) Nom <- Nom[match(rownames(L0), rownames(Nom)), ,drop=FALSE] TermsNom <- attr(Nom, "terms") XX <- model.matrix(TermsNom, Nom)## , contrasts) ### Not allowing other than treatment contrasts in nominal if(is.null(Noffset <- model.offset(Nom))) Noffset <- rep(0, n) Nint <- match("(Intercept)", colnames(XX), nomatch = 0) if(Nint != 1) stop("An intercept is needed in the nominal formula") ### Are there any requirements about the presence of an ### intercept in the nominal formula? } else XX <- array(1, dim=c(n, 1)) ### format scale: if(!missing(scale)) { S$location <- S$nominal <- NULL names(S)[names(S) == "scale"] <- "formula" S <- eval.parent(S) S <- S[match(rownames(L0), rownames(S)), ,drop=FALSE] TermsS <- attr(S, "terms") ### Should contrasts be allowed for the scale? Z <- model.matrix(TermsS, S, contrasts) Zint <- match("(Intercept)", colnames(Z), nomatch = 0) if(Zint > 0) Z <- Z[, -Zint, drop = FALSE] else warning("an intercept is needed and assumed in the scale") if(is.null(Soffset <- model.offset(S))) Soffset <- rep(0, n) if(ncol(Z) > 0 && n != nrow(Z)) # This shouldn't happen stop("Model needs same dataset in location and scale") } else if(missing(scale) && !is.factor(y)){ Z <- array(1, dim = c(n, 1)) Soffset <- rep(0, n) } else { Z <- array(dim = c(n, 0)) Soffset <- rep(0, n) } ### return model.frame? 
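## When control$method == "model.frame" (checked just below), fitting is
## skipped and the assembled model frames are returned instead, e.g.
## (not run; assuming clm2.control() admits this method, as the check
## below implies):
## clm2(rating ~ temp, data = wine,
##      control = clm2.control(method = "model.frame"))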
if(control$method == "model.frame") { mf <- list(location = L) if(!missing(scale)) mf$scale <- S if(!missing(nominal)) mf$nominal <- Nom return(mf) } ### initialize and populate rho environment: rho <- newRho(parent.frame(), XX = XX, X=X, Z=Z, y=y, weights=wt, Loffset=Loffset, Soffset=Soffset, link=link, lambda = lambda, threshold=threshold, Hess = Hess, control = control) ### get starting values: if(missing(start)) setStart(rho) else rho$start <- rho$par <- start if(rho$estimLambda > 0 & rho$link == "Aranda-Ordaz") rho$limitLow <- c(rep(-Inf, length(rho$par)-1), 1e-5) if(length(rho$start) != with(rho, nxi + p + k + estimLambda)) stop("'start' is not of the correct length") ### OPTION: Could consider better check of increasing thresholds when ### ncol(XX) > 0 if(ncol(XX) == 0) { if(!all(diff(c(rho$tJac %*% rho$start[1:rho$nalpha])) > 0)) stop("Threshold starting values are not of increasing size") } if(!getNll(rho) < Inf) stop("Non-finite log-likelihood at starting values") if(model) { rho$location <- L if(!missing(scale)) rho$scale <- S if(!missing(nominal)) rho$nominal <- Nom } ### fit the model: if(!doFit) return(rho) fitCLM(rho) res <- finalizeRho(rho) ### add to output: res$call <- match.call() res$na.action <- attr(L0, "na.action") res$contrasts <- contrasts class(res) <- "clm2" res } print.clm2 <- function(x, ...) { if(!is.null(cl <- x$call)) { cat("Call:\n") dput(cl, control=NULL) } if(length(x$beta)) { cat("\nLocation coefficients:\n") print(x$beta, ...) } else { cat("\nNo location coefficients\n") } if(length(x$zeta)) { cat("\nScale coefficients:\n") print(x$zeta, ...) } else { cat("\nNo Scale coefficients\n") } if(x$estimLambda > 0) { cat("\nLink coefficient:\n") print(x$lambda) } if(length(x$xi) > 0) { cat("\nThreshold coefficients:\n") print(x$Alpha, ...) if(x$threshold != "flexible") { cat("\nThresholds:\n") print(x$Theta, ...) } } cat("\nlog-likelihood:", format(x$logLik, nsmall=2), "\n") cat("AIC:", format(-2*x$logLik + 2*x$edf, nsmall=2), "\n") if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") invisible(x) } vcov.clm2 <- function(object, ...) { if(is.null(object$Hessian)) { message("\nRe-fitting to get Hessian\n") utils::flush.console() object <- update(object, Hess=TRUE, start=object$coefficients) } dn <- names(object$coefficients) H <- object$Hessian ## To handle NaNs in the Hessian resulting from parameter ## unidentifiability: if(any(His.na <- !is.finite(H))) { H[His.na] <- 0 VCOV <- ginv(H) VCOV[His.na] <- NaN } else VCOV <- ginv(H) structure(VCOV, dimnames = list(dn, dn)) } summary.clm2 <- function(object, digits = max(3, .Options$digits - 3), correlation = FALSE, ...) { if(is.null(object$Hessian)) stop("Model needs to be fitted with Hess = TRUE") coef <- matrix(0, object$edf, 4, dimnames = list(names(object$coefficients), c("Estimate", "Std. 
Error", "z value", "Pr(>|z|)"))) coef[, 1] <- object$coefficients vc <- try(vcov(object), silent = TRUE) if(inherits(vc, "try-error")) { warning("Variance-covariance matrix of the parameters is not defined") coef[, 2:4] <- NaN if(correlation) warning("Correlation matrix is unavailable") object$condHess <- NaN } else { coef[, 2] <- sd <- sqrt(diag(vc)) ## Cond is Inf if Hessian contains NaNs: object$condHess <- if(any(is.na(object$Hessian))) Inf else with(eigen(object$Hessian, only.values = TRUE), abs(max(values) / min(values))) coef[, 3] <- coef[, 1]/coef[, 2] coef[, 4] <- 2*pnorm(abs(coef[, 3]), lower.tail=FALSE) if(correlation) object$correlation <- (vc/sd)/rep(sd, rep(object$edf, object$edf)) } object$coefficients <- coef object$digits <- digits class(object) <- "summary.clm2" object } print.summary.clm2 <- function(x, digits = x$digits, signif.stars = getOption("show.signif.stars"), ...) { if(!is.null(cl <- x$call)) { cat("Call:\n") dput(cl, control=NULL) } coef <- format(round(x$coefficients, digits=digits)) coef[,4] <- format.pval(x$coefficients[, 4]) p <- length(x$beta); nxi <- length(x$xi) k <- length(x$zeta); u <- x$estimLambda if(p > 0) { cat("\nLocation coefficients:\n") print(coef[nxi + 1:p, , drop=FALSE], quote = FALSE, ...) } else { cat("\nNo location coefficients\n") } if(k > 0) { cat("\nScale coefficients:\n") print(coef[(nxi+p+1):(nxi+p+k), , drop=FALSE], quote = FALSE, ...) } else { cat("\nNo scale coefficients\n") } if(x$estimLambda > 0) { cat("\nLink coefficients:\n") print(coef[(nxi+p+k+1):(nxi+p+k+u), , drop=FALSE], quote = FALSE, ...) } if(nxi > 0) { cat("\nThreshold coefficients:\n") print(coef[seq_len(nxi), -4, drop=FALSE], quote = FALSE, ...) } cat("\nlog-likelihood:", format(x$logLik, nsmall=2), "\n") cat("AIC:", format(-2*x$logLik + 2*x$edf, nsmall=2), "\n") cat("Condition number of Hessian:", format(x$condHess, nsmall=2), "\n") if(nzchar(mess <- naprint(x$na.action))) cat("(", mess, ")\n", sep="") if(!is.null(correl <- x$correlation)) { cat("\nCorrelation of Coefficients:\n") ll <- lower.tri(correl) correl[ll] <- format(round(correl[ll], digits)) correl[!ll] <- "" print(correl[-1, -ncol(correl)], quote = FALSE, ...) } invisible(x) } anova.clm2 <- function (object, ..., test = c("Chisq", "none")) { test <- match.arg(test) dots <- list(...) if (length(dots) == 0) stop('anova is not implemented for a single "clm2" object') mlist <- list(object, ...) 
nt <- length(mlist) dflis <- sapply(mlist, function(x) x$df.residual) s <- order(dflis, decreasing = TRUE) mlist <- mlist[s] if (any(!sapply(mlist, inherits, "clm2"))) stop('not all objects are of class "clm2"') ns <- sapply(mlist, function(x) length(x$fitted.values)) if(any(ns != ns[1])) stop("models were not all fitted to the same size of dataset") rsp <- unique(sapply(mlist, function(x) { tmp <- attr(x$location, "terms") class(tmp) <- "formula" paste(tmp[2]) } )) mds <- sapply(mlist, function(x) { tmp1 <- attr(x$location, "terms") class(tmp1) <- "formula" if(!is.null(x$scale)) { tmp2 <- attr(x$scale, "terms") class(tmp2) <- "formula" tmp2 <- tmp2[2] } else tmp2 <- "" if(!is.null(x$nominal)) { tmp3 <- attr(x$nominal, "terms") class(tmp3) <- "formula" tmp3 <- tmp3[2] } else tmp3 <- "" paste(tmp1[3], "|", tmp2, "|", tmp3) } ) dfs <- dflis[s] lls <- sapply(mlist, function(x) -2*x$logLik) tss <- c("", paste(1:(nt - 1), 2:nt, sep = " vs ")) df <- c(NA, -diff(dfs)) x2 <- c(NA, -diff(lls)) pr <- c(NA, 1 - pchisq(x2[-1], df[-1])) out <- data.frame(Model = mds, Resid.df = dfs, '-2logLik' = lls, Test = tss, Df = df, LRtest = x2, Prob = pr) names(out) <- c("Model", "Resid. df", "-2logLik", "Test", " Df", "LR stat.", "Pr(Chi)") if (test == "none") out <- out[, 1:6] class(out) <- c("Anova", "data.frame") attr(out, "heading") <- c("Likelihood ratio tests of cumulative link models\n", paste("Response:", rsp)) out } predict.clm2 <- function(object, newdata, ...) { if(!inherits(object, "clm2")) stop("not a \"clm2\" object") if(missing(newdata)) pr <- object$fitted else { newdata <- as.data.frame(newdata) Terms <- attr(object$location, "terms") m <- model.frame(Terms, newdata, na.action = function(x) x)#, if (!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m) X <- model.matrix(Terms, m, contrasts = object$contrasts) xint <- match("(Intercept)", colnames(X), nomatch=0) if(xint > 0) X <- X[, -xint, drop=FALSE] n <- nrow(X) y <- m[,names(cl)[attr(Terms, "response")]] if(length(object$zeta) > 0) { Terms <- attr(object$scale, "terms") m <- model.frame(Terms, newdata, na.action = function(x) x)#, if (!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m) Z <- model.matrix(Terms, m, contrasts = object$contrasts) zint <- match("(Intercept)", colnames(Z), nomatch=0) if(zint > 0) Z <- Z[, -zint, drop=FALSE] } if(!is.null(object$nominal)) { Terms <- attr(object$nominal, "terms") m <- model.frame(Terms, newdata, na.action = function(x) x)#, if (!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m) XX <- model.matrix(Terms, m, contrasts = object$contrasts) namC <- colnames(XX) } B2 <- 1 * (col(matrix(0, n, nlevels(y))) == unclass(y)) o1 <- c(100 * B2[, nlevels(y)]) o2 <- c(-100 * B2[,1]) B1 <- B2[,-nlevels(y), drop=FALSE] B2 <- B2[,-1, drop=FALSE] locationPar <- c(t(object$Theta)) if(!is.null(object$nominal)) { ncolXX <- ncol(XX) LL1 <- lapply(1:ncolXX, function(x) B1 * XX[,x]) B1 <- do.call(cbind, LL1) LL2 <- lapply(1:ncolXX, function(x) B2 * XX[,x]) B2 <- do.call(cbind, LL2) } if(ncol(X) > 0) { B1 <- cbind(B1, -X) B2 <- cbind(B2, -X) locationPar <- c(locationPar, object$beta) } pfun <- switch(object$link, logistic = plogis, probit = pnorm, cloglog = function(x) pgumbel(x, max=FALSE), ## cloglog = pgumbel, cauchit = pcauchy, loglog = pgumbel, "Aranda-Ordaz" = function(x, lambda) pAO(x, lambda), "log-gamma" = function(x, lambda) plgamma(x, lambda)) sigma <- 1 if(length(object$zeta) > 0) sigma <- sigma * exp(drop(Z %*% object$zeta)) eta1 <- (drop(B1 %*% locationPar) + o1) / 
sigma eta2 <- (drop(B2 %*% locationPar) + o2) / sigma if(object$link %in% c("Aranda-Ordaz", "log-gamma")) pr <- pfun(eta1, object$lambda) - pfun(eta2, object$lambda) else pr <- pfun(eta1) - pfun(eta2) } if(missing(newdata) && !is.null(object$na.action)) pr <- napredict(object$na.action, pr) as.vector(pr) } profile.clm2 <- function(fitted, whichL = seq_len(p), whichS = seq_len(k), lambda = TRUE, alpha = 0.01, maxSteps = 50, delta = LrootMax/10, trace = 0, stepWarn = 8, ...) { rho <- update(fitted, doFit=FALSE) if(rho$estimLambda > 0 & rho$link == "Aranda-Ordaz") rho$limitLow <- c(rep(-Inf, length(rho$par)-2), 1e-5) nxi <- rho$nxi; k <- rho$k; p <- rho$p; X <- rho$X; Z <- rho$Z B1 <- rho$B1; B2 <- rho$B2 sO <- rho$expSoffset; O1 <- rho$o1; O2 <- rho$o2 beta0 <- with(fitted, coefficients[nxi + seq_len(p+k)]) Lnames <- names(beta0[seq_len(p)]) Snames <- names(beta0[p + seq_len(k)]) Pnames <- c(Lnames, Snames) if(is.character(whichL)) whichL <- match(whichL, Lnames) if(is.character(whichS)) whichS <- match(whichS, Snames) nL <- length(whichL); nS <- length(whichS) summ <- summary(fitted) std.err <- summ$coefficients[nxi + seq_len(p+k), "Std. Error"] if(trace < 0) rho$ctrl$trace <- trace <- 1 origLogLik <- fitted$logLik LrootMax <- qnorm(1 - alpha/2) prof <- vector("list", length = nL + nS) names(prof) <- c(paste("loc", Lnames[whichL], sep=".")[seq_len(nL)], paste("scale", Snames[whichS], sep=".")[seq_len(nS)]) for(where in c("loc", "scale")[c(nL>0, nS>0)]) { if(where == "loc") { rho$p <- max(0, p - 1) which <- whichL } if(where == "scale") { which <- whichS rho$o1 <- O1 rho$o2 <- O2 rho$p <- p rho$k <- max(0, k - 1) rho$X <- X if(rho$nxi > 0) { rho$B1 <- B1 rho$B2 <- B2 } } for(i in which) { if(where == "loc") { rho$X <- X[, -i, drop=FALSE] if(nxi > 0) { rho$B1 <- B1[, -(nxi+i), drop=FALSE] rho$B2 <- B2[, -(nxi+i), drop=FALSE] } } else { rho$Z <- Z[, -i, drop=FALSE] i <- i + p } res.i <- c(0, beta0[i]) for(sgn in c(-1, 1)) { if(trace) { message("\nParameter: ", where, ".", c(Lnames, Snames)[i], c(" down", " up")[(sgn + 1)/2 + 1]) utils::flush.console() } rho$par <- fitted$coefficients[-(nxi+i)] step <- 0; Lroot <- 0 while((step <- step + 1) < maxSteps && abs(Lroot) < LrootMax) { beta.i <- beta0[i] + sgn * step * delta * std.err[i] if(where=="loc") { rho$o1 <- O1 - X[, i] * beta.i rho$o2 <- O2 - X[, i] * beta.i } else rho$expSoffset <- exp(sO + Z[, (i - p)] * beta.i) fitCLM(rho) Lroot <- sgn * sqrt(2*(-rho$logLik + origLogLik)) res.i <- rbind(res.i, c(Lroot, beta.i)) } if(step - 1 < stepWarn) warning("profile may be unreliable for ", where, ".", c(Lnames, Snames)[i], " because only ", step - 1, "\n steps were taken ", c("downwards", "upwards")[(sgn + 1)/2 + 1]) } rownames(res.i) <- NULL prof[[paste(where, c(Lnames, Snames)[i], sep=".")]] <- # -p+nL structure(data.frame(res.i[order(res.i[,1]),]), names = c("Lroot", c(Lnames, Snames)[i])) if(!all(diff(prof[[length(prof)]][,2]) > 0)) warning("likelihood is not monotonically decreasing from maximum,\n", " so profile may be unreliable for ", names(prof)[length(prof)]) } } if(lambda & rho$nlambda) prof$lambda <- profileLambda(fitted, trace = trace, ...) val <- structure(prof, original.fit = fitted, summary = summ) class(val) <- c("profile.clm2", "profile") val } profileLambda <- function(fitted, link = fitted$link, range, nSteps = 20, trace = 0, ...) 
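## profile.clm2() above and profileLambda() below both record the signed
## likelihood root,
##   Lroot = sign(beta - beta.hat) * sqrt(2 * (logLik.hat - logLik.profile)),
## which is approximately standard normal, so confint() can later
## interpolate the profile trace at +/- qnorm(1 - alpha/2).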
{ if(link == "log-gamma" & missing(range)) range <- c(-4, 4) if(link == "Aranda-Ordaz" & missing(range)) range <- c(1e-4, 4) if(!link %in% c("log-gamma", "Aranda-Ordaz")) stop("link needs to be 'log-gamma' or 'Aranda-Ordaz';", link, "not recognized") if(link == "Aranda-Ordaz" & min(range) <= 0) stop("range should be > 0 for the 'Aranda-Ordaz' link") if(fitted$estimLambda == 0) fitted <- update(fitted, Hess = FALSE, link = link, lambda = NULL) MLogLik <- fitted$logLik MLlambda <- fitted$lambda logLik <- numeric(nSteps) rho <- update(fitted, Hess = FALSE, link = link, lambda = min(range)) logLik[1] <- rho$logLik rho <- update(rho, doFit = FALSE) lambdaSeq <- seq(min(range), max(range), length.out = nSteps) if(trace) message("\nNow profiling lambda with ", nSteps - 1, " steps: i =") for(i in 2:nSteps){ if(trace) cat(i-1, " ") rho$lambda <- lambdaSeq[i] fitCLM(rho) logLik[i] <- rho$logLik } if(trace) cat("\n\n") if(any(logLik > fitted$logLik)) warning("Profiling found a better optimum,", " so original fit had not converged") sgn <- 2*(lambdaSeq > MLlambda) -1 Lroot <- sgn * sqrt(2) * sqrt(-logLik + MLogLik) res <- data.frame("Lroot" = c(0, Lroot), "lambda" = c(MLlambda, lambdaSeq)) res <- res[order(res[,1]),] if(!all(diff(res[,2]) > 0)) warning("likelihood is not monotonically decreasing from maximum,\n", " so profile may be unreliable for lambda") res } confint.clm2 <- function(object, parm, level = 0.95, whichL = seq_len(p), whichS = seq_len(k), lambda = TRUE, trace = 0, ...) { p <- length(object$beta); k <- length(object$zeta) if(trace) { message("Waiting for profiling to be done...") utils::flush.console() } object <- profile(object, whichL = whichL, whichS = whichS, alpha = (1. - level)/4., lambda = lambda, trace = trace) confint(object, level=level, ...) } confint.profile.clm2 <- function(object, parm = seq_along(Pnames), level = 0.95, ...) { of <- attr(object, "original.fit") Pnames <- names(object) if(is.character(parm)) parm <- match(parm, Pnames, nomatch = 0) a <- (1-level)/2 a <- c(a, 1-a) pct <- paste(round(100*a, 1), "%") ci <- array(NA, dim = c(length(parm), 2), dimnames = list(Pnames[parm], pct)) cutoff <- qnorm(a) for(pm in parm) { pro <- object[[ Pnames[pm] ]] sp <- spline(x = pro[, 2], y = pro[, 1]) ci[Pnames[pm], ] <- approx(sp$y, sp$x, xout = cutoff)$y } ci } plot.profile.clm2 <- function(x, parm = seq_along(Pnames), level = c(0.95, 0.99), Log = FALSE, relative = TRUE, fig = TRUE, n = 1e3, ..., ylim = NULL) ### Should this function have a 'root' argument to display the ### likelihood root statistic (approximate straight line)? { Pnames <- names(x) ML <- attr(x, "original.fit")$logLik for(pm in parm) { lim <- sapply(level, function(x) exp(-qchisq(x, df=1)/2) ) pro <- x[[ Pnames[pm] ]] sp <- spline(x = pro[, 2], y = pro[, 1], n=n) sp$y <- -sp$y^2/2 if(relative & !Log) { sp$y <- exp(sp$y) ylab <- "Relative likelihood" dots <- list(...) if(missing(ylim)) ylim <- c(0, 1) } if(relative & Log) { ylab <- "Relative log-likelihood" lim <- log(lim) } if(!relative & Log) { sp$y <- sp$y + ML ylab <- "Log-likelihood" lim <- ML + log(lim) } if(!relative & !Log) { stop("Not supported: at least one of 'Log' and 'relative' ", "have to be TRUE") sp$y <- exp(sp$y + ML) ylab <- "Likelihood" lim <- exp(ML + log(lim)) } x[[ Pnames[pm] ]] <- sp if(fig) { plot(sp$x, sp$y, type = "l", ylim = ylim, xlab = Pnames[pm], ylab = ylab, ...) abline(h = lim) } } attr(x, "limits") <- lim invisible(x) } logLik.clm2 <- function(object, ...) 
structure(object$logLik, df = object$edf, class = "logLik") extractAIC.clm2 <- function(fit, scale = 0, k = 2, ...) { edf <- fit$edf c(edf, -2*fit$logLik + k * edf) } update.clm2 <- function(object, formula., location, scale, nominal, ..., evaluate = TRUE) ### This method makes it possible to use the update.formula features ### for location and scale formulas in clm2 objects. This includes the ### possibility of using e.g. ### update(obj, loc = ~ . - var1, sca = ~ . + var2) { call <- object$call if (is.null(call)) stop("need an object with call component") extras <- match.call(expand.dots = FALSE)$... if (!missing(location)) call$location <- update.formula(formula(attr(object$location, "terms")), location) if (!missing(scale)) call$scale <- if(!is.null(object$scale)) update.formula(formula(attr(object$scale, "terms")), scale) else scale if (!missing(nominal)) call$nominal <- if(!is.null(object$nominal)) update.formula(formula(attr(object$nominal, "terms")), nominal) else nominal if (length(extras)) { existing <- !is.na(match(names(extras), names(call))) for (a in names(extras)[existing]) call[[a]] <- extras[[a]] if (any(!existing)) { call <- c(as.list(call), extras[!existing]) call <- as.call(call) } } if (evaluate) eval(call, parent.frame()) else call } dropterm.clm2 <- function(object, scope, scale = 0, test = c("none", "Chisq"), k = 2, sorted = FALSE, trace = FALSE, which = c("location", "scale"), ...) ### Most of this is lifted from MASS::dropterm.default, but adapted to ### the two formulas (location and scale) in the model. { which <- match.arg(which) Terms <- if(which == "location") attr(object$location, "terms") else attr(object$scale, "terms") tl <- attr(Terms, "term.labels") if(missing(scope)) scope <- drop.scope(Terms) else { if(!is.character(scope)) scope <- attr(terms(update.formula(Terms, scope)), "term.labels") if(!all(match(scope, tl, FALSE))) stop("scope is not a subset of term labels") } ns <- length(scope) ans <- matrix(nrow = ns + 1, ncol = 2, dimnames = list(c("", scope), c("df", "AIC"))) ans[1, ] <- extractAIC(object, scale, k = k, ...) n0 <- length(object$fitted) for(i in seq(ns)) { tt <- scope[i] if(trace) { message("trying -", tt) utils::flush.console() } Call <- as.list(object$call) Call[[which]] <- update.formula(Terms, as.formula(paste("~ . -", tt))) nfit <- eval.parent(as.call(Call)) ans[i+1, ] <- extractAIC(nfit, scale, k = k, ...) if(length(nfit$fitted) != n0) stop("number of rows in use has changed: remove missing values?") } dfs <- ans[1,1] - ans[,1] dfs[1] <- NA aod <- data.frame(Df = dfs, AIC = ans[,2]) o <- if(sorted) order(aod$AIC) else seq_along(aod$AIC) test <- match.arg(test) if(test == "Chisq") { dev <- ans[, 2] - k*ans[, 1] dev <- dev - dev[1] ; dev[1] <- NA nas <- !is.na(dev) P <- dev P[nas] <- pchisq(dev[nas], dfs[nas], lower.tail = FALSE) aod[, c("LRT", "Pr(Chi)")] <- list(dev, P) } aod <- aod[o, ] Call <- as.list(object$call) Call <- Call[names(Call) %in% c("location", "scale")] head <- c("Single term deletions", "\nModel:", paste(names(Call), ":", Call)) if(scale > 0) head <- c(head, paste("\nscale: ", format(scale), "\n")) class(aod) <- c("anova", "data.frame") attr(aod, "heading") <- head aod } addterm.clm2 <- function(object, scope, scale = 0, test = c("none", "Chisq"), k = 2, sorted = FALSE, trace = FALSE, which = c("location", "scale"), ...) ### Most of this is lifted from MASS::addterm.default, but adapted to ### the two formulas (location and scale) in the model. 
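## dropterm()/addterm() operate on either formula via the 'which' argument;
## e.g. (not run; 'fm' a hypothetical clm2 fit with location and scale
## formulas):
## dropterm(fm, which = "location", test = "Chisq")
## addterm(fm, scope = ~ . + contact, which = "scale", test = "Chisq")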
{ which <- match.arg(which) if (which == "location") Terms <- attr(object$location, "terms") else if(!is.null(object$scale)) Terms <- attr(object$scale, "terms") else Terms <- as.formula(" ~ 1") if(missing(scope) || is.null(scope)) stop("no terms in scope") if(!is.character(scope)) scope <- add.scope(Terms, update.formula(Terms, scope)) if(!length(scope)) stop("no terms in scope for adding to object") ns <- length(scope) ans <- matrix(nrow = ns + 1, ncol = 2, dimnames = list(c("", scope), c("df", "AIC"))) ans[1, ] <- extractAIC(object, scale, k = k, ...) n0 <- length(object$fitted) for(i in seq(ns)) { tt <- scope[i] if(trace) { message("trying +", tt) utils::flush.console() } Call <- as.list(object$call) Call[[which]] <- update.formula(Terms, as.formula(paste("~ . +", tt))) nfit <- eval.parent(as.call(Call)) ans[i+1, ] <- extractAIC(nfit, scale, k = k, ...) if(length(nfit$fitted) != n0) stop("number of rows in use has changed: remove missing values?") } dfs <- ans[,1] - ans[1,1] dfs[1] <- NA aod <- data.frame(Df = dfs, AIC = ans[,2]) o <- if(sorted) order(aod$AIC) else seq_along(aod$AIC) test <- match.arg(test) if(test == "Chisq") { dev <- ans[,2] - k*ans[, 1] dev <- dev[1] - dev; dev[1] <- NA nas <- !is.na(dev) P <- dev P[nas] <- pchisq(dev[nas], dfs[nas], lower.tail=FALSE) aod[, c("LRT", "Pr(Chi)")] <- list(dev, P) } aod <- aod[o, ] Call <- as.list(object$call) Call <- Call[names(Call) %in% c("location", "scale")] head <- c("Single term additions", "\nModel:", paste(names(Call), ":", Call)) if(scale > 0) head <- c(head, paste("\nscale: ", format(scale), "\n")) class(aod) <- c("anova", "data.frame") attr(aod, "heading") <- head aod } ## addterm <- function(object, ...) UseMethod("addterm") ## dropterm <- function(object, ...) UseMethod("dropterm") ################################################################## ## Additional utility functions: grad.lambda <- function(rho, lambda, link, delta = 1e-6) { ll <- lambda + c(-delta, delta) if(link == "Aranda-Ordaz") ll[ll < 0] <- 0 par <- rho$par f <- sapply(ll, function(x) getNll(rho, c(par[-length(par)], x))) rho$lambda <- lambda rho$par <- par diff(f) / diff(ll) } TraceR <- function(iter, stepFactor, val, maxGrad, par, first=FALSE) { t1 <- sprintf(" %3d: %.2e: %.3f: %1.3e: ", iter, stepFactor, val, maxGrad) t2 <- formatC(par) if(first) cat("iter: step factor: Value: max|grad|: Parameters:\n") cat(t1, t2, "\n") } print.Anova <- function (x, ...) ## Lifted from package MASS: { heading <- attr(x, "heading") if (!is.null(heading)) cat(heading, sep = "\n") attr(x, "heading") <- NULL res <- format.data.frame(x, ...) 
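## grad.lambda() above differentiates the negative log-likelihood in lambda
## by a central finite difference,
##   (f(lambda + delta) - f(lambda - delta)) / (2 * delta),  delta = 1e-6,
## with the evaluation points truncated at zero for the Aranda-Ordaz link
## because its lambda must stay positive.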
nas <- is.na(x) res[] <- sapply(seq_len(ncol(res)), function(i) { x <- as.character(res[[i]]) x[nas[, i]] <- "" x }) print.data.frame(res) invisible(x) } fixed <- function(theta, eps = 1e-3) { res <- vector("list") res$name <- "fixed" if(!missing(theta) && length(theta) > 1) { if(length(theta) < 3) stop("'length(theta) = ", length(theta), ", but has to be 1 or >= 3") res$eps <- NULL res$theta <- theta res$getTheta <- function(y, theta, eps) theta } else if(!missing(theta) && length(theta) == 1) { if(as.integer(theta) < 3) stop("'as.integer(theta)' was ", as.integer(theta), ", but has to be > 2") res$eps <- NULL res$theta <- theta res$getTheta <- function(y, theta, eps) { eps <- diff(range(y)) / (theta - 1) seq(min(y) - eps/2, max(y) + eps/2, len = theta + 1) } } else if(missing(theta) && length(eps) == 1) { res$eps <- eps res$theta <- NULL res$getTheta <- function(y, theta, eps) { J <- diff(range(y))/eps + 1 seq(min(y) - eps/2, max(y) + eps/2, len = J) } } else stop("inappropriate arguments") class(res) <- "threshold" res } makeThresholds2 <- function(rho, threshold, ...) { if(threshold == "flexible") { rho$tJac <- diag(rho$ntheta) rho$nalpha <- rho$ntheta rho$alphaNames <- paste(rho$lev[-length(rho$lev)], rho$lev[-1], sep="|") } if(threshold == "symmetric") { if(!rho$ntheta >=2) stop("symmetric thresholds are only meaningful for responses with 3 or more levels") if(rho$ntheta %% 2) { ## ntheta is odd rho$nalpha <- (rho$ntheta + 1)/2 ## No. threshold parameters rho$tJac <- t(cbind(diag(-1, rho$nalpha)[rho$nalpha:1, 1:(rho$nalpha-1)], diag(rho$nalpha))) rho$tJac[,1] <- 1 rho$alphaNames <- c("central", paste("spacing.", 1:(rho$nalpha-1), sep="")) } else { ## ntheta is even rho$nalpha <- (rho$ntheta + 2)/2 rho$tJac <- cbind(rep(1:0, each=rho$ntheta/2), rbind(diag(-1, rho$ntheta/2)[(rho$ntheta/2):1,], diag(rho$ntheta/2))) rho$tJac[,2] <- rep(0:1, each=rho$ntheta/2) rho$alphaNames <- c("central.1", "central.2", paste("spacing.", 1:(rho$nalpha-2), sep="")) } } if(threshold == "equidistant") { if(!rho$ntheta >=2) stop("symmetric thresholds are only meaningful for responses with 3 or more levels") rho$tJac <- cbind(1, 0:(rho$ntheta-1)) rho$nalpha <- 2 rho$alphaNames <- c("threshold.1", "spacing") } } ordinal/R/terms_utils.R0000644000176200001440000001771014533321514014607 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . 
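## makeThresholds2() (end of clm2.R above) parameterizes the ntheta
## thresholds as theta = tJac %*% alpha. E.g. for the "equidistant"
## structure with 4 thresholds, tJac = cbind(1, 0:3), so
## alpha = c(threshold.1, spacing) yields theta = threshold.1 + spacing * (0:3).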
############################################################################# # terms_utils.R - utilities for computing on terms objects and friends # ------- Contents: -------- # # --- utility functions: --- # # term2colX # need_yates # no_yates # numeric_terms # get_model_matrix # get_contrast_coding # get_min_data # get_var_list # get_fac_list # get_num_list # get_pairs # get_trts # ############################################## ######## term2colX() ############################################## term2colX <- function(terms, X) { # Compute map from terms to columns in X using the assign attribute of X. # Returns a list with one element for each term containing indices of columns # in X belonging to that term. if(is.null(asgn <- attr(X, "assign"))) stop("Invalid design matrix:", "design matrix 'X' should have a non-null 'assign' attribute", call. = FALSE) term_names <- attr(terms, "term.labels") has_intercept <- attr(terms, "intercept") > 0 col_terms <- if(has_intercept) c("(Intercept)", term_names)[asgn + 1] else term_names[asgn[asgn > 0]] if(!length(col_terms) == ncol(X)) # should never happen. stop("An error happended when mapping terms to columns of X") # get names of terms (including aliased terms) nm <- union(unique(col_terms), term_names) res <- lapply(setNames(as.list(nm), nm), function(x) numeric(0L)) map <- split(seq_along(col_terms), col_terms) res[names(map)] <- map res[nm] # order appropriately } ############################################## ######## need_yates() ############################################## need_yates <- function(model) { ## Do not need yates for: ## - continuous variables ## - factors that are not contained in other factors ## Need yates for all other terms, i.e. terms which are: ## - contained in other terms, AND ## - which are not numeric/continuous term_names <- attr(terms(model), "term.labels") cont <- containment(model) is_contained <- names(cont[sapply(cont, function(x) length(x) > 0)]) nmt <- numeric_terms(model) num_terms <- names(nmt[nmt]) term_names[!term_names %in% num_terms & term_names %in% is_contained] } ############################################## ######## no_yates() ############################################## no_yates <- function(model) { setdiff(attr(terms(model), "term.labels"), need_yates(model)) } ############################################## ######## numeric_terms() ############################################## #' @importFrom stats delete.response terms numeric_terms <- function(model) { ## Determines for all terms (not just all variables) if the 'dataClass' ## is numeric ## (interactions involving one or more numerics variables are numeric). Terms <- delete.response(terms(model)) all_vars <- all.vars(attr(Terms, "variables")) data_classes <- attr(terms(model, fixed.only=FALSE), "dataClasses") var_class <- data_classes[names(data_classes) %in% all_vars] factor_vars <- names(var_class[var_class %in% c("factor", "ordered")]) num_vars <- setdiff(all_vars, factor_vars) term_names <- attr(terms(model), "term.labels") # term_names <- setNames(as.list(term_names), term_names) sapply(term_names, function(term) { vars <- unlist(strsplit(term, ":")) any(vars %in% num_vars) }) } ############################################## ######## get_model_matrix() ############################################## #' Extract or remake model matrix from model #' #' Extract or remake model matrix from model and potentially change the #' contrast coding #' #' @param model an \code{lm} or \code{lmerMod} model object. 
#' @param type extract or remake model matrix? #' @param contrasts contrasts settings. These may be restored to those in the #' model or they may be changed. If a length one character vector (e.g. #' \code{"contr.SAS"}) this is applied to all factors in the model, but it can #' also be a list naming factors for which the contrasts should be set as specified. #' #' @return the model (or 'design') matrix. #' @keywords internal #' @author Rune Haubo B Christensen get_model_matrix <- function(model, type=c("extract", "remake"), contrasts="restore") { type <- match.arg(type) # stopifnot(inherits(model, "lm") || inherits(model, "lmerMod")) if(type == "extract") return(model_matrix(model)) # Set appropriate contrasts: Contrasts <- get_contrast_coding(model, contrasts=contrasts) model.matrix(terms(model), data=model.frame(model), contrasts.arg = Contrasts) } ############################################## ######## get_contrast_coding() ############################################## get_contrast_coding <- function(model, contrasts="restore") { # Compute a list of contrasts for all factors in model Contrasts <- contrasts if(length(contrasts) == 1 && is.character(contrasts) && contrasts == "restore") { Contrasts <- attr(model_matrix(model), "contrasts") } else if(length(contrasts) == 1 && is.character(contrasts) && contrasts != "restore") { Contrasts <- .getXlevels(terms(model), model.frame(model)) Contrasts[] <- contrasts Contrasts } Contrasts } #' # #' get_min_data <- function(model, FUN=mean) #' # Get a minimum complete model.frame based on the variables in the model #' do.call(expand.grid, get_var_list(model, FUN=FUN)) #' #' get_var_list <- function(model, FUN=mean) #' # Extract a named list of variables in the model containing the levels of #' # factors and the mean value of numeric variables #' c(get_fac_list(model), get_num_list(model, FUN=FUN)) #' #' #' @importFrom stats .getXlevels #' get_fac_list <- function(model) { #' # Extract a named list of factor levels for each factor in the model #' res <- .getXlevels(Terms=terms(model), m=model.frame(model)) #' if(is.null(res)) list() else res #' } #' #' get_num_list <- function(model, FUN=mean) { # FUN=function(x) mean(x, na.rm=TRUE)) { #' # Extract named list of mean/FUN values of numeric variables in model #' deparse2 <- function(x) paste(safeDeparse(x), collapse = " ") #' Terms <- terms(model) #' mf <- model.frame(model) #' xvars <- sapply(attr(Terms, "variables"), deparse2)[-1L] #' if((yvar <- attr(Terms, "response")) > 0) #' xvars <- xvars[-yvar] #' if(!length(xvars)) return(list()) #' xlev <- lapply(mf[xvars], function(x) { #' if (is.numeric(x)) FUN(x) else NULL #' }) #' res <- xlev[!vapply(xlev, is.null, NA)] #' if(is.null(res)) list() else res #' } #' #' #' @importFrom utils combn #' get_pairs <- function(levs) { #' stopifnot(is.character(levs), length(levs) > 1) #' combs <- combn(seq_along(levs), 2) #' ind <- seq_len(ncombs <- ncol(combs)) #' A <- as.data.frame(array(0, dim=c(length(levs), ncombs))) #' dimnames(A) <- list(levs, paste(levs[combs[1, ]], levs[combs[2, ]], sep=" - ")) #' A[cbind(combs[1, ], ind)] <- 1 #' A[cbind(combs[2, ], ind)] <- -1 #' A #' } #' #' get_trts <- function(levs) { #' nlevs <- length(levs) #' ans <- t(cbind(-1, diag(nlevs - 1))) #' rownames(ans) <- levs #' colnames(ans) <- paste(levs[-1], levs[1], sep=" - ") #' ans #' } # get_trts(letters[1:5]) # get_pairs(letters[1:5]) ordinal/R/convergence.R0000644000176200001440000002562514533321514014537 0ustar 
liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Functions to assess and check convergence of CLMs. Some ## functions/methods are exported and some are used internally in ## clm(). convergence <- function(object, ...) { UseMethod("convergence") } convergence.clm <- function(object, digits = max(3, getOption("digits") - 3), tol = sqrt(.Machine$double.eps), ...) ### Results: data.frame with columns: ### Estimate ### Std. Error ### Gradient - gradient of the coefficients at optimizer termination ### Error - the signed error in the coefficients at termination ### Rel. Error - the relative error in the coefficeints at termination ### ### The (signed) Error is determined as the Newton step, so this is ### only valid close to the optimum where the likelihood function is ### quadratic. ### ### The relative error equals step/Estimate. { ## get info table and coef-table: info <- object$info[c("nobs", "logLik", "niter", "max.grad", "cond.H")] ## Initialize coef-table with NAs: coefs <- coef(object, na.rm=TRUE) g <- object$gradient H <- object$Hessian tab <- matrix(NA_real_, nrow=length(coefs), ncol=6L, dimnames=list(names(coef(object, na.rm=TRUE)), c("Estimate", "Std.Err", "Gradient", "Error", "Cor.Dec", "Sig.Dig"))) tab[, c(1L, 3L)] <- cbind(coefs, g) res <- list(info=info, coefficients=tab, original.fit=object) class(res) <- "convergence.clm" if(!all(is.finite(H))) { warning("non-finite values in Hessian: illegitimate model fit") return(res) } ## Get eigen values of Hessian: res$eigen.values <- e.val <- eigen(H, symmetric=TRUE, only.values=TRUE)$values ## Compute Cholesky factor of Hessian: ch <- try(chol(H), silent=TRUE) if(any(abs(e.val) <= tol) || inherits(ch, "try-error")) { return(res) } ## Hessian is positive definite: ## Compute approximate error in the coefficients: step <- c(backsolve(ch, backsolve(ch, g, transpose=TRUE))) if(max(abs(step)) > 1e-2) warning("convergence assessment may be unreliable ", "due to large numerical error") ## Compute approximate error in the log-likelihood function: env <- get_clmRho(object) ## Note: safer to get env this way. ## env <- update(object, doFit=FALSE) env$par <- coef(object, na.rm=TRUE) - step new.logLik <- -env$clm.nll(env) new.max.grad <- max(abs(env$clm.grad(env))) if(new.max.grad > max(abs(g)) && max(abs(step)) > tol) warning("Convergence assessment may be unreliable: ", "please assess the likelihood with slice()") ### NOTE: we only warn if step is larger than a tolerance, since if ### step \sim 1e-16, the max(abs(grad)) may increase though stay ### essentially zero. 
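## The coefficient error reported here is one full Newton step away from the
## fitted optimum: with upper-triangular Cholesky factor R (H = R'R),
##   step <- backsolve(R, backsolve(R, g, transpose = TRUE))
## solves H %*% step = g, and 'Cor.Dec'/'Sig.Dig' below translate |step|
## into counts of correct decimals and significant digits.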
logLik.err <- object$logLik - new.logLik err <- format.pval(logLik.err, digits=2, eps=1e-10) if(!length(grep("<", err))) err <- formatC(as.numeric(err), digits=2, format="e") res$info$logLik.Error <- err ## Fill in the coef-table: se <- sqrt(diag(chol2inv(ch))) res$coefficients[, c(2, 4:6)] <- cbind(se, step, cor.dec(step), signif.digits(coefs, step)) res } print.convergence.clm <- function(x, digits = max(3, getOption("digits") - 3), ...) { ## Prepare for printing: print(x$info, row.names=FALSE, right=FALSE) cat("\n") tab.print <- coef(x) for(i in 1:2) tab.print[,i] <- format(c(coef(x)[,i]), digits=digits) for(i in 3:4) tab.print[,i] <- format(c(coef(x)[,i]), digits=max(1, digits - 1)) print(tab.print, quote=FALSE, right=TRUE, ...) ## Print eigen values: cat("\nEigen values of Hessian:\n") cat(format(x$eigen.values, digits=digits), "\n") conv <- x$original.fit$convergence cat("\nConvergence message from clm:\n") for(i in seq_along(conv$code)) { Text <- paste("(", conv$code[i], ") ", conv$messages[i], sep="") cat(Text, "\n") } if(!is.null(alg.text <- conv$alg.message)) cat(paste("In addition:", alg.text), "\n") cat("\n") ## for(i in seq_along(conv$code)) { ## cat("Code: Message:\n", fill=TRUE) ## cat(conv$code[i], " ", conv$message[i], "\n", fill=TRUE) ## } ## if(!is.null(alg.text <- conv$alg.message)) { ## cat("\nIn addition: ", alg.text, "\n\n", fill=TRUE) ## } return(invisible(x)) } cor.dec <- function(error) { ### computes the no. correct decimals in a number if 'error' is the ### error in the number. ### The function is vectorized. xx <- -log10(abs(error)) lead <- floor(xx) res <- ifelse(xx < lead - log10(.5), lead-1, lead) res[abs(error) >= .05] <- 0 as.integer(round(res)) } signif.digits <- function(value, error) { ### Determines the number of significant digits in 'value' if the ### absolute error in 'value' is 'error'. ### The function is vectorized. res <- cor.dec(error) + ceiling(log10(abs(value))) res[res < 0] <- 0 as.integer(round(res)) } conv.check <- function(fit, control=NULL, Theta.ok=NULL, tol=sqrt(.Machine$double.eps), ...) ## function(gr, Hess, conv, method, gradTol, relTol, ## tol=sqrt(.Machine$double.eps), ...) ### Compute variance-covariance matrix and check convergence along the ### way. ### fit: clm-object or the result of clm_fit_NR() | gradient, Hessian, ### (control), convergence ### control: (tol), (method), gradTol, relTol ### ### Return: list with elements ### vcov, conv, cond.H, messages and { if(missing(control)) control <- fit$control if(is.null(control)) stop("'control' not supplied - cannot check convergence") if(!is.null(control$tol)) tol <- control$tol if(tol < 0) stop(gettextf("numerical tolerance is %g, expecting non-negative value", tol), call.=FALSE) ### OPTION: test this. H <- fit$Hessian g <- fit$gradient max.grad <- max(abs(g)) cov <- array(NA_real_, dim=dim(H), dimnames=dimnames(H)) cond.H <- NA_real_ res <- list(vcov=cov, code=integer(0L), cond.H=cond.H, messages=character(0L)) class(res) <- "conv.check" if(is.list(code <- fit$convergence)) code <- code[[1L]] mess <- switch(as.character(code), "0" = "Absolute and relative convergence criteria were met", "1" = "Absolute convergence criterion was met, but relative criterion was not met", "2" = "iteration limit reached", "3" = "step factor reduced below minimum", "4" = "maximum number of consecutive Newton modifications reached") if(control$method != "Newton") mess <- NULL ### OPTION: get proper convergence message from optim, nlminb, ucminf etc. 
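## cor.dec() above counts correct decimals from an absolute error; e.g. an
## error of 4e-4 gives -log10(4e-4) = 3.4, hence 3 correct decimals, and
## signif.digits() adds ceiling(log10(|value|)) to turn that count into
## significant digits.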
res <- c(res, alg.message=mess) ## } evd <- eigen(H, symmetric=TRUE, only.values=TRUE)$values negative <- sum(evd < -tol) if(negative) { res$code <- -2L res$messages <- gettextf(paste("Model failed to converge:", "degenerate Hessian with %d negative eigenvalues"), negative) return(res) } ## Add condition number to res: res$cond.H <- max(evd) / min(evd) ## Compute Newton step: ch <- try(chol(H), silent=TRUE) if(max.grad > control$gradTol) { res$code <- -1L res$messages <- gettextf("Model failed to converge with max|grad| = %g (tol = %g)", max.grad, control$gradTol) ## Compute var-cov: vcov <- try(chol2inv(ch), silent=TRUE) if(!inherits(vcov, "try-error")) res$vcov[] <- vcov return(res) } if(!is.null(Theta.ok) && !Theta.ok) { res$code <- -3L res$messages <- "not all thresholds are increasing: fit is invalid" ## Compute var-cov: vcov <- try(chol2inv(ch), silent=TRUE) if(!inherits(vcov, "try-error")) res$vcov[] <- vcov return(res) } zero <- sum(abs(evd) < tol) if(zero || inherits(ch, "try-error")) { res$code <- 1L res$messages <- "Hessian is numerically singular: parameters are not uniquely determined" return(res) } ### NOTE: Only do the following if 'ch <- try(chol(H), silent=TRUE)' ### actually succedded: step <- c(backsolve(ch, backsolve(ch, g, transpose=TRUE))) ## Compute var-cov: res$vcov[] <- chol2inv(ch) ### NOTE: we want res$vcov to be present in all of the situations ### below. if(max(abs(step)) > control$relTol) { res$code <- c(res$code, 1L) corDec <- as.integer(min(cor.dec(step))) res$messages <- c(res$messages, gettextf("some parameters may have only %d correct decimals", corDec)) } if(max(evd) * tol > 1) { res$code <- c(res$code, 2L) res$messages <- c(res$messages, paste("Model is nearly unidentifiable: ", "very large eigenvalue", "\n - Rescale variables?", sep="")) } if((min(evd) / max(evd)) < tol) { res$code <- c(res$code, 3L) if(!5L %in% res$code) { res$messages <- c(res$messages, paste("Model is nearly unidentifiable: ", "large eigenvalue ratio", "\n - Rescale variables?", sep="")) } } if(!length(res$code)) { res$code <- 0L res$messages <- "successful convergence" } res } cov.conv <- conv.check ### OPTION: let convergence() print convergence info from clm using ### print.conv.check print.conv.check <- function(x, action=c("warn", "silent", "stop", "message"), ...) { action <- match.arg(action) if(x$code == 0L || action == "silent") return(invisible()) Text <- paste("(", x$code[1L], ") ", x$messages[1L], sep="") if(!is.null(alg.text <- x$alg.message)) Text <- paste(Text, "\nIn addition:", alg.text) switch(action, "stop" = stop(Text, call.=FALSE), "warn" = warning(Text, call.=FALSE), "message" = message(Text)) } ordinal/R/clmm.ranef.R0000644000176200001440000000675314533321514014264 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . 
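## Usage sketch for the ranef/condVar methods defined below (not run;
## assumes the wine data and a random effect of 'judge'):
## fm <- clmm(rating ~ temp + contact + (1 | judge), data = wine)
## ranef(fm)$judge    # conditional modes of the random effects
## condVar(fm)$judge  # matching conditional variances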
############################################################################# ## This file contains: ## Implementation of ranef and condVar methods for clmm objects to ## extract the conditional model of the random-effects and their ## conditional variances. ## fixef.clmm <- function(object, ...) coef(object, ...) ## object$coefficients ### NOTE: Should return a *named* vector # ranef <- function(object, ...) UseMethod("ranef") ## fixef <- function(object, ...) UseMethod("fixef") ranef.clmm <- function(object, condVar=FALSE, ...) ### This function... ### args... ### Returns.... { formatRanef <- function(relist, ST, gf.levels, assign, qi) { asgn <- split(seq_along(assign), assign) ## colnames of random effects: cn <- lapply(ST, colnames) cn <- lapply(asgn, function(ii) unlist(cn[ii])) ranefList <- lapply(seq_along(relist), function(i) { matrix(relist[[i]], ncol=qi[i]) }) ## Combine r.e. terms associated with the same grouping factors, ## set dimnames and coerce to data.frame: ranefList <- lapply(seq_along(asgn), function(i) { mat <- do.call(cbind, ranefList[ asgn[[i]] ]) dimnames(mat) <- list(gf.levels[[i]], cn[[i]]) as.data.frame(mat) }) ## list of r.e. by grouping factors: names(ranefList) <- names(gflevs) ranefList } ## which r.e. terms are associated with which grouping factors: asgn <- attributes(object$gfList)$assign ## names of levels of grouping factors: gflevs <- lapply(object$gfList, levels) ## random effects indicator factor: reind <- with(object$dims, factor(rep.int(seq_len(nretrms), nlev.re * qi))) ## list of random effects by r.e. term: relist <- split(object$ranef, reind) ranefList <- formatRanef(relist, object$ST, gflevs, asgn, object$dims$qi) if(condVar) { ### OPTION: Should we return matrices for vector-valued random effects ### as lmer does? ## Add conditional variances of the random effects: cond.var <- object$condVar if(NCOL(cond.var) > 1) cond.var <- diag(cond.var) cvlist <- split(cond.var, reind) cond.var <- formatRanef(cvlist, object$ST, gflevs, asgn, object$dims$qi) for(i in seq_along(ranefList)) attr(ranefList[[i]], "condVar") <- cond.var[[i]] } ranefList } condVar <- function(object, ...) UseMethod("condVar") condVar.clmm <- function(object, ...) lapply(ranef.clmm(object, condVar=TRUE), function(y) attr(y, "condVar")) ordinal/R/drop.coef.R0000644000176200001440000001506514533321514014115 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## Functions that can drop columns from rank-deficient design ## matrices. One is exported and others used internally. 
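## Example of the rank-deficiency handling below (not run; 'X' is a
## hypothetical design matrix with one aliased column):
## x <- rnorm(10)
## X <- cbind("(Intercept)" = 1, x = x, x2 = 2 * x)  # rank 2, not 3
## drop.coef(X)  # messages that 1 coef is dropped; returns the 10 x 2 part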
drop.coef <- function(X, silent = FALSE) ### works if ncol(X) >= 0 and nrow(X) >= 0 { ## test and match arguments: stopifnot(is.matrix(X)) silent <- as.logical(silent)[1] ## perform the qr-decomposition of X using LINPACK methods: qr.X <- qr(X, tol = 1e-7, LAPACK = FALSE) if(qr.X$rank == ncol(X)) return(X) ## return X if X has full column rank if(!silent) ## message the no. dropped columns: message(gettextf("design is column rank deficient so dropping %d coef", ncol(X) - qr.X$rank)) ## return the columns correponding to the first qr.x$rank pivot ## elements of X: newX <- X[, qr.X$pivot[1:qr.X$rank], drop = FALSE] ## did we succeed? stop-if-not: if(qr.X$rank != qr(newX)$rank) stop(gettextf("determination of full column rank design matrix failed"), call. = FALSE) return(newX) } drop.coef2 <- function(X, tol = 1e-7, silent = FALSE, test.ans = FALSE) ### works if ncol(X) >= 0 and nrow(X) >= 0 { ## test and match arguments: stopifnot(is.matrix(X)) silent <- as.logical(silent)[1] aliased <- rep.int(0, ncol(X)) ## perform the qr-decomposition of X using LINPACK methods: qr.X <- qr(X, tol = tol, LAPACK = FALSE) if(qr.X$rank == ncol(X)) { ## return X if X has full column rank attr(X, "aliased") <- aliased attr(X, "orig.colnames") <- colnames(X) return(X) } if(!silent) ## message the no. dropped columns: message(gettextf("design is column rank deficient so dropping %d coef", ncol(X) - qr.X$rank)) ## return the columns correponding to the first qr.x$rank pivot ## elements of X: newX <- X[, qr.X$pivot[1:qr.X$rank], drop = FALSE] sel <- qr.X$pivot[-(1:qr.X$rank)] aliased[sel] <- 1 attr(newX, "aliased") <- aliased attr(newX, "orig.colnames") <- colnames(X) ## Copy old attributes: attributes(newX)$contrasts <- attributes(X)$contrasts attr(newX, "assign") <- attr(X, "assign")[-sel] ## did we succeed? stop-if-not: if(test.ans && qr.X$rank != qr(newX)$rank) stop(gettextf("determination of full column rank design matrix failed"), call. = FALSE) return(newX) } drop.cols <- function(mf, silent = FALSE, drop.scale=TRUE) ### drop columns from X and possibly NOM and S to ensure full column ### rank. ### mf - list with X and possibly NOM and S design matrices. Includes ### alpha.names ### ### returns: updated version of mf. { nalpha <- length(mf$alpha.names) ## X is assumed to contain an intercept at this point: Xint <- match("(Intercept)", colnames(mf$X), nomatch = 0) if(Xint <= 0) { mf$X <- cbind("(Intercept)" = rep(1, nrow(mf$X)), mf$X) warning("an intercept is needed and assumed") } ## intercept in X is guaranteed. 
if(!is.null(mf[["NOM"]])){ ## store coef names: mf$coef.names <- list() mf$coef.names$alpha <- paste(rep(mf$alpha.names, ncol(mf$NOM)), ".", rep(colnames(mf$NOM), each=nalpha), sep="") mf$coef.names$beta <- colnames(mf$X)[-1] ## drop columns from NOM: mf$NOM <- drop.coef2(mf$NOM, silent=silent) ## drop columns from X: NOMX <- drop.coef2(cbind(mf$NOM, mf$X[,-1, drop=FALSE]), silent=silent) ## extract and store X: mf$X <- cbind("(Intercept)" = rep(1, nrow(mf$X)), NOMX[,-seq_len(ncol(mf$NOM)), drop=FALSE]) ## store alias information: mf$aliased <- list(alpha = rep(attr(mf$NOM, "aliased"), each=nalpha)) mf$aliased$beta <- attr(NOMX, "aliased")[-seq_len(ncol(mf$NOM))] if(drop.scale && !is.null(mf[["S"]])) { mf$coef.names$zeta <- colnames(mf$S)[-1] ## drop columns from S: NOMS <- drop.coef2(cbind(mf$NOM, mf$S[,-1, drop=FALSE]), silent=silent) ## extract and store S: mf$S <- cbind("(Intercept)" = rep(1, nrow(mf$S)), NOMS[,-seq_len(ncol(mf$NOM)), drop=FALSE]) mf$aliased$zeta <- attr(NOMS, "aliased")[-seq_len(ncol(mf$NOM))] } else if(!is.null(mf[["S"]])) { Sint <- match("(Intercept)", colnames(mf$S), nomatch = 0) if(Sint <= 0) { mf$S <- cbind("(Intercept)" = rep(1, nrow(mf$S)), mf$S) warning("an intercept is needed and assumed in 'scale'", call.=FALSE) } ## intercept in S is guaranteed. mf$coef.names$zeta <- colnames(mf$S)[-1] mf$S <- drop.coef2(mf$S, silent=silent) mf$aliased$zeta <- attr(mf$S, "aliased")[-1] } return(mf) } ## end !is.null(mf[["NOM"]]) ## drop columns from X assuming an intercept: mf$coef.names <- list(alpha = mf$alpha.names, beta = colnames(mf$X)[-1]) mf$X <- drop.coef2(mf$X, silent=silent) mf$aliased <- list(alpha = rep(0, nalpha), beta = attr(mf$X, "aliased")[-1]) ## drop columns from S if relevant: if(!is.null(mf[["S"]])) { Sint <- match("(Intercept)", colnames(mf$S), nomatch = 0) if(Sint <= 0) { mf$S <- cbind("(Intercept)" = rep(1, nrow(mf$S)), mf$S) warning("an intercept is needed and assumed in 'scale'", call.=FALSE) } ## intercept in S is guaranteed. mf$coef.names$zeta <- colnames(mf$S)[-1] mf$S <- drop.coef2(mf$S, silent=silent) mf$aliased$zeta <- attr(mf$S, "aliased")[-1] } return(mf) } ordinal/R/clm.fit.R0000644000176200001440000001607614533321514013575 0ustar liggesusers############################################################################# ## Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen ## ## This file is part of the ordinal package for R (*ordinal*) ## ## *ordinal* is free software: you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation, either version 2 of the License, or ## (at your option) any later version. ## ## *ordinal* is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## A copy of the GNU General Public License is available at ## and/or ## . ############################################################################# ## This file contains: ## The function clm.fit() - an lm.fit or glm.fit equivalent for CLMs. clm.fit <- function(y, ...) { UseMethod("clm.fit") } clm.fit.factor <- function(y, X, S, N, weights = rep(1, nrow(X)), offset = rep(0, nrow(X)), S.offset = rep(0, nrow(X)), control = list(), start, doFit=TRUE, link = c("logit", "probit", "cloglog", "loglog", "cauchit", "Aranda-Ordaz", "log-gamma"), threshold = c("flexible", "symmetric", "symmetric2", "equidistant"), ...) 
ordinal/R/clm.fit.R0000644000176200001440000001607614533321514013575 0ustar liggesusers#############################################################################
##    Copyright (c) 2010-2022 Rune Haubo Bojesen Christensen
##
##    This file is part of the ordinal package for R (*ordinal*)
##
##    *ordinal* is free software: you can redistribute it and/or modify
##    it under the terms of the GNU General Public License as published by
##    the Free Software Foundation, either version 2 of the License, or
##    (at your option) any later version.
##
##    *ordinal* is distributed in the hope that it will be useful,
##    but WITHOUT ANY WARRANTY; without even the implied warranty of
##    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##    GNU General Public License for more details.
##
##    A copy of the GNU General Public License is available at
##    <https://www.r-project.org/Licenses/> and/or
##    <http://www.gnu.org/licenses/>.
#############################################################################
## This file contains:
## The function clm.fit() - an lm.fit or glm.fit equivalent for CLMs.

clm.fit <- function(y, ...) {
  UseMethod("clm.fit")
}

clm.fit.factor <-
  function(y, X, S, N, weights = rep(1, nrow(X)),
           offset = rep(0, nrow(X)), S.offset = rep(0, nrow(X)),
           control = list(), start, doFit=TRUE,
           link = c("logit", "probit", "cloglog", "loglog", "cauchit",
                    "Aranda-Ordaz", "log-gamma"),
           threshold = c("flexible", "symmetric", "symmetric2",
                         "equidistant"), ...)
### This function basically does the same as clm, but without setting
### up the model matrices from formulae, and with minimal post
### processing after parameter estimation.
{
  ## Initial argument matching and testing:
  threshold <- match.arg(threshold)
  link <- match.arg(link)
  control <- do.call(clm.control, control)
  if(missing(y)) stop("please specify y")
  if(missing(X)) X <- cbind("(Intercept)" = rep(1, length(y)))
  stopifnot(is.factor(y), is.matrix(X))
  if(missing(weights) || is.null(weights))
    weights <- rep(1, length(y))
  if(missing(offset) || is.null(offset))
    offset <- rep(0, length(y))
  if(missing(S.offset) || is.null(S.offset))
    S.offset <- rep(0, length(y))
  stopifnot(length(y) == nrow(X) &&
            length(y) == length(weights) &&
            length(y) == length(offset) &&
            length(y) == length(S.offset))
  frames <- list(y=y, X=X)
  y[weights <= 0] <- NA
  y.levels <- levels(droplevels(y))
  struct <- namedList(y, X, weights, offset, S.offset, y.levels,
                      threshold, link, control, doFit)
  ## S and N are optional:
  if(!missing(S) && !is.null(S)) {
    struct$S <- S
    stopifnot(is.matrix(S),
              length(y) == nrow(S))
  }
  if(!missing(N) && !is.null(N)) {
    struct$NOM <- N
    stopifnot(is.matrix(N),
              length(y) == nrow(N))
  }
  clm.fit.default(struct)
}

clm.fit.default <- function(y, ...)
### y: design object with the following components: ...
### (tJac=NULL), (y.levels=NULL), threshold, (aliased=NULL),
### (start=NULL), link, control, weights, (coef.names=NULL), y, X,
### (S=NULL), (NOM=NULL), doFit=TRUE, S.offset=NULL
{
  ## check args:
  stopifnot(is.list(y))
  y <- c(y, list(...))
  stopifnot(all(c("y", "X", "offset", "weights", "link",
                  "threshold", "control", "doFit") %in% names(y)))
  ## preprocess design objects if needed:
  if(is.null(y$y.levels)) y$y.levels <- levels(y$y)
  if(is.null(y$tJac)) {
    y <- c(y, makeThresholds(y$y.levels, y$threshold))
  }
  if(is.null(y$aliased))
    y <- drop.cols(y, silent=TRUE, drop.scale=FALSE)
  ## Make model environment:
  rho <- do.call(clm.newRho, y)
  setLinks(rho, y$link)
  start <- set.start(rho, start=y$start, get.start=is.null(y$start),
                     threshold=y$threshold, link=y$link, frames=y)
  rho$par <- as.vector(start) ## remove attributes
  if(y$doFit == FALSE) return(rho)
  if(length(rho$lambda) > 0 && y$control$method != "nlminb") {
    message("Changing to 'nlminb' optimizer for flexible link function")
    y$control$method <- "nlminb"
  }
  ## Fit the model:
  fit <- if(length(rho$lambda) > 0) {
    clm_fit_flex(rho, control=y$control$ctrl)
  } else if(y$control$method == "Newton") {
    clm_fit_NR(rho, y$control)
  } else {
    clm_fit_optim(rho, y$control$method, y$control$ctrl)
  }
  ## Adjust iteration count:
  if(y$control$method == "Newton" &&
     !is.null(start.iter <- attr(start, "start.iter")))
    fit$niter <- fit$niter + start.iter
  ## Update coefficients, gradient, Hessian, edf, nobs, n,
  ## fitted.values, df.residual:
  fit <- clm.finalize(fit, y$weights, y$coef.names, y$aliased)
  fit$tJac <- format_tJac(y$tJac, y$y.levels, y$alpha.names)
  th.res <- formatTheta(fit$alpha, fit$tJac, y, y$control$sign.nominal)
  ## Check convergence:
  conv <- conv.check(fit, control=y$control, Theta.ok=th.res$Theta.ok,
                     tol=y$control$tol)
  print.conv.check(conv, action=y$control$convergence) ## print convergence message
  th.res$Theta.ok <- NULL
  fit <- c(fit, conv[c("vcov", "cond.H")], th.res)
  fit$convergence <- conv[!names(conv) %in% c("vcov", "cond.H")]
  fit <- fit[sort(names(fit))]
  class(fit) <- "clm.fit"
  fit
}
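## A usage sketch for clm.fit() -- not run at install time, and it
## assumes the ordinal package is attached so that clm(), the wine data
## set and the exported clm.fit() are available:
if(FALSE) {
  fm1 <- clm(rating ~ contact + temp, data = wine) ## formula interface
  mf1 <- update(fm1, method = "design")  ## design matrices without fitting
  res <- clm.fit(mf1$y, mf1$X)           ## dispatches to clm.fit.factor()
  all.equal(coef(res), coef(fm1))        ## same estimates as the formula fit
  ## With doFit = FALSE the (unfitted) model environment is returned:
  rho <- clm.fit(mf1$y, mf1$X, doFit = FALSE)
}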
clm.finalize <- function(fit, weights, coef.names, aliased)
### extractFromFit
###
### distinguishing between par and coef where the former does not
### contain aliased coefficients.
{
  nalpha <- length(aliased$alpha)
  nbeta <- length(aliased$beta)
  nzeta <- length(aliased$zeta)
  nlambda <- length(fit$lambda)
  ncoef <- nalpha + nbeta + nzeta + nlambda ## including aliased coef
  npar <- sum(!unlist(aliased)) + nlambda ## excluding aliased coef
  stopifnot(length(fit$par) == npar)
  if(nlambda) aliased <- c(aliased, list(lambda = FALSE))
  if(nlambda) coef.names <- c(coef.names, list(lambda = "lambda"))
  fit <- within(fit, {
    coefficients <- rep(NA, ncoef)
    ## ensure correct order of alpha, beta and zeta:
    keep <- match(c("alpha", "beta", "zeta", "lambda"),
                  names(aliased), nomatch = 0)
    aliased <- lapply(aliased[keep], as.logical)
    for(i in names(aliased))
      names(aliased[[i]]) <- coef.names[keep][[i]]
    names(coefficients) <- unlist(coef.names[keep])
    par.names <- names(coefficients)[!unlist(aliased)]
    coefficients[!unlist(aliased)] <- par
    alpha <- coefficients[1:nalpha]
    if(nbeta) beta <- coefficients[nalpha + 1:nbeta]
    if(nzeta) zeta <- coefficients[nalpha + nbeta + 1:nzeta]
    names(gradient) <- par.names
    dimnames(Hessian) <- list(par.names, par.names)
    edf <- npar ## estimated degrees of freedom
    nobs <- sum(weights)
    n <- length(weights)
    fitted.values <- fitted
    df.residual <- nobs - edf
    ## keep <- i <- fitted <- par.names <- par <- coef.names <- NULL
  })
  notkeep <- c("keep", "i", "fitted", "par.names", "par", "coef.names")
  fit[!names(fit) %in% notkeep]
}
ordinal/MD50000644000176200001440000001512014533336352012220 0ustar liggesusers
3ead017746ee142eeaa82137cefe4c29 *DESCRIPTION 9023f060a8aa0d9f16566887e1f561e7 *LICENCE.note a2437fe61640a0e31060e6a16a28d50e *NAMESPACE 2a8d4f3e4ad336e60b5d11b95b8d4b7a *NEWS 869e905a54815e650868264a2bee1264 *R/AO.R b5172dd6e7b5f7f23e133cd279cbb15b *R/clm.R 8f0c1d0af7f7f68728acced4e978c364 *R/clm.Thetamat.R b1b280d156278f991729d0e9d39d70ff *R/clm.anova.R b90d5a1c5abb244750e275a4d0ffec92 *R/clm.fit.R cb4448c0e954f202b681a4665f9b1c38 *R/clm.fitter.R 70fc4552bed2aa792527407004c537d5 *R/clm.frames.R 4e2e7da3c00311fdc12b7d4e84d76917 *R/clm.methods.R c38157d19765d99728b11d20dba4eb3c *R/clm.nominal_test.R 28268872112a7ef9d93f81399e7d8d07 *R/clm.predict.R fb19cbdce82cbdfc755dc7df56a78b7a *R/clm.profile.R f8e467dda373e544c79f853bcccf5db8 *R/clm.simple.R 27531994978ef842f9225558d98f5e8a *R/clm.slice.R c2d95a71c7b8f8a2bfbadd9655dbfdba *R/clm.slice2D.R 3dbf13015e277e7e1c5442491a8899fe *R/clm.start.R c86d631d774ae572238027cb380e7e8d *R/clm2.R bbfecf069481a5a27609b7102f6a7cb8 *R/clmm.R a4cfd611edfc0ef2ec7c84557d20bd09 *R/clmm.formula.R a242d8385d958d9ad27c84fccd868ff7 *R/clmm.methods.R d35ded8d8e8111cbc8a4e4a6debe76c0 *R/clmm.ranef.R 63ca18ecb9e0d7ffb8eed7bb247737ad *R/clmm.ssr.R ef3d4639d726be5e417a7558e431ce53 *R/clmm.start.R 10d62d2cb10d2269d7e3669db3f757dd *R/clmm2.R 5559c014ae600a2f031e897edbbda6a2 *R/clmm2.utils.R 6cfb772181a1ee478b22fdc694c6f0ec *R/contrast_utils.R b52732b2ab5c0113c320df484faec3f9 *R/control.R 06b4ec45624a187e02c9b127239f4156 *R/convergence.R fa85118c3a5612847e6e2e8a62055191 *R/derivatives.R 7d6871b41f4736f496326c81873124a8 *R/drop.coef.R 8d4f671d30b5f7c22395b50297aeadcf *R/gdist.R 651a98b5b14214833f69063bdacaaa62 *R/gumbel.R ffd316f2d6501837566bd3c182213dd9 *R/lgamma.R 274843603afe2a4e57d9117dbcf9f73b *R/terms_utils.R 7dd6b54f58ac7846a3afb3006f55e6a5 *R/utils.R e464fea09b4a6386417c6e5f382bd529 *R/warning_functions.R f03be3092abad988ab2989b17023b588 *build/vignette.rds 63eb28b797efedb31ad1f5db75ddfa0b *data/income.rda de5d075c95248681323ce9e571e7d0a2 *data/soup.rda 50b83b93508e5c5a9085d5204cb9ff09 *data/wine.rda b7f57dc62ba4df06bcfc94a8da23edb7 *inst/CITATION
ae3b9374f095160fe80630038bbaf34e *inst/doc/clm_article.R 26c233139e172037013c4e8f138118eb *inst/doc/clm_article.Rnw 25353b05a3fcebba49eb33bf5fe63949 *inst/doc/clm_article.pdf 64aaa511c3b4dc72b6c004106553bf97 *inst/doc/clmm2_tutorial.R ddde6e9af6699c2d9c11002d12e3049e *inst/doc/clmm2_tutorial.Rnw 8f0e52c67159cc512ff8cf5e475209d3 *inst/doc/clmm2_tutorial.pdf bd8055e0c9cc16fdf95f9caebc942c53 *man/VarCorr.Rd 4d295ce70b37b9409b0b74eb6ae7d0b2 *man/addtermOld.Rd fbbfa8a549ecc09ad66aa0e00d1f6d1b *man/anovaOld.Rd 14a35aa475723573c41e11dc049ae5f7 *man/clm.Rd eef6dbcf3a110b0221075d694991a70d *man/clm.anova.Rd 2c2b6be598033c141f2639fecbc25959 *man/clm.control.Rd aeedb960b5d0cd91648b2f43261c0010 *man/clm.controlOld.Rd 6b5c14c006fbfe9634616757bf287b9f *man/clm.fit.Rd 02cc7944a1cc6a14870e5c1ba33bd0ae *man/clmOld.Rd 5529ca85369ae94713f65470336e6e20 *man/clmm.Rd 861db001a071e534dc5021120bb62124 *man/clmm.control.Rd 3095fd227ece61f6b6a099d5de003313 *man/clmm.controlOld.Rd d8b5a448143a0da1495756ea2c48ab44 *man/clmmOld.Rd a03c75733972348ddd5f505dc472c26b *man/confint.clm.Rd 50c7e6ec194e8af3bfccca5e3e4e61fb *man/confint.clmmOld.Rd 3d881bc96a9fd9a56c4cd1451c708a7f *man/confintOld.Rd 8e1dcaa797916a35a9de4f1413dee029 *man/convergence.clm.Rd cb5e6dd9111063de64f3643568674fc4 *man/dropCoef.Rd 650996c7b48d859ae5e5ac751dfeaca2 *man/gfun.Rd 4d87ff9fa6c1729a7ad52be5c3f7a16f *man/gumbel.Rd 7f719c8b1d0ede27f15c1fa9cd3aedea *man/income.Rd 5ebc7da192ca06d173b2694853352e9e *man/lgamma.Rd 51b4cdc005b1c26b5d23d04197965c8f *man/nominal.test.Rd d8c875d10d4669c6b8fe2d4d65860b6b *man/ordinal-package.Rd 1f050e8e469290a5c6c9c07e3ae08a29 *man/predict.Rd 731499033e04d0f739cad2d0ad13b9c1 *man/predictOld.Rd 37b2ed10c518b0e95c9b64e211929bab *man/ranef.Rd ffeacc4ef5eb2b97d794c89afdb5c59c *man/slice.clm.Rd 2c66bfbfde8422891b1ca4359c858dc6 *man/soup.Rd 41562a0c8389e5fe01af70802a3e995f *man/updateOld.Rd 079335e2cb6d006b7040c330f4aabd59 *man/wine.Rd a4c8272ba330e14dfb8fff85c1184bca *src/get_fitted.c b774245a99c4226f32d467e15d9c4cf3 *src/init.c 5b028613e6e3b236f905adee877601d8 *src/links.c 7920d50fb3efca7a9ec8a37fab7ccd94 *src/links.h 4ebce05763db1904a275166d250410f0 *src/utilityFuns.c 735d1e1f085ffaaacf1a7628930adc64 *tests/anova.R 89bb425a86eefa6d518534ba2bebffe9 *tests/clm.fit.R 7cf9e5abc7360d67304ac97cb1f4bbad *tests/clm.formula.R ae0d8a60e17d3ebb5a6863f5f1d13dd7 *tests/clmm.R 9ed01ea5d1feb4f302de5a957e195a3b *tests/clmm.control.R cadfa40f297ae2ad3013b99470d73116 *tests/clmm.formula.R f72fcd80cfeff92cb86b987d4a829c9d *tests/clmm.methods.R bb53b627bd127be25140ca9b18cd7570 *tests/confint.R d8267669e5b9c3ab305c6036f1c8d623 *tests/nominal.test.R c0a7ea9adb79f1a72f794d68c0b2a8e3 *tests/ranef.loading.R d1a9b3c673dfe17f1579cb8527af60d3 *tests/test-all.R 16a2a63ab5214f0a105692aedc0c8fc6 *tests/test.clm.Theta.R d4e39c9cbf18fb828ee6abde86063820 *tests/test.clm.convergence.R bbb3efe198444977cfe15200a2f73aa2 *tests/test.clm.flex.link.R 972645dadf3c58dda8dfba40191406f0 *tests/test.clm.model.matrix.R 65465ddc9177b9ef5e0c1eb7ed83bb39 *tests/test.clm.predict.R b237b3a6173025bf72810002e9e0b195 *tests/test.clm.profile.R e91f920a51deaa7c84a7d75f9220486a *tests/test.clm.single.anova.R b666a698afa2ebefbc12b52358969a05 *tests/test.general.R 6faea911a5b2575b5acb57087c697201 *tests/test.makeThresholds.R 434ae1cd4de96ac0d2ac54db684ac7d5 *tests/test.sign.R 0452a68e5a919248553360c9622eb926 *tests/test0weights.R dd771457105e82780b335f6ced52e736 *tests/testAnova.clm2.R ca67691eee70bdd41b3ae5c71f5b61e6 *tests/testCLM.R d1d0e84d5901ddf008e6cbb22e2ce003 
*tests/testthat/test-clm-formula.R 5a26fb2b90f90bd574da4ed0758abfe4 *tests/testthat/test-clm-predict.R ff1d040fe6da8b6ffe3e26b2546aa27d *tests/testthat/test-clm-profile.R bfa792df07f282e896fb427ed763abb4 *tests/testthat/test-clm.R 0263b906dbd4420343b72f0b9316ea73 *tests/testthat/test-clmm-checkRanef.R a9c6572a4ca505408b5ee7204021b905 *tests/testthat/test-contrasts.R 49fd8f2e430e2be16217acbc1008a209 *tests/testthat/test-misc.R 234b3c903ae3e070034dff21ff97de82 *tests/testthat/test-utils.R 26c233139e172037013c4e8f138118eb *vignettes/clm_article.Rnw 93f4376771c255464971866bb5220156 *vignettes/clm_article_refs.bib ddde6e9af6699c2d9c11002d12e3049e *vignettes/clmm2_tutorial.Rnw b79878774fe08d5c9e41784df0e084fb *vignettes/ordinal.bib 611a9529149925e573c10c3e44e35f65 *vignettes/static_figs/fig-fig2.pdf bfd5f3a90bf5e07ea91dbe7989988e73 *vignettes/static_figs/fig-figEqui.pdf 4ef0c7b6ada9dbe4f654e78f23c09f00 *vignettes/static_figs/fig-figFlex.pdf 3c8fe3fcbc9df098a95ded136c94b2c5 *vignettes/static_figs/fig-figNom2.pdf 606db1c50178c05344e9f26b31c45375 *vignettes/static_figs/fig-figSca.pdf
ordinal/inst/0000755000176200001440000000000014533322576012671 5ustar liggesusers
ordinal/inst/doc/0000755000176200001440000000000014533322572013432 5ustar liggesusers
ordinal/inst/doc/clm_article.pdf0000644000176200001440000135634714533322575016421 0ustar liggesusers
[Binary PDF stream of inst/doc/clm_article.pdf omitted. Recoverable metadata
from the embedded XMP stream: Title "Cumulative Link Models for Ordinal
Regression with the R Package ordinal"; Author Rune Haubo B Christensen;
Keywords: ordinal, cumulative link models, proportional odds, scale effects,
R; created 2023-12-04 with LaTeX and hyperref; produced by GPL Ghostscript
9.19.]
EKrY''Nƹ+pE"c3D5ZKH@0 9߈Cؓ,AĞ* أ}I;EHdjZsbh*mnEYf9'$ O/'hd*F#섈+_oЮ̄ca&u&vl@.9=&Q3W)f娲04IC]h57?<}`%}J n}xRo:i )WÉu\6ǪѳY[+i@;>s(*V9ʓPS SG*E}wHciAش\2j ${#_/QnXC)5LR2O~ =<g.ͅ8 >1f&b> stream x\[#uVdLd`DG l(;8Yi9HʣC~{9U]U]ClV\snzb_vwu|uwb8Vr#<=|uUx=m^:ήo\b43=޸y{Yϴnw-}'U?nN/w_\T=Slφ}?{z-\ϸl_wu# {\0 ߼|n_Ӂ3v̽q"l-L7wa-ÔPi-<ʹ?Ӆl %R\1axɥtt^ᛗ$"|Oq)tC_ 'HtUNG%`Q[>?۷zaI=#K?dXQu= Zns6/Bn8W,zkeȟ>C(S.nιns"I{"%zgr ق:qw+X2Aǘ]yo_D^-*yF & FQHcTƭ@_6gLWk(E;(@!ϔ "5W>^ynLN_.+AJx(@ _JWpZҽuRbTm㤥ek}so`}̉Gn8r$GIdhQCF0ͲUOEen:i9;7Co01#hoǴ ={M2@crWbnhC̲O 7LTI9-^wIZYKkZ(ϙݾ>ķ19>:[+Ȉ&9߅%蕖;`p4մ ?w zYrH64]=VsbҩlJC6n v'JǛH._ >W/7'ZC9G̕;8*J&ܨOCE)6*saZ&t) 7q ~Y*T8ր@2D )(3a'UGZ>4qCi9?g(0 zY( @ByWc!I& RW kpp,`Ñ1Xu=֢ 8\r+ 8a@&ض@)D P.ؒN g1ֈAum.Ug ,$%,ɤ,B"Sc|e j_ y!qַYf) bƁө9c;Q#HBm>NF!<8C aAS PsS\էCxyESbC0xZWX & ` "5SU(΁oяt?ߟ= OryaM !uK19'#*# Z3ތ"$.}H JWC jsgVj­?87 fT=31utMhûR7vcLKq\A[sI @` LjږbbzsRLxKB"n1<ʘ&HpHy$!ȯɋn|92dž#N!=., AKKCIrb.BXyA%>aF:]xUO⪮f1WW(r7Eo&([V$R@\2Lle ض0l%T &ՅJn  HA+7PFh zZř7BGt|By3VBg@C^$i5r(U 9DXD/Nf&+.U23! kdSgl$"HF|HߙnUDeY&(bCo(D{X֑cMt%v8'C@>ϣͲW^FOxפya1Z@pbY^H? ?!ݨU×qy@;rb9xs| ~z^~S1@7n^SAՔY ѫT0XS7*BQw~*fiNJveaԱ9܉`]8TXCאUb=?8DTt0z J~ΰ)6Xl.g)!0 0^rj-Y/L4UG4c>~C(`/Ûm7J;㾿E'}G-ZR%0>7KdL ItKmz)E]By!(dp=L(}DfO{:A a>p>v? l=o.0oY2ʕG.+wΦ9چBL\:{>Ok9̏D;1wkB#WUBgXcgGy XF-@?Cmkrk,Nw G0$e"ʵ?gT BW>]ZxCR"}lV9_H6%27'_+ED (KxL"EA2J۔Ys1.,'42&7*a{v),Z;JꜦv:E[8fmOJ/3=iչʞڑQK1E:z,vO9_yL=7[Z%-Gġ@lMZ?")])%{QZJ嬯j{gE`R/fMQ9EwQJAZVB! `[Lh~:3h/xX" ZXE)@Ih;:Ƭ -&Y{$0l]| {욑nB(M&:ϊc#Xv[ C~Ϛ\rrTkޑpYn_w!Ĉ]䢦.mǘCFNFdb ǕJ8l +d# Ӏ0+c9i h*aۏ{s(1zǫ,WE^Mb&_ /je ;MYʹƮ?@f;B$?]>PÓ'X'n T!qGuwM -b%B&=E1 O -ΠdüaxE-9LMQqDLLT)oԓU2ok":f/YHOp2o)ZcT{LÛ5b9D*,bDFi2F)˚.@<ƛZ玶xH].*YG {LL3[p!a ulU!"35L=*mkA7U60U oIGqx܊+#eS 3KL ;կo*` w(2['8);b]߅ _VHb谷&r;뼃,ZL>VIO PkW;{ZXd1/+R ,毵7d njstYtWߴ cmT_w` ;|G=s{?RkaYKO{!ȲF⩾(U潵eĬ{%-]I-`iZk^wC8pbt̡0Wg40N! '#Kc7O]0L f#3K $?=ߵI,T34& 0` A ,L\s|*Y/ HQ,Г_r埕W@4`_Ɗt͏7FuC'#> stream x\K6r}{zlM":XZWIیLO 5USb˿ޙ D`M?F HMSM8\ǯƫfJMߋ!T ?ծqbsʿ+6Rrc2ճkՍhdh\6|{UJ׍to[LTGjuwc#ddi0Љixi\{7[ԝ`O7/*!o~j-6[~Vu']ed¯VOx{B6ye!e'sDJrII^_|w%,IWyr<iFW'zOrNjILlUrFnنZiV#G0Vst*W׵pt_gNϪx (շaCRC$]m; Zfkˆ Hٺm-#/ 碚 D8Y[V(M-$:I20犐BOF~e!m_χ ć!I%:FWaotVa?=2hWmZD.ȨR]<kLJQWd5U&OddKZXS1|iݕe%ZB&,cEMQoeՔV`aZGeEz>d0+<Ǩ|Ƃ. \bׂٔp6s~܇(Xݪ"١0o׌ VE V~KIE鶶2忔fvջk#?bZYán:>2b(W[(7d8*3kDTe UrPw< BإAR.^<_gVZϼYFڒ%'Aiٙ?{rErq{~~KvԊADSfo<"?{ W:zVtUq}D uWqgqIeP5dr+ڧ%)D@1݁Т\ "iҢiӵ@ xM Rm; f{6BDU~n)R]6E`1%5:`T.c;HK@Niq fT`Flko,}nS0X. wl4 i.xFU Nǔ 0e[%ptYxKF홹8soD6 ?} kȆhOcX8 34jޅ˧x$vVn+Tuގo&̈́1 TÁߋ@չaBc&> SOp^oa*2m@?-?gtRnS04Cg+ht: scjDs Da) U_ &_( d~6zG:OϮ_g"aXP^QR;L ::2Fw3\O`?>+LGuXOզ KAo8x$'F4aV! Ɓ%kkϒk-zWGninS31S852a )=gH/fD/taϪn h=9{\{BV5BU) ?6 M'sڥz(&-À|94L̉-zC( aM3u_}`@P|(+Ѥ-r$f@x:iŎI|):Nn@n% pZVuGF4 5~ h6KPeTUƋɇy/ 'bJ= TWcZ Vݟ)Jn3gG"P:<[}VR,#Wp>3;[Q#3 nӈ=O#iF2/uJn40vAUF_w-9ɘ&!u~1(L$nb`P9Oeo喫yȂ,.n1a4WSed7J{3E셐T#;G0y %B$)v]˭]Sݤ!A`ٞd%`hi$+x1b ('@ӄhSUrenU&3jNLc+s8H3>112\Sklэ6 O^aV.L)v(_H"{Rugӆ.ΏoR ʜ"@n)S+Dn@S6JT>:ӴO3X_p]'UdMv|,xN6'?fyi8!ЁnVLTZe-L>w5[$m,ź "m!O 2N0H㝋96g:vX`h%;﷩+dqN)㰍a) @)%MS,o2WCw,e_p']C%/~lGruDU}vt!36J򙃟;\Q-nQ@bR´jNbq38k-z:),k@A-µ񌥺 R'L |-i?w#k|pFO2oy?a9V}Yzt8VYG7u )[yd E.Qsׄ)m hCLk`n(F;J!Bd1 &Ml "Иac )& u HwP0`@B8ѿ'8kHAQxLH| RQQgcOjL1, ލnӹ'#f ,P54ڛ1I?|塚zS %;xyMdvo2bqw jfe.(1O<ɱҌ2p|q^%-M ܇,Cڍ9F,RPs!`rRfS9'>p7)J^a:3f$="HEwwewʪs6,ҚZ}u%DӁ,_ F}ZG@^/iU;S܉I=_ #aJcx̽0̓i3?E!jYjg#7W~1)@ Fvx-/l>x:.ɒ򃾚~_I]Qe$iTjQ}46"ړV=>+#O?1h v>JhRлTUSK7ie#Sc# n gXe{tmߞm.ЌϯoXπ5(к*H 1Q(޽}sO7u2,۷/oiM3y3~+J..#PS<_?Az@.H>ؼ@Slp8AN~/`[*w ^sO8tug? 
>K-ksY <ҟ+g 'wa~y)qj /ǜ bҼQDNo`<bmu13[yz-K)I0אcI٥m %?wSIRz\.ۿLBXZ";Ĭ 辽 ~BoBSs!qDDx[P +Vz/d./\ ͗Mݮia5aO!W)(D,jyڄu!댹3l59OL'wmkiv{vÂ"GFX}wC\Vi&iZSkE*~ɲDy5f5K|ϾSl?~,a^m[n WB{|+GrMb`"S~bQLn?s} MRSf}~ 3/A-Vf>D~p##/ &`nƙ_L@$ wɇ[=4yaO-<%lgH(ϕ83J3?tѬAPf ɴKP*D]]`+NAxvz(Y&""ɽd߰[Legg\= /wl]P/'VrKD03I$='B=ݶ+yJƗzVc ꦋ=1+o_S{]*gIQq, ?8ʃ8D7䂅 O.547p%p>?cw&IB//c8m"~c\5=/0uO3̛ >$r㒥> 1!5VDHG fAZ46f /N8Y5RHRdku Li[~mxeXOqC(wyB ՛^ϲ&߷T? XމGB`7u8I,bוSid*.-9Aۆj"X9&Ҥ0ir{fOߞƁnH(嫾#*_ﮘ-A.1VәNce>%06CQυ6&*t)(1˓z5Ic^14H-9sPڏ3.6]vZ\S4erK5zte>n+҃'_K=}6x>Op#,wz `Co.ވHG?:.6R$̋„mep2Jdendstream endobj 537 0 obj << /Filter /FlateDecode /Length 4859 >> stream x\KGr^ط0vU)aXi (.rV-TWZꌬ ݕ_D]+n;q'nwMwFߋa M*n5n!7wBu׷Bzqwݦk;|sHn_y|X߼lmu&k{|}lk;>k}w~:8|/\_w 曕=ioч1vaRtE+i).lư߄]m;uUu}ƙΧpIa^9=p:Y[M>7%܀hY ^^LqaԤgm)%z5x62I29=|YP}9u8.fVEApϳ|H3*V2i;jE P^}u柰w2sja݊WkR/A+ ;Y3>j4 .,,N/4ov@'ί;Dj64Ng}R- eO+@`fFw:L`E\`NďW/lnr+V wr݄YjEm[HBU8ڀZ>>}k.pv O:+ʎ tk:hpPbBxBx3V9%snv5=pyо0X8⁻67L bM0-d ?]ʂOo Io/͋q~=S,j&2%['ߵ:|]QFRKo s7+8+MIm=S}^ q]CmZ1 w0,`+IB/ڴFL[azP u(m޲418"kUvFPIGfb~x0Ӫb y/_}IW0Fd;F@OM!7)z#pmFpt꩟սh +u]Q`D5 N{|vŒ=~rU1j@ 7~4Jso7FQ1Ll ²Iɮ]'l1bQח*܂6mP- {nj:K{끹ZT$̮n6oo9ٸ,v VƁ~ !o۷=:U8e%P,f?[c~ *;f2{MzgfhT0B=d+QO((ojzpҸV"(}DИn*jPBxbcfxU~'qw='aqJߍ{^ɝ\A-O2sׁyn*R('#u.}?FT5 G  O>VH#$7'x} #3l+7M &cW6Gh4/3קlP AfU58 4lXӟ~?q"U^:TŴLT]rJ|Emi?F> AKr£jN_K9vV[Guq&U]c`\f`HU>E⠬Kv#c:o*WS!Fp%:D4/!R=v8~<g,]93'y 4k)8ASV>ˎzVe|R8 gzQ@FiaKDUҌ*Ɠ/Fz)eR!VR&ʶsenhڌ"} c 9c䪵O"7IGP{8v'x:tEB8>\nG1.e 8\ϯ i)j  9Oh"]%Mԑ(, A7CrdMޤY8C{=gƀOmQ$@`y=X~09S8.{h!p4 *p.KiӮϰW"6qWAdf<'rInCL#4T:kBhЛΟL(t_G& n|4pR[!2:Q<ѱEJF 0q,VY|h04b7/]N0y\OK?B@?47xA,I,m8psWci:v)yns53p6ڨ"gC2itFrʇ?̙ɫu4aT]Jq/QvF%_'2tV /1P4^sLJB8]7v勫QУ89N<&Z9r=8(Y@k2G' 2j:9a"4WI|?I`HѰ[MP XOOY̲[eSN$3D[²)Ge:Yb2/J:d%5rbaeYkڊ=I➿(yMCnU+|?T9Dy5mXuYS?^FZN̴"{mWYg橾/ll,wAwt`Dj a,*F$LiTwE)N+bl/mh%L,F% dQd9*E%bu`۟hmZ$'䍇!wnG,>WjA@/jڝG$ tE]`(Vyaaii upDwKK@fXJf?='b.42G=M 9 {\VΗTowV]`[%E^7I2oOJe\?HSp,CqbADf'R[8P"pTl)xpAc}栌^ kucut $W †*-X2 dܻ"]V$*M uu Z>sR8fCa.ǜR=`/0憢X7]">uц meVVAiz8n孍+nwvq,C1Wm*TK`h^\ʂ#r]vn$tI¥NuA3<^\LJ# !Jq;`hh{"uN/ ~!*kj44C!AosLz3̻4|x_9ٔ}Ui݆$K(QI!cgȘϚ$rwVI}q>_ȹ9-0# Yфb  $r  R \{ȓj*RJcV׈VuH,,ok'X=,ayo,!%}.EZ8A!`%gL91b%=ybqkQ"WAn1\- d@q\JՃ辕͙p'L,`Ȑ XK )6*R1; ":߾ R604++C8fOaw7F4\' X%u`H*H谙B4EF4oq #ipRIHms<̹klz{#ËBCLlVeh cP|{ 7_6o^)ȧM#є@<# ֜ )_:XdSh3UM썔X]T_AT>ڸժk +$u*OZ3K>AV=FU#GhK D!k-Piޱ L,-Zf@ ,ޥIXPh^iTjvs=8驖Vg"W|iWBFsTh\cp2l#\?T7x.`/',K>dB{A7/a\o~r@(/z`(}Cth@v44v j)jJvZ*9Ւ#,n&sg3% yP$B|SL\Oڪ9c@p[Hc[}mbY8B\ؿ-Nc`<͸ bn,Qo*s'Y?xT{;}ܫDdS+H a45uywV3:p2-g:Jqc~;*5"XY!+iqAQΓ7> stream x[Ks/$)]I0+;ermkd,GIO7$v.j9$hwUIv:&ݞ|FםF&Ui*Cv7gn.Q&KBN U&v7{ˊTxWe%")1^V@14Z_NJwF /4R50iߋuE=] 18'Ra↥GG⫷C׺rM)wJa_WYÀᢻS+~(/" Vp&\08տ6'Fi50Czuo}}>5 3/[{o&z*C=6ß3URRUċe"6f)ah=L1V nSxf*N?\ܢno;PD_ ɤbٶ /Z]2qRm7*6m*Y"(Nd1.Pf ͋q@{PtwG3! 杝^1r F0Zh<_T+xgD4>lAN'UlfW%,TBJEU̬0ccn"bm?2{eL.2 lM 5xH@4޺ӽ}+)<&{OLhd+h$\Hm *Tt+KsK5jDH,ž@HpMSYP-wGh"qZ< n:! ]XS` Ƈtp*WqLoY\hj}S= lw-T3 1|/Hɗq)pJIL.$9JlJdͻ $Bd)@YTIH@Ŧ3b֞g*8P 'VHܬЄ9 ĉLimOr1Ę3;7Q&_/P *x[TeeC ,t[B>[,267bbhAHsևi6O e3o37JԘ[Gw} f Zl{Jux 퇇H\{\p͇zs=D΍_`T>Yz!Q¬De,ˤqp?ݐg5uS,æ(\"+Ŕqzĺ 5M#racqjƍxH0/B +S8Lb GguVf.rUKd|摢ȦN^ZmiT,DӺ4 c1 ,@:\28gO4xlp-lS. 眹=;u_6j2I{UŻ>)ݯc.)怞@l(qnu_P(&m"ȗާecW%2+'n0 uo-$2^Q>5`CURoln!aЩqRDd+H\͓Y1nK%UNd 9+3֘CGUx*G H:iI Ϫyق_UZy 7YR>Na ?5{{y$v++ATq=s~JtԅH ay)r1D (SXa(hVTTyQ>g%Vy@XqCAgIΣV&vIf~̧!c2urL|Ϫ覓vf6j]nlWؼ^]'zb>b3<|1 ܉-*p$+24f2=gPaR1eq'1HvU1\uDnnXZnn!0u?N#d1<J&LVpo13 <7Ƒ)벾Be`Wü4;u`3o1_. 
%Ѻ\k/m1 sHeqvx1qwoW Q+U?^ىVTޭH%uU_ÿc[<[T~ix_w]{FF2Ao_t=yd[z-p~cscXڮS~qvMj?n8ϗ`mBS-]O-Ʈ`{?hnlZ1ny/ nm{xT.b٬b%msS)c5d ܢ?i^NɟheMSwǪ$[,-U]K<\j&WUB JR W:K27%Y2IF ĄkQrBe(^qzfƄ23mֲ8J,;/woiq_z>'WdiGS`>儧ZP,娕y&^g?,h\3)ÒS poW]٧қ}\ 8A> %G7 M*)"؝['JM4U`<<%",.Zݹ|eH>pQPE)9 n?R]-^ Q|>1sR+NՒtpӯlWyҔ͗߃=d+/iQtgtAv;DhCRDhendstream endobj 539 0 obj << /Filter /FlateDecode /Length 6077 >> stream x\ߏFrγ\] DcnZCl#K ƞYC%!{fsfV<6GuuW_U󗫦fW {x,5W~yWmӲ7ܫ 3ԭPW7g7?Ac&tҺ5-qsUoM(aXVk[!w^ou#e!j3m+ ut[EM]eVj? n[ߍoݿZ*]~oD JUhꇕ5wQ--L{7n$a,IVsY?չ%׮t6ÊU[]?# -a& z5~7a [Hj.xu;?zUu Vu1e XjUwpA߀͛a;]H.ZdKtGtOUO}{nV՚3?v(M m=)J۰D7G'JW+57ձPѿݐ2Ҁ t٥mDk)6LJ?]i׊I;!RCƋpۢmEЌW 0 N 9G㡫w$~5LbjkT?OlekLjxKp"Dev_3Rw~Y)T&o[׬`+8(~N8*7 fg ;1IT9v莻Y3O^B}'U7Wܴͳ??sA]^Rq }rȢ斋KN*0okpfDq4vA-e{rgiƦwH ]X7?!פaFՠ);'uKS[c[vj@q~cTn5{}R h63 Պ`  `-!Zkr|PdgPuxbFMv_Ak;n;Gm{xjB L?Cݸ;<}=sMPs4ׇ=i~Cëo+CSkhrL]Ny:9f ݖf<Ri7Cq{,WEAӌ4vauګocx;gS]3ܲDi_۷0??0痢Y}߅IIsAU+-zqT\͆~0&j6dKaqGW ܬq󛌩}1"+9\xP7J O(s(5'CaN 1k?CE/& 9덤Qʖ##nwIx:!E8Gm;Lá8pZgg-ň+Q(^WEA׹ yQ$ILx zF(mEb-]O#sL.bhs/Xi 2V`\q힉0)v z]%w>J),1`ג9bW&pϏ|ጀ1RWA)ψ|kng۔VUX5j}; 8~̀$>LKPz!-O>Ma#H޿аb3v*QM ֙prȁq O!T3fVCbF̋(hp`L G2_Q^7~Kg>T\@<:.^"U6eB5%= ](9BbڪM) <ʝ(Q>SՉҷG kr7q/SS 9 }ݥu2ƮV4~)CqyL} h jk])QMHqݔ4BB MB%0#tRiadj2: /W(` ֓~Q˥H J Dmf* $1mnumUڹ8т a0 NR]30/rh=[x~ Iu\&˧dIh)E3@O.g8 CcFR5Z.:=%A lRrqh" 3ؑjyhoR4A1g4B{Mtd{dY8Nu>Oe}J,z%KpEYcQl03ⒼDjb(hrH i9ϲ'뎊)NuCU{,mUE&$+?IevƦx|*ѹ|󴹇;_[Q-̴+reeqf}|Ɯ+[[ ,͘S[2 L,bX%5 wȺ)4.}n A@%D j gE'a[0Qےq.ԝ$x umB X( _PFMWYQTV!䲬z<յ]ܧC"/l.ٺX?~5lj4C<\eSi&; " T?4 w.CAHP >44%28Rڪ6  g}Gn(UuH>I.JeΤ5\8We[rf@Eĩn-a在. jtlk#$rm 8m|S:4ysm@ _[tkFnNhXwwWK_ |#+_ 4|"|ʾA&W˾A_~B;8M߬ʁnMKBy'a(>}9U3|s+~U(gakʤg围 dd_h1I7$ܹI:ӵʫTSiSƁ+zE( mBoK^yc`uʋAT`鸬S[f8` 9,^&6V_ Ae)GzR&O _ B)ɋ۱$,oYQRIƾ..mӫ^um?$٬`[=&= xŸWS%J0*/lgNO!UVi)ZF zF[V]i kܮA[+tTL~XN^(t5FcҰiiD\Z}|9# zՠ U9K*2m/E"m":Y+ ZJ%O5c,[!`igj%Ο"׽l_ ;YcyI[q̲[˪GdȚjhd>9x"/kU&*U3a m86|X Gl=}acAPVc4J\pȐʮEuB;8Y^ECϋ]4QbW]!n}L7?FIaa˻@TqwO<9=Txlo_?FlIU&|Jɣ0'8d#"ʵ\TᕗseR`GJ. N&Y~2 G| |UWCV;*h/83n|<1)H2@%ajɽ~٬D*lnՒ(Hz* &v hhY~Z?b$sgC̆dTӆPCi//,ffZZ6D٭z '8$$2(E̯+\k< &·?Ֆ6<*0E%֨5{,B>EY×lक़όٲ#ܮ‡yזWU8]t9FbD\_7>w2v-*kVu=VYӣZT <"iHH8\%{EyAnb*~O{d@dEdQ4z|Iu(9On $}Ku1sW[G }K)S붉cEl.SU 9b爉"'g8nendstream endobj 540 0 obj << /Filter /FlateDecode /Length 3502 >> stream xZ͏ܶyS[rq2*E:@@M"ۃ;gf⿽=IQ{XD>}#_.x%ٟ;噠r9p܋Y*06uYΘp0X([ÌWK^qj=-%wJ9l~ _j˕RZg}6g +5]]eC`.pYǶ=NPm?_ s} 1ToMm=?seL,fԛKbLZ괖9!-*)H.p*lBS26yAZ[Ŷ ZO(_eA+%,ɭEiv( 8 JQ* B8=k/KcAҲ4?Sʽvk\~ҺVIۛ}m7A饤;?eWllJ,~$ڇZY>QO_5$*_Jh3Π]u~8WqGAïsz ό.\v"ʷs6l7)$ U"<[xRD^v/+#5F&vcyCx {OXC3btٯ/A){@\Ndadpafӟ6M:Zİ]Ǚn$ l T{14q*DєQޛGAD! aF ٟ~ Ofgϗia/} lEVt>߷4w _Pvq_ &PET$A&\!9pd05mr7-W;(frӸZOFk^şZBi_Ư!G8NY0r) fW3qjUA!p#e!ٵ142}چتe50PW'˃e9[Yz:3,lB eAԩ6GA24%X]AtD5dͮIC>6$eb&1k4u)u*C&$p"z.?d.i`eLɲ!Az b"8pr>k4a;׹^eWEiM.O9>cR7b%e~ W]8N eB%zLbD世8 sܓOb}f~9x=*Ʃ{]D꧄ofm4D̸Ds6v"+3SLH s!UCa7RYׂ*dIVs|jqƛ[1=U!MmZn.S k!}|XKP E/ʁ  X"w&P6'k(T]D搄)Ң-we(<}4E %2qױ(1i!P[TVfh0r\-uoNv4X L|byD=9>T@uc'Ἐ8[9^Op D7?+(}fomL z}Q}vK$Vxs.V6w\{yߛz B06g%R1x<үe4Oq/F[LשpuJXx⭡Ld6<~6sx4WҲ^ƾG|Z(7$p3 Xq0!V#,I2]{~OGj;:2ANc@pS`8"#Lqh"&b"ox<.;a<>v$"eee΃f uj‚zG':GyhmBWBR!Gקm+IVR$U<|͆37Øpcow㵴wÈ 4U==*&E6`x889 ݮ^_D^2bfUendstream endobj 541 0 obj << /Filter /FlateDecode /Length 2595 >> stream xYI!rblVj_dK,K ÙiLX 竭)Yp[^OsF_gy淳f<~ \_Z>P.jK~=n!e r`ii90.R*,'_DKakr 4zqA O͹qg\C{EKo*:gDH^)Tϗ:hdBAHInp܋Eb𥉉R%.=9XӺm^S,y/q?τ^ I6C޴qC.V^ HsXRu{aijz̻9XriNqw]xóg^Ǣ!}psrhd~IoO^NyMtE#pmڸ/gmHAxOat3DoZde85j~E#ƣI[<=nonM=G\ؖG70ug>gyp)ԜS63Ti؋nvY/֧}eWsZf)we!:2A5qsϒHY#/=VkhZne#_FiI|y@l6KNɇ B6 o5Me\)pg!5Nxouu4_L 2^x[Z-)`Mun31I(=XAX6^Yp=%m.)yfe\֒qSݴ)? 
BnRq@u*꯿[66Tbb CH;/y#!pC[yh oQFhÍig`Zlaj VLdߧ!iU,b3ݦrAkʽyS+A-l cqA%Z@N4MYKuuyf[r|r(xO˝7kBcbf %Ąe\lpJz@!Uh}qBz~QJ=>5@#d=*T( y]mW2,hP,S"";ul_u"탊EjPSӎ塼Wa c*~\#HBusoy'5MTG:C2?Jp%#FBpèP"Ј-q3;Є0],]25>$x~a vaTЄ<.b|Ay@u2D%``0 :]q5|Omy1':e&۟T]S3${ȭ6*HML0?t[oS!'/[|,DNӫl>|7[=] 9Uk b~0HdUacG#ކ;/B:(1@] *<*;7M7Ss@G2ǘ j}isٻB]$߹nw-t2qre̜a .,`Ua JJ!پkӊߨX< :{PsmB6RrENjDE{vF(@:㮞"-ʡ8w0䛩Dž2 E2W$f 0eiĠ Pr)D)X R~r_NX)!sJvA"@ ^ZoItolRX1JY&CUb}OI~ILmI,v?UJ1ڵxwkƙzB. G93Ӗt̋¿&p_D\&d `,aX#,,^;36LX01g:|?SB{זzEr`eέƪBA"fv: q%F*,95\X]k$y[>ZL/#37Y'.wjpǼ:B?Or==QZ~I^=j~_^]eog endstream endobj 542 0 obj << /Filter /FlateDecode /Length 14989 >> stream x}K^Ir\zk/ ڸh ?ݛ[,A6-j46[+VHмw7e?/cv^?y)m7yMtk\o^/_q-~rl5}N~U/Ή~UjQ^o?"M1nDt9s` QgL? tyF>O߼M߾/&#ctS/e[ko}ۛ\J{!)|/1Nȸ. Sg9/ՅN1zBtqCjň3Kw1S5Ǵ%ik\h.]r%_Dd U}]ppK̶yi,D;P/!m-"U4=/iYBD[> q]Ј9#2&VVbIHB[$7RԞB3oY2va[%e!e+HBbU=(9ew5)#IZ.S7 "2'l}|W~9mXPR&|qLA6#w!O !khrN $c)D.9"MMK420NlnTwa+OBju5 C;8I fu}(b`RV-U مX ITc-|م4r,MH?wKsυr⇅A&"B[qEy\˹S?%.˩X0;i_X0=XP k\ȥNRz־z`'pj0NX!Suu4fcѻ25yŹƲB̲䳋锭\X!lgjydnYic-Z%[`NӨ8P $!74v:*IJXR0E\8V$[cxyAmqh`.C1l!$uS P9ksi)kd(Ɵ=%/l?$(D&K^qy du1TO0Pn[,zI ?8נv✕bZ\S/Ddc@Ш8za\22K\&֖ 73<7HT uNw n!6-q qA-le6 ZwVd-h,HV[ $[70UH1'P [,q-] kkcN؋+a(0W] ILK”@|5Fd0h_"ԺClV0';fX֐@Z1';3Du 6}y$A}[ WLþM7@$(}_ULWj٠C(& b,L;KZ'3& aON9Ӻsa2˙k#M*!fKyl׸(b/rYK7f ⫇p =/հiuA5XEHS/b'aa[pK 3L2fAaǢ7ҡ}54d6ui=bL@&Yzl0ӴLfc_L>~Ldk?vఖbz YVCLdWyd9ste 3 u(GLc6؅Q܄ }t1!P {\5Ŭ !0z?DWomh0`!\a{C"aѥ8/Ve3fè:51J) 4Rc$z1Q!K*u_D窀DWy,h8eLY[!%Ь.S4.d:S=%=N J-d5BUS4\Fd1"iq॥YYI{Bף)J>z޺3Tv.x43A2@p._LNJ{yX\3uٲIͨ8I*D,E5/jUg>ʢ+JR夁>WY k3O}iYg K]`Hͅ i)[Nk:EVl2ಢ_&\Լ)U멳u)p9pNC5]2t9t8ou癱ىb\Qζ\2<45JCPL6}6#Mg3Ҩ 6Ŭ+6\4UyʧCj34KU@bF͗\QY&ӽOq^\19,[-d*rR:k&orAm$rYnSAU. +Is. aRVtvمf\4:˓](]581%&5.TyF&VF420R50٠5 S| Y$2s[)U埁 "ClBϒddY/TY9s^ZM[Si!`$§ؙ94u%]_Hw >UUd"%(>&PܯȮ@)St.h/GަT۴n0ԻÞ9U>eQXw^H>H{pq/}L/BؒFbaB6YSbڌ{oۏH~ȴ.R 8 A|-. q /Y2s D(]nހ@䗁.}_y*~mn(piiSgl DnЇFP7Q'M(z9E;C[t]AU ,9!Y hE]ABĉ蜦6 AOA@Ĺ9LPbƧ#8: R,Uơ.J \撜b.i(e QsI91h@Ѕu@iCNEsH$a[Y91h B94#ę0 աê"9DeĈ9]  dRQqc)t9u Rs*A*KwXhġ8aQ8Ƕf7m0αg:pR " yD8+s+ؾcq yl8NGs+spV:\qg7P݈8"#8I/Iḩ0s922›b97q] 99ր8Ҫ8ξKlj"$1 LƉZ(g#v2<ɐ8ξΑ8I*'9(}EȾy#9;ƹNA+LsㄉAM $0ed d1rV\808X%vK?b8ζs8X8Nݥsc3b.IکqvW D*"3mNS=p.JHnh43i\W"#y7WB wpq]1dahb8 P##$#94R(d9 pt@9+aλٜK!乨sw'jTAɈsN՜K!'#9;Wv.v96M!@$ ε9B8Z*[x&0I6"<΍(LCٓҺBO㗇5MILb/{/jIzDꀮcPY.Uhb5xg@zD%润{*2(2^#BΠ+SFfcf}F'B" Q$_uB dYgDG~)5*7| mAOԮй2dB7SF%2"Nh)1\lŒd~.`~6rOq<#b~R+_!&h@D?3? ds4i4b'WDy՟Y+dO"2S)\!Mr~& j3mLd(gYS O"gާ5i2Bh2 YS\O!fj@DR? %:O!&{B%0\.DLVPɞ0|dj.&'S$\JWVPBO"f8IԷ x$qSAO"$X?!J;.'w\O"LIdO ?''wTO!g'wK-e?iO"I$9Dv挊Iĝm$OOÙG&6^MJ@>&rqsI%Pဈ\P"`&gL]l(?L&MH6O"nDIdn? hg?Dɕ)v&wb.ڦD)o21ISͯ2?w('s=i6 PiIĕI`ٟ -'tf1{[O" ? ,VrHln 'SD$R{('/E )zIDe}qE}1PO"&Z"'>WEbqE"}QIDb'+g~d'S4UI Jq> 9{$E$PΔO"L$rlƹrs\@_R> *'SEl8TɛJ`Z1f|9gg[O"O}mb|1%Tkffũ㓈iVy[\S>LB';Wd DDߓt{1 SφܬG>T<%MsZV$0UD*a3Mjlv|\WD`y5QtO"H,tf{`1ݓ{FؼE$`ΞDƙ`^NJ= tYJIi,g{d6> 3QSKO"IdڨuI4C>\Ki>Tpv[`>Lk&|1)TϖT?D٤|OLlYѯHs)MpCbsHL΋IY .Cb|6\$L㓈ӈ2㓈$-oQIdi$b-/=IihpDlt8KW#L$$t?؞ "$HM7ٓׄ\ W|"{0Tno"{2ٳe׳"s=,f&՗1דH7k3 b's+ly*llm.gJtA0דH?s=[> 2ד/R ȘDL2$׳YW\φkI W4J%%3=g'iJ& s/fz68MSӓلbz6܆ !ӳ6Iӓybz6 MӳƼI'4dz6dz¡ӓMLVlp?d('3ӳ*iӳEiz(Dӳk .hLO" lW#4+6x6?6/*1=. 
*dR h~ndM vz6<z01ZT@ƚQ=lqBlPR=ܪWTkB6BgU5Ib&u[\O"q=mfvz6%Sv %`X,MٞD$lxtlF?5MlO"LN:Zf{6X=v2;픵3ԋlf{1yMl@jg++lib{1+Nltb{6X̹32IAܤB`d\PZ xDlwLl0ҸR=4fډId) \͈L4 g%ǜ>Q=pg 32E$2K,mTr G4Xq=ggLJq=g'qz6:\FZ(gc׳tp=q=`mE[s=c\T/Z[3!g|Kq=*g_m׳a's=ٳNMȞOpIe &i'w3M[L%ѳ!C.'9d`3QTO"~TPb1=gId JyY2_f{6ؤI1yo$۳!Nٳ-$'nEl0z2E$bȞ Q/IdQ;Il0zD$Ґ`]ih$Ґ@0dOfrzl=,j'ɞ _3H$0dφ8IIY]Il:=X** L"{1Md;ir܈4BVhDLݓY @qb{1Ul%5PlO"N=)g.DLܔ0BgܤF=T;I$2t% z{q.=MI U=v)g޼V=)'$E)gߵIKO2( qa^|q)|P)|0W?x6}z3IdqBe(KF@΄O|.D\D㓈F"7峑f&oD\(DL HqygS|1[O"fH3w"\"&(M^IK:܉b|O"LNVBD=㓈jW<:J1ߏ+JN'B9ȚW/ּBF{[ylgw=ls6fq2WF:lgr*ɻdDs)iEM"A5,ͷ| .&ETe9^c)`blNoftdbl6/&j83aHr^JHæE(7a֎l213aaryI(4vϋIۏblόMZ< blΤMf&gSe߰hu1ww"b&`6i=3VM"UIܢ_D+&bIl?(4*&E[/\M'= l4IMA<.KlIHbMI0K ^*g*J]*;$b+KfIk)Ěz":L٤ k}إҖ%MZǧKV&N*GM=lae>~ }2;oY|MId7Ld.jRɫRFl=(&!ݴJ`eb4 kRMW8s5l..".&]>T*ǘId7S,s<VkZlM:tMz k̴U5*QcqDפLV53iQM"LI)}"l LϴG%/)o&7<݉ސD6,;ǠLפt2&BtM"5j-g&ؚJI$tzפxQf9޵1_>f/b56S|IwIkCRMz=bl1r;|s6vm{Q8/.پtQ8KfB]ߏe>Oُe.F's1: QcF'[W%/.#nHK\->w^7s5/Hk C/’I(*e-K\%LFQ1asR~;kr 4˪=+6’kzχd5’YRs钁dJռ.K̿)35@εQ%x|ΈKNH_a(ͼ(v36wb=R %\K@tjZ!)S~ iߋ2.;}1"IG ~`rrFP2ٴ.D*~:70GXO: g|Hy>=T РS-o_|bn?7ThƈŮ$772^ADz߿搏7Mآ߼ ݧF}_ş~7:`*|a~vi|=m46cSChzÉFȓG[4~G`o>W CFcB|J';o_|Mw9y)1汀]w@GG+ԧe=PNKu&yn6GpZWglp_BP:^P;4P!+2@ cB DW޼WsK?Zlp˅,+2JLb'Kړ̆$/D_ãҳ L=)h!DE)ѿa٩Ng8b}u^!7z,Wd-+bS.b.^k|[B׆ܖZ`Iw~F|ۗ`JTo|Wd?}~o?~㹮ۇ_Bұ6nW-ݾ=\}]e<*ݛS3}Geo];~۫U~~MQ2s}y|p+{[ gU_G黻_ H x2`ߜr\~ՁpϘ!~,#6&>S?ڟ_a;CS "JQῈ3\E",tSWP^!~iSz{t}t ay. Zmh[wC llbi|1L=z~\Oz`W؇o}}1V 5BhoiF[>5X|BW45@!]{oc 5bae}]x}H] TWw;+CG;AΫW{~:u,1)ب7?҇ΟwnXhA$1q@yigz4wsصiy6+ݛ72Wd+xw?ZAOO\[q=&ۏ.I\g~Ȏ#8s49NQ}ղdJI~Ս-] G shР~`5#T"D9[n:29hBﻄLCR4o?z!a~4tc~ӷZd8n_A G]I/#A=yKycϩQm+H㲎z$ւ>'ۗXRg~-b9Q[lj^nΪ&nQ"ϻ;z QU' i@c7Oo?D(Ic<˵[Qc:J>2OG_~TX ͻ?|p>xǼ5>R8BiMu'OW.f1Fִ'Qp:sN'U\.O$cOO(kѱنEskӱAi mO:g|f%м'Xō_̃v{<\/Fݫ8` N+l5+0d;ˉw.J` Fǔ>w <ǡw؞H7珇wDL ogZ1f}Wd(%={:otɣB8cNu_Zō Fռ|s#?;ό+Rqt@u #84MGZ"v}NObODji.עOکeosWONkB9=˟~z&M*.WO9['rkeh1?!.A?jANzCz)%{T΅!|^4_1ٯkeInX87Wg\֝5 t$޺oӏ)yXW{?̷_uհshBY]{ߟV{]Bp'Ԕ/% J?T_cRIlz?6>]"1h!lQџ?^dBe!;7~$F)s;nßKS;]t\ZD=Ϗd˲>ް~Ek6hh%#+`,.N}=Z?~}Wp|`1lۯ*o>=&Ǥʿ}ōlmge?4 ? ]|Oh! vWi QiGnq`zIs◷o@P>|cw Ro?[}Oo?ݽyb4/<ܣw4%W~l]u32؄|Iop3cq~ͻ߽'iendstream endobj 543 0 obj << /Filter /FlateDecode /Length 6142 >> stream x\Ks#7޳Fx7xN1w"GhZ]6)ILU ER-ه>tG痉yѵl-[/.7WЂ ?slqe .t8_eZ'jsy}m۱7])aXr)l;ÚoWi.\HL5[l#Zgmoy ?Xf s('~^7.٦VXXۋF˫ _0 9YJ)Zb)M'X(kZ9?Gz->D0hVD>n7'<Ÿ/PGit9~_?EwY]9.Œ,-wi`)~O}mVtdzr`;Xaiən[}3߭=w&?ΚlM!`Fa&C͇|_ |S숧 sȦkMC.%%.f. +Yy7|x3.fq;|mJ{jZ`*,u]_zaFci\2Mئ%S_.-nX5*{˟My:, ik\i?|E r ~Ii0P, ːRu NDR'9 k0:Ɯ4`G+adӕ 6TAz.X}bJOuq׼5 ̡v:&Fȅl0e3l~ϢO$UKɔ_je`j)ls`Χ>λ.?{R }断m5aYuO%"mrb_ogjʼˏ*0,ʗg:Kւ)KMHB/Q]1Cs*pxzEs>\:[$<` 4T}ǥ:6 8aE2u6ט:a9R3qgKͨ|K!%¸2]c`%Kh%co"JTwB<.gOWTq#'fũx N % 1ōNv%Ŷ[=΀Wu ) \hoD Q5XpȜ?QLm6=8Gѽ.Wx<ۺ:hCNr D/9\_F)[m[ u[ pD̵7Sr^x_I|/c:!<\'F(0칌3vi @`" Jgɚ*41mgjr?ܭ1by-O_cZS$L|D+Ͱ$ռ{00j0@fsƼȒ1_&r6 +6DV8\W "ߺJ"ne!I: XmFZBi a<-0ar(߇t@ Jmf)pg.gpOwOdGCdu/:Sd#{kc6y/naqx3bƛ#yCU0e"ZŪf"Z4Wz hPd<Md$ X ,¡g'Z~+Ӟ||x#O|5eLWs:Z`M KI`(aМlv>{),:ЎآLC84a(]bn@'+Wv&UyANWsҸtfz.kV\*pp&ݛZD="Ř`~xe"O+-+8كW%KɁ&2WcD!yU?C!Yv?$vݧ`﯋Bݽ֌6 ,Cvnbb]1`橖 eI찟 Is/;a_ń #ѵ\E/$ Q!ýg)Gf`qҹӟoøB/|)l1ZCWǎfuz0Q'S*%(xO\kP #rMD#sR䋭 읛p;m}ȅHvb8ͅY*1vf&xC}gqZV9/f0@{>-(:T,g/!aAѦ">W)K"¢|qo>{DWy5+>ʒ|l.f} eMꦷ,YntgQ2 y@oسnRDȜ_׷sg#߄!]_$t&>}}3]e(Yy^khLܜrhK  çe&SYjSn8)kվu#Q:QdcƘW']o) 7ǢT,9ֿ {*)j*+! 
g'2Ϳߌj kDqvg+Gm*Q񗂼 ru霡 fR0&;{Ԓh'[8*/X^=N/;<z "y^+41aT 7R/:C2d܌ ~O٤*1#鑌.S_}oGyLjg>Sq>:TpO׋xҙ]ܜ{ť[xQٌE/;{bOpt+*St`7z\yUn 3Uk L&J8hy7_A/;]\RB&TG.8Jv-"i8dθA0w͍:+Ɔq^[ @*"BΩ2@XT>zJ)Ͼ?b#Q3Jmq*ȁWWktxxәYfjpW<4]4@c!Xc )I,m6@~LԖxÄSy <$@7ڏa{TN9MIM6rSDj1 r^97C~C`)AXi1CևDQN|<K8,Sc4&rw 'קY2xLwY$Fzu2u>V{׾eAĨ_gn(bO7Ur^X:JKفܑ>q+::a8&0,p]8т3 ӥsUa7d lҖHn>Y>ne,E!j7 M`9Hp}%nO-fN7dHBNN*Zhsw7K&ow%s׃+C>R缱2ˊSXkz6#/*x5vHo}NRQNxHݹ`i)/rkS>vgvn.GQ%@7֟"LE)C^úIX*|PRQ9Ffqfy8&SGaiiyH\P{5~ߋ]>fTvo࿑xY& aUf-CLE)d2YXP%g$w]02DT-'Jk2_:d7A dX4ژ%pR-: :. ,>YZS#| m#T,./]_ &ix]Cץ_{}䩟0끡YyŨ"'״vttcwvh0월1۷'tRәq%*/$_^ H:B "bPuő ~"1fH[+&f-r"O 0d^#mBR#'nxzuWYWQLp{vF-CY %>WPwViUPc{G}5(]RH:F^><5gPn4TΆ\S[N~Ȁ5Noqgl j?#BaB&R9(sꠌH7rҍW|ՉTA px:XPǮ%=>MQa)A] jLP2^8F Y?(^eF8Ғ( 6T~/* Xۜ> stream xcd`ab`dd N+ JM/I,f!Cxnn}S1<-9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C52000&300012|_5C/0whJnIЫOol~}3.,a-[7o|3}x|m|e ~`\XBy8y<<{{{$/.w{endstream endobj 545 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2876 >> stream xV TǪ3yu9@ZUE-jYCbق? BPZJZ5>kyھW~9oY3g߽"f!Un#4x uxN櫞"1rx &gP%NX$Zi:6%^(1MxϔmLy|B\ IGUmUFԑe+-Zbo]yc5a~Q1+Musc*AN8b1XA$ ‹X@xo {4‡XBM,%B#1p"hB&B]d> %>e7]#mlh򂤘rN 3t(eݷwtsXPpsX 4;ȏOh14ǡ46v|yΡ"9IQfvJ!HS8YGNVFkPrxd3Yfp/ltd~`]eQW~XeC3C֦Dt>Fv.ͧV0, r~y$! IJӛ&Y8y.E$d Ŷn ;uO~ NnM///_yTV8 # >)'ûX}es3g0&~˽Yة+? ʀj*8$^O5h4ybźDX_ŋb_Q`{n8Sp2;bv&$4Uҏn٩z?$+lԚrQ ~ RsT^ eHεyn[5Xj#@KM-U.IDɬ7jJ"P|Ew/9RXC!+LF["93Z+z4ϑ=Ah>'Y4!0mНð_+('Kc1ؓ{@:&ǗFF5#5Y^R+Jt+ ^12PuM 7ǕY"kw̡Mܾ56}*uCt,a_6wx~/iu;ڂPJUdsIlNb5*86a?U 1UCko3$)L6!M| 0̑8cs4rDyZa?xn$-hb-9Z{ML+rP"-lP$>cVBeBrܭ#vjcy +QAUo&!|Y3Ҵd$oc h:qiV_Pթ?ţBDΘxM!k;P5`P"DͩQڟ'݀4\5KGi#MM Fߝi|#B2W*g!x=4+ OJ'DnQzZFy0:T #<;st`'=*LR?d{ o n\7k€/o|ǫk!߰<&ԼGG 3lF4z]M '/%ryˀ|IDEu9^W+4+%f}rὤo1| @ЌI$+Q P> v2csA-71x/Yt|y>//.AF_ۻ.~Si J\Mɪшr_Qeeơ8Z]R~cRble(c_V~T,6f5"MY:z_i~nW\Uv&"&pÆի7ut9Ӷ!L )dϓ@U9K,]Q=\]=?ip#k/STfQ@TL-7J>jveղ[U5; (?gt̽9CYqP@ 5QtW6A~(\A iG\._l Y.Ncg)DFNLTk^p?hF8ݵ뛟 WyFW8Jt iuiy:ּ.&>Mys~B`| }tŷk ͏g0}R͕wpѬ}n5A=E@jP;)I<=ౙSX[xbu=t&x`7[ׅ*&ӀJ8p A2k& w2RYFd>? -]/,//`yys~ЂcjKS3PX--*/^{ʐm yQzku+?dzͱ,֐k G]FO(wXZΞ=š2KYwrN;Mkp&A`{:fݒi3ٳv6+C}K^.?Pw &`ga~k-endstream endobj 546 0 obj << /Filter /FlateDecode /Length 217 >> stream x]=n0 FwB7e .ɒ!AL" 3!C'$@~t<8ZƂYo11cϦ:}}e܀߱޹~e(L#>X|z"0%W/}Z:݀ZnAaDw,A@auV4ڈ"({]PN0mxi4 "& 5OY,c&uendstream endobj 547 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2057 >> stream xuU}XS@ѭj t$:aS@EE8B. S"|7 _Tֵ>U+ ˦V{6v֭+p$E叛{?^xz0""#c We4Ii䤟ϟYMuΖ~$C.~ J<>2eS$ Rs?d-fg23y2)5C7EQZ62Q1*G+Tm~:hss:Τ^719#-QMW9O>9jI8X>u>vutԪQ՛7Do E\*51C0]Fi @0Ls0mB>6U4"8kkXXM7859>}a̭晪}gԸO&%4cQFͥkƫ'0 b/X0e;}f2H@DdAVTFy<* U*TjP-C5&Ԍ2di{h %EciYlS5kחrӓzԟdxB#/}?esޡV,KFޱTya$7mDr\/)*̦B]T'Fp =U@ Xu-+w#&㤰8X 7&HXA*- i) xv0xz猅b"$$ m[;v (a #Q*Z1U-'[A6 Ǫov*Q&n3i=3/ș4ғp^rMh!_+GGx>]Ɏ.)[1|7b 1^Oʤ>**zI.=X"ݗ(6Q+9@`))BXE|mx0VtI8߹j@Wurr`6+"/'$B N# b !9G3+ E x0`=NDTy {XB\:"}QaXW99^K`^֭}1KV9;x'7  ~zyPg||̽2vD+In$UW=Oz7WCp,\ilץ?6ZcirN'~ř ڿ\ f&bnR%HK o; =TdтZs aC6{1cgjn};p͢جtHCR@5Xe~/wq5q~1n<5|)e\(,QW,@ Мo0;7ʴ4lbIsZ؜îHL7OhbXLs/Vל;U{ĝ(2FtmctR:`濰endstream endobj 548 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 467 >> stream xcd`ab`ddM,M)6 JM/I,If!C<<,~ }/=[1<9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C5 U4$(8;(E5|}a^}Kku.)m|1|t &4NwBO/bz]KgWsoÙU{gΩQYU]W.7o;{Z%*K˦j[ӿ\t^I=}WyרZ5nY3t7= zմ9 %g̛Pq ?oZȶk?s=g7 ==3zz&L?guO}209eendstream endobj 549 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 708 >> stream xmMLq#^&%Y!WpV[Cӹ/ *$"`'P4D i\|kmyusmޞ=nF֥/qAs2¤~`Rh55-Z'KCq%䫹hz<FoegX)i @J5Wo5jq#+Tw$5 zKsPTD~,([bZB[GZ9QQWZF/-()/ PhA$ >.ƾI> |0'egΞx{Y1,Ӓl`imتokp඀&"( ve{ۛWpz7\d>A7E}VuόQ|YޔGAtǗ#qⓏ揌0 lי]{c&Ig|CA|t̶re֮!{(> stream xXyTSw!iզ%m^jθv9nj. 
, š}$ QvTPkݧֶOjN;N_Ng]zyZv:}8 >kxĔܰ1\tµy/cOKGc!AS?%윅f"(mOKԴeKp!<4AbQ <7#=4>;)tݢB7Є̔PqJhdrTֈw#BWm`Ǻ3AO-^)yG//HؠHX95-="#3k!Es0b3,GDVbXAD+D XD !%b-x@l$69x?;DN>HNz\C^V:􁇲* :#'80#q3?50GEm?\?uBJcf{47% *N[)΋+3J ZojzѹJY b{)H6E%j(Piv M@VTB-G&!.@V`0;薍 7ⳗ3B efcRKFY^;>y<w rTҍt3>]f.+0J\`9|Zu+OGD]q7TC5iE lT0k;( */Yj~`n47#>1 F3h[q\`Fdª(i靗P@). we~?*e "#ce\"Ժ2NG_".K8 Xf#kv}VN 4Hr i/r }n z// H$Ŧw%oOԧ%41 ."=G>h,)6hZ:1NuХ yv ?gEpkU"ds ;j0eTKں\PܸzSE F{ “7($CSiÈ Fٗ6'{4z,3i:,%jVM_AÁ j]lZ(/EK @2- |Yp7+z9j?14,%m3_._yx%s:8xF97>n}T#Bg,+$rܣ}Vʝt@=Wk;*O fvѩa4&-`gUiAڢI)PD*iQ%{o;K9xDoH19jQzuOts|;Zn(wT)2I@t96Y{aك|kɔJK &31"D*oTˢҢ2bSei q}ꈅ9OT+=rmiKTϞnWMй+M 5lVkm+G%?}/9~*8xvHKJNWW`|צܳaGn~롂g~zbpT~q<"吭ySŤqW~ƼkvL"ʬP;̶},N}`©{YV zd}o)+AQN9cLCXq;q7B4P)g,4# 9:qH&:SEo :tl&wu\ P\!ʨ):m%hM=}_}~ PG:4c.R%r(]Wre¢|6LXvoj]܅,.V9ܜիc qSiZUИQ+(hlGXki_U>k W1͜U{PI|[.J^EI<[xOU^$<ҷ22תsНЬp\nռᕟO`NFva(*dP2l/|'NV-dSG=l}rͩ"[DQzAn-7QYzJ}}Zq:˺*?z>9O2>ᣏ {| rk) p?kѧea&u&2lg*@ʔum1!yy=I]yo 8ÛVX ҵ: (kѾ&tчoGvgkχR\/gzgG:1WzƹR}BBEWP/0wX,%Cn n 7.mgB v$+(@哶`4GrsymSf&*NN/x*uM\ZS4+{Vvp^g[AC~XM=?|E&$\~~StR &&7o솱lYVnT0kևO,4{tg, RYV;JF/;_'Ӓx7_?aZ`ieP_4P jіY۟&:;MWʳْNqW\C1PY>C*!\O쏮y+Y lʄQ4Ɯ=QQs8U(5TlXGoK4._}P%3nȫ(?+KXr.ycU_\&qƩWEj;d64xj\]m Sϴ$g\h&E3F&Ű , /},l+/7rký}XJƔHx"|wo޽mWFQh. (8RkDs 衄\u8oz`duxwpiFsztHFCQ]S{CcLjEڽ}sq<ۏGDFRK;};` dF+Tؽvjm,oh(oNiw"9{GcQ:J'}@+ }>G-pE)M:]wVCp\-`u[**d~!O7$#p2^8y㳿ȟluZ:V~n/uCsv^%RYk[y#!{ދRYTFV}ޙ"\ BބKmhvps[Ds?w9Q(!RFe5,[6՝{0zGC"` moaA v;X'W*eD߅Iwq=^Mؕ.km?DM)A.n86KK9T1@endstream endobj 551 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 420 >> stream xcd`ab`ddM,M) JM/I,If!CwϘM<<,~ }/=S1<9(3=DXWHZ*$U*8)x%&ggg*$(x)34R3sBR#B]܃C5;U4'841)K!E5|ek~63~w/3 J+WwW,\н`fz\0%~>eꄩgHΪ %ݍۧ8{ČnYOϲFҲ3` Lq ?oZȶks=g7޾i='NY;wƴ zxx"endstream endobj 552 0 obj << /Filter /FlateDecode /Length 27090 >> stream x]&GrxϿ hf_.{#2 ] {+f|R#^%6##ꩧ|X0P|22###Ή7ǻ́7/97ןg'_}|?qآwX盟gz|Sj;Ky3xjo>_~^JS;%)zkwmIR${J$-$zHҧ$v|nfJ񮸙ȇqW ?%*I9jXkֻ.Ap 8nf$];h89 O tV;&>%oGl ?R)O W滢GG0uNM+'qJR0Lu4fKVT*]=%iEAVt;V=5~}%9"%}Jb{_٩]3n'[nI&wՒPm<޴=N6SޝyVjT$L+K~V^TM_ldX <}=!IUñTy{KleJMKҟS[1$i[2;˒'JwL`@PM.oj֖ V@JĂݺS<>Go[I׵H"5VoĭWw4ZG?'ք&J`럹$Q}+ jׄUZu W?g0$u}k[4@C%(Ҫ{ִ@CxcCvLdU[i zfbom[QnP V>R*MC؟# |"J$V@^P7Q?ZvuUL:z ƽOc3 FI`PַCJkqoQrJAPX4X[pzC,X~dh{{ʷ‚{mrЪ:K:( AJhfQ4s#&"%zzW{9rOmZ[7fk/@{I4~/E?i_'r@ x/p zy8gtE'aB hWypJcA6F^.]W݁+xIT*wc7Z|j6`.X%?cI>v?baL9w?8"`;{(۾c˪[@[a%Ll`9uNc;ұ3FtlsqʛÒFU:-Q-YmV*ڻZe2݂O/o|XB%izcOK%ŷ+\,T[霅rY7$/t2 NZqth5J.V5:vҬ]c\W{lg gCOl=>?5]Ms3ev6^}$O{Q%>w7`bcx'r.dhU( ͤSsvz;iLo oPCV鬚GaŎ4 wi ˑ3P.)ld >plE&I*_n 5fMɤk:&SǨpty='=}CɠfրA3}1zȟzI"tƏvZs 3ٺ(V޽VB}JY[NkPklij JtV됃Oam%4$:wI Y VB0d!о_'=ڲ! .WCjW?SrؒlX$-|) $(/?!4,!Ъ KEzm-d9$]U{7[ @I-Sv $ĻSMHdcIgtPoK',wtܴDQJcCЃD+},tmpFSK8xoH˼JkBKvבGΒDz`M.LG/٭aɚ@p!m^2W֢EBgo]:DӮ9VRw-! Z O2 џ' UY8ے~#3[[?$Œ);I}%tД&zٶ%lw8J%KFV9b,cQɹt<5p /-O }^I^=ۊzrEQ ._}8ܿ:`l߾J)?YyנMun XΦ \VASЂ<YNf zFw%V٦},gW%bOv]H%W[dg~:G/8C~h7vOyEò[ϞT9f/m{q9`??|nS<U ,=sXayT/(vS;.ps{+%?$U#s3$UEXT7?؟]Em ft :{ww+Ө-aJet O^/'J,C2 MaX2]5WT>:a"J^ZQz=o/8B'TШe;RñeIQ:UVٺQMaKQs\pTsKtG[4.8zrmI [#Fm,}}BAB 8Ealaڥ YM8.sR(z͓foT@J"͡-QZ]:\ose|(^7X8 WCDS?")WZ#{g3^Kr:.Iu}\Gsȏ- ލYܒػ[{wxpz6AݜDPLlBNHv!F؁c=4A*a1!ё]4h~jlR-.EXL.4;FϢ02cB_ʡ8^Lϙ't 2՛*0&$1Wbjj͐;Qca16a1!#]P̞-8mǵ{Rn(+_bb.AP5rP̕בäFb.od OC1WS#ӵrDC1Wa,\XLw(bC-9iӶPј}.<3Fck?L1g:ƜGp]FcΑ,1g\9#2sPUna04줴`̙WH^RX n$"~9sL$f:D~Hi$0NHL;$~*9M2~SʧZ"9,*#C[1s?ZS&3}39FSXLT V  \1Gi[L:Zn@ӶImHѭmZ3s4kkLOQkb:SZ3si8Lt!a)SHl@!VJL[s0G0̑*@Ւg2 ڛ|DM.uI\yD!B-с I0 3QTX XlHK 1zi ]}Mx[0;q b. 
oyǃ)a`R "5<̓gP5a`AW(8EɋgY"̀3$E8tOS.Li@ ft1pwN1}@}R¬o.J麰>8,f(:t+V@wܻ"$Ũ)0 0:EAfN!PrI%̜feG\>~DHL|x `Ju1k>KŠS`]ēKx\n*=jLq!>K#IB '<,o#)@М@(GN,|xz{H7Kwn OοikϒtT?KtgIXٞ68M ^؜fbRbHeO\c((&AѦ>%J4$)1)' YInğ'#ĚAJh*RbV>$=GIB[IB %9"aNwْ 0250.#P"1  ^""-'$#&rHub Lf;"4)&ڪIB)&Irb$Z :H@0_,|tA$ߒ ~4%&:~ .]@ tڥNFf#wFL0Bgpf(٤*/&5#C^S]ARwP͂1/&MFO@HR4Z hfL_I M1&]%f_"1&k>c>fsgȋ B*M/&s>ς!!y1Y0tF ɋs'Ŵ[N.]zb8V::;/&6G@y1W;(^L :(^L ]w^L y|fuz{}b|H1A׵i2 ]זHrI1ѥ^/VLw&{)6IZŬ6X1׵ b"D̴fHȬ I x?bqꙈ#ACp\ b"ZLomh1).D-;,ZLqLq׋|5s9l^LrS^Lb"Kzx1W5D$ɴ@t$Jh1']$-&"I7Y1MZLD[HZLD>h1[rvb".ji190t"ɮ.: !mVLDlL-VL!n Y1A$S#!O i1Dք*g'ZLD $1CZ>fn ѧICZLDh1h1Hsc".$1F̘-HdbPbޑx&21&2y CY9"D\/c"⢽CI܉17Hb %:M Mzdމ1JLVyp/"ċ I/]hq͋y HF3 )^ fmCAPb 1EBC.Pbƈ1X@DArexMII@JJfI@[C*jX4osh0!Qj&ĂDz,J̿= `8ИY0`%@I j̒ ̈́IP2%zE;n< x0\tH}` - ܌x0܉a 7%&؈m2y0TfCI`<̗*oCI_POr# ui`P.WhQbx0ԛ榈Cj&x0T杈C-+<*vcŃ0<y04L L(0DoW@5<}>fK`xiLFi ,pm&U-Y"Dy6 n΂Hc &ҥ$O,̂ bKbe~h0`$L ix0tgC' |x0 yx0ză?`z`&&Ht;jaŃ KLr[\> TDz[_E Dzm%B[o." &fh;KD:^F u`0o jӘă A[4 ̃ Vz0hLx0A*@/C< 3F RKNL&!i05z`l#&֢h0=/`z$E/Ld &Z$0blH0Ѻɂa(E`m9MKxد )&C[l-)0)%ї:s0haPqÒ!TgM #c%Pa2P&48B04Qvb `(0 z\:KWe6gU1#8>21A&PR N`3}LS%8[ kf8yg3g5^bE$44nk'L A=5Ǒ L|xͬɔb ŭx͘ަ$_;'Pc|¢ܰn)t)=dIC K5lf!e2 ,s0$YiHyѝyXڟ%͜L0U'Iv<ײfM ̇Qx! xyyl ILЙ[\Yr^ ΐ痟ҷGcR?ޏʞ9oSp㷞~֧#TV:i3![:4$__"eok??v6˃O{aeLXo^D37 jghUO-O50Ex#jce\&?nG'X~sIF*qPʴu=CCB&M,z1GOܽz5pkO=SzoGDnmDO)=߻y=k?OkKO4Y[{d}Rm{뱬d(ԓE27T'!pKӊN_nnK󖑮NZKM=j\*徫@r߲UyOWneb=; lwSy 4ghN%yghvlt^1*o?JЯ4lͩ ++6rrræQMR^Ԋٜ:hDeL=W-+fN=2k~u;Cs(~5%SQ%\^Y A)$@u=U,i„T5PVXl:WnͶa- u (w8?---Qњ*:*ܙ,^ryWE) K¹f|ooR\r+ʗT͜ r:>*Yd釥)#چ:i4FͤqS (r2d `͏ ݧgU@Q@K;U<ҔyJ;ͅvkK*L8ͫvSi%C͓a:C{91L;aKq=ӕ _MbXN|=î5τֶW \NmF-*Δ]}9ZkZK>T8S5&mlM[y$X8CEk[F+Dw3J|a @WG [˩DUk1VCY߽j֐DwVQ\YV.VBdݥ5ąiл,'& @v@V?%8[@Į 7sk; #2U` '=`7+'VY9t `7pR[RT9t `7qܖΪƒp-PqحgWU`&rM/vWf #*\ӓ_D0b )*f>R}  LOTUbKu>?Z)PвCujX0+'vIT[v9mgY9RCʙAxTipd\Zjl` X]D%FRvy0Ħh[^e@iKKXkt* bv%7`nx\ i⢲/ zj `iTůfm#$ukt꣺5%W$oYWV c3"]ql3 2e#mQ#q ls%R|,lVmFW9mӆ!͢fI~Xƙ"ǩZ^$la̍ kO͂Gtck咶cg4kX'u9Tf*`jXIjG2G0@+Yz7/ \r6Pf!nusIy vU)P70̻q0D1@/Aqnt yIpáBֳ^9@Ӓ"78htqCnnֲfCU`y`Ъ f5˭`7Y/ɨ䵲lA<ްV[}QU[H<; K7KXf,#*Nj^ç1|)Oc$|)9\9^߲T Lq|3UYJ$gr%*$僻;(%*V̌!YB.Ht>$ÒE[x=5٧DwibI[L-Z哒q궐6q>$ESjfM*0!™ [S>y4Wj`C;g$t0r{E. dX@%Z8DDZ/s:x=%q3--,#*Z t:pKI+p!m%e,gDGztI? FB p۵hӸaonN!7vPxJ4ǂrˬB?Y3zB& R.rFA3捻XjMUiF2PfjCަ4#ӐVݩU]z? +V!)*SÜCJP  lQ!x?d $mCJȒu(|ţ37HȽ OYiZ᳈>Zy)EoGM`cqH0dm*Qj~*䝦R-C5 `m݆iS1M+/3mU@iHmyj"4W iմ wF^§+ڴjS2<H+T,4JڻSX^ES+W|ȺRf^64JSզ:3<ę4Nn$tM+ TYidMu+Pmȥ 6.7@9#C|׊TGP-&qtx._8_aTѓI_ZKj7HTYV"a]ٯcuZ0cKjNnE 6GMic2ZRp|)a”5m,[sr漂)鲬3;xv;\j=$ %@HSjqU|ռR\7RtǕժTftǕڟX-R5rXqҡnbF6`_i#4c,鮷D=rҿGW}Ro&,3G7cPg^sU^ NdC6gtD/ r:2r:#,s#;@ih#4c#}9Ai[ҖGiY¹H*rN3.ݾ%*U _ֲd1vNW:4ȫN̒P4*RNeVk(m#4C8 \w෌w2N3pG1? pe )ȕI< LB;O2r4s9Ao;O1b<\L13Kl3wL5y1+uiNxL}9#Jw:KcfӄcNCOʄcN_3N9k@+9]p|XLUX̙T ս.?$nnL ƜU?Ni)r1˦u}م+H9l*7CCϨԏiLP`LJېu`1јNA!0K5ŔWc1GU4LO;i@*7Kjɤ9Ŵ*pAZFbT=*1N#13LVX" .#1L (23/ ,0\Heyj9Mpr3n43 @eNZL-9їאtm]U|ۅZLӀTN& H◴K* ^0y *=lE]!t[LNkyôC41[jx!3hbEEZ.FZ 2PZS}BcBt1I&ї݅{g1mTԪciHE=܌N;x i\̓U%ݥǍi*7uZCHE5cQ( -tOТ|痪_4y [R`!vdl,s6T32!Ǵs6ZKYnXanc heqs wڒeQӀ+3ͩ Os##Pc`^Y#RH<$݃0sK#h/**ʓZwUZDsZZyIhAkςbzQifEDHJJd=INl:]ߓDHJ(M43Wv!i+P%~,|-VMIMiM'I1TY> k,ֳN.&"D|$H&"% cY.!NsQ"S$.w2ӓ#4pfd\19GD't3 d1a %yt^,0ɺ*"Pl6܏;ᅒaf!S6IxaTWN^l/LQo(/c´a:!:. \ !k cDࢼ0w1VrkBɼs^X^X9@gI/7 I/IpT$oB%ՃX]Ei0CzkEפc0/Al0@u(TY+<^X4%V&-2e&Z]w R,U%i’)]X4#V ]Xz#%*Js1LwasI7݅@iYE׋킺!Ӧׇ.,?2t(EwaaR3 C>JR\I/ )+$4b˝w x1& 3^"m\ !hTֲq#;n&+d\XW `&,:%|kn\3Ifj 0ؤz4--csYp|#Qa]La"\TmT:2:pE~q.DA$°Ȓ|SaY?4aL`HG%ikErLa(@L`dפiӘ  RT!kt1&IRZXņ Mo0դ!8<:_#G!. 
kyNŅ & 2_fDt;Z)Xbce&2-0A$ 2*I+Xt`"_T!%˲;ky%g&E2L!&aXGN|``͢aZJBaI}oRLfBA)="X'VL+Y1(է?* _O81 hޕH1E)&44H4QE|(tJɒs_ psb_@qsbXTDܜ&ݜp,&,M`ۅ͉ȋڑ')&t*J6*EjMsbbĉarDW5׋0iB?bV3_bXb&m3ןI1)&kI1,BzX"&CV k7+&wVLm.}'Z&ŰتLkb",Gz,͊ MyR`=bwSLzԋfpS,`nb"eV kaV zqfbXOWJѴ+yi1›,Υ{94`bŰޯ{<\dpJh1+-Z K{!qyMac3Daeҋ8/&ޙZ>_͋a$2KEh1\-QYZ@Ihq\U*h1D=MQZ^V6AVNISjU{x1Lc`UO3eT_t5/°=yY05V JI .1!f k23 5=5ϗ1,jj @j :MauvsDaIgX&.1,o1,-oj %f'fC\ư)P9 \D\IMd䢪4rwj %q1c(IW&R3c(i&PS1;3&pq;"PRJCIxCKVSz;}c$p3ÃȻ1w1Dy@q~Iܘ{čAܘ0ɍτ17wo)'7UčD@ 7 zqcFL|Q4g$,J3J "p?c(1An@M]GǐUscY0p? jn %&Tk-'Jj %C]j0w*IU;16Uy9 R Sff UFbPrƤz͒3YCI1:~܉1<LйƳl#c(0!I0CcxPe4$KFf(Jb ORaMi+ؖ1<).r6+G`P)yyW3eV%^LD42<9 sgT6N"G;6%^ 0[9H$D$(1EQ[\21h3`^Nۃ5iVbЬdC*chzX"Ђ5HĘ\chNbL+14%]y$%ZN|f`ݛ O&oڝ'UahIcb 1j1 fLnd1 ǝËOD2\"b /PK="D>jZcx3aA^Ëaܙ1A%dA=ђ!f ΌuW`Gpk~d6nrc%qcx?&0~;'ܘ@MIW'n 1މt$:&L5&;}o.Wzc@Z,hDO@0ϥiu;^cYf[PKncADz2Mc(#9[(~ =ZɘQL{ Jw6K&d3I0 . N[!Czh 2dYL %7z rCr0m,Ɏ];rg|y N0}Gn͠=iPLNoK6 I ۴MYL+c3tcn ϶hJm1#z &COc7υwQ̎O_=ͱHƖ1 b0);&Ñ4; Ď yAMfYD4Bovc[Kbl5_0ShLT=#ldPcw2MD3]aQ?Mo:n~ ifXf}:FS&vL#v z$øƔM ;qghDha7ַ49&8Ea;%axJ̼IA<9c?Mcvsm3Uc7/A$f=`Nuu0+C0͋lr %:M!⼑cPYp$1D_h3E4l9KIwڋbl "^BmDzsę fA͋H23Ȑ8'E7#GΒ@6"Ũ8>B61fc6{AWCiAv{oxj8);3 d$ ^l^~=Fm܄8xԩ9<ʳdx;A_D͋ق5c}6ֳD͋?SAXRwwbj _$z+#W]$w~D3)9k45!ye$z;b~K//<ΐBB|i2\)t?cL'on,ug]A3\%>x\y}! "ݴyHnO?zq[~(uN.@y-F|ؔW.@Ñç*;'@}[v&>yAeؽ&XǛ,籭?g?+7J}$6+8P[%!㡔'Yr{XwwfEԙf9/^8^Vk-mǹ25lCX4_(j׏}c_7oϯ%SgRִt*C9a98]̵㧞"i3au#^c%jV>*Ӓ|~[3yCEzomSg)*ܜd{?R_٦9yǾ~eρ wX~~t 4:߻o~?_Ƕ_4=mf?}^z{k]w*,їo ~[x`A瞔jeܿoػ> ~|ޯ`1[m֗־d}z~4y[Wf,49ϽWcbٿG~ GQmz6#S!$Uֻjcf{h.yZ OKx>6[]/]隟D1e՗MM:,H}uT-lo?WhD;5*64pGnknw?7 zX G?J b/}vڿH[FT.ןM{2ՠW\'X_޾mJ}`wOg?[[^~@=m`[.jo^G_fkԱ@}GkfγBʾ]|nj~$$X z,!S6ק! p# 6ݧ^L+pRgO[I?Sd&|b_-@D^zjmL=N%'a=WTCZ^ja}p"3gv>}GO 'j/'?0Rc]虿?4?GFPzMdK59>7;u}Jhp_QV8>?>uf˔Imϻ읽MS }Dv}Q@<_=.~gW{|m8 _fKo+kox;цt ASu< w-ׁ}RhyrLVAx۔[= jn?j ZF,D`%;+?8?:LU?0TԿM>xV+vp)xzʫGg?4 lȭ?٩+Mc! sOѷ`,̿y 1*şx'5xsD1BFh}y\2Sehc>O+`kh!<=/⾚L~C ]Y.ݘy_TȞ|ݗw?^O ܿ_u{맃PR{f?]+=~ |8]xM?q yw[y"BQpg\A_ W`,EYBA4i}ءP O^!9gI!Gt3O1-oOUgّ3Ywi;٩U0ƶ%og鉬lOV.2|ف2wo7^)y@Gǻf~}Sʌ?׼%Gqg Ӂ?wi߹/}ŧ| "f?eb'wO|Ҩ(gvynطzY'p_CiaЫVӡS}L_9hϱ'LJ&}p9?w'&|fxv3c?m1i߂\y꿵j7h'h_zXa_Pyc"O}Po{1Z-Zvaeg{? ?>d΃^˗Q/?p+,;iB>VNl?O)5ۤ@@߾}ڞ[|l]}dZO[?W{*sw>axZ__YW= /퇮*Ķ[~TYtL=6?'Rk.O\pm{~2_~$aٻyE9d^E AskEg/78$z>?Z| -h@p}Ù.6^Y9?o OϝFMaC0W5{(7K(=>=DSm?OA_~OOݽ2d`@so8X')%s/Ov{ء=>xA2V?|󶾛Ǫ3U6N{f&?꫿3!3=7F|Xy_m=[9!Lٟ񿵗?M\ t0sT0ln+$()˷.S>6b`ҝ,2#q A. V() VN4!hrI{AP)nzn5E.oQc8q ej^ssJfèxۛ:΃^^~)~[%t]cendstream endobj 553 0 obj << /Filter /FlateDecode /Length 2450 >> stream xK{nA!k2HmdWe-67rF:5RƊoUI ٷηoggԝß~BxSf9R~FY\ )WtJ,ܸZ^緋$״d`ppAҷzh>mzQp.J9$`o vQ0SVTYJ@.>j!/ kd%\ dY)/|@RE~`N0[OKUHKT)"AKFKʴQCn3yeix~]ad 4*5jdnmU(#w E@(LMQEP~x`mJwx"2?$CѿuR&sosvW0oT\02AuM?tu"uiR$yC1ݡBTtq>;k}~Fq+@&xlzwN张o]#XK /N=Q#kip2tdn8H8=#Cwv  pC'.m6BkȖGo%4$K%j|a SSkxԈy쏝޸w# XrwBb1᲼ D :rM fI+x٢L`!?$"*&no^EC~B?b!n:wO&2}3o 2Qy}zn}{isev]4L( 2c,ɁR3e mL!("ؔ nߴUiɡ']L/@c@J9ȡ֝;sc=?;nӤpEjcs^M+5BOV9' 1Ta(uQ-EEyle> Sj45>rŘF rbn_'>ԻtS<>xJ[E ?O *cԤD_.(}h O) eDx5!%|B րH9J6! C2H3y?\-U"AΙZcVNVFJΖy4i196c ko뤘m=) hJ ʘvBasl> $bL2:A#JI1a{JJBnLYp2i>2CB Iʃ" 99佚*-ȯaD6,p v2:{>p׻O&nuwVk?7 t@K/Mn\g`,J{vP6U xᷩ׭KpD ZA>gc\!YhFTm I dc! 
]45$W[mhiM bnNrc!'%?}p/4 CwMRs|0t7}=n>c}G2 ٝbmI}덋|}fUhas # Cȳ)K->_c =-v1 w t?ĂZ.h#":Vȯjh,\Ώ5hspi5!ۣyxwO > fp,NWH6y,1AwdPIs)!`-JT-W n|XSTU^NŔ?J 0݇DM~o?7Kޥ1Z" .GbBv;ZϭBhqd:~a^?h<3>K匛RK=H\@s_@%`4p"o"jS{O  esY ֑,ƵR뒝{h5ȇ~gyt`3\ET`=_s Sq:)riZ1N 8% 4dهX%Pl2G2/T?.<cNL >}m f5I'BAQ*,+BP&ыTSJܤ"w;+ -&f,zu#!jQCi qvgM a ?p3#ʳ$wBBz9#!Дz$>f(| ==6ay5#|6Y,z !'C׺03wϳ*cendstream endobj 554 0 obj << /Filter /FlateDecode /Length 17639 >> stream x}[&q~ n23f,A,P--ᲥeO\r%SȌ[FvI7#|/~Ex_4']6͗B鍊\%|kRon.˖|6o=])vzC{'/6ϻ/(_nn|)vrKy|n.X7[awK7H/Ԣ|%_z0/ )Di瀉b}PgJ嫝/-ک.})咢EұJ4͢K'a0Dȗ_D7Q*pKR0/=phkq}I}I^JLajj4[^S~̋ՠhqvLɗDIZA,"K[yDy2/)z/i.5zK=a"hZ.-:Z.E辋(MSs~-}`ӓnj5aqG*bA((q41/cE钛(Z~[iBxƪϋtˈд4[ܜ1˺822(ҽmY/KJuQrM!PL%J2R:J5Q y J (c?zS !HqQ>$JQ;9a{R5BTERb<0zRf+WP:%cHd'(Y픍Ŵ>.ꭒy@ x%rW{bH"C]M"PbK"PծlR$뫋Eю!4:j"4Q$*^D.S%6@ZZotQr7+9\Ri.)v{u \C5}śY"3ݾ=kuxj)=qahAK!? jL*0RT[o\ٝc8; -6l^R*9wuPZ >`V w=ҭ{uR:wAڽS (Bϼ]3I~N @FNiO,N)qثzɢTګpM[|R)ڷaLxm@cr梸!O#p~N?X)ilK  _g[-4,+sueB{)m>, }gwhطq0 1s hݥt9];ڭX>^rCwuDԵ.ߜOug| DeiS|%{K oYIauH'ҝ7~|0־rШv18ѱ#ҸNq|0]%|o0?"HN(Щ:d :v.iK97tKIwgOt"'Ηo \v 0v'7 gnnbWɹ>ZqRB]w|cJKؘEشTRBoCc7t U}Kv4,Q7.|nnl0yIvJ Q'-ĖqFй3 ξ_78tѹ+):fW(@IǍ> :Ȳ0N<㫐z`Rj$>C09ح4뜗=]Ai4r23p2 v\#Fw4̍] <̐*OR{0Oqx/v,Lςx&ۂ)60L4"'a rcDv!oZp`BxLgFj+E0ܠL!a0"sJ[桟C,y)8±!~ X\|MrĂ2a:e_Ju`et<}~ SymE]!ޣtm}*D34nˆ(nd_CV&!,? gǜo5Q5F! *1?|n67[s !L)_bjCxq-z`%];;mmV%/m윃-YbpfiSߘ.VK\Z gץNsM3"]44dž`N Szl$ە }f:xМږ¢p5DdEN068FV_a+jL0쓹INYQùmFB@NY CUw8?M?p]hL90>_tFeL}(Э"$rdrtɇC]c3ǜre`,j00p]A8l0xc}$V j Fh,hat)+1 wts} vӼ/o[5It@H$:H4moˣ)]0TDŽ/ ΍.CQ[Bl49܄|`&c4+7qPU8+vf 1/Y0a<ݼ5(F+|aѩ2Փ&`mM "c2Jy3ZAr} MA!ؓPݐRiߌ°`BJ?LgTYugΠmEhj{5^-XtK`%W`'O2pJJ}E[ˢʈ*jYLZ sxYk-'Rg"`0TvKC[LO=B5g1N XHz 8s5xAKV=ճoJtm#)I׆0 g-U1m)74m̉jp% 2euW*soRlE-k*NUfD88^KЋ 1h' ^XSh̉jK̉^r;!IރvgpJ[}76dRyV|Ұ(\ǒw%D*_67p[*! A%Da %DVEʇ3 |(cP>2 Ԝٖ7XP.G[&aU*R%& 2P̀v\ba*#A TFљA5hl/)!j" !f$ГoF-טa))(j)! (!  3!  l#fm%%D!T)S{!-HbTqG"{2?s~YyA:,`P m { Qs~d m"wel}-FVFԔ(# hi|+NAR" d(qOv6撷(\7eDᦸG;]Kj<+0JtzWRh˝@}s1PuՕ&vNqUrdpFɂL[b}i:hRH(kԏkT`S3c( yeEv(a:E PFjm|jqMZxz,kjx(QvV!CKiSmgryQ"R#7],=*D `I<4kX ᪟3C7t%-`qHEM|%:9/!5r0EHA><ʐTm!VD:w̠$eÔgHoT0廮i!egX:;4Dfw5#DmY#ڐ.3kC(Ri̊Tw֤)3KC(dM2?11SF>)MLK !E@eEV%RHQ̓!,a4$⥪|xXDHq4dk4R(4$j1|<c3XY QbET Pl00ו"~mF9KC(!P~R]FiH\, OtQ HnvCf|o!&d.r[FPΩT-xsS֞ L$fng`qH`QC#ީj(:M9F,LPi(ImNJ'1sZh1"6$(YJ(S[ E(D7.DHZSe}}ݏw?MY&S]dLJ$MG?i$t˻}>RCEbAxIEKtRvI [.q \ cLY/!] #D-NHn;v(  iQ:= DgSgJbU;ϼ뻂ZE:شRU bGm)]La/ a~ vL@vã j֊41[K'Bl)L f; ?1r CGm=X![;ڕ!ۑgA6/[TscG4Lx&ƫ\Mv0 5€2HYaFl'(o(Z@vD(LQTK;61~ c.m{9TAYQi9m+ m<łRɂȭESIpS nna>rAvy Bva6LI!. p;^~A6}9Z1@"PC0na,1-\4Pa Z@L+juxE $ y"[3 "b?lPAELi[1* VQj<3#RP`T 1 crvZ\PA6kq ZH C틍F@:j=P]Bav*E$. 
lX@RDp7( ;CJ1F] # S$%Oo-ގkD0vX1ELy)y*ޒ >.,3%E`A0.f̜}M5$- 9%@]A^3bY9,4}-"(7xݖ"(H]ö(ԂRuS nB*۶`6.xm'钣B.9% 5 z4J2tԥ0r.,bI @{&J˺(ܒxm(3v-%3mK @ܶfm9.q[^֕(//e4t׽¨-o>5< 4uZx*-/fK(MDmKSu[AЖt׮-FS{ܺ hKװMq6MsaĖifq!n{1(εFRAȖQ3\%!-od0d-Bk%, ly DBkk]C![ ŖuU# lKf2"q&f!J8_cؖi]eZGdmy~!,BU(Nm3H0lˌa[.kн܂5ے3+]%Cgw8?[p1[ -/1[t=1d  ,COaЖD#A ҙIi  RˉFH-([@҈+cCDt)2*J b=bY {Uav*5+&-Sjڢ鑢hJ+DuJQ=,(}{Dq!XpU`F\J`4DvwFC ݝVzmқ]6]̒zj(v%F `}lc ړ$eDXQd<֙*(BelWDYZyDQ*n)"t"Vg.>(@M0u=Rm[.M YPA d۾L׮b@vA% 'Ŧd[e-Rm;#0 [WE_BwIʨ$UE-Z*'Z6!VY͑7r z,\fG%l!'ѣF6«ɌWtۘ>T ի3_>J*B5w/Ȥ)0R%{9x#֏"FƬF̘sC5&)Ș8Jd,cX;R$3Ph(gX.,8tD%/0P dy YB[HFCbT`A"CUJDCW X*xRTUb 04SUi 68UY)EVK)izٓ*)UG:-V+U[l-ʕ"9jIL1FuXrsqZ1\ w)+ T= ŶH/*aekrh}(]X$-"HeXz Tfh 2"Fž!|gEb0mf֧9  *!c2Q!F)F[k*Sl BH nV"Y7uiԉFwS`Ʊ2 uA*sB-dž?,+4Fev!B$pT*ZQϬLa3oiA\VEgE.yXd"lJdTX[6̨镡2Ϩۈv/4@SU=hBJGAQ '˜ȦVJx6o#&hPX"k-lKF[ `gҤߥH`fTBEVnɋ1TKs'G,%D0I -(BxY[u2 SrB20~:kWE453V3͡1M=&(mP0dVrB̑dMJA,ty ]u/m,r#"X 1.̂24x J&t~f`EhLPIИ7Q ' -o(", @X$ ?m@!,$yeb*%:<%lc!YFvt lCyY;.?y?6BS ԑ$v}xf:] Q t Z3Rґ*hz["5-(xihh| h :2v:28W&^28GK9g@Gtq92T6 v92L;}J>~92n sdpp >cp >p ypbJ"1y4Ao-#O#O-#96a\>tzI#pլud@!7ԑݧԑ*vN*# ye:2{+ >3Iu!i2H"~ZZv*PJ_[>BH!$b)$& !.Jpp(X3RO9&Q2|!Nv.Hٙ"PMQ,#?  Ԑu {k+kVI00vw}$Α7(G4D9n&~Mc,7a $*RfbuΦy!NA6w ?CzDLd4 4B7mFn| 0/n oK:]m{WGAeSDCLa%m&$W OhȞ0DFD]1 iU"3` v#M/} @!QhNM@Iᇆ*Eb5nZ#ĀCK6} KuE5#qb@1n2T uP/%E 0G$xvt[w^{2PҀ={Nع8ǑX;/E38 (ycfTŌRZ 6t‍'!ר/]5dKe}$Lgfb4Q2;}:Zt}@F62;VBg (7CS} e{f:}*Yo|{_ϬP }*o} i 0 Z]_2L`oO=|vó]1b`h)R#IHyZ*PyD8\+ uLRhbǶ  (Fj8>-+}u3֮-.}/4 / €o}#סo4" IRADY|E WO~`•ё(hI E)Uo`OK@Pw^p{SpˌAr|L)}>SaQCZk:r|gGzV'>#(?%ZF+`~2:+Dbu~|i:SsY4W<Ṿ]%I|b&\"飥%$%x4%|ڗD)"NMˏ$q 9Ĥ @1ČxKiRKy_k;lCH~J2E_ҖmГKl}ՔȒ2oXEi%ޜRߑ#j 7WaK;?ȑl,@iײ#rzSH$!NVTeEץiq_dW0&(jZi?xC9Rs›Y '3 dyKfBb2D8 E q:@(!!|oS*3,Q/'v;wT"Ẍ́s2֊D!kEս"珣КQ(Bh6gi&vYf#Pl)ds3`cTfޥյ=]2Y*gFTkDe}k#5,R,!Xb2SD)kP EjqASL wKZĔ)Յ;J*aJhl.6izL@߶LT5~/SXMB{"Ȫ;*+ %#_?6lUȤHV'Eͳ cHEj"\vψ4# u"*Ͱ^RY;2DEUQ@BcH|!Z"S E"Q:"ɏJflk=G@m-L0__6Vd-+EG7j3VAP$ bb|s2+5]m#o*XSҵڶ0^ٶcV$$Y+?ڴ, x(.Pn?҂x<5cHTIгZ$j`$lGCOjhAEK-_7VQRt5si,3m&*˚>!}1oxiaD!5)k`$db$ &j <|ZgQ'1}ojnqȭ7T-deǚpzD'k^$c_1Qݣ6YX2UAuqZ ^'WWk\!|%#Q)mƒޑ+x)Δ~uE𱦯_]M#J!2^dC5:Wc8Zcz-5 o T#s"QT-*X-RUrq 6ԋz8]5zb}mC]^j"Pl]xXh^#m}BhX1wW q[W=pҙBZn9V+F;Ɗ|C]\de j#u}DWhsG ?]|-kRrE_ҺDKvf kCڙbX6d3X RϔD k@\ 5,g 謕ꂒΔ;}YagwP CFNgJ\4^3e.GC'JۗBn L_Cj@ \M?AU0 w7T1@zԕr)S?>SOяBWg8G !<"mw}L{{t t[.W (Gמ H!:bOBSDh"?P0BsxO*x!p^Ƕ?G^}ۧ~sy+3La"B? g w@hKFbvPjo y? QmO8kpekvn}|n;6> G_OP%~}xsOz:Or׎9ӷxtӳO_kH>|{}w!ʏ˯t?jc],ՕfYߘ%8cاoOnww?.|z|Gڀ.vn?xڝO+܁iYNx=-;gc+H<<ձO7O DzoJ.>O[5V-7_|]2׻wcNJ=cw9O@ݯ|_Tnw; O~~>+ԫ_jS EO"oO}xm%;46 ?gVر]cg)s$o>pί5ٸj]S\{[G.h3<7QŖmfx1̷;\9^Wڮ$|]^?Ev9i>>;3,·T9#clv{A6]<(ȭHs;ZVG:2g>w?v{7o*x ByҸ Yt~GɻqQυIszJ)'má9瞤z+Oĭ|\S#{!,A~0LCvv^;OiڟvH( gCA,Iamھ NU]:ַjd7P[0&;g?spm o^!H2pHW%DkZ5zw;8o!#phs=xͤ-j^߶GzE8pziWG=r/ ^v]7_!g|=#B~'[wV ,=Wb<>al_Wg\4XAĭsO嵃ZW_ _)~ɝU?mbN9'-ܻ\3N[1J$^KO6='^ּHFYDMu]dl>8߯&yC42w92̯\#W ߯_E<s7'PF܉ZlrR_h mr k䷏q|ިgkJ9ڳO+j8슯|۽XН^KwI5L}f. ]mppq|oX2@ j?{pG?F_}һÝ򯂫o|N4zqOWww)Won7mȋXJKy3G4?A>x5:GFVW~XDH *^{+/T Got%l> @SԫC:7Z7>__8>X^lLM҂Ѧ?}>1C*{@ǧǏ߽"”?;%tVk @L[>=w? 
s}q|"t4Okl YQѥ%͵=ؾֻ/;㛁ǟ-s/wX=glR$UO[w^+惺xa:'$FTyC܋eFO xbM_rS2yXjz)|O!{8as؈\H7\K/<&Enh~8se}.~p12>oL D U w^Wgcdg߮>= ?cdvKO~9.+|.x圜ɏ>l^Q~?sa~vcխ)/N+Ebu9Rnwz;jq-_Ƕ.?=?_# _uӺpD|b2uzœŨ8ohOz|Mɺ{&z:5tMum]josL82'%tԨ0^F?IKsm{!v|M& 'SyO-ן4NԄ0kxt^ѣfxa^)/'}޹ub?|>e4~cln1F9: d]a+d+GBE樐I'{Pl8 9re?k(endstream endobj 555 0 obj << /Filter /FlateDecode /Length 4677 >> stream x[oGs{X lv-2bBȇ?RcA`Xӷ5y1j1ߛjqwwW?_s[x4$~{Ϫ6WZ/ }2nqz}ԱԠˡ jH݃YTZ*YHu{1} ~' C c%}iQGG۫Ψ^^_ØЇp W e](l_++cMX|Wh`C޶pJ<);t<Ev |VC+~j`w$OlSxhR>]鱘RBT*)RB*0`\qF,1Ӑ4TPeM6LLy n4x> +$zLڒIR6⶜K#-@ٸz+l)ay-zЪH8:iC|GvJ8yx$:y`=%3 o% n5 QNF'.q 2ӴKؠV6iiROTk8~C38Ekla5aQJpz1A Hx=٩:6z/jN_ KF+kbS m(Sh O"IDjs4-|>X}@;8}9C;8=һ(rI;8>YrY29MUsp|)]j$lRaj^GkN~$A1P2kT6L-: uO  Zp|<<,`hk]zŵ%uF*E 埧K%l-; Mr5I묅ؕCCL7 lrMSd4 : M(S! 8 M0+E!(Ybpğ%AM W`脊$.iiDڝ(?UQ(i\JE(Y TR@ę(Q*>dD!3#adql۬wzdrc?uJ/#9A`eNP 1._#n߭DAwOGđ :n?$sNgQXs-,יr}s!lY 0qsmJVaN>>K !9\zB`G`kg̶bf3l՜r'T f ✁`7 '!9DB0?@Q0O.)()DωY4l휂s c0sܗc3s ?N1Arpq0$98#`Vr0`&.'`ΙKZ\2`6´#R007e3Nsg \u)EO(K-q9V>vXk2v ^HXT3Oo,r84b069*88W0鼰qN4:8|ꗌ'SUdϩ2%OԠmEfd̩3\khtShKtiKQdy5`N(CJoo`0Dcy\n?'gf/uqM1~V:t?,yb׍LBIoF@iP#ۆ"[^&Wl?-:,;ﷷBjTrOB>wڷDzVV,_K (v;{K ^%\X" "Jpe5kέi¨BD»N/Ӷ/ʍ~v}8qI̶w/dmw+j߆t߬s2GX"q&=hb *լpbc~WP⓼P.nݗGXdli8wWzV|6Yj4Bo-O〠OBQa$CnmHP$A"ws9"?Tncy6ynNm]ߗ=Ӥ|^]Ou6In +A'[v+`W5 #pnɉ_DE~O_ !ܨmN\:\ӭoW4H y=͠f cA e;YM.Δ]{o~-_\`Pzbd꾪k?u qy< N@11DYG"v.{GLz|B&z#p]^i9|.˞O-6Z~1 ޿Axl4A?NêF.đ)~ǫ(Z 2pW>?!?uh˱#UAuJ:1}.HBtjv5NB/#$y,9I@CZ9V4I̿lvchv,=N?eG *j`=1Wb> stream xY[ܶ~ߐ6RūH `½'oPhg4c՚FڱaRHf a%<7}WYJW w?tunjGWÿil34|IMfj{qK 2O p&SٙJ)3byC?l4(}Yy>rPhy}pn.AɾpzB#qP 9oI6fDOK &AJTVW>iď)#j6HRQ~7>N4 a𾲈 A j;mNbIy~_ ys>AA,~s>OD(=.e\(NkLT\j6K;KF7Ә%g`%|n/)p[r8ѯ'Po|bũnQ0"Gh7ɤ}Ѯ-&J>n_v^#ɯp_I 'Bݤoˮ[dk? Gu=*wF9Uߗ( ɏ]jk=uUz4kt A_7CUszUCe @,|F@p!.$KRlB9E a֛$CdIߨNB <u9((wX'8[HZ596GD kN.Ly(%h#aC0`CgF:VV2^K%IT_ ű項Xy )~q +[V Ȯf/3I"n-%#%8(/N2>ӬKsy?'yTdu$29VίJ,oMs`j:)D+lh t\:_]E.;;(C(ṑE^Q'nҠtPb{O/?endstream endobj 557 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 536 >> stream xcd`ab`dd74 JM/I, f!CG<<,~!=3#cxzs~AeQfzF.THTpSJL//THKQS/ f*h)$f$))F(+k; g```Ne`pae{!a+c5|2~3M =#.GwFѥE%͵mr!QfΗ^[]S'utu%!bY.u`wyaR͖}GQgD]Q]+US3~v-)Oo>$IKW~hQs@Jsw/bYV :tctw\wN^s]eݕҹsu}=j} C ﹿ3b~7W-*y?v KH>g7)szz{zzzOYȜ;w /{_endstream endobj 558 0 obj << /Filter /FlateDecode /Length 3042 >> stream xZKZ@jh:HIs,r[ҕKb"v=gfHP^;bW o?=vڞ.~ZJϯnQi Wo(@LNP#++A=B e)[|{р%~[-Pru!y}>G{`MWZDK*tesdrԻ] 7j΍\e`b ;3E+ane-cݦ^⥦>1RM{}ܓRdd8xes$rA0z lX~8ס ‹ ?TP .؂Qp !DU5LD3A7D`hFvuԐϾz|8cigt{ף ~95]8Onw'QC3 AȖʏ\z7m rt#[A#c6p!MCL)!F_fVOnMG|q<꿌nzQo':^O_:ʪ#ˀń @x$0z$.(3<̝Z7PW֘:5LGYUKjZ,mAހIGI4k}~S%Ei):":r?=Pe@{8*u0*R%5@:|xjceȒUWqSNxG`ҧq׉)k1̊Sc[>~]YZƈpe C4*$2Ə\=Tӡ:2xJ4-7 *M`C#g-D*s}RI r_ctS/57;0)g"6C,<IgHO/]4|ytQ,y`AdhgSj2z)?%R"SƮK2IS*rilYEph>)1PS{*3E9T>ENp j%ŭoYe(b$X,~ErU~0e?{oSD0 |Mp=lcc̈́:ZH+̆W"};=CT@T?gB?fQx`y7alpsauU8Uov\'ueW=HP9Y̨˿Kg}edfUG91A '|2aCͅXdX^3&zjp|pM›RgRė%40#B(ۧ|轣4c+{Q=>ԝ]|Km/RY^\`xUPhKkqS WoJZ, u _O¨OHޟKc۴ zC ;fp~qi$ $~>̦|`Dp~"M]|@Xh]w=S$l^\$ auGw$HfH2xMzP {ǟq|/3BͷĄ 2g#> stream x}]%q-dOy5LzmY K6wA eճC4IQw8'nߞm,ܬWՂۏ_,~/J Gn2W_ >z뷳W_f_ N]O|?~.u7_o(= oJK7C|xfjC:lvYo=7gov1/Ɩ]c{wR˱nfM;v4wQß}iֱjfZ}DoK}ns^/_*/R^jvW_Vш ~\;?Vl|K'R*Vux]۵iHTq V| V|/ƕH" J=6~5歩UlL>ƫ{TA]UƛXvR>ԅ~Xf,>D:O=2h D6bX Eeڈ4𞪞 w͊ѻfC@*Guz kߖDد,hNnBdDʭ a'KшY> ̽#^o2-&뭊BeckeaSV_vQɆHg7l|*7|h۰V)@ ;s#0d5`,QMlRXqYW &{)d.[Ӱz1@tl@aE>Hal t S }p] #bBsdOM?)A!|{+̧\ID7fBj| RܕKEq!b໔>8Z y(.GT+|EOkT̍Z|)DGſ4w"rŇ>Zr#J-RˑɶgX.kr%E͘QO* $j $tՑjHgBrHe\,StfYDqzG?glDZ.Z o.NĕB ")V Ȍɩ~\yTAEu-XAPBVB bbFU]dh7x[C%au ;=yl⡂ұ%xr.xM2B8OU%QC9 P#~Z! 
LU*H,`WՕu*Ӱrk|&KTl-!Ua\׵Jpd`2ԉ  ixǧu〹:EkjVELt=VRÁ?hm р,jRum.X⩉ /jwG!UΧ L㡂 d:ظ^ζtAArePtZޙ`Ӌ`E`)xj_%!1g-iGߣQkKafQ&a.AU tke^"Rkǚ_GXч'V8D#l# KlEK7OB:۬f}Blֲ,ChYXt 6(|]dU`Am=8tubAxJaD |l.PJ8|j~XBh  _0 _Ԛ: :W bW.[N`у%,耟R.|̣N~.Y37=dYBm37!Gst;; EЖT:YXXda`J˚[FLiJL-о|F; UB3 ye+B-䩹.&l3Gg0TZ7A(2W+ĵ+۶\>=%S@%жPJBVl,+ )[Ia9N<0gkc*2,@0eh^j.} r PBbY-ͅOtek&JF"huH8bΠZ9"h.ha lJ5I>-ZڬY3F@ k-\HL7qS! s5|enzUҎ3,Eʍ: bh~|E'(a1=*IJ 4x1mlAD6RI@ra .Ih`\4F3,qi\㩴!C`\Tb.ؗdWeCXvQ`A5jLIkQF#21Pd=8$}5-WȉLL2+'9+ -SiY:bry`ר'5EsaK -Otl0 FʑIA/CN) -CEb83TM/Tq`v(ݕbG3 _-J;=xه"[}\-m_@ZP $NmޑI] _a ott_\&G*2C5=0-Ñ ,>/e$jOof=Z~Jú ȫ;q0~_ʚ>##x%kSLz!2}jUDu܏51֘tz(Oi$VǑY*]RyF=s*w&j`@!i z+I  OdyQ:8psaxgc"4C0jw{TdbGԙveO(jVWO'Yi=5] 7\W*TdRgCQ688lBE>QB&;\.vܥ%1 +P8Q9ϝ2F=9uDv wiiϺHCF(٬E jBTƤMDye]zZ&(kfKFghs%'Z! 1۠WG2apx JJi4tUH'H D3T車#uvW`2}V?UHJnO0;{~flbB?3c+P5V_u@Y6]D'L-!.`KYs0LY3J1(H\0Ц[bp11fD(KhaGx}v|j0+c9bP`yƎr Tf]j# Yew-IG(@ p~I,?GztT )X: 3* k?D#]76 UWz_ev86?ޑgj WԳi@} -itWVA!{ʏ+4SiS38un@FmѳVS8 KJ  9*٨8W^VNoktVnk究4m"ߩ>iVGKN|stFojba2هUa Ncڌ>X8S|]#+p`WUBSMoU.ō#|˒=YyYM %[mޡ LL^` K)~Қ} L4pGxNW?Y[ޙȽHlzH -~u=B l}Rc.HJs +堬au];R؉R`]+}ў_WEXlMD)Ǘݦ$2(אAB6d-8Rn㡒]A"ST4!F e{S+M.vG #Ms0ʂ-a#P[Ke2*ZIUƑA:V?S1K+2*hGsQ=]?VCleo|bK0ȂX[eMjEdd|."^<,&BDVߑ61-EYz:\G:Iܔ9Z6_UG(s"qD¦r@Sٷ0s*b*g+JŜT̩ yʑ ͚ vi3Ń|LƜjQ.\.[S.f #g0Z(s^ؚr1GId*G.&6؅P~ʘr1G* <;͝)s( [yQR*&@XC'eJ6 #s\]HcٺR11`pWw"|҈L&DaaBib1aC)qb1!a1U2GnD>$ɤ*LL9h ҔP[Cה]")L#[W&/W"'Ey)HQ< ׊Bd4^%˪4L9 r#T1EzK1Q$̞K#&a< I Z(o{5t$QT)ʑSS~3]R)]9E%x"۫mGӄ@r`4jK{8 (G f_t̾dU)R0ӍVYsJgT=d(/f4H[aF2>{՘ 4L `Ee/P371 9SES Gꀱ-,̝H2`g<d)b<,,ә ^ޙ\ 63sg`)om4:>T`fMeArU=j`fH8QyR^0wJAjxԕ4@T!U2i Z&;% K}3.L d%Rԇz"!nG9ʫja2Eɳ9ʛT#݊Y*bvVѢjXEEH囋栂&QG&VNu+b\LVn1@YʛiyJs*HS.zt-H9i"MD)9Zz |w]>hh&NΖCSE)󄘘E,S.GLalE[HJM iʋѐ.bL@Q[D,JK*x@E0 L m2 Q5HC^%@G~?sfo0lBA@L4&P% LE2 f|20@Ě &)|Cgy, I4S" XSp-.G @?!U皁CҘ'kN"WY&2LMӑ يr4U^ND;0aYd`aH#BF1̑H)#B6#J D1ru: H@\PNH(sX8fzY s !G1qBB !uZNHSDX3i-'$)n-bi-G""jgf rBB DtZNHSDH 億>H]ދDB"rBB DK@D#D0@k9Q=&4@.Hl9!832 %M:y-'D/֌b$@P }0O"/C;!-Ė@8"QS#3El DXb){ x?J$&FJS({̖@TI [}$IbK C$PGf8^T }1[Ql!"TZT `㳺@bK "Urel D fKl1wfK H)ۭDjK GnK GjKِhX8R[@ʘ@!zK z;c5Eo Do'%u@D !%qo d}[)Ju- =5ى|~[َ@H%- =@wMV<؇~[)@&"K Tvn[~Kl'%1Ao D|G](jk!%)$ '@D %eI@g.4!tD2\ -1p D  @DDTĔTj椊b"$@!JMJH.h a %4q D4r\ Z&nFLG!K "CH zULK beg/HJ2IK<@ #q$.>KHt1n%K boэmDHt1\߰N ],Gf]?P}"xC>*Ii*K _eue=|\엍bӊBD$%.F(x.\ZžiXoJyb0"ŝ 8] V2`f, %%M .1.Vߙ..w)&~.KXE=wZL+mbBJL+oM edXDPd\"X.:>lY؇ȇ1O|M 0)&ao͋ cpP&m3p-y.#5/"#"ƒ c%ܝ;jOv0VcwL:'LcDRa,T!bu.*Eb>W' cqH:UBxIH  cfds0v%0y.9@]H+$򍚞 2E$Z:\^@paeb0Tq cpFD`X\<#rKh`G['F" ҇,cOF2$cp7]1x=6[B1$)힜k% bcHLԫ&@Nyd=b#"\2b ^EecmAb1b"<7D19#!"9"#Cz`ސcHGFc FcO92b #!(*DƑ>!›A 0g;H:G:0e]e9a zȇ1dWkc#|k~X{‐cpfhȇ1da,Wa=$Ę$cTC*Jq+V-b,X%1 b,b,0b ?B/rE!#"#ƌi;#P/Ȉ1P#kW3ƸwFLI1D B!Bb ,H1HLb- !L^a+]! 1f!ƁɅ !!,eI1D~ 5y5X\ݢjcp0)>Yzjcq 9\$cAaH18M Bi !1̸L!ƌ[~'cEb "Mb'Eba81f=%lO%lO'Ƒ˂@D;"',D1SD1+ 8A1D1{Y)1f2%&fc %(1c7hȈ D$32b $ 1DFٞ(FL UED4 F#KFL E`"*1L! 
Kk3ՋL)&%ԓ, Ř坦I Da$@c D#1Os|7 0s/%I1d D _8GK h)vZ B!LEN1&&&.KX@L~drbb!#&vʐ!&vD2`bCWC !&ddSd%Sw1% 0!D#&hEdFLMRȀ ɪI&&ƙ ك"&S D+2`4X 81&[SDKgS3J8D!&AHr݅НHSz 0qkH1:8V"ߎP'`BH S"YF%R~ f;_B3/$%t'6PT$P M="%ԸUtM_݉- Xғ)$f*2I~%X9 CkjK"f0rk<侄ʭI !֑b&Kceibxe} 侄"> /)q_CWr_‚1 )IKo} k ]H_;_.l0KE~)/w i{fVh]TW kXC80ˍH)w~ e`!%QSͬj/PfzRŐC!/oI~1bC![CBǎSN1p;2фbXdȄ"0W\&[#/J,_+/9bH h5俄KOk'uBLÊƐ* 0) /-/)M!qI-$&wj!0S ɀ1H%ojɀ obR|#&`2` i]a2`\~bTdܹIdU"R`ڄƂwVR` IӚuR`+FVce$ p /"&̢0){b"&\b/c<2`)'SYk>Ȁ qxbXPv: 0 vTYm&A`xGE^%었\h|I1,LD􃆑/4R( ǜhE Xb_"`SDJY؃* S5{s_".$/MJ c/e$ρAX@KD~1dv!h9"DDlK);U^["ؿ/K 5Vw: 5DlCOO$DDqS0$]_QF_"xIUAp#%b&_"Nd'Vx+?2UqXiۢD\W3GK~P!%M/KKDl@^F̗U'b p彇|7$1_",.Ĕ95jΤ҉!x3 fbD$ߎ̗k(LSbDH"dDFHg]Ci1_"!j|f1_"/.AE'ƕ9 %19#;&w56C/"ixX!@VEO׮7|+W?{^MH{@WM'! _6l/΋w/5K^YR?!R?QO-s(eVꩌsm.P(P9q]\>9=b[.`m&Ѿ_ıH\87%ݸ! %hW >>= ٲ|.E W |Ϳ?}Q{nOO%[z[vGe[{;|ﻇoyn1_U?qh/}ܿ;E?gV:{8փM/O?nn-å 'rXL~(~b\_ak$ګqƶY}|zBj:T50 =_`a|4=鉵rrk[o8GnCtG!ޕÇ"}sc79i}+-vjo^{xlx{wzji T~ˋzOk_߆3M+ˉG c9 6g6~tS-2Q (YfVho3 Ws6yQXSvl{Y(pk$џ?v5EyueT.*piI(_uyO<_ ߀KYFZ#@v~n&jc͂Ks&9*"KEҾb_e]a<=|{H]p=\+++ޮ,bO+ض lkscC| T?=d5\cYneKc8JMlo2?𼻱j˾>k[XOX^\[\M:p4/ :ܞ^׏?KX{W73Xm7Օ%yXߝѣBpwb_FP?KˣRo.\,hXd6MGQIOG[f?CIűY.ԏw>χ>Ey?J/U " wب9DA&L*6UʄU0tψdX!ed~Y$cEr|A$#H}{wnT箥3QocۮnMݷJ$D~K̻ g̤q\CwOV?ntn7meͮΟw?;HBq.e:̕0b?xyTrsu7Qͱ ȁxdGg*[пk4eXB+nۓ8扏%vʓ slٳ}UY6V+V﹋]LS<'4xgF^ip`u-OZQh|||xw4f~x ;W8 kӞ=+opk ᵇ@.$pZZKJǽ]5LW;xيF]nhTϒ⑷~@YꈿxU2҃ÒM?1T:'}+J[ػ5&5 jpGϼzb̔L`B{Tdyy ^ F 5څs闇%^EkWB"oTI}$Rl:fj3ueSⷬJK]r-_]c{n$nK]t}^UqkI T_>Onޓ 6Kiq?KlǡF7XT}P(B0ru_ҞX}_^ RwU}@o~GzF;g@kܛn?HKh: "vn8zwE[uDGei!?NY;ͽAn&B5y0Ztğ'/vnGAxq7%ruyQ:V-z::yjl+P2>QMgHR@5,[ X/קB}<:zH`ku7t (oD3I:|p#w)@|?Qۇ}\Bi?RHx3{Z[íN^?~8eR)_wa +M\LťRrepٹF&̠φ(:_\XWny9/H\Tlٶ`o\,iz?-IGJnHzL e5D(KHH^v(U٥OՓ>h_-RIƜ닆œܛ[9 5ėALyweflr|j=o=ffG ]E}~3o~U0txzV#ֵ#=3>>{2e?iZ&R;vãZp/Xp=u>q-Agy0/a|e3!蘹غHp9q#+ r_~(*ƚ?'7ޞ؇gQzrѼ{a'\PxɥV.乯 ;Qa #;;) ;Z' }rdس>c@5ziDƋӁ<lendstream endobj 560 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 6978 >> stream xY \ČSZFb+պԭVU-R7 *ȾCBdﻀtqRm.qmmU^_k{~w' 'ws'ji̪իcc6DN|u֌"w w&N!8!`3zfClT?mrvr L};&65>l_hY̘A~/ܝ=wמIBD轞3W\#!aSb=w qòʗzKQ5;L=GyP"^QOQé"j$4&5MyQ.6ʕbԳT%PnG RZIN C!Ys.z[U^1ỵӈߎO~ԙF~M?\ϱo?=(^#475n'ܫɞGc7}~W</}!aܐq]ƗM8AZG`QusyEZ¨'A:,7A 8Q'DPL]&ʀ.Z06[$Ch} M&ʅ9ePO0x,,DP&Yll' 2A!^c{}%lKJaX~d7-xzb`>p(h C;{]3Z&b Wo,ƱU^fsEJ{0٨4A0m.&Ӝڈ櫫j-x~j*B#sݱ@ê#Aۥ]G9 175Mwb)8nʱ.\G_닟={%wGg 5h*v~ 0G2QAFn-ȳ%˦i-^&F"hs5v$ٵ;*>?V>VĦU aMS'5ԯwttd̍鎺}G11#V_`K%wKݲ ;:SdCABOlG#n}1C!;Y+/p%}%ڭC9}L Tnt !SnҦd'{;$ƖlKn$&9Lvߨѭ`63nGScR| +S}V8b$Cz䦃QrS1qS i "vJ&IH֔z-.W4ԾvE~KZlڳ3G.7J { S:>Eh]kpVp؞Yzϱ=}BoՆm|"rTE}\ǚPw rti9i-;HP˴UA ط@OiݲY72!;g6X(uqЃTN+͎o3wl8㽦}NaU/7TB8hi~&Ova4AMe DN/}ލi]5^Iw!\4tcc9d?DzbڇDϗGx,Sq10{'6K}@>Y42ʈ*(J [jyBA *e)} j>r_=p`c epRЖ[h31&C#_uQ@R>6}SX%T{@Y+B!N7ZH+ M Lټp"!h4IVƓP:!tݳ%?^_5b/(o(Й5@ѩd=P99u!02 eI*J:tJtO%_S`\">l4_8|G__,sZ9XLՈA\3qT];*rJJEr|_0ޚ i-qO9# 728p,R/bnt*+NO0Wd?ɻRĴ; c5O@ }mcV#o*^u}`COBs84Et~p|iY8+ Ka>Ṣ=44zu xt $NȜz Hь|49=2i%Jc] ӈ|>`9O hIܟrTJCUOŔhvdr'}7o`Ru829d ?_)55B[?#ʻh [~OS1TJ2U)BS].&@u[ءTi JEiyL } }=->?Do+"p  I͎Sjsg3")Hhỷ49Ѡ&VdmK`T]ch ur2Y-Iؚ2cc:vxl D hA_֗ qb ^Wg% ώi . ؉H2A$_G'kİ:'懖ԃ{3Mɶ:(/>F JmfJ ot"j@tx 9h$LBbA]Rطb\A_KpgsK9qr񻥱h5U@n2+򥒍 Qɳa$WeԞ*m5[:*Mmr/Ś>C.$m7oiGo9oהɃi(HIjm=fWF]"4jpƣhvb➓PWjlm imNH4N/24%nADȅ&-ڝ% em_>M@J-˕1Zч>صԾHpn=sbCe#(1g{ e=f^y” iF&Jjϗ/ءXS} zkXW_MʓEWYFgA 2j rr ba=Wcg5ZCqnnlePg%3iv`#3aOrgmqŪ;'dX^핍fJvqp ?c5Zi@7o<1F_#-}8VBϟ)Dd)C  B^&)J^xɖշpeJ BhGn 1I]DyS _MTq]ey\O2⬭f+4-u2CWg22%%:Sc/⒲c41I=ׯ vaY-Z(\~WYz8.߫|q9^p| 9IueoTSJˈ m* sSl*9r $_6-"B*An^y^Y^yDo3na`..uNzQ8m}U Nv%P7tt8( @=7ᐻ#z%RG'у:':Z|@>l{Ƌ0 ڄƣax)Z9ilW4qRrϣ&Z aJ߼wM*Ii>~]qBoؑu/L-+ 7} ;uCp?߫Dϡ\:}rnL?qg1=ݠx~5ʅN4B|di8}%! 
[compressed binary PDF stream data omitted]
ordinal/inst/doc/clm_article.Rnw0000644000176200001440000032243614334176473016410 0ustar liggesusers% \documentclass[article]{article}
% \documentclass[article]{jss}
\documentclass[nojss]{jss}

%% -- Latex packages and custom commands ---------------------------------------

%% recommended packages
\usepackage{thumbpdf,lmodern,amsmath,amssymb,bm,url}
\usepackage{textcomp}
\usepackage[utf8]{inputenc}

%% another package (only for this demo article)
\usepackage{framed}

%% new custom commands
\newcommand{\class}[1]{`\code{#1}'}
\newcommand{\fct}[1]{\code{#1()}}

%% For Sweave-based articles about R packages:
%% need no \usepackage{Sweave}
\SweaveOpts{engine=R, eps=FALSE, keep.source = TRUE, prefix.string=clmjss}
<<>>=
options(prompt = "R> ", continue = "+ ", width = 70, useFancyQuotes = FALSE)
library("ordinal")
library("xtable")
@
%%\VignetteIndexEntry{Cumulative Link Models for Ordinal Regression}
%%\VignetteDepends{ordinal, xtable}
%% -- Article metainformation (author, title, ...) -----------------------------
%% - \author{} with primary affiliation
%% - \Plainauthor{} without affiliations
%% - Separate authors by \And or \AND (in \author) or by comma (in \Plainauthor).
%% - \AND starts a new line, \And does not.
\author{Rune Haubo B Christensen\\Technical University of Denmark\\
  \& \\ Christensen Statistics}
\Plainauthor{Rune Haubo B Christensen}

%% - \title{} in title case
%% - \Plaintitle{} without LaTeX markup (if any)
%% - \Shorttitle{} with LaTeX markup (if any), used as running title
\title{Cumulative Link Models for Ordinal Regression with the \proglang{R} Package \pkg{ordinal}}
\Plaintitle{Cumulative Link Models for Ordinal Regression with the R Package ordinal}
\Shorttitle{Cumulative Link Models with the \proglang{R} package \pkg{ordinal}}

%% - \Abstract{} almost as usual
\Abstract{
  This paper introduces the \proglang{R} package \pkg{ordinal} for the
  analysis of ordinal data using cumulative link models. The model framework
  implemented in \pkg{ordinal} includes partial proportional odds, structured
  thresholds, scale effects and flexible link functions. The package also
  supports cumulative link models with random effects, which are covered in a
  future paper. A speedy and reliable regularized Newton estimation scheme
  using analytical derivatives provides maximum likelihood estimation of the
  model class. The paper describes the implementation as well as how to use
  the functionality of the package for analysis of ordinal data, including
  topics on model identifiability and customized modelling. The package
  implements methods for profile likelihood confidence intervals, analysis of
  deviance tables with type I, II and III tests, predictions of various kinds
  as well as methods for checking the convergence of the fitted models.
}

%% - \Keywords{} with LaTeX markup, at least one required
%% - \Plainkeywords{} without LaTeX markup (if necessary)
%% - Should be comma-separated and in sentence case.
\Keywords{ordinal, cumulative link models, proportional odds, scale effects, \proglang{R}}
\Plainkeywords{ordinal, cumulative link models, proportional odds, scale effects, R}

%% - \Address{} of at least one author
%% - May contain multiple affiliations for each author
%%   (in extra lines, separated by \emph{and}\\).
%% - May contain multiple authors for the same affiliation
%%   (in the same first line, separated by comma).
\Address{
  Rune Haubo Bojesen Christensen\\
  Section for Statistics and Data Analysis\\
  Department of Applied Mathematics and Computer Science\\
  DTU Compute\\
  Technical University of Denmark\\
  Richard Petersens Plads \\
  Building 324 \\
  DK-2800 Kgs. Lyngby, Denmark\\
  \emph{and}\\
  Christensen Statistics\\
  Bringetoften 7\\
  DK-3500 V\ae rl\o se, Denmark \\
  E-mail: \email{Rune.Haubo@gmail.com}; \email{Rune@ChristensenStatistics.dk}%\\
  % URL: \url{http://christensenstatistics.dk/}
}

\begin{document}

This is a copy of an article that is no longer submitted for publication in
Journal of Statistical Software (\url{https://www.jstatsoft.org/}).

%% -- Introduction -------------------------------------------------------------

%% - In principle "as usual".
%% - But should typically have some discussion of both _software_ and _methods_.
%% - Use \proglang{}, \pkg{}, and \code{} markup throughout the manuscript.
%% - If such markup is in (sub)section titles, a plain text version has to be
%%   added as well.
%% - All software mentioned should be properly \cite-d.
%% - All abbreviations should be introduced.
%% - Unless the expansions of abbreviations are proper names (like "Journal
%%   of Statistical Software" above) they should be in sentence case (like
%%   "generalized linear models" below).

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Introduction} \label{sec:intro}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Ordered categorical data, or simply \emph{ordinal} data, are common in a
multitude of empirical sciences and in particular in scientific disciplines
where humans are used as measurement instruments. Examples include school
grades, ratings of preference in consumer studies, degree of tumor involvement
in MR images and animal fitness in ecology. Cumulative link models (CLMs) are
a powerful model class for such data since observations are treated correctly
as categorical, the ordered nature is exploited and the flexible regression
framework allows for in-depth analyses.

This paper introduces the \pkg{ordinal} package \citep{ordinal-pkg} for
\proglang{R} \citep{R} for the analysis of ordinal data with cumulative link
models. The paper describes how \pkg{ordinal} supports the fitting of CLMs
with various model structures, model assessment and inferential options
including tests of partial proportional odds, scale effects, threshold
structures and flexible link functions. The implementation, its flexibility
in allowing for customizable models and an effective fitting algorithm are
also described. The \pkg{ordinal} package also supports cumulative link
\emph{mixed} models (CLMMs); CLMs with normally distributed random effects.
The support of this model class will not be given further treatment here but
remains a topic for a future paper.

The name \emph{cumulative link models} is adopted from \citet{agresti02}, but
the model class has been referred to by several other names in the
literature, such as \emph{ordered logit models} and \emph{ordered probit
  models} \citep{greene10} for the logit and probit link functions. The
cumulative link model with a logit link is widely known as the
\emph{proportional odds model} due to \citet{mccullagh80} and with a
complementary log-log link, the model is sometimes referred to as the
\emph{proportional hazards model} for grouped survival times. CLMs are one of
several types of models specifically developed for ordinal data. Alternatives
to CLMs include continuation ratio models, adjacent category models, and
stereotype models \citep{ananth97} but only models in the CLM framework will
be considered in this paper.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Software review}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Cumulative link models can be fitted by all the major software packages, but
while some software packages support scale effects, partial proportional odds
(also referred to as unequal slopes, partial effects, and nominal effects),
different link functions and structured thresholds, not all of these model
structures are available in any one package or implementation. The following
brief software review is based on the publicly available documentation at
software package websites retrieved in May 2020.

\proglang{IBM SPSS} \citep{SPSS} implements McCullagh's \pkg{PLUM}
\citep{mccullagh80} procedure, allows for the five standard link functions
(cf. Table~\ref{tab:linkFunctions}) and scale effects. Estimation is via
Fisher scoring and a test for equal slopes is available for the location-only
model while it is not possible to estimate a partial proportional odds model.
\proglang{Stata} \citep{Stata} includes the \code{ologit} and \code{oprobit}
procedures for CLMs with logistic and probit links but without support for
scale effects, partial effects or structured thresholds. The add-on package
\pkg{oglm} \citep{oglm} allows for all five standard link functions and scale
effects. The \pkg{GLLAMM} package \citep{gllamm} also has some support for
CLMs in addition to some support for random effects.

\proglang{SAS} \citep{SAS} implements CLMs with logit links in
\code{proc logistic} and CLMs with the 5 standard links in \code{proc genmod}.

\proglang{Matlab} \citep{Matlab} fits CLMs with the \code{mnrfit} function
allowing for logit, probit, complementary log-log and log-log links.

\proglang{Python} has a package \pkg{mord} \citep{mord} for ordinal
classification and prediction focused on machine learning applications.

In \proglang{R}, several packages on the Comprehensive \proglang{R} Archive
Network (CRAN) implement CLMs. \code{polr} from \pkg{MASS} \citep{MASS}
implements standard CLMs allowing for the 5 standard link functions but no
further extensions; the \pkg{VGAM} package \citep{VGAM} includes CLMs via the
\code{vglm} function using the \code{cumulative} link. \code{vglm} allows for
several link functions as well as partial effects. The \code{lrm} and
\code{orm} functions from the \pkg{rms} package \citep{rms} also implement
CLMs with the 5 standard link functions but without scale effects, partial
or structured thresholds. A Bayesian alternative is implemented in the
\pkg{brms} package \citep{brms, brms2} which includes structured thresholds
in addition to random effects. In addition, several other \proglang{R}
packages include methods for analyses of ordinal data including \pkg{oglmx}
\citep{oglmx}, \pkg{MCMCpack} \citep{MCMCpack}, \pkg{mvord} \citep{mvord},
\pkg{CUB} \citep{CUB}, and \pkg{ordinalgmifs} \citep{ordinalgmifs}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection[ordinal package overview]{\pkg{ordinal} package overview}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The \pkg{ordinal} package implements CLMs and CLMMs along with functions and
methods to support these model classes as summarized in
Table~\ref{tab:functions_in_ordinal}. The two key functions in \pkg{ordinal}
are \code{clm} and \code{clmm} which fit CLMs and CLMMs respectively;
\code{clm2} and \code{clmm2}\footnote{A brief tutorial on \code{clmm2} is
  currently available at the package website on CRAN:
  \url{https://CRAN.R-project.org/package=ordinal}} provide legacy
implementations primarily retained for backwards compatibility.

This paper introduces \code{clm} and its associated functionality covering
CLMs with location, scale and nominal effects, structured thresholds and
flexible link functions. \code{clm.fit} is the main work horse behind
\code{clm} and an analogue to \code{lm.fit} for linear models. The package
includes methods for assessment of convergence with \code{convergence} and
\code{slice}, and an auxiliary method, \code{drop.coef}, for removing
linearly dependent columns from a design matrix. Distributional support
functions in \pkg{ordinal} provide support for Gumbel and log-gamma
distributions as well as gradients\footnote{gradients with respect to $x$,
  the quantile; not the parameters of the distributions} of normal, logistic
and Cauchy probability density functions which are used in the iterative
methods implemented in \code{clm} and \code{clmm}.

\begin{table}[t!]
\centering
\renewcommand*{\arraystretch}{1.2}
\begin{tabular}{llll}
  \hline
  \rotatebox{0}{Fitting} & \rotatebox{0}{Miscellaneous} &
  \rotatebox{0}{Former impl.} & \rotatebox{0}{Distributions} \\
  \hline
  \code{clm} & \code{convergence} & \code{clm2} &
  \code{[pdqrg]gumbel}$^{\textsf{c}}$ \\
  \code{clmm}$^{\textsf{c}}$ & \code{slice} & \code{clmm2}$^{\textsf{c}}$ &
  \code{[pdg]lgamma}$^{\textsf{c}}$ \\
  \code{clm.fit} & \code{drop.coef} & \code{clm2.control} &
  \code{gnorm}$^{\textsf{c}}$ \\
  \code{clm.control} & & \code{clmm2.control} & \code{glogis}$^{\textsf{c}}$ \\
  \code{clmm.control} & & & \code{gcauchy}$^{\textsf{c}}$ \\
  \hline
\end{tabular} \\
\caption{Key functions in \pkg{ordinal}. Superscript ``c'' indicates (partial
  or full) implementation in \proglang{C}.\label{tab:functions_in_ordinal}}
\end{table}

As summarized in Table~\ref{tab:clm_methods}, \pkg{ordinal} provides the
familiar suite of extractor and print methods for \code{clm} objects known
from \code{lm} and \code{glm}. These methods all behave in ways similar to
those for \code{glm}-objects with the exception of \code{model.matrix} which
returns a list of model matrices and \code{terms} which can return the
\code{terms} object for each of three formulae. The inference methods
facilitate profile likelihood confidence intervals via \code{profile} and
\code{confint}, likelihood ratio tests for model comparison via \code{anova},
model assessment by tests of removal of model terms via \code{drop1} and
addition of new terms via \code{add1} or AIC-based model selection via
\code{step}. Calling \code{anova} on a single \code{clm}-object provides an
analysis of deviance table with type I, II or III Wald-based $\chi^2$ tests
following the \proglang{SAS}-definitions of such tests \citep{SAStype}.

In addition to standard use of \code{clm}, the implementation facilitates
extraction of a model environment containing a complete representation of
the model, allowing the user to fit customized models containing, for
instance, special structures on the threshold parameters, restrictions on
regression parameters or other case-specific model requirements. As CLMMs
are not covered by this paper methods for \code{clmm} objects will not be
discussed.

Other packages including \pkg{emmeans} \citep{emmeans}, \pkg{margins}
\citep{margins}, \pkg{ggeffects} \citep{ggeffects}, \pkg{generalhoslem}
\citep{generalhoslem} and \pkg{effects} \citep{effects1, effects2} extend the
\pkg{ordinal} package by providing methods for marginal means, tests of
functions of the coefficients, goodness-of-fit tests and methods for
illustration of fitted models.

\begin{table}[t!]
\centering
\renewcommand*{\arraystretch}{1.2}
\begin{tabular}{llll}
  \hline
  \multicolumn{2}{l}{Extractor and Print} & Inference & Checking \\[3pt]
  \hline
  \code{coef} & \code{print} & \code{anova} & \code{slice} \\
  \code{fitted} & \code{summary} & \code{drop1} & \code{convergence}\\
  \code{logLik} & \code{model.frame} & \code{add1} & \\
  \code{nobs} & \code{model.matrix} & \code{confint} & \\
  \code{vcov} & \code{update} & \code{profile} & \\
  \code{AIC}, \code{BIC} & & \code{predict} & \\
  \code{extractAIC} & & \code{step}, \code{stepAIC} & \\
  \hline
\end{tabular}
\caption{Key methods for \code{clm} objects.\label{tab:clm_methods}}
\end{table}

The \pkg{ordinal} package is therefore unique in providing a comprehensive
framework for cumulative link models exceeding that of other software
packages with its functionality extended by a series of additional
\proglang{R} packages.
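
As a minimal sketch of this workflow, consider the \code{wine} data shipped
with \pkg{ordinal}; the calls below are illustrative and not evaluated here,
and they use only methods from Table~\ref{tab:clm_methods}:
<<methods-sketch, eval=FALSE>>=
## Sketch only: fit a basic CLM to the wine data and inspect it.
fm <- clm(rating ~ temp + contact, data = wine)
summary(fm)                  # coefficient table with Wald tests
confint(fm)                  # profile likelihood confidence intervals
fm0 <- clm(rating ~ temp, data = wine)
anova(fm0, fm)               # likelihood ratio test of contact
plot(slice(fm))              # log-likelihood slices for all parameters
predict(fm, type = "class")  # most likely response category
@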
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Organization of the paper}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The remainder of the paper is organized as follows. The next section
establishes notation by defining CLMs and associated log-likelihood
functions, then describes the extended class of CLMs that is implemented in
\pkg{ordinal} including details about scale effects, structured thresholds,
partial proportional odds and flexible link functions. The third section
describes how maximum likelihood (ML) estimation of CLMs is implemented in
\pkg{ordinal}. The fourth section describes how CLMs are fitted and ordinal
data are analysed with \pkg{ordinal} including sections on nominal effects,
scale effects, structured thresholds, flexible link functions, profile
likelihoods, assessment of model convergence, fitted values and predictions.
The final parts of section four are on a more advanced level and include
issues around model identifiability and customizable fitting of models not
otherwise covered by the \pkg{ordinal} API. We end in
section~\ref{sec:conclusions} with Conclusions.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Cumulative link models}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A cumulative link model is a model for ordinal-scale observations, i.e.,
observations that fall in an ordered finite set of categories. Ordinal
observations can be represented by a random variable $Y_i$ that takes a value
$j$ if the $i$th ordinal observation falls in the $j$'th category where
$j = 1, \ldots, J$ and $J \geq 2$.\footnote{binomial models ($J = 2$) are
  also included.}%
%
A basic cumulative link model is
\begin{equation}
  \label{eq:BasicCLM}
  \gamma_{ij} = F(\eta_{ij})~, \quad \eta_{ij} = \theta_j -
  \bm x_i^\top \bm\beta~, \quad i = 1,\ldots,n~, \quad j = 1, \ldots, J-1 ~,
\end{equation}
where
\begin{equation*} %% \label{eq:cum}
  \gamma_{ij} = \Prob (Y_i \leq j) = \pi_{i1} + \ldots + \pi_{ij} \quad
  \mathrm{with} \quad \sum_{j=1}^J \pi_{ij} = 1
\end{equation*}
are cumulative probabilities\footnote{we have suppressed the conditioning on
  the covariate vector, $\bm x_i$, i.e., $\gamma_{ij} = \gamma_j(\bm x_i)$
  and $P(Y_i \leq j) = P(Y \leq j | \bm x_i)$.}, $\pi_{ij}$ is the
probability that the $i$th observation falls in the $j$th category,
$\eta_{ij}$ is the linear predictor and $\bm x_i^\top$ is a $p$-vector of
regression variables for the parameters $\bm\beta$, without a leading column
for an intercept, and $F$ is the inverse link function.
%
The thresholds (also known as cut-points or intercepts) are ordered:
\begin{equation*}
  -\infty \equiv \theta_0 \leq \theta_1 \leq \ldots \leq \theta_{J-1} \leq
  \theta_J \equiv \infty.
\end{equation*}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{The multinomial distribution and the log-likelihood function}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The ordinal observation $Y_i$ which assumes the value $j$ can be represented
by a multinomially distributed variable
$\bm Y_i^* \sim \mathrm{multinom}(\bm\pi_i, 1)$, where $\bm Y_i^*$ is a
$J$-vector with a $1$ at the $j$'th entry and 0 otherwise, and with
probability mass function
%
\begin{equation}
  \label{eq:multinom_pmf}
  \Prob(\bm Y_i^* = \bm y_i^*) = \prod_j \pi_{ij}^{y_{ij}^*} ~.
\end{equation}
%
The log-likelihood function can therefore be written as
%
\begin{equation*}
  \ell(\bm\theta, \bm\beta; \bm y^*) = \sum_i \sum_j y_{ij}^* \log \pi_{ij}
\end{equation*}
%
or equivalently
%
\begin{align*}
  \ell(\bm\theta, \bm\beta; \bm y) =~& \sum_i \sum_j \mathrm I (y_i = j)
  \log \pi_{ij} \\
  =~& \sum_i \log \tilde\pi_i
\end{align*}
%
where $\tilde\pi_i$ is the $j$'th entry in the $J$-vector $\bm \pi_i$ with
elements $\pi_{ij}$ and $\mathrm I(\cdot)$ is the indicator function.
Allowing for observation-level weights (case weights) $w_i$ finally leads to
%
\begin{equation}
  \label{eq:clm-log-likelihood}
  \ell(\bm\theta, \bm\beta; \bm y) = \sum_i w_i \log \tilde\pi_i ~.
\end{equation}

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Likelihood-based inference}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Confidence intervals for model parameters are obtained by appealing to the
asymptotic normal distribution of a statistic $s(\cdot)$ for a scalar
parameter of interest $\beta_a$ and defined as
\begin{equation*}
  CI:~\left\{ \beta_a; |s(\beta_a)| < z_{1 - \alpha/2} \right\}
\end{equation*}
where $z_{1 - \alpha/2}$ is the $(1 - \alpha/2)$ quantile of the standard
normal cumulative distribution function. Taking $s(\cdot)$ to be the Wald
statistic
$s(\beta_a):~ w(\beta_a) = (\hat\beta_a - \beta_a)/\hat{\mathrm{se}}(\hat\beta_a)$
leads to the classical symmetric intervals. Better confidence intervals can
be obtained by choosing instead the likelihood root statistic
\citep[see e.g.,][]{pawitan01, brazzale07}:
\begin{equation*}
  s(\beta_a):~ r(\beta_a) = \mathrm{sign}(\hat\beta_a - \beta_a)
  \sqrt{-2 [ \ell(\hat{\bm\theta}, \hat{\bm\beta}; \bm y) -
    \ell_p(\beta_a; \bm y)]}
\end{equation*}
where
\begin{equation*}
  \ell_p(\beta_a; \bm y) = \max_{\bm\theta, \bm\beta_{-a}}
  \ell(\bm\theta, \bm\beta; \bm y)~,
\end{equation*}
is the profile likelihood for the scalar parameter $\beta_a$ and
$\bm\beta_{-a}$ is the vector of regression parameters without the $a$'th
one.

While the profile likelihood has to be optimized over all parameters except
$\beta_a$, we define a \emph{log-likelihood slice} as
\begin{equation}
  \label{eq:slice}
  \ell_{\mathrm{slice}}(\beta_a; \bm y) = \ell(\beta_a; \hat{\bm\theta},
  \hat{\bm\beta}_{-a}, \bm y)~,
\end{equation}
which is the log-likelihood function evaluated at $\beta_a$ while keeping the
remaining parameters fixed at their ML estimates. A quadratic approximation
to the log-likelihood slice is $(\hat\beta_a - \beta_a)^2 / 2\tau_a^2$ where
the \emph{curvature unit} $\tau_a$ is the inverse square root of the $a$'th
diagonal element of the Hessian of
$-\ell(\hat{\bm\theta}, \hat{\bm\beta}; \bm y)$.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Link functions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A commonly used link function is the logit link which leads to
%
\begin{equation}
  \label{eq:cum_logit_model}
  \mathrm{logit}(\gamma_{ij}) = \log \frac{\Prob (Y_i \leq j)}{1 -
    \Prob(Y_i \leq j)}
\end{equation}
%
The odds ratio (OR) of the event $Y_i \leq j$ at $\bm x_1$ relative to the
same event at $\bm x_2$ is then
%
\begin{equation}
  \label{eq:odds_ratio}
  \mathrm{OR} = \frac{\gamma_j(\bm x_1) / [1 - \gamma_j(\bm x_1)]}
  {\gamma_j(\bm x_2) / [1 - \gamma_j(\bm x_2)]} =
  \frac{\exp(\theta_j - \bm x_1^\top \bm\beta)}
  {\exp(\theta_j - \bm x_2^\top \bm\beta)}
  %% =&~ \exp(\theta_j - \theta_j - \bm x_1 \bm\beta + \bm x_2 \bm\beta)
  = \exp[(\bm x_2^\top - \bm x_1^\top)\bm\beta]
\end{equation}
which is independent of $j$.
Thus the log cumulative odds ratio is proportional to the distance between
$\bm x_1$ and $\bm x_2$, which motivated \citet{mccullagh80} to denote the
cumulative logit model a \emph{proportional odds model}. If $x$ represents a
treatment variable with two levels (e.g., placebo and treatment), then
$x_2 - x_1 = 1$ and the odds ratio is $\exp(-\beta_\textup{treatment})$.
Similarly the odds ratio of the event $Y \geq j$ is
$\exp(\beta_\textup{treatment})$.

The probit link has its own interpretation through a normal linear model for
a latent variable which is considered in
section~\ref{sec:latent-variable-motivation}.

The complementary log-log (clog-log) link is also sometimes used because of
its interpretation as a proportional hazards model for grouped survival
times:
\begin{equation*}
  -\log\{1 - \gamma_{j}(\bm x_i) \} = \exp( \theta_j - \bm x_i^T \bm\beta )
\end{equation*}
Here $1 - \gamma_{j}(\bm x_i)$ is the probability of survival beyond category
$j$ given $\bm x_i$. The proportional hazards model has the property that
\begin{equation*}
  \log \{ 1 - \gamma_{j}(\bm x_1) \} =
  \exp[ (\bm x_2^T - \bm x_1^T) \bm\beta ]
  \log \{ 1 - \gamma_{j}(\bm x_2) \}~,
\end{equation*}
thus the hazards at $\bm x_1$ relative to $\bm x_2$ are proportional.

If the log-log link is used on the response categories in the reverse order,
this is equivalent to using the clog-log link on the response in the original
order. This reverses the sign of $\bm\beta$ as well as the sign and order of
$\{\theta_j\}$ while the likelihood and standard errors remain unchanged.
% % Thus, similar to the proportional odds
% % model, the ratio of hazard functions beyond category $j$ at $\bm x_1$
% % relative to $\bm x_2$ (the hazard ratio, $HR$) is:
% \begin{equation*}
%   HR = \frac{-\log\{1 - \gamma_{j}(\bm x_2) \}}
%   {-\log\{1 - \gamma_{j}(\bm x_1) \}} =
%   \frac{\exp( \theta_j - \bm x_1^T \bm\beta )}
%   {\exp( \theta_j - \bm x_2^T \bm\beta )} =
%   \exp[(\bm x_2 - \bm x_1)\bm\beta]
% \end{equation*}
%
Details of the most common link functions are described in
Table~\ref{tab:linkFunctions}.

\begin{table}[t!]
\begin{center}
%\footnotesize
\begin{tabular}{llll}
  \hline
  Name & logit & probit & log-log \\
  \hline
  Distribution & logistic & normal & Gumbel (max)$^b$ \\
  Shape & symmetric & symmetric & right skew \\
  Link function ($F^{-1}$) & $\log[\gamma / (1 - \gamma)]$ &
  $\Phi^{-1}(\gamma)$ & $-\log[-\log(\gamma)]$ \\
  Inverse link ($F$) & $1 / [1 + \exp(-\eta)]$ & $\Phi(\eta)$ &
  $\exp(-\exp(-\eta))$ \\
  Density ($f = F'$) & $\exp(-\eta) / [1 + \exp(-\eta)]^2$ & $\phi(\eta)$ &
  $\exp(-\eta)\exp(-\exp(-\eta))$ \\
  \hline
  \hline
  Name & clog-log$^a$ & cauchit \\
  \hline
  Distribution & Gumbel (min)$^b$ & Cauchy$^c$ \\
  Shape & left skew & kurtotic \\
  Link function ($F^{-1}$) & $\log[ -\log(1 - \gamma)]$ &
  $\tan[\pi (\gamma - 0.5)]$ \\
  Inverse link ($F$) & $1 - \exp[-\exp(\eta)]$ & $\arctan(\eta)/\pi + 0.5$ \\
  Density ($f = F'$) & $\exp[-\exp(\eta) + \eta]$ & $1 / [\pi(1 + \eta^2)]$ \\
  \hline
\end{tabular}
\end{center}
% \footnotesize
% % $^a$: the \emph{complementary log-log} link \\
% $^b$: the Gumbel distribution is also known as the extreme value
% (type I) distribution for extreme minima or maxima. It is also
% sometimes referred to as the Weibull (or log-Weibull) distribution
% (\url{http://en.wikipedia.org/wiki/Gumbel_distribution}). \\
% $^c$: the Cauchy distribution is a $t$-distribution with one df
\caption{Summary of the five standard link functions.
  $^a$: the \emph{complementary log-log} link;
  $^b$: the Gumbel distribution is also known as the extreme value (type I)
  distribution for extreme minima or maxima. It is also sometimes referred
  to as the Weibull (or log-Weibull) distribution;
  $^c$: the Cauchy distribution is a $t$-distribution with one degree of
  freedom.
  \label{tab:linkFunctions}}
\end{table}

The \pkg{ordinal} package allows for the estimation of an extended class of
cumulative link models in which the basic model~(\ref{eq:BasicCLM}) is
extended in a number of ways including structured thresholds, partial
proportional odds, scale effects and flexible link functions. The following
sections will describe these extensions of the basic CLM.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Extensions of cumulative link models}
\label{sec:extensions-of-clms}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

A general formulation of the class of models (excluding random effects) that
is implemented in \pkg{ordinal} can be written
%
\begin{equation}
  \gamma_{ij} = F_{\lambda}(\eta_{ij}), \quad \eta_{ij} =
  \frac{g_{\bm\alpha} (\theta_j) - \bm x_i^\top \bm\beta -
    \bm w_i^\top \tilde{\bm\beta}_j}{\exp(\bm z_i^\top \bm\zeta)}
\end{equation}
%
where
\begin{description}
\item[$F_{\lambda}$] is the inverse link function. It may be parameterized by
  the scalar parameter $\lambda$ in which case we refer to $F_{\lambda}^{-1}$
  as a \emph{flexible link function},
%
\item[$g_{\bm\alpha}(\theta_j)$] parameterises thresholds $\{\theta_j\}$ by
  the vector $\bm\alpha$ such that $g$ restricts $\{\theta_j\}$ to be for
  example symmetric or equidistant. We denote these \emph{structured
    thresholds}.
%
\item[$\bm x_i^\top\bm\beta$] are the ordinary regression effects,
%
\item[$\bm w_i^\top \tilde{\bm\beta}_j$] are regression effects which are
  allowed to depend on the response category $j$ and they are denoted
  \emph{partial} or \emph{non-proportional odds} \citep{peterson90} when the
  logit link is applied. To include other link functions in the terminology
  we denote these effects \emph{nominal effects} (in text and code) because
  these effects are not integral to the ordinal nature of the data.
%
\item[$\exp(\bm z_i^\top \bm\zeta)$] are \emph{scale effects} since in a
  latent variable view these effects model the scale of the underlying
  location-scale distribution.
\end{description}

With the exception of the structured thresholds, these extensions of the
basic CLM have been considered individually in a number of sources but to
the author's best knowledge not previously in a unified framework.
%
For example partial proportional odds have been considered by
\citet{peterson90} and scale effects have been considered by
\citet{mccullagh80} and \citet{cox95}.
%
\citet{agresti02} is a good introduction to cumulative link models in the
context of categorical data analysis and includes discussions of scale
effects.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Latent variable motivation of CLMs}
\label{sec:latent-variable-motivation}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

It is natural to motivate the CLM from a linear model for a categorized
version of a latent variable.
Assume the following linear model for an unobserved latent variable:
%
\begin{equation}
  \label{eq:latent}
  S_i = \alpha^* + \bm x_i^\top \bm\beta^* + \varepsilon_i, \quad
  \varepsilon_i \sim N(0, \sigma^{*2})
\end{equation}
%
If $S_i$ falls between two thresholds,
$\theta_{j-1}^* < S_i \leq \theta_j^*$ where
%
\begin{equation}
  \label{eq:thresholds}
  -\infty \equiv \theta_0^* < \theta_1^* < \ldots < \theta^*_{J-1} <
  \theta_{J}^* \equiv \infty
\end{equation}
%
then $Y_i = j$ is observed and the cumulative probabilities are:
%
\begin{equation*}
  \gamma_{ij} = \Prob (Y_i \leq j) = \Prob(S_i \leq \theta_j^*) =
  \Prob \left( Z \leq \frac{\theta_j^* - \alpha^* -
      \bm x_i^\top \bm\beta^*}{\sigma^*} \right) =
  \Phi ( \theta_j - \bm x_i^\top \bm\beta )
\end{equation*}
%
where $Z$ follows a standard normal distribution, $\Phi$ denotes the
standard normal cumulative distribution function, parameters with an
``$^*$'' exist on the latent scale,
$\theta_j = (\theta_j^* - \alpha^*) / \sigma^*$ and
$\bm\beta = \bm\beta^* / \sigma^*$. Note that $\alpha^*$, $\bm\beta^*$ and
$\sigma^*$ would have been identifiable if the latent variable $S$ was
directly observed, but they are not identifiable with ordinal observations.

If we allow a log-linear model for the scale such that
%
\begin{equation*}
  \varepsilon_i \sim N(0, \sigma^{*2}_i), \quad
  \sigma_i^* = \exp(\mu + \bm z_i^\top \bm\zeta) =
  \sigma^* \exp(\bm z_i^\top \bm\zeta)
\end{equation*}
%
where $\bm z_i$ is the $i$'th row of a design matrix $\bm Z$ without a
leading column for an intercept and $\sigma^* = \exp(\mu)$, then
\begin{equation*}
  \gamma_{ij} = \Prob \left( Z \leq \frac{\theta_j^* - \alpha^* -
      \bm x_i^\top \bm\beta^*}{\sigma^*_i} \right) =
  \Phi \left( \frac{\theta_j - \bm x_i^T \bm\beta}{\sigma_i} \right)
\end{equation*}
where $\sigma_i = \sigma_i^* / \sigma^* = \exp(\bm z_i^\top \bm\zeta)$ is the
\emph{relative} scale.

The common link functions: probit, logit, log-log, c-log-log and cauchit
correspond to inverse cumulative distribution functions of the normal,
logistic, Gumbel(max), Gumbel(min) and Cauchy distributions respectively.
These distributions are all members of the location-scale family with common
form $F(\mu, \sigma)$, with location $\mu$ and non-negative scale $\sigma$,
for example, the logistic distribution has mean $\mu$ and standard deviation
$\sigma \pi / \sqrt{3}$. Choosing a link function therefore corresponds to
assuming a particular distribution for the latent variable $S$ in which
$\bm x_i^\top \bm\beta$ and $\exp(\bm z_i^\top \bm\zeta)$ model location
\emph{differences} and scale \emph{ratios} respectively of that distribution.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Structured thresholds}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Structured thresholds, $\{ g(\bm\alpha)_j \}$, make it possible to impose
restrictions on the thresholds $\bm\theta = g(\bm\alpha)$. For instance
restricting the thresholds to be equidistant means that only the location of,
say, the first threshold and the spacing between adjacent thresholds have to
be estimated, thus only two parameters are used to parameterize the
thresholds irrespective of the number of response categories.

\pkg{ordinal} takes $g(\bm\alpha)$ to be a linear function and operates with
\begin{equation*}
  g(\bm\alpha) = \mathcal{J}^\top \bm\alpha = \bm \theta
\end{equation*}
where the Jacobian $\mathcal{J}$ defines the mapping from the parameters
$\bm\alpha$ to the thresholds $\bm\theta$.
The traditional ordered but otherwise unrestricted thresholds are denoted
\emph{flexible thresholds} and obtained by taking $\mathcal{J}$ to be an
identity matrix.

Assuming $J=6$ ordered categories, the Jacobians for equidistant and
symmetric thresholds (denoted \code{equidistant} and \code{symmetric} in the
\code{clm}-argument \code{threshold}) are
\begin{equation*}
  \mathcal{J}_{\mathrm{equidistant}} =
  \begin{bmatrix}
    1 & 1 & 1 & 1 & 1 \\
    0 & 1 & 2 & 3 & 4 \\
  \end{bmatrix}, \quad
  \mathcal{J}_{\mathrm{symmetric}} =
  \begin{bmatrix}
    1 & 1 & 1 & 1 & 1 \\
    0 & -1 & 0 & 1 & 0 \\
    -1 & 0 & 0 & 0 & 1 \\
  \end{bmatrix}.
\end{equation*}
Another version of symmetric thresholds (denoted \code{symmetric2}) is
sometimes relevant with an uneven number of response categories, here
illustrated with $J=5$ together with the \code{symmetric} thresholds:
\begin{equation*}
  \mathcal{J}_{\mathrm{symmetric2}} =
  \begin{bmatrix}
    0 & -1 & 1 & 0 \\
    -1 & 0 & 0 & 1 \\
  \end{bmatrix}, \quad
  \mathcal{J}_{\mathrm{symmetric}} =
  \begin{bmatrix}
    1 & 1 & 0 & 0 \\
    0 & 0 & 1 & 1 \\
    -1 & 0 & 0 & 1 \\
  \end{bmatrix}
\end{equation*}
The nature of $\mathcal{J}$ for a particular model can always be inspected by
printing the \code{tJac} component of the \code{clm} fit.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Partial proportional odds and nominal effects}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The nominal effects $\bm w_i^\top\tilde{\bm\beta}_j$ can be considered an
extension of the regression part of the model $\bm x_i^\top \bm\beta$ in
which the regression effects are allowed to vary with $j$.
%
The nominal effects can also be considered an extension of the thresholds
$\theta_j$ which allows them to depend on variables $\bm w_i^\top$:
$\tilde{\theta}_{ij}(\bm w_i^\top) = \theta_j - \bm w_i^\top
\tilde{\bm\beta}_j$ is the $j$'th threshold for the $i$'th observation. The
following treatment assumes the latter view.

In general let $\bm W$ denote the design matrix for the nominal effects
without a leading column for an intercept; the nominal-effects parameter
vector $\tilde{\bm\beta}_j$ is then $\mathrm{ncol}(\bm W)$ long and
$\tilde{\bm\beta}$ is $\mathrm{ncol}(\bm W) \cdot (J-1)$ long.

If $\bm W$ is the design matrix for the nominal effects containing a single
column for a continuous variable then $\tilde{\beta}_j$ is the slope
parameter corresponding to the $j$'th threshold and $\theta_j$ is the $j$'th
intercept, i.e., the threshold when the covariate is zero. Looking at
$\tilde{\theta}_{ij}(\bm w_i^\top) = \theta_j - \bm w_i^\top
\tilde{\bm\beta}_j$ as a linear model for the thresholds facilitates the
interpretation.

If, on the other hand, $\bm W$ is the design matrix for a categorical
variable (a \code{factor} in \proglang{R}) then the interpretation of
$\tilde{\bm\beta}_j$ depends on the contrast-coding of $\bm W$. If we assume
that the categorical variable has 3 levels, then $\tilde{\bm\beta}_j$ is a
2-vector. In the default treatment contrast-coding
(\code{"contr.treatment"}) $\theta_j$ is the $j$'th threshold for the first
(base) level of the factor, $\tilde{\beta}_{1j}$ is the difference between
the thresholds for the first and second levels and $\tilde{\beta}_{2j}$ is
the difference between the thresholds for the first and third levels.

In general we define $\bm\Theta$ as a matrix with $J-1$ columns and with 1
row for each combination of the levels of factors in $\bm W$. This matrix is
available in the \code{Theta} component of the model fit.
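
The following sketch makes both extensions concrete; it uses the \code{wine}
data and the fit \code{fm} from the sketch above, and the calls are
illustrative and not evaluated here:
<<threshold-nominal-sketch, eval=FALSE>>=
## Structured thresholds: alpha is mapped to theta via tJac.
fm_sym <- clm(rating ~ temp + contact, data = wine,
              threshold = "symmetric")
fm_sym$tJac       # the Jacobian mapping alpha to the thresholds theta
## Nominal effects: one set of thresholds per level of contact.
fm_nom <- clm(rating ~ temp, nominal = ~ contact, data = wine)
fm_nom$Theta      # one row of thresholds per level of contact
nominal_test(fm)  # likelihood ratio tests of nominal effects
@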
Note that variables in $\bm X$ cannot also be part of $\bm W$ if the model is
to remain identifiable. \pkg{ordinal} detects this and automatically removes
the offending variables from $\bm X$.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Flexible link functions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The \pkg{ordinal} package allows for two kinds of flexible link functions due
to \citet{aranda-ordaz83} and \citet{genter85}.

The link function proposed by \citet{aranda-ordaz83} reads
%
\begin{equation*}
  F^{-1}_\lambda (\gamma_{ij}) = \log \left\{
    \frac{(1 - \gamma_{ij})^{-\lambda} - 1}{\lambda} \right\}~,
\end{equation*}
which depends on the auxiliary parameter $\lambda \in ]0, \infty[$. When
$\lambda = 1$, the logistic link function arises, and when
$\lambda \rightarrow 0$,
\begin{equation*}
  \{ (1 - \gamma_{ij})^{-\lambda} - 1 \} / \lambda \rightarrow
  \log (1 - \gamma_{ij})^{-1}~,
\end{equation*}
so the complementary log-log link arises. The inverse link function and its
derivative are given by
\begin{align*}
  F(\eta) =&~ 1 - (\lambda \exp(\eta) + 1)^{-\lambda^{-1}} \\
  f(\eta) =&~ \exp(\eta) (\lambda \exp(\eta) + 1)^{-\lambda^{-1} - 1}
\end{align*}
The density implied by the inverse link function is left-skewed if
$0 < \lambda < 1$, symmetric if $\lambda = 1$ and right-skewed if
$\lambda > 1$, so the link function can be used to assess the evidence about
possible skewness of the latent distribution.

The log-gamma link function proposed by \citet{genter85} is based on the
log-gamma density by \citet{farewell77}. The cumulative distribution function
and hence inverse link function reads
\begin{equation*}
  F_\lambda(\eta) =
  \begin{cases}
    1 - G(q; \lambda^{-2}) & \lambda < 0 \\
    \Phi(\eta) & \lambda = 0 \\
    G(q; \lambda^{-2}) & \lambda > 0
  \end{cases}
\end{equation*}
where $q = \lambda^{-2}\exp(\lambda \eta)$ and $G(\cdot; \alpha)$ denotes the
Gamma distribution with shape parameter $\alpha$ and unit rate parameter, and
$\Phi$ denotes the standard normal cumulative distribution function. The
corresponding density function reads
\begin{equation*}
  f_\lambda(\eta) =
  \begin{cases}
    |\lambda| k^k \Gamma(k)^{-1}
    \exp\{ k(\lambda\eta - \exp(\lambda\eta)) \} & \lambda \neq 0 \\
    \phi(\eta) & \lambda = 0
  \end{cases}
\end{equation*}
where $k=\lambda^{-2}$, $\Gamma(\cdot)$ is the gamma function and $\phi$ is
the standard normal density function.

By attaining the Gumbel(max) distribution at $\lambda = -1$, the standard
normal distribution at $\lambda = 0$ and the Gumbel(min) distribution at
$\lambda = 1$ the log-gamma link bridges the log-log, probit and
complementary log-log links providing right-skewed, symmetric and left-skewed
latent distributions in a single family of link functions.

Note that choice and parameterization of the predictor, $\eta_{ij}$, e.g.,
the use of scale effects, can affect the evidence about the shape of the
latent distribution. There are usually several link functions which provide
essentially the same fit to the data and choosing among the good candidates
is often better done by appealing to arguments such as ease of interpretation
rather than arguments related to fit.
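
In \code{clm} these flexible links are requested through the \code{link}
argument; the sketch below assumes the \code{wine} data and that the link
names match the installed package version, and it is not evaluated here:
<<flex-link-sketch, eval=FALSE>>=
fm_ao <- clm(rating ~ temp + contact, data = wine,
             link = "Aranda-Ordaz")
fm_lg <- clm(rating ~ temp + contact, data = wine,
             link = "log-gamma")
summary(fm_ao)  # the estimate of the auxiliary parameter lambda is reported
@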
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section[Implementation of ML Estimation of CLMs in ordinal]{Implementation
  of ML Estimation of CLMs in \pkg{ordinal}}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

In the \pkg{ordinal} package cumulative link models are (by default)
estimated with a regularized Newton-Raphson (NR) algorithm with step-halving
(line search) using analytical expressions for the gradient and Hessian of
the negative log-likelihood function. This NR algorithm with analytical
derivatives is used irrespective of whether the model contains structured
thresholds, nominal effects or scale effects; the only exception being models
with flexible link functions for which a general-purpose quasi-Newton
optimizer is used.

Due to computationally cheap and efficient evaluation of the analytical
derivatives, the relatively well-behaved log-likelihood function (with
exceptions described below) and the speedy convergence of the Newton-Raphson
algorithm, the estimation of CLMs is virtually instant on a modern computer
even with complicated models on large datasets. This also facilitates
simulation studies. More important than speed is perhaps that the algorithm
is reliable and accurate. Technical aspects of the regularized NR algorithm
with step-halving (line search) are described in
appendix~\ref{sec:algorithm} and analytical gradients are described in detail
in \citet{mythesis}.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Properties of the log-likelihood function for extended CLMs}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

\citet{pratt81} and \citet{burridge81} showed (seemingly independently of
each other) that the log-likelihood function of the basic cumulative link
model~(\ref{eq:BasicCLM}) is concave. This means that there is a unique
global optimum of the log-likelihood function and therefore no risk of
convergence to a local optimum. It also means that the Hessian matrix for the
negative log-likelihood is strictly positive definite and therefore also that
the Newton step is always in the direction of higher likelihood. The genuine
Newton step may be too long to actually cause an increase in likelihood from
one iteration to the next (this is called ``overshoot''). This is easily
overcome by successively halving the length of the Newton step until an
increase in likelihood is achieved.

Exceptions to the strict concavity of the log-likelihood function include
models using the cauchit link, flexible link functions as well as models
with scale effects. Notably, models with structured thresholds as well as
nominal effects do not affect the linearity of the predictor $\eta_{ij}$,
and so are also guaranteed to have concave log-likelihoods.

The restriction of the threshold parameters $\{\theta_j\}$ being
non-decreasing is dealt with by defining
$\ell(\bm\theta, \bm\beta; \bm y) = -\infty$ when $\{\theta_j\}$ are not in
a non-decreasing sequence. If the algorithm attempts evaluation at such
illegal values, step-halving effectively brings the algorithm back on track.
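
The step-halving idea is illustrated by the following self-contained sketch
of a one-parameter Newton-Raphson iteration; it is a generic illustration,
not the actual fitting code in \pkg{ordinal}:
<<newton-sketch, eval=FALSE>>=
## Generic 1-D Newton-Raphson with step-halving (illustration only).
## nll, gr and he are the negative log-likelihood and its first two
## derivatives; nll may return Inf at illegal parameter values.
newton_sh <- function(nll, gr, he, x0, maxit = 50, tol = 1e-8) {
  x <- x0
  for (i in seq_len(maxit)) {
    step <- gr(x) / he(x)          # the genuine Newton step
    lam <- 1
    while (nll(x - lam * step) > nll(x) && lam > 1e-10)
      lam <- lam / 2               # halve the step upon overshoot
    x <- x - lam * step
    if (abs(gr(x)) < tol) break    # absolute convergence criterion
  }
  x
}
## Example: ML estimation of a Poisson mean (the MLE is mean(y) = 2.75)
y   <- c(1, 3, 2, 5)
nll <- function(m) if (m <= 0) Inf else length(y) * m - sum(y) * log(m)
gr  <- function(m) length(y) - sum(y) / m
he  <- function(m) sum(y) / m^2
newton_sh(nll, gr, he, x0 = 1)
@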
Other implementations of CLMs re-parameterize $\{\theta_j\}$ such that the non-decreasing nature of $\{\theta_j\}$ is enforced by the parameterization; for example, \code{MASS::polr} (package version 7.3-49) optimizes the likelihood using
\begin{equation*}
  \tilde\theta_1 = \theta_1, ~\tilde{\theta}_2 = \exp(\theta_2 - \theta_1),~\ldots, ~ \tilde{\theta}_{J-1} = \exp(\theta_{J-1} - \theta_{J-2})~.
\end{equation*}
This is deliberately not used in \pkg{ordinal} because, in our experience, the log-likelihood function is generally closer to quadratic in the original parameterization, which facilitates faster convergence.

%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Starting values}
%%%%%%%%%%%%%%%%%%%%%

For the basic CLMs~(\ref{eq:BasicCLM}) the threshold parameters are initialized to an increasing sequence such that the probability mass of a logistic distribution between consecutive thresholds (and below the lowest and above the highest threshold) is constant. The regression parameters $\bm\beta$, scale parameters $\bm\zeta$ as well as nominal effects $\bm\beta^*$ are initialized to 0. If the model specifies a cauchit link or includes scale parameters, estimation starts at the parameter estimates of a model using the probit link and/or without the scale part of the model.

%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Estimation problems}
%%%%%%%%%%%%%%%%%%%%%

With many nominal effects it may be difficult to find a model in which the threshold parameters are strictly increasing for all combinations of the parameters. Upon convergence of the NR algorithm the model evaluates the $\bm\Theta$-matrix and checks that each row of threshold estimates is increasing.

When a continuous variable is included among the nominal effects, it is often helpful if the continuous variable is centered at an appropriate value (at least within the observed range of the data). This is because $\{\theta_j\}$ represent the thresholds when the continuous variable is zero and $\{\theta_j\}$ are enforced to be a non-decreasing sequence. Since the nominal effects represent different slopes for the continuous variable, the thresholds will necessarily be ordered differently at some other value of the continuous variable.

%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Convergence codes}
%%%%%%%%%%%%%%%%%%%%%

Irrespective of the fitting algorithm, \pkg{ordinal} reports the following convergence codes for CLMs, in which negative values indicate convergence failure:
%
\begin{description}
\item[-3] Not all thresholds are increasing. This is only possible with nominal effects and the resulting fit is invalid.
\item[-2] The Hessian has at least one negative eigenvalue. This means that the point at which the algorithm terminated does not represent an optimum.
\item[-1] The absolute convergence criterion (maximum absolute gradient) was not satisfied. This means that the algorithm could not get close enough to a stationary point of the log-likelihood function.
\item[0] Successful convergence.
\item[1] The Hessian is singular (i.e., at least one eigenvalue is zero). This means that some parameters are not uniquely determined.
\end{description}
%
Note that with convergence code \textbf{1} the optimum of the log-likelihood function has been found, although it is not a single point but a line (or, in general, a hyperplane), so while some parameters are not uniquely determined, the value of the likelihood is valid and can be compared to that of other models.
In addition to these convergence codes, the NR algorithm in \pkg{ordinal} reports the following messages:
\begin{description}
\item[0] Absolute and relative convergence criteria were met.
\item[1] The absolute convergence criterion was met, but the relative criterion was not met.
\item[2] The iteration limit was reached.
\item[3] The step factor was reduced below its minimum.
\item[4] The maximum number of consecutive Newton modifications was reached.
\end{description}
Note that convergence is assessed irrespective of potential messages from the fitting algorithm and irrespective of whether the tailored NR algorithm or a general-purpose quasi-Newton optimizer is used.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section[Fitting cumulative link models in ordinal with clm]{Fitting cumulative link models in \pkg{ordinal} with \code{clm}}
\label{sec:fitting-clms}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

The \code{clm} function takes the following arguments:
%
<<echo=FALSE>>=
clm_args <- gsub("function ", "clm", deparse(args(clm)))
cat(paste(clm_args[-length(clm_args)], "\n"))
@
%
Several arguments are standard and well-known from \code{lm} and \code{glm} and will not be described in detail; \code{formula}, \code{data}, \code{weights}, \code{subset} and \code{na.action} are all parts of the standard model specification in \proglang{R}. \code{scale} and \code{nominal} are interpreted as \proglang{R}-formulae with no left-hand sides and specify the scale and nominal effects of the model, respectively; see sections~\ref{sec:scale-effects} and \ref{sec:nominal-effects} for details. \code{start} is an optional vector of starting values; \code{doFit} can be set to \code{FALSE} to prompt \code{clm} to return a model \emph{environment}, for details see section~\ref{sec:customized-modelling}; \code{model} controls whether the \code{model.frame} should be included in the returned model fit; \code{link} specifies the link function and \code{threshold} specifies an optional threshold structure, for details see section~\ref{sec:threshold-effects}.

Note the absence of a separate \code{offset} argument. Since \code{clm} allows for different offsets in \code{formula} and \code{scale}, offsets have to be specified within each formula, e.g., \verb!scale = ~ x1 + offset(x2)!.

Methods for \code{clm} model fits are summarized in Table~\ref{tab:clm_methods} and introduced in the following sections.

Control parameters can either be specified as a named list, among the optional \code{...} arguments, or directly as a call to \code{clm.control} --- in the first two cases the arguments are passed on to \code{clm.control}. \code{clm.control} takes the following arguments:
%
<<echo=FALSE>>=
cc_args <- gsub("function ", "clm.control", deparse(args(clm.control)))
cat(paste(cc_args[-length(cc_args)], "\n"))
@
%
The \code{method} argument specifies the optimization and/or return method. The default estimation method (\code{Newton}) is the regularized Newton-Raphson estimation scheme described in appendix~\ref{sec:algorithm}; options \code{model.frame} and \code{design} prompt \code{clm} to return, respectively, the \code{model.frame} and a list of objects that represent the internal representation, instead of fitting the model; options \code{ucminf}, \code{nlminb} and \code{optim} represent different general-purpose optimizers which may be used to fit the model (the former from package \pkg{ucminf} \citep{ucminf}, the latter two from package \pkg{stats}).
The \code{sign.location} and \code{sign.nominal} options allow the user to flip the signs on the location and nominal model terms. The \code{convergence} argument instructs \code{clm} how to alert the user of potential convergence problems; \code{...} are optional arguments passed on to the general-purpose optimizers; \code{trace} applies across all optimizers and positive values lead to printing of progress during iterations; the remaining arguments (\code{maxIter, gradTol, maxLineIter, relTol, tol}) control the behavior of the regularized NR algorithm described in appendix~\ref{sec:algorithm}.

%%%%%%%%%%%%%%%%%%%%%
\subsection[Fitting a basic cumulative link model with clm]{Fitting a basic cumulative link model with \code{clm}}
\label{sec:fitting-basic-clm}
%%%%%%%%%%%%%%%%%%%%%

In the following examples we will use the wine data from \citet{randall89} available in the object \code{wine} in package \pkg{ordinal}, cf., Table~\ref{tab:wineData}. The data represent a factorial experiment on factors determining the bitterness of wine with 1 = ``least bitter'' and 5 = ``most bitter''. Two treatment factors (temperature and contact) each have two levels. Temperature and contact between juice and skins can be controlled when crushing grapes during wine production. Nine judges each assessed wine from two bottles from each of the four treatment conditions, hence there are 72 observations in all. The main objective is to examine the effect of contact and temperature on the perceived bitterness of wine.

\begin{table}[t!]
\centering
\begin{tabular}{llrrrrr}
  \hline
  & & \multicolumn{5}{c}{Least--Most bitter} \\
  \cline{3-7}
<<echo=FALSE, results=tex>>=
## data(wine)
tab <- with(wine, table(temp:contact, rating))
mat <- cbind(rep(c("cold", "warm"), each = 2),
             rep(c("no", "yes"), 2), tab)
colnames(mat) <- c("Temperature", "Contact", paste("~~", 1:5, sep = ""))
xtab <- xtable(mat)
print(xtab, only.contents = TRUE, include.rownames = FALSE,
      sanitize.text.function = function(x) x)
@
\end{tabular}
\caption{The number of ratings from nine judges in bitterness categories 1--5. Wine data from \citet{randall89} aggregated over bottles and judges.%
  \label{tab:wineData}}
\end{table}%

Initially we consider the following cumulative link model for the wine data:
\begin{equation}
  \label{eq:CLM}
  \begin{array}{c}
    \textup{logit}(P(Y_i \leq j)) = \theta_j - \beta_1 (\mathtt{temp}_i) - \beta_2(\mathtt{contact}_i) \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{array}
\end{equation}%
%
where $\beta_1(\mathtt{temp}_i)$ attains the values $\beta_1(\mathtt{cold})$ and $\beta_1(\mathtt{warm})$, and $\beta_2(\mathtt{contact}_i)$ attains the values $\beta_2(\mathtt{no})$ and $\beta_2(\mathtt{yes})$. The effect of temperature in this model is illustrated in Figure~\ref{fig:standard_clm}. This is a model for the cumulative probability of the $i$th rating falling in the $j$th category or below, where $i$ indexes all observations ($n=72$), $j = 1, \ldots, J$ indexes the response categories ($J = 5$) and $\theta_j$ is the intercept or threshold for the $j$th cumulative logit: $\textup{logit}(P(Y_i \leq j))$.

Fitting the model with \code{clm} we obtain:
<<>>=
library("ordinal")
fm1 <- clm(rating ~ temp + contact, data = wine)
summary(fm1)
@
The \code{summary} method prints basic information about the fitted model.
% most of which is self explanatory.
%
The primary result is the coefficient table with parameter estimates, standard errors and Wald-based $p$~values for tests of the parameters being zero.
If one of the flexible link functions (\code{link = "log-gamma"} or \code{link = "Aranda-Ordaz"}) is used, a coefficient table for the link parameter $\lambda$ is also included. The maximum likelihood estimates of the model coefficients are:%
%
\begin{equation}
  \label{eq:parameters}
  \begin{gathered}
    \hat\beta_1(\mathtt{warm} - \mathtt{cold})= 2.50, ~~\hat\beta_2(\mathtt{yes} - \mathtt{no}) = 1.53, \\
    \{\hat\theta_j\} = \{-1.34,~ 1.25,~ 3.47,~ 5.01\}.
  \end{gathered}
\end{equation}
%
The coefficients for \code{temp} and \code{contact} are positive, indicating that higher temperature and contact increase the bitterness of wine, i.e., rating in higher categories is more likely.
%
Because treatment contrasts, the default in \proglang{R}, were used, $\{\hat\theta_j\}$ refer to the thresholds at the setting with $\mathtt{temp}_i = \mathtt{cold}$ and $\mathtt{contact}_i = \mathtt{no}$.
%
Three natural and complementary interpretations of this model are:
%
\begin{enumerate}
\item The thresholds $\{ \hat\theta_j \}$ at $\mathtt{contact}_i = \mathtt{yes}$ conditions have been shifted a constant amount $1.53$ relative to the thresholds $\{ \hat\theta_j \}$ at $\mathtt{contact}_i = \mathtt{no}$ conditions.
\item The location of the latent distribution has been shifted $+1.53 \sigma^*$ (scale units) at $\mathtt{contact}_i = \mathtt{yes}$ relative to $\mathtt{contact}_i = \mathtt{no}$.
\item The odds ratio of bitterness being rated in category $j$ or above ($\mathrm{OR}(Y \geq j)$) is $\exp(\hat\beta_2(\mathtt{yes} - \mathtt{no})) = 4.61$.
\end{enumerate}
%
Note that there are no $p$~values displayed for the threshold coefficients because it usually does not make sense to test the hypothesis that they equal zero.

\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-fig2}
  \caption{Illustration of the effect of temperature in the standard cumulative link model in Equation~\ref{eq:CLM} for the wine data in Table~\ref{tab:wineData} through a latent variable interpretation.\label{fig:standard_clm}}
\end{figure}

The number of Newton-Raphson iterations is given below \code{niter} with the number of step-halvings in parentheses. \code{max.grad} is the maximum absolute gradient of the log-likelihood function with respect to the parameters.
%
The condition number of the Hessian (\code{cond.H}) is well below $10^4$ and so does not indicate a problem with the model.

The \code{anova} method produces an analysis of deviance (ANODE) table, also based on Wald $\chi^2$-tests, and provides tables with type I, II and III hypothesis tests using the \proglang{SAS} definitions. A type I table, the \proglang{R} default for linear models fitted with \code{lm}, sequentially tests terms from first to last; type II tests attempt to respect the principle of marginality and test each term after all others while ignoring higher-order interactions; and type III tables are based on orthogonalized contrasts, so tests of main effects or lower-order terms can often be interpreted as averaged over higher-order terms. Note that in this implementation any type of contrasts (e.g., \code{contr.treatment} or \code{contr.SAS} as well as \code{contr.sum}) can be used to produce type III tests. For further details on the interpretation and definition of type I, II and III tests, please see \citet{kuznetsova17} and \citet{SAStype}.
Here we illustrate with a type III ANODE table, which in this case is equivalent to type I and II tables since the variables are balanced:
<<>>=
anova(fm1, type = "III")
@

Likelihood ratio tests, though asymptotically equivalent to the Wald tests, usually better reflect the evidence in the data. These tests can be obtained by comparing nested models with the \code{anova} method; for example, the likelihood ratio test of \code{contact} is
<<>>=
fm2 <- clm(rating ~ temp, data = wine)
anova(fm2, fm1)
@
which in this case produces a slightly lower $p$~value. Equivalently we can use \code{drop1} to obtain likelihood ratio tests of the explanatory variables while \emph{controlling} for the remaining variables:
<<>>=
drop1(fm1, test = "Chi")
@
Likelihood ratio tests of the explanatory variables while \emph{ignoring} the remaining variables are provided by the \code{add1} method:
<<>>=
fm0 <- clm(rating ~ 1, data = wine)
add1(fm0, scope = ~ temp + contact, test = "Chi")
@
%
Confidence intervals of the parameter estimates are provided by the \code{confint} method, which by default computes the so-called profile likelihood confidence intervals:
<<>>=
confint(fm1)
@

The cumulative link model in Equation~\ref{eq:CLM} assumes that the thresholds $\{\theta_j\}$ are constant for all values of the remaining explanatory variables, here \code{temp} and \code{contact}. This is generally referred to as the \emph{proportional odds assumption} or \emph{equal slopes assumption}. We can relax this assumption in two general ways: with nominal effects and with scale effects, examples of which will now be presented in turn.

%%%%%%%%%%%%%%%%%%%%%
\subsection{Partial and non-proportional odds: nominal effects}
\label{sec:nominal-effects}
%%%%%%%%%%%%%%%%%%%%%

The CLM in Equation~\ref{eq:CLM} specifies a structure in which the regression parameters $\bm\beta$ are not allowed to vary with $j$, or equivalently that the threshold parameters $\{\theta_j\}$ are not allowed to depend on regression variables. In the following model this assumption is relaxed and the threshold parameters are allowed to depend on \code{contact}. This leads to so-called partial proportional odds for \code{contact}:
%
\begin{equation}
  \label{eq:CLM_nominal}
  \begin{array}{c}
    \textup{logit}(P(Y_i \leq j)) = \theta_j + \tilde{\beta}_{j} (\mathtt{contact}_i) - \beta (\mathtt{temp}_i) \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{array}
\end{equation}
%
One way to view this model is to think of two sets of thresholds being applied at conditions with and without contact, as illustrated in Figure~\ref{fig:clm_nominal}. The model is specified as follows with \code{clm}:
<<>>=
fm.nom <- clm(rating ~ temp, nominal = ~ contact, data = wine)
summary(fm.nom)
@
As can be seen from the output of \code{summary}, no regression coefficient is estimated for \code{contact}; instead, additional threshold coefficients are estimated.
%
The naming and meaning of the threshold coefficients depend on the contrast coding applied to \code{contact}. Here the \proglang{R} default treatment contrasts (\code{"contr.treatment"}) are used, and the coefficients translate to the following parameter functions:
\begin{equation}
  \label{eq:nom_parameters}
  \begin{gathered}
    \hat\beta(\mathtt{warm} - \mathtt{cold})= 2.52, \\
    \{\hat\theta_j\} = \{-1.32,~ 1.25,~ 3.55,~ 4.66\}, \\
    \{ \hat{\tilde{\beta}}_j(\mathtt{yes} - \mathtt{no}) \} = \{-1.62,~ -1.51,~ -1.67,~ -1.05\}.
  \end{gathered}
\end{equation}
%
Again $\{\hat\theta_j\}$ refer to the thresholds at the $\mathtt{temp}_i = \mathtt{cold}$ and $\mathtt{contact}_i = \mathtt{no}$ settings, while the thresholds at $\mathtt{temp}_i = \mathtt{cold}$ and $\mathtt{contact}_i = \mathtt{yes}$ are $\{ \hat\theta_j + \hat{\tilde{\beta}}_j(\mathtt{yes} - \mathtt{no}) \}$.
%
The odds ratio of bitterness being rated in category $j$ or above ($\mathrm{OR}(Y \geq j)$) now depends on $j$: $\{\exp(-\hat{\tilde{\beta}}_j(\mathtt{yes} - \mathtt{no}))\} = \{ 5.03,~ 4.53,~ 5.34,~ 2.86\}$.
%
\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-figNom2}
  \caption{Illustration of nominal effects leading to different sets of thresholds being applied for each level of \code{contact} in a latent variable interpretation, cf., Equation~\ref{eq:CLM_nominal}.\label{fig:clm_nominal}}
\end{figure}

The resulting thresholds for each level of \code{contact}, i.e., the estimated $\bm\Theta$-matrix, can be extracted with:
<<>>=
fm.nom$Theta
@
As part of the convergence checks, \code{clm} checks the validity of $\bm\Theta$, i.e., that each row of the threshold matrix is non-decreasing.

We can perform a likelihood ratio test of the proportional odds assumption for \code{contact} by comparing the likelihoods of models (\ref{eq:CLM}) and (\ref{eq:CLM_nominal}) as follows:
<<>>=
anova(fm1, fm.nom)
@
There is only a small difference in the log-likelihoods of the two models and the test is non-significant. Thus there is no evidence that the proportional odds assumption is violated for \code{contact}.

It is not possible to estimate both $\beta_2(\mathtt{contact}_i)$ and $\tilde{\beta}_{j}(\mathtt{contact}_i)$ in the same model. Consequently, variables that appear in \code{nominal} cannot also enter in \code{formula}. For instance, not all parameters are identifiable in the following model:
<<>>=
fm.nom2 <- clm(rating ~ temp + contact, nominal = ~ contact, data = wine)
@
We are made aware of this when summarizing or printing the model, in which the coefficient for \code{contactyes} is \code{NA}:
<<>>=
fm.nom2
@

To test the proportional odds assumption for all variables, we can use
<<>>=
nominal_test(fm1)
@
This function \emph{moves} all terms in \code{formula} to \code{nominal}, \emph{copies} all terms in \code{scale} to \code{nominal} one by one, and produces an \code{add1}-like table with likelihood ratio tests of each term.

%%%%%%%%%%%%%%%%%%%%%
\subsection{Modelling scale effects}
\label{sec:scale-effects}
%%%%%%%%%%%%%%%%%%%%%
%
To allow the scale of the latent variable distribution to depend on explanatory variables, we could, for instance, consider the following model in which the scale is allowed to differ between cold and warm conditions. The location of the latent distribution is allowed to depend on both temperature and contact:
\begin{equation}
  \label{eq:CLM_scale_wine}
  \begin{gathered}
    \textup{logit}(P(Y_i \leq j)) = \frac{\theta_j - \beta_1 (\mathtt{temp}_i) - \beta_{2} (\mathtt{contact}_i)} {\exp( \zeta (\mathtt{temp}_i))} \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{gathered}
\end{equation}
This model structure is illustrated in Figure~\ref{fig:clm_scale} and can be estimated with:
<<>>=
fm.sca <- clm(rating ~ temp + contact, scale = ~ temp, data = wine)
summary(fm.sca)
@
In a latent variable interpretation the location of the latent distribution is shifted $2.63\sigma^*$ (scale units) from cold to warm conditions and $1.59\sigma^*$ from absence to presence of contact.
The scale of the latent distribution is $\sigma^*$ at cold conditions but $\sigma^* \exp(\zeta(\mathtt{warm} - \mathtt{cold})) = \sigma^*\exp(0.095) = 1.10 \sigma^*$, i.e., 10\% higher, at warm conditions. However, observe that the $p$~value for the scale effect in the summary output shows that the ratio of scales is not significantly different from 1 (or, equivalently, that the difference on the log scale is not significantly different from 0).

Scale effects offer an alternative to nominal effects (partial proportional odds) when non-proportional odds structures are encountered in the data. Using scale effects is often a better approach because the model is well-defined for all values of the explanatory variables, irrespective of translation and scaling of covariates. Scale effects also use fewer parameters, which often leads to more sensitive tests than nominal effects.

Potential scale effects of variables already included in \code{formula} can be discovered using \code{scale_test}. This function adds each model term in \code{formula} to \code{scale} in turn and reports the likelihood ratio statistic in an \code{add1}-like fashion:
<<>>=
scale_test(fm1)
@
The \code{confint} and \code{anova} methods apply with no change to models with scale and nominal parts, but the \code{drop1}, \code{add1} and \code{step} methods will only drop or add terms to the (location) \code{formula}.

\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-figSca}
  \caption{Illustration of scale effects leading to different scales of the latent variable, cf., Equation~\ref{eq:CLM_scale_wine}.\label{fig:clm_scale}}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%
\subsection{Structured thresholds}
\label{sec:threshold-effects}
%%%%%%%%%%%%%%%%%%%%%

In section~\ref{sec:nominal-effects} nominal effects were described, in which the assumption that regression parameters have the same effect across all thresholds was relaxed. In this section additional restrictions on the thresholds will be imposed instead. The following model requires that the thresholds $\{ \theta_j \}$ are equidistant, i.e., equally spaced. This allows us to assess the assumption that judges are using the response scale in such a way that there is the same distance between adjacent response categories, i.e., that $\theta_j - \theta_{j-1} = \textup{constant}$ for $j = 2, \ldots, J-1$. A model with equidistant thresholds is illustrated in Figure~\ref{fig:clm_structured_thresholds} and can be fitted with:
<<>>=
fm.equi <- clm(rating ~ temp + contact, data = wine,
               threshold = "equidistant")
summary(fm.equi)
@
The parameters determining the thresholds are now the first threshold (\code{threshold.1}) and the spacing among consecutive thresholds (\code{spacing}). The mapping to this parameterization is stored in the transpose of the Jacobian matrix (\code{tJac}) component of the model fit. This makes it possible to extract the thresholds imposed by the equidistance structure with
<<>>=
drop(fm.equi$tJac %*% coef(fm.equi)[c("threshold.1", "spacing")])
@
These thresholds are in fact already stored in the \code{Theta} component of the model fit.
%
The following shows that the average distance between consecutive thresholds in \code{fm1}, which did not restrict the thresholds, is very close to the \code{spacing} parameter from \code{fm.equi}:
<<>>=
mean(diff(coef(fm1)[1:4]))
@

One advantage of imposing additional restrictions on the thresholds is the use of fewer parameters.
Whether the restrictions are warranted by the data can be assessed in a likelihood ratio test:
<<>>=
anova(fm1, fm.equi)
@
In this case the test is non-significant, so there is no considerable loss of fit at the gain of saving two parameters; hence we may retain the model with equally spaced thresholds.

Note that the shape of the latent distribution (determined by the choice of link function) also affects the distances between the thresholds. If thresholds are equidistant under a normal distribution (i.e., with the probit link) they will in general\footnote{The exception is perfect fits such as CLMs with flexible thresholds and no predictors, where models have the same likelihood irrespective of link function.} not be equidistant under a differently shaped latent distribution, such as a skewed latent distribution (e.g., with the log-log or complementary log-log link).

\begin{figure}
  \centering
  \includegraphics[width=6cm]{./static_figs/fig-figFlex}
  \includegraphics[width=6cm]{./static_figs/fig-figEqui}
  \caption{Illustration of flexible (left) and equidistant (right) thresholds being applied in a cumulative link model in a latent variable interpretation.\label{fig:clm_structured_thresholds}}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%
\subsection{Scale effects, nominal effects and link functions}
%%%%%%%%%%%%%%%%%%%%%

This section presents an example that connects aspects of scale effects, nominal effects and link functions. The example is based on the \code{soup} data available in the \pkg{ordinal} package. This dataset represents a sensory discrimination study of packet soup in which 185 respondents assessed a reference product and one of five test products on an ordinal sureness scale with six levels from ``reference, sure'' to ``test, sure''. The two key explanatory variables in this example are \code{PRODID} and \code{PROD}: \code{PRODID} identifies all six products while \code{PROD} distinguishes test and reference products:
<<>>=
with(soup, table(PROD, PRODID))
@

The so-called bi-normal model plays a special role in the field of signal detection theory \citep{decarlo98, macmillan05} and in sensometrics \citep{christensen11} and assumes the existence of normal latent distributions, potentially with different variances. The bi-normal model can be fitted to ordinal data by identifying it as a CLM with a probit link. The following bi-normal model assumes that the location of the normal latent distribution depends on \code{PRODID} while the scale only varies with \code{PROD}:
<<>>=
fm_binorm <- clm(SURENESS ~ PRODID, scale = ~ PROD, data = soup,
                 link = "probit")
summary(fm_binorm)
@
Here we observe significant differences in scale for reference and test products; this is an example of what would have been denoted non-proportional odds had the link function been the logit. In this context differences in scale are interpreted to mean that a location shift of the latent normal distribution is not enough to represent the data. Another test of such non-location effects is provided by the nominal effects:
<<>>=
fm_nom <- clm(SURENESS ~ PRODID, nominal = ~ PROD, data = soup,
              link = "probit")
@
A comparison of these models shows that the scale effects increase the likelihood substantially using only one extra parameter.
The addition of nominal effects provides a smaller increase in likelihood using three extra parameters:
<<>>=
fm_location <- update(fm_binorm, scale = ~ 1)
anova(fm_location, fm_binorm, fm_nom)
@
Note that both the location-only and bi-normal models are nested under the model with nominal effects, making these models comparable in likelihood ratio tests. This example illustrates an often-seen aspect: models allowing for scale differences frequently capture, with fewer parameters, the majority of the deviations from location-only effects that could otherwise be captured by nominal effects.

The role of link functions in relation to the evidence of non-location effects is also illustrated by this example. If we consider the complementary log-log link, it is apparent that there is no evidence of scale differences. Furthermore, the likelihood of a complementary log-log model with constant scale is almost the same as that of the bi-normal model:
<<>>=
fm_cll_scale <- clm(SURENESS ~ PRODID, scale = ~ PROD, data = soup,
                    link = "cloglog")
fm_cll <- clm(SURENESS ~ PRODID, data = soup, link = "cloglog")
anova(fm_cll, fm_cll_scale, fm_binorm)
@
Using the log-gamma link we can also confirm that a left-skewed latent distribution ($\lambda > 0$) is best supported by the data and that the estimate of $\lambda$ is close to 1, at which the complementary log-log link is obtained:
<<>>=
fm_loggamma <- clm(SURENESS ~ PRODID, data = soup, link = "log-gamma")
summary(fm_loggamma)
@
The analysis of link functions shown here can be thought of as providing a framework analogous to that of Box-Cox transformations for linear models.

%%%%%%%%%%%%%%%%%%%%%
\subsection{Profile likelihood}
%%%%%%%%%%%%%%%%%%%%%

In addition to facilitating the generally quite accurate profile likelihood confidence intervals, which were illustrated in section~\ref{sec:fitting-basic-clm}, the profile likelihood function can also be used to illustrate the relative importance of parameter values. As an example, the profile likelihood of the model coefficients for \code{temp} and \code{contact} in \code{fm1} can be obtained with
%
<<>>=
pr1 <- profile(fm1, alpha = 1e-4)
plot(pr1)
@
The resulting plots are provided in Figure~\ref{fig:ProfileLikelihood}. The \code{alpha} argument controls how far from the maximum likelihood estimate the likelihood function should be profiled: the profile strays no further from the MLE than the limits of a (\code{1 - alpha})-level profile likelihood confidence interval. From the relative profile likelihood in Figure~\ref{fig:ProfileLikelihood} for \code{tempwarm} we see that parameter values between 1 and 4 are reasonably well supported by the data, and values outside this range have little likelihood. Values between 2 and 3 are very well supported by the data and have high likelihood.

\setkeys{Gin}{width=.45\textwidth}
\begin{figure}
  \centering
<<echo=FALSE, fig=TRUE>>=
plot(pr1, which.par = 1)
@
<<echo=FALSE, fig=TRUE>>=
plot(pr1, which.par = 2)
@
\caption{Relative profile likelihoods for the regression parameters in \code{fm1} for the wine data. Horizontal lines indicate 95\% and 99\% confidence bounds.}
\label{fig:ProfileLikelihood}
\end{figure}

Profiling is implemented for regression ($\beta$) and scale ($\zeta$) parameters but is not available for threshold, nominal and flexible link parameters.
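Since \code{pr1} already contains the profiled likelihood, it can be reused to compute the profile likelihood confidence intervals of section~\ref{sec:fitting-basic-clm} without re-profiling. A minimal sketch (assuming the usual \code{level} argument of \code{confint}), not evaluated here:
<<eval = FALSE>>=
## Not run here: profile likelihood confidence intervals computed
## directly from the profile object at two confidence levels:
confint(pr1, level = 0.95)
confint(pr1, level = 0.99)
@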
%%%%%%%%%%%%%%%%%%%%%
\subsection{Assessment of model convergence}
%%%%%%%%%%%%%%%%%%%%%

%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Likelihood slices}
%%%%%%%%%%%%%%%%%%%%%

The maximum likelihood estimates of the parameters in cumulative link models do not have closed-form expressions, so iterative methods have to be applied to fit the models. Further, CLMs are non-linear models and in general the likelihood function is not guaranteed to be well-behaved or even unimodal. In addition, the special role of the threshold parameters and the restriction that they be ordered can affect the appearance of the likelihood function.

To confirm that an unequivocal optimum has been reached, and that the likelihood function is reasonably well-behaved around it, we can inspect slices of the likelihood function in a neighborhood around the reported optimum. The following code produces the slices shown in Figure~\ref{fig:slice1}, which display the shape of the log-likelihood function in a fairly wide neighborhood around the reported MLE (here we use $\lambda=5$ curvature units), as well as its quadratic approximation.
<<>>=
slice.fm1 <- slice(fm1, lambda = 5)
par(mfrow = c(2, 3))
plot(slice.fm1)
@
Figure~\ref{fig:slice1} shows that the log-likelihood function is fairly well-behaved and rather closely quadratic for most parameters.

\setkeys{Gin}{width=.32\textwidth}
\begin{figure}
  \centering
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 1)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 2)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 3)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 4)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 5)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice.fm1, parm = 6)
@
\caption{Slices of the (negative) log-likelihood function (solid) for parameters in \code{fm1} for the wine data. Dashed lines indicate quadratic approximations to the log-likelihood function and vertical bars indicate maximum likelihood estimates.}
\label{fig:slice1}
\end{figure}

Looking at the log-likelihood function much closer to the reported optimum (using $\lambda = 10^{-5}$) we can probe how accurately the parameter estimates are determined. The likelihood slices in Figure~\ref{fig:slice2}, which are produced with the following code, show that the parameters are determined accurately with at least five correct decimals. Slices are shown for two parameters; the slices for the remaining four parameters are very similar.
<<>>=
slice2.fm1 <- slice(fm1, parm = 4:5, lambda = 1e-5)
par(mfrow = c(1, 2))
plot(slice2.fm1)
@

\setkeys{Gin}{width=.45\textwidth}
\begin{figure}
  \centering
<<echo=FALSE, fig=TRUE>>=
plot(slice2.fm1, parm = 1)
@
<<echo=FALSE, fig=TRUE>>=
plot(slice2.fm1, parm = 2)
@
\caption{Slices of the (negative) log-likelihood function (solid) for parameters in \code{fm1} for the wine data very close to the MLEs. Dashed lines (indistinguishable from the solid lines) indicate quadratic approximations to the log-likelihood function and vertical bars indicate the maximum likelihood estimates.}
\label{fig:slice2}
\end{figure}

%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Parameter accuracy}
%%%%%%%%%%%%%%%%%%%%%

As discussed in appendix~\ref{sec:algorithm}, the method independent error estimate provides an assessment of the accuracy with which the ML estimates of the parameters have been determined by the fitting algorithm.
This error estimate is implemented in the \code{convergence} method, which we now illustrate on a model fit:
<<>>=
convergence(fm1)
@
The most important information is the number of correct decimals (\code{Cor.Dec}) and the number of significant digits (\code{Sig.Dig}) with which the parameters are determined. In this case all parameters are very accurately determined, so there is no reason to lower the convergence tolerance. The \code{logLik.error} shows that the error in the reported value of the log-likelihood is below $10^{-10}$, which is by far small enough that likelihood ratio tests based on this model are accurate.

Note that the assessment of the number of correctly determined decimals and significant digits is only reliable sufficiently close to the optimum, so in practice we caution against this assessment if the algorithm did not converge successfully.

%%%%%%%%%%%%%%%%%%%%%
\subsection{Fitted values and predictions}
%%%%%%%%%%%%%%%%%%%%%

Several types of fitted values and predictions can be extracted from a CLM depending on how it is viewed. By \emph{fitted values} we denote the values ($i=1, \ldots, n$)
\begin{equation*}
  \hat{\tilde\pi}_i = \tilde\pi_i(\hat{\bm\psi})
\end{equation*}
that is, the values of $\tilde\pi_i$, cf., Equation~\ref{eq:clm-log-likelihood}, evaluated at the ML estimates $\hat{\bm\psi}$. These are the values returned by the \code{fitted} and \code{fitted.values} extractor methods and stored in the \code{fitted.values} component of the model fit.

The values of $\pi_{ij}$ (cf., Equation~\ref{eq:multinom_pmf}) evaluated at the ML estimates of the parameters (i.e., $\hat\pi_{ij}$) can also be thought of as fitted values for the multinomially distributed variable $\bm Y_i^*$. These values can be obtained from the model fit by use of the \code{predict} method:
<<>>=
head(pred <- predict(fm1, newdata = subset(wine, select = -rating))$fit)
@
Note that the original data set should be supplied in the \code{newdata} argument \emph{without} the response variable (here \code{rating}). If the response variable is \emph{present} in \code{newdata}, predictions are produced only for the rating categories that were observed, and we get back the fitted values:
<<>>=
stopifnot(isTRUE(all.equal(fitted(fm1), t(pred)[ t(col(pred) == wine$rating)])),
          isTRUE(all.equal(fitted(fm1), predict(fm1, newdata = wine)$fit)))
@

Class predictions are also available and are defined here as the response class with the highest probability; that is, for the $i$'th observation the class prediction is the mode of $\bm\pi_{i}$. To obtain class predictions use \code{type = "class"}, as illustrated in the following small table:
<<>>=
newData <- expand.grid(temp = levels(wine$temp),
                       contact = levels(wine$contact))
cbind(newData, round(predict(fm1, newdata = newData)$fit, 3),
      "class" = predict(fm1, newdata = newData, type = "class")$fit)
@
Other definitions of class predictions can be applied, e.g., nearest mean predictions:
<<>>=
head(apply(pred, 1, function(x) round(weighted.mean(1:5, x))))
@
which in this case happen to be identical to the default class predictions.
<<echo=FALSE>>=
p1 <- apply(predict(fm1, newdata = subset(wine, select=-rating))$fit, 1,
            function(x) round(weighted.mean(1:5, x)))
p2 <- as.numeric(as.character(predict(fm1, type = "class")$fit))
stopifnot(isTRUE(all.equal(p1, p2, check.attributes = FALSE)))
@

Standard errors and confidence intervals of predictions are also available, for example:
<<>>=
predictions <- predict(fm1, se.fit = TRUE, interval = TRUE)
head(do.call("cbind", predictions))
@
where the default 95\% confidence level can be changed with the \code{level} argument. Here the standard errors of fitted values or predictions, $\hat{\tilde{\pi}} = \tilde{\pi}(\hat{\bm\psi})$, are obtained by application of the delta method:
\begin{equation*}
  \mathsf{Var}(\hat{\tilde{\bm\pi}}) = \bm C \mathsf{Var}(\hat{\bm\psi}) \bm C^\top, \quad
  \bm C = \frac{\partial \tilde{\bm\pi}(\bm\psi)}{\partial \bm\psi} \Big|_{\bm\psi = \hat{\bm\psi}}
\end{equation*}
where $\mathsf{Var}(\hat{\bm\psi})$ is the estimated variance-covariance matrix of the parameters $\bm\psi$ evaluated at the ML estimates $\hat{\bm\psi}$, as given by the observed Fisher information matrix; finally the standard errors are extracted as the square roots of the diagonal elements of $\mathsf{Var}(\hat{\tilde{\bm\pi}})$.

Since symmetric confidence intervals for probabilities are not appropriate (unless perhaps when the probabilities are close to one half), a more generally applicable approach is to form symmetric Wald intervals on the logit scale and subsequently transform the confidence bounds to the probability scale. \code{predict.clm} takes this approach and computes the standard error of $\hat\kappa_i = \mathrm{logit}(\hat{\tilde{\pi}}_i)$ by yet another application of the delta method:
\begin{equation*}
  \mathrm{se}(\hat\kappa_i) = \frac{\partial g(\hat{\tilde{\pi}}_i)}{\partial \hat{\tilde{\pi}}_i} \mathrm{se}(\hat{\tilde{\pi}}_i) = \frac{\mathrm{se}(\hat{\tilde{\pi}}_i)}{\hat{\tilde{\pi}}_i(1 - \hat{\tilde{\pi}}_i)}, \quad
  g(\hat{\tilde{\pi}}_i) = \log \frac{\hat{\tilde{\pi}}_i}{1 - \hat{\tilde{\pi}}_i}.
\end{equation*}

%%%%%%%%%%%%%%%%%%%%%
\subsection{Model identifiability}
%%%%%%%%%%%%%%%%%%%%%

Unidentifiable models or unidentifiable parameters may occur in CLMs for several reasons, some of which are special to the model class. In this section we describe issues around model identifiability and how these are handled by \code{ordinal::clm}. Material in the remainder of this section is generally on a more advanced level than the preceding sections.

%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Complete separation}
%%%%%%%%%%%%%%%%%%%%%

In binary logistic regression the issue of \emph{complete separation} is well known. This may happen, for example, if only ``success'' or only ``failure'' is observed for a level of a treatment factor. In CLMs the issue may appear even when outcomes are observed in more than one response category. This can be illustrated using the \code{wine} data set if we combine the three central categories:
<<>>=
wine <- within(wine, {
  rating_comb3 <- factor(rating, labels = c("1", "2-4", "2-4", "2-4", "5"))
})
ftable(rating_comb3 ~ temp, data = wine)
fm.comb3 <- clm(rating_comb3 ~ temp, data = wine)
summary(fm.comb3)
@
Here the true ML estimates of the coefficients for \code{temp} and the second threshold are at infinity, but the algorithm in \code{clm} terminates when the likelihood function is sufficiently flat.
This means that the reported values of the coefficients for \code{temp} and the second threshold are arbitrary and will change if the convergence criteria are changed or a different optimization method is used. The standard errors of the coefficients are not available because the Hessian is effectively singular and so cannot be inverted to produce the variance-covariance matrix of the parameters. The ill-determined nature of the Hessian is seen from its very large condition number, \code{cond.H}.

Note, however, that while the model parameters cannot be uniquely determined, the likelihood of the model is well defined and as such it can be compared to the likelihood of other models. For example, we could compare it to a model that excludes \code{temp}:
<<>>=
fm.comb3_b <- clm(rating_comb3 ~ 1, data = wine)
anova(fm.comb3, fm.comb3_b)
@
The difference in log-likelihood is substantial; however, the criteria for the validity of the likelihood ratio test are not fulfilled, so the $p$~value should not be taken at face value.

The complete-separation issue may also appear in less obvious situations. If, for example, the following model allowing for nominal effects of \code{temp} is considered, the issue shows up:
<<>>=
fm.nom2 <- clm(rating ~ contact, nominal = ~ temp, data = wine)
summary(fm.nom2)
@
Analytical detection of which coefficients suffer from unidentifiability due to \emph{complete separation} is a topic for future research and is therefore unavailable in current versions of \pkg{ordinal}.

%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Aliased coefficients}
%%%%%%%%%%%%%%%%%%%%%

Aliased coefficients can occur in all kinds of models that build on a design matrix, including linear models as well as generalized linear models. \code{lm} and \code{glm} determine the rank deficiency of the design matrix using the rank-revealing implementation of the QR decomposition in \code{LINPACK} and display the aliased coefficients as \code{NA}s\footnote{If \code{singular.ok = TRUE}, which is the default.}. Though the QR decomposition is not used during iterations in \code{clm}, it is used initially to determine aliased coefficients. An example is provided using the \code{soup} data available in the \pkg{ordinal} package:
<<>>=
fm.soup <- clm(SURENESS ~ PRODID * DAY, data = soup)
summary(fm.soup)
@
The source of the singularity is revealed in the following table:
<<>>=
with(soup, table(DAY, PRODID))
@
which shows that the third \code{PRODID} was not presented on the second day.

The issue of aliased coefficients extends in CLMs to nominal effects, since the joint design matrix for location and nominal effects will be singular if the same variables are included in both the location and nominal formulae. \code{clm} handles this by not estimating the offending coefficients in the location formula, as illustrated with the \code{fm.nom2} model fit in section~\ref{sec:nominal-effects}.

%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Over-parameterization}
\label{sec:over-parameterization}
%%%%%%%%%%%%%%%%%%%%%

The scope of model structures allowed in \code{clm} makes it possible to specify models which are over-parameterized in ways that do not lead to rank-deficient design matrices and as such are not easily detected before fitting the model.
An example is given here which includes both additive (location) and multiplicative (scale) effects of \code{contact} for a binary response variable, but the issue can also occur with more than two response categories:
<<>>=
wine <- within(wine, {
  rating_comb2 <- factor(rating, labels = c("1-2", "1-2", "3-5", "3-5", "3-5"))
})
ftable(rating_comb2 ~ contact, data = wine)
fm.comb2 <- clm(rating_comb2 ~ contact, scale = ~ contact, data = wine)
summary(fm.comb2)
@
<<echo=FALSE>>=
## Example with unidentified parameters with 3 response categories
## not shown in paper:
wine <- within(wine, {
  rating_comb3b <- rating
  levels(rating_comb3b) <- c("1-2", "1-2", "3", "4-5", "4-5")
})
wine$rating_comb3b[1] <- "4-5" # Remove the zero here to avoid inf MLE
ftable(rating_comb3b ~ temp + contact, data = wine)
fm.comb3_c <- clm(rating_comb3b ~ contact * temp,
                  scale = ~ contact * temp,
                  nominal = ~ contact, data = wine)
summary(fm.comb3_c)
convergence(fm.comb3_c)
@

%%%%%%%%%%%%%%%%%%%%%
\subsection{Customized modelling}
\label{sec:customized-modelling}
%%%%%%%%%%%%%%%%%%%%%

Using the \code{doFit} argument, \code{clm} can be instructed to return a \emph{model environment} that we denote \code{rho}:
<<>>=
rho <- update(fm1, doFit = FALSE)
names(rho)
@
This environment holds a complete specification of the cumulative link model, including the design matrices \code{B1}, \code{B2}, \code{S} and other components. The environment also contains the cumulative distribution function that defines the inverse link function (\code{pfun}) and its first and second derivatives, i.e., the corresponding density function (\code{dfun}) and its gradient (\code{gfun}). Of direct interest here are the parameter vector \code{par} and functions that readily evaluate the negative log-likelihood (\code{clm.nll}), its gradient with respect to the parameters (\code{clm.grad}) and the Hessian (\code{clm.hess}). The negative log-likelihood and its gradient at the starting values are therefore
<<>>=
rho$clm.nll(rho)
c(rho$clm.grad(rho))
@
Similarly, at the MLE they are:
<<>>=
rho$clm.nll(rho, par = coef(fm1))
print(c(rho$clm.grad(rho)), digits = 3)
@
Note that the gradient function \code{clm.grad} assumes that \code{clm.nll} has been evaluated at the current parameter values; similarly, \code{clm.hess} assumes that \code{clm.grad} has been evaluated at the current parameter values. The NR algorithm in \pkg{ordinal} takes advantage of this so as to minimize the computational load.

If interest is in fitting a \emph{custom} CLM with, say, restrictions on the parameter space, this can be achieved by a combination of a general-purpose optimizer and the functions \code{clm.nll} and, optionally, \code{clm.grad}. Assume, for instance, that we know the regression parameters can be no larger than 2; then the model can be fitted with the following code:
<<>>=
nll <- function(par, envir) {
  envir$par <- par
  envir$clm.nll(envir)
}
grad <- function(par, envir) {
  envir$par <- par
  envir$clm.nll(envir)
  envir$clm.grad(envir)
}
nlminb(rho$par, nll, grad, upper = c(rep(Inf, 4), 2, 2), envir = rho)$par
@

%%%%%%%%%%%%%%%%%%%%%
\subsubsection{Constrained partial proportional odds}
%%%%%%%%%%%%%%%%%%%%%

A type of model not implemented in full generality in \pkg{ordinal} is the so-called \emph{constrained} partial proportional odds model proposed by \citet{peterson90}.
These models impose restrictions on the nominal effects considered in section~\ref{sec:nominal-effects} and are well suited to illustrate the customizable modelling options available in the \pkg{ordinal} package. We consider an example from \citet{peterson90} in which disease status is tabulated by smoking status:
<<>>=
artery <- data.frame(disease = factor(rep(0:4, 2), ordered = TRUE),
                     smoker = factor(rep(c("no", "yes"), each = 5)),
                     freq = c(334, 99, 117, 159, 30,
                              350, 307, 345, 481, 67))
addmargins(xtabs(freq ~ smoker + disease, data = artery), margin = 2)
@
The overall odds ratio of smoking is
<<>>=
fm <- clm(disease ~ smoker, weights = freq, data = artery)
exp(fm$beta)
@
showing that, overall, the odds of a worse disease rating are twice as high for smokers as for non-smokers.

Allowing for nominal effects, we see that the log odds-ratio for smoking clearly changes with disease status, and that it does so in an almost linearly decreasing manner:
<<>>=
fm.nom <- clm(disease ~ 1, nominal = ~ smoker, weights = freq,
              data = artery, sign.nominal = "negative")
coef(fm.nom)[5:8]
@

\citet{peterson90} suggested a model which restricts the log odds-ratios to be linearly decreasing with disease status, modelling only the intercept (first threshold) and slope of the log odds-ratios:
<<>>=
coef(fm.lm <- lm(I(coef(fm.nom)[5:8]) ~ I(0:3)))
@

We can implement the log-likelihood of this model as follows. As starting values we combine parameter estimates from \code{fm.nom} and the linear model \code{fm.lm}, and finally optimize the log-likelihood utilizing the \code{fm.nom} model environment:
<<>>=
nll2 <- function(par, envir) {
  envir$par <- c(par[1:4], par[5] + par[6] * (0:3))
  envir$clm.nll(envir)
}
start <- unname(c(coef(fm.nom)[1:4], coef(fm.lm)))
fit <- nlminb(start, nll2, envir = update(fm.nom, doFit = FALSE))
round(fit$par[5:6], 2)
@
Thus the log odds-ratio decreases linearly with disease status, from 1.02 at the first threshold by 0.30 per disease category.

%% -- Summary/conclusions/discussion -------------------------------------------

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{Conclusions}
\label{sec:conclusions}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

This paper has described the class of cumulative link models for the analysis of ordinal data and the implementation of such models in the \proglang{R} package \pkg{ordinal}. It is shown how the package supports model building and assessment of CLMs with scale effects, partial proportional odds, structured thresholds and flexible link functions, and how models can be customized to specific needs. A number of examples have been given, illustrating analyses of ordinal data using \code{clm} in practice.
The significant flexibility of model structures available in \pkg{ordinal} is in one respect a clear advantage, but it can also be a challenge when particular model variants turn out to be unidentifiable. Analytical detection of unidentifiable models could prove very useful in the analysis of ordinal data, but it is, unfortunately, a difficult question that remains a topic of future research.

In a wider data analysis perspective, cumulative link models have been described as a very rich model class --- a class that sits, in a sense, between perhaps the two most important model classes in statistics: linear models and logistic regression models. The greater flexibility of CLMs relative to binary logistic regression models facilitates checking assumptions such as the proportional odds assumption. A latent variable interpretation connects cumulative link models to linear models in a natural way and also motivates non-linear structures such as scale effects. In addition to nominal effects and the non-linear scale effects, the ordered nature of the thresholds gives rise to computational challenges that we have described here and addressed in the \pkg{ordinal} package.

In addition to computational challenges, practical data analysis with CLMs can also be challenging. In our experience, a top-down approach in which a ``full'' model is fitted and gradually simplified is often problematic, not only because this easily leads to unidentifiable models, but also because there are many different ways in which models can be reduced or expanded. A more pragmatic approach is often preferable: understanding the data through plots, tables, and even linear models can aid in finding a suitable intermediate ordinal starting model.

Attempts to identify a ``correct'' model will also often lead to frustration; the greater the model framework, the greater the risk that there are multiple models which fit the data (almost) equally well. It is well-known statistical wisdom that with enough data many goodness-of-fit tests become sensitive to even minor deviations of little practical relevance. This is particularly true for tests of partial proportional odds; in the author's experience almost all CLMs on real data show some evidence of non-proportional odds for one or more variables, but it is not always the case that models with partial or non-proportional odds are the most useful. Such effects complicate the interpretation and often generalize poorly outside the observed data, and models assuming proportional odds or including scale effects are often more appropriate.

%% -- Optional special unnumbered sections -------------------------------------

%%%%%%%%%%%%%%%%%%%%%
\section*{Computational details}
%%%%%%%%%%%%%%%%%%%%%

The results in this paper were obtained using \proglang{R}~\Sexpr{paste(R.Version()[6:7], collapse = ".")} with \pkg{ordinal}, version~\Sexpr{packageVersion("ordinal")}. \proglang{R} itself and all packages used are available from CRAN at \url{https://CRAN.R-project.org/}.
%% -- Bibliography -------------------------------------------------------------

\bibliography{clm_article_refs}

%% -- Appendix (if any) --------------------------------------------------------

\newpage

\begin{appendix}

%%%%%%%%%%%%%%%%%%%%%
\section{A regularized Newton-Raphson algorithm with step halving}
\label{sec:algorithm}
%%%%%%%%%%%%%%%%%%%%%

The regularized NR algorithm is an iterative method that produces a sequence of estimates $\bm\psi^{(0)}, \ldots, \bm\psi^{(i)}, \ldots$, where parenthesized superscripts denote iterations. From the $i$'th estimate, the $(i+1)$'th estimate is given by
%
\begin{equation*}
  \bm\psi^{(i+1)} = \bm\psi^{(i)} - c_1 \bm h^{(i)}, \quad
  \bm h^{(i)} = \tilde{\bm H}(\bm\psi^{(i)}; \bm y)^{-1} \bm g(\bm\psi^{(i)}; \bm y)
\end{equation*}
where
\begin{equation*}
  \tilde{\bm H}(\bm\psi^{(i)}; \bm y) = \bm H(\bm\psi^{(i)}; \bm y) +
  c_2 (c_3 - \min(\bm e^{(i)})) \bm I,
\end{equation*}
%
and where $\bm h^{(i)}$ is the step of the $i$'th iteration; $\bm H(\bm\psi^{(i)}; \bm y)$ and $\bm g(\bm\psi^{(i)}; \bm y)$ are the Hessian and gradient of the negative log-likelihood function with respect to the parameters evaluated at the current estimates; $\bm e^{(i)}$ is a vector of eigenvalues of $\bm H(\bm\psi^{(i)}; \bm y)$; $c_1$ is a scalar parameter which controls the step halving; and $c_2$, $c_3$ are scalar parameters which control the regularization of the Hessian. Regularization is only enforced when the Hessian is not positive definite, so $c_2 = 1$ when $\min(\bm e^{(i)}) < \tau$ and zero otherwise, where $\tau$ is an appropriate tolerance; note that when regularization is enforced, the smallest eigenvalue of $\tilde{\bm H}$ becomes $c_3 > 0$. The choice of $c_3$ is to some extent arbitrary (though required to be positive) and the algorithm in \pkg{ordinal} sets $c_3 = 1$.

Step-halving is enforced when the full step $\bm h^{(i)}$ causes a decrease in the likelihood function, in which case $c_1$ is consecutively halved, $c_1 = \frac{1}{2}, \frac{1}{4}, \frac{1}{8}, \ldots$, until the step $c_1 \bm h^{(i)}$ is small enough to cause an increase in the likelihood or until the maximum allowed number of consecutive step-halvings has been reached.

The algorithm in \pkg{ordinal} also deals with a couple of numerical issues that may occur. For example, the likelihood function may be sufficiently flat that the change in log-likelihood is smaller than what can be represented in double precision, and so, while the new parameters may be closer to the true ML estimates and be associated with a smaller gradient, it is not possible to measure progress by the change in log-likelihood.
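The iteration limits and the convergence tolerances introduced below are exposed through \code{clm.control} (cf., section~\ref{sec:fitting-clms}). A small sketch, not evaluated here; the values shown are illustrative assumptions rather than recommendations:
<<eval = FALSE>>=
## Not run here: adjusting the limits and tolerances of the regularized
## NR algorithm via clm.control:
fm_ctrl <- clm(rating ~ temp + contact, data = wine,
               control = clm.control(maxIter = 200, maxLineIter = 20,
                                     gradTol = 1e-8, relTol = 1e-8))
@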
The NR algorithm in \pkg{ordinal} has two convergence criteria: (1) an absolute criterion requiring that $\max | \bm g(\bm\psi^{(i)}; \bm y) | < \tau_1$ and (2) a relative criterion requiring that $\max | \bm h^{(i)} | < \tau_2$, where the default thresholds are $\tau_1 = \tau_2 = 10^{-6}$. Here the first criterion attempts to establish closeness of $\bm\psi^{(i)}$ to the true ML estimates in absolute terms, while the second criterion estimates the relative closeness of $\bm\psi^{(i)}$ to the true ML estimates.
%
Both convergence criteria are needed if both small (e.g., $\approx 0.0001$) and large (e.g., $\approx 1000$) parameter estimates are to be determined accurately, with an appropriate number of correct decimals as well as significant digits.

The NR algorithm in \pkg{ordinal} attempts to satisfy the absolute criterion first and will then only attempt to satisfy the relative criterion if it can take the full unregularized NR step, and then only for a maximum of 5 steps.

%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\subsection{Convergence properties and parameter accuracy}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%

Convergence to a well-defined optimum is achieved when the gradient of the negative log-likelihood function with respect to the parameters is small and the Hessian is positive definite, i.e., has only positive eigenvalues bounded away from zero.
%
Identifiability problems occur when the likelihood function is flat in the directions of one or more parameters (or linear functions of the parameters) while being well defined, i.e., peaked, in other directions. It may happen that a parameter is exactly unidentifiable, and \code{clm} is in some cases (including rank-deficient design matrices) able to detect this and exclude the parameter from the optimization procedure. In other cases the likelihood is only almost flat in one or more directions. Such cases are not uncommon in practice, and it is then not possible to reduce the parameter space before optimizing the model.

To measure the degree of empirical identifiability \code{clm} reports the condition number of the Hessian, which is the ratio of its largest to its smallest eigenvalue. A large condition number of the Hessian does not necessarily mean there is a problem with the model, but it can be a sign of one. A small condition number of the Hessian, say smaller than about $10^4$ or $10^6$, on the other hand, is a good assurance that a well-defined optimum has been reached.

A key problem for optimization methods is when to stop iterating: when have the parameters that determine the optimum of the function been found with sufficient accuracy? The \emph{method independent error estimate} \citep{elden04} provides a way to approximate the error in the parameter estimates. Sufficiently close to the optimum the Newton-Raphson step provides this estimate:
\begin{equation*}
  |\hat{\bm\alpha}^{(i)} - \bm\alpha^*| \lesssim \bm h^{(i)}, \quad
  \bm h^{(i)} = \bm H(\bm\psi^{(i)}; \bm y)^{-1} \bm g(\bm\psi^{(i)}; \bm y)
\end{equation*}
where $\bm\alpha^*$ is the exact (but unknown) value of the ML estimate, $\hat{\bm\alpha}^{(i)}$ is the ML estimator of $\bm\alpha$ at the $i$th iteration, and $\bm h^{(i)}$ is the full unregularized NR step at the $i$th iteration.
%
Since the gradient and Hessian of the negative log-likelihood function with respect to the parameters are already evaluated and part of the model fit at convergence, it is essentially computationally cost-free to approximate the error in the parameter estimates.
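These error estimates are reported by the \code{convergence} method for fitted \code{clm} objects; a minimal sketch, reusing the fit \code{fm1} of the main text (the chunk is shown for illustration and not evaluated here):

<<eval=FALSE>>=
fm1 <- clm(rating ~ temp + contact, data = wine)
convergence(fm1) # gradient, condition number and parameter accuracy
@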
Based on this error estimate, the number of correctly determined decimals and significant digits is reported for each parameter. This assessment is only reliable sufficiently close to the optimum and when the NR algorithm converges without regularization and step halving; in practice we therefore caution against the assessment if the algorithm did not converge successfully.

\end{appendix}

%% -----------------------------------------------------------------------------

\end{document}
ordinal/inst/doc/clmm2_tutorial.Rnw0000644000176200001440000004375212431104052017054 0ustar liggesusers\documentclass[a4paper]{article}
\usepackage{amsmath}%the AMS math extension of LaTeX.
\usepackage{amssymb}%the extended AMS math symbols.
%% \usepackage{amsthm} \usepackage{bm}%Use 'bm.sty' to get `bold math' symbols \usepackage{natbib} \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{Sweave} \usepackage{url} \usepackage{float}%Use `float.sty' \usepackage[left=3.5cm,right=3.5cm]{geometry} \usepackage{algorithmic} \usepackage[amsmath,thmmarks,standard,thref]{ntheorem} %%\VignetteIndexEntry{clmm2 tutorial} %%\VignetteDepends{ordinal, xtable} \title{A Tutorial on fitting Cumulative Link Mixed Models with \texttt{clmm2} from the \textsf{ordinal} Package} \author{Rune Haubo B Christensen} %% \numberwithin{equation}{section} \setlength{\parskip}{2mm}%.8\baselineskip} \setlength{\parindent}{0in} %% \DefineVerbatimEnvironment{Sinput}{Verbatim}%{} %% {fontshape=sl, xleftmargin=1em} %% \DefineVerbatimEnvironment{Soutput}{Verbatim}%{} %% {xleftmargin=1em} %% \DefineVerbatimEnvironment{Scode}{Verbatim}%{} %% {fontshape=sl, xleftmargin=1em} \fvset{listparameters={\setlength{\topsep}{0pt}}} %% \fvset{listparameters={\setlength{\botsep}{0pt}}} \renewenvironment{Schunk}{\vspace{-1mm}}{\vspace{-1mm}} %RE-DEFINE marginpar \setlength{\marginparwidth}{1in} \let\oldmarginpar\marginpar \renewcommand\marginpar[1]{\oldmarginpar[\-\raggedleft\tiny #1]% {\tiny #1}} %uncomment to _HIDE_MARGINPAR_: %\renewcommand\marginpar[1]{} \newcommand{\var}{\textup{var}} \newcommand{\I}{\mathcal{I}} \newcommand{\bta}{\bm \theta} \newcommand{\ta}{\theta} \newcommand{\tah}{\hat \theta} \newcommand{\di}{~\textup{d}} \newcommand{\td}{\textup{d}} \newcommand{\Si}{\Sigma} \newcommand{\si}{\sigma} \newcommand{\bpi}{\bm \pi} \newcommand{\bmeta}{\bm \eta} \newcommand{\tdots}{\hspace{10mm} \texttt{....}} \newcommand{\FL}[1]{\fvset{firstline= #1}} \newcommand{\LL}[1]{\fvset{lastline= #1}} \newcommand{\s}{\square} \newcommand{\bs}{\blacksquare} % figurer bagerst i artikel %% \usepackage[tablesfirst, nolists]{endfloat} %% \renewcommand{\efloatseparator}{\vspace{.5cm}} \theoremstyle{plain} %% {break} \theoremseparator{:} \theoremsymbol{{\tiny $\square$}} %%\theoremstyle{plain} \theorembodyfont{\small} \theoremindent5mm \renewtheorem{example}{Example} %% \newtheoremstyle{example}{\topsep}{\topsep}% %% {}% Body font %% {}% Indent amount (empty = no indent, \parindent = para indent) %% {\bfseries}% Thm head font %% {}% Punctuation after thm head %% {\newline}% Space after thm head (\newline = linebreak) %% {\thmname{#1}\thmnumber{ #2}\thmnote{ #3}}% Thm head spec %% %% \theoremstyle{example} %% %% \newtheorem{example}{Example}[subsection] %% \newtheorem{example}{Example}[section] \usepackage{lineno} % \linenumbers \newcommand*\patchAmsMathEnvironmentForLineno[1]{% \expandafter\let\csname old#1\expandafter\endcsname\csname #1\endcsname \expandafter\let\csname oldend#1\expandafter\endcsname\csname end#1\endcsname \renewenvironment{#1}% {\linenomath\csname old#1\endcsname}% {\csname oldend#1\endcsname\endlinenomath}}% \newcommand*\patchBothAmsMathEnvironmentsForLineno[1]{% \patchAmsMathEnvironmentForLineno{#1}% \patchAmsMathEnvironmentForLineno{#1*}}% \AtBeginDocument{% \patchBothAmsMathEnvironmentsForLineno{equation}% \patchBothAmsMathEnvironmentsForLineno{align}% \patchBothAmsMathEnvironmentsForLineno{flalign}% \patchBothAmsMathEnvironmentsForLineno{alignat}% \patchBothAmsMathEnvironmentsForLineno{gather}% \patchBothAmsMathEnvironmentsForLineno{multline}% } \begin{document} \bibliographystyle{chicago} \maketitle \begin{abstract} It is shown by example how a cumulative link mixed model is fitted with the \texttt{clmm2} function in package \textsf{ordinal}. 
Model interpretation and inference are briefly discussed. A tutorial for the more recent \texttt{clmm} function is work in progress.
\end{abstract}

%% \newpage
%% \tableofcontents
%% \newpage

\SweaveOpts{echo=TRUE, results=verb, width=4.5, height=4.5}
\SweaveOpts{prefix.string=figs}
\fvset{listparameters={\setlength{\topsep}{0pt}}, gobble=0, fontsize=\small}
%% \fvset{gobble=0, fontsize=\small}
\setkeys{Gin}{width=.49\textwidth}

<>=
## Load common packages, functions and set settings:
library(ordinal)
library(xtable)
## RUN <- FALSE #redo computations and write .RData files
## Change options:
op <- options() ## To be able to reset settings
options("digits" = 7)
options(help_type = "html")
## options("width" = 75)
options("SweaveHooks" = list(fig=function() par(mar=c(4,4,.5,0)+.5)))
options(continue=" ")
@

We will consider the data on the bitterness of wine from \citet{randall89} presented in Table~\ref{tab:winedata} and available as the object \texttt{wine} in package \textsf{ordinal}. The data were also analyzed with mixed effects models by \citet{tutz96}. The following gives an impression of the wine data object:
<<>>=
data(wine)
head(wine)
str(wine)
@
The data represent a factorial experiment on factors determining the bitterness of wine with 1 = ``least bitter'' and 5 = ``most bitter''. Two treatment factors (temperature and contact) each have two levels. Temperature and contact between juice and skins can be controlled when crushing grapes during wine production. Nine judges each assessed wine from two bottles from each of the four treatment conditions, hence there are 72 observations in all. For more information see the manual entry for the wine data: \texttt{help(wine)}.

\begin{table}
  \centering
  \caption{Ratings of the bitterness of some white wines. Data are adapted from \citet{randall89}.}
  \label{tab:winedata}
  \begin{tabular}{lllrrrrrrrrr}
    \hline
    & & & \multicolumn{9}{c}{Judge} \\
    \cline{4-12}
<>=
data(wine)
temp.contact.bottle <- with(wine, temp:contact:bottle)[drop=TRUE]
tab <- xtabs(as.numeric(rating) ~ temp.contact.bottle + judge,
             data=wine)
class(tab) <- "matrix"
attr(tab, "call") <- NULL
mat <- cbind(rep(c("cold", "warm"), each = 4),
             rep(rep(c("no", "yes"), each=2), 2),
             1:8, tab)
colnames(mat) <- c("Temperature", "Contact", "Bottle", 1:9)
xtab <- xtable(mat)
print(xtab, only.contents=TRUE, include.rownames=FALSE,
      sanitize.text.function = function(x) x)
@
  \end{tabular}
\end{table}

We will fit the following cumulative link mixed model to the wine data:
\begin{equation}
  \label{eq:mixedModel}
  \begin{array}{c}
    \textup{logit}(P(Y_i \leq j)) = \theta_j - \beta_1 (\mathtt{temp}_i) -
    \beta_2(\mathtt{contact}_i) - u(\mathtt{judge}_i) \\
    i = 1,\ldots, n, \quad j = 1, \ldots, J-1
  \end{array}
\end{equation}
This is a model for the cumulative probability of the $i$th rating falling in the $j$th category or below, where $i$ indexes all observations and $j = 1, \ldots, J$ indexes the response categories ($J = 5$). $\{\theta_j\}$ are known as threshold parameters or cut-points. We take the judge effects to be random and assume that they are IID normal: $u(\mathtt{judge}_i) \sim N(0, \sigma_u^2)$.

We fit this model with the \texttt{clmm2} function in package \textsf{ordinal}.
Here we save the fitted \texttt{clmm2} model in the object \texttt{fm1} (short for \texttt{f}itted \texttt{m}odel \texttt{1}) and \texttt{print} the model by simply typing its name:
<<>>=
fm1 <- clmm2(rating ~ temp + contact, random=judge, data=wine)
fm1
@
Maximum likelihood estimates of the parameters are provided using the Laplace approximation to compute the likelihood function. A more accurate approximation is provided by the adaptive Gauss-Hermite quadrature method. Here we use 10 quadrature nodes and use the \texttt{summary} method to display additional information:
<<>>=
fm2 <- clmm2(rating ~ temp + contact, random=judge, data=wine,
             Hess=TRUE, nAGQ=10)
summary(fm2)
@
The small changes in the parameter estimates show that the Laplace approximation was in fact rather accurate in this case. Observe that we set the option \texttt{Hess = TRUE}. This is needed if we want to use the \texttt{summary} method since the Hessian is needed to compute standard errors of the model coefficients.

The results contain the maximum likelihood estimates of the parameters:
\begin{equation}
  \label{eq:parameters}
  \hat\beta_1 = 3.06, ~~\hat\beta_2 = 1.83, ~~\hat\sigma_u^2 = 1.29 = 1.13^2,
  ~~\{\hat\theta_j\} = [-1.62,~ 1.51,~ 4.23,~ 6.09].
\end{equation}
Observe that the number under \texttt{Std.Dev} for the random effect is \textbf{not} the standard error of the random effects variance, \texttt{Var}. Rather, it is the standard deviation of the random effects, i.e., the square root of the variance. In our example $\sqrt{1.29} \simeq 1.13$.

The condition number of the Hessian measures the empirical identifiability of the model. High numbers, say larger than $10^4$ or $10^6$, indicate that the model is ill defined. This would suggest that the model can be simplified, that possibly some parameters are not identifiable, and that optimization of the model can be difficult. In this case the condition number of the Hessian does not indicate a problem with the model.

The coefficients for \texttt{temp} and \texttt{contact} are positive, indicating that higher temperature and more contact increase the bitterness of wine, i.e., a rating in higher categories is more likely. The odds ratio of the event $Y \geq j$ is $\exp(\beta_{\textup{treatment}})$, thus the odds ratio of bitterness being rated in category $j$ or above at warm relative to cold temperatures is
<<>>=
exp(coef(fm2)[5])
@
The $p$-values for the location coefficients provided by the \texttt{summary} method are based on the so-called Wald statistic. More accurate tests are provided by likelihood ratio tests. These can be obtained with the \texttt{anova} method; for example, the likelihood ratio test of \texttt{contact} is
<<>>=
fm3 <- clmm2(rating ~ temp, random=judge, data=wine, nAGQ=10)
anova(fm3, fm2)
@
which in this case is slightly more significant. The Wald test is not reliable for variance parameters, so the \texttt{summary} method does not provide a test of $\sigma_u$, but a likelihood ratio test can be obtained with \texttt{anova}:
<<>>=
fm4 <- clm2(rating ~ temp + contact, data=wine)
anova(fm4, fm2)
@
showing that the judge term is significant. Since this test of $\sigma_u = 0$ is on the boundary of the parameter space (a variance cannot be negative), it is often argued that a more correct $p$-value is obtained by halving the $p$-value produced by the conventional likelihood ratio test. In this case halving the $p$-value is of little relevance.
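If the halved $p$-value is wanted explicitly, it can be computed from the likelihood ratio statistic by hand; a minimal sketch, assuming the fits \texttt{fm2} and \texttt{fm4} from above (the chunk is illustrative and not evaluated):

<<eval=FALSE>>=
## Halved p-value for the boundary test of sigma_u = 0:
LR <- 2 * (as.numeric(logLik(fm2)) - as.numeric(logLik(fm4)))
pchisq(LR, df = 1, lower.tail = FALSE) / 2
@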
A profile likelihood confidence interval of $\sigma_u$ is obtained with:
<<>>=
pr2 <- profile(fm2, range=c(.1, 4), nSteps=30, trace=0)
confint(pr2)
@
The profile likelihood can also be plotted:
<>=
plot(pr2)
@
The result is shown in Fig.~\ref{fig:PRsigma_u} where horizontal lines indicate 95\% and 99\% confidence bounds. Clearly the profile likelihood function is asymmetric and symmetric confidence intervals would be inaccurate.
\begin{figure}
  \centering
<>=
<>
@
\caption{Profile likelihood of $\sigma_u$.}
\label{fig:PRsigma_u}
\end{figure}

The judge effects, $u(\mathtt{judge}_i)$, are not parameters, so they cannot be \emph{estimated} in the conventional sense, but a ``best guess'' is provided by the \emph{conditional modes}. Similarly the \emph{conditional variance} provides an uncertainty measure of the conditional modes. These quantities are included in \texttt{clmm2} objects as the \texttt{ranef} and \texttt{condVar} components. The following code generates the plot in Fig.~\ref{fig:ranef} illustrating judge effects via conditional modes with 95\% confidence intervals based on the conditional variance:
<>=
ci <- fm2$ranef + qnorm(0.975) * sqrt(fm2$condVar) %o% c(-1, 1)
ord.re <- order(fm2$ranef)
ci <- ci[ord.re, ]
plot(1:9, fm2$ranef[ord.re], axes=FALSE, ylim=range(ci),
     xlab="Judge", ylab="Judge effect")
axis(1, at=1:9, labels = ord.re)
axis(2)
for(i in 1:9) segments(i, ci[i,1], i, ci[i, 2])
abline(h = 0, lty=2)
@
The seventh judge gave the lowest ratings of bitterness while the first judge gave the highest ratings of bitterness. The significant judge effect indicates that the judges rated the wines differently. Two natural interpretations are that either a bitterness of, say, 3 means different things to different judges, or the judges actually perceived the bitterness of the wines differently. Possibly both effects play their part.
\begin{figure}
  \centering
<>=
<>
@
\caption{Judge effects given by conditional modes with 95\% confidence intervals based on the conditional variance.}
\label{fig:ranef}
\end{figure}

The fitted or predicted probabilities can be obtained with the judge effects at their conditional modes or for an average judge ($u = 0$). The former are available with \texttt{fitted(fm)} or with \texttt{predict(fm)}, where \texttt{fm} is a \texttt{f}itted \texttt{m}odel object. In our example we get
<<>>=
head(cbind(wine, fitted(fm2)))
@
Predicted probabilities for an average judge can be obtained by including the data used to fit the model in the \texttt{newdata} argument of \texttt{predict}:
<<>>=
head(cbind(wine, pred=predict(fm2, newdata=wine)))
@
Model~\eqref{eq:mixedModel} says that for an average judge at cold temperature the cumulative probability of a bitterness rating in category $j$ or below is
\begin{equation*}
  P(Y_i \leq j) = \textup{logit}^{-1} [ \theta_j - \beta_2(\mathtt{contact}_i) ]
\end{equation*}
since $u$ is set to zero and $\beta_1(\mathtt{temp}_i) = 0$ at cold conditions. Further, $\textup{logit}^{-1}(\eta) = 1 / [1 + \exp(-\eta)]$ is the cumulative distribution function of the logistic distribution, available as the \texttt{plogis} function.
The (non-cumulative) probability of a bitterness rating in category $j$ is $\pi_j = P(Y_i \leq j) - P(Y_i \leq j-1)$; for instance the probability of a bitterness rating in the third category at these conditions can be computed as
<<>>=
plogis(fm2$Theta[3] - fm2$beta[2]) -
  plogis(fm2$Theta[2] - fm2$beta[2])
@
This corresponds to the third entry of \texttt{predict(fm2, newdata=wine)} given above.

Judge effects are random and normally distributed, so an average judge effect is 0. Extreme judge effects, say the 5th and 95th percentile judge effects, are given by
<<>>=
qnorm(0.95) * c(-1, 1) * fm2$stDev
@
At the baseline experimental conditions (cold and no contact) the probabilities of bitterness ratings in the five categories for a 5th percentile judge are
<<>>=
pred <- function(eta, theta, cat = 1:(length(theta)+1), inv.link = plogis)
{
  Theta <- c(-1e3, theta, 1e3)
  sapply(cat, function(j)
    inv.link(Theta[j+1] - eta) - inv.link(Theta[j] - eta) )
}
pred(qnorm(0.05) * fm2$stDev, fm2$Theta)
@
We can compute these probabilities for average, 5th and 95th percentile judges at the four experimental conditions. The following code plots these probabilities and the results are shown in Fig.~\ref{fig:ratingProb}.
<>=
mat <- expand.grid(judge = qnorm(0.95) * c(-1, 0, 1) * fm2$stDev,
                   contact = c(0, fm2$beta[2]),
                   temp = c(0, fm2$beta[1]))
pred.mat <- pred(eta=rowSums(mat), theta=fm2$Theta)
lab <- paste("contact=", rep(levels(wine$contact), 2), ", ",
             "temp=", rep(levels(wine$temp), each=2), sep="")
par(mfrow=c(2, 2))
for(k in c(1, 4, 7, 10)) {
  plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
       xlab="Bitterness rating scale", axes=FALSE,
       ylab="Probability", main=lab[ceiling(k/3)], las=1)
  axis(1); axis(2)
  lines(1:5, pred.mat[k+1, ], lty=1)
  lines(1:5, pred.mat[k+2, ], lty=3)
  legend("topright",
         c("avg. judge", "5th %-tile judge", "95th %-tile judge"),
         lty=1:3, bty="n")
}
@
\begin{figure}
  \centering
<>=
k <- 1
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
     xlab="Bitterness rating scale", axes=FALSE,
     ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright", c("avg. judge", "5th %-tile judge",
                     "95th %-tile judge"), lty=1:3, bty="n")
@
<>=
k <- 4
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
     xlab="Bitterness rating scale", axes=FALSE,
     ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright", c("avg. judge", "5th %-tile judge",
                     "95th %-tile judge"), lty=1:3, bty="n")
@
<>=
k <- 7
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
     xlab="Bitterness rating scale", axes=FALSE,
     ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright", c("avg. judge", "5th %-tile judge",
                     "95th %-tile judge"), lty=1:3, bty="n")
@
<>=
k <- 10
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
     xlab="Bitterness rating scale", axes=FALSE,
     ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright", c("avg.
judge", "5th %-tile judge", "95th %-tile judge"), lty=1:3, bty="n") @ \caption{Rating probabilities for average and extreme judges at different experimental conditions.} \label{fig:ratingProb} \end{figure} At constant experimental conditions the odds ratio for a bitterness rating in category $j$ or above for a 95th percentile judge relative to a 5th percentile judge is <<>>= exp(2*qnorm(0.95) * fm2$stDev) @ The differences between judges can also be expressed in terms of the interquartile range: the odds ratio for a bitterness rating in category $j$ or above for a third quartile judge relative to a first quartile judge is <<>>= exp(2*qnorm(0.75) * fm2$stDev) @ \newpage \bibliography{ordinal} %% \newpage \end{document} <>= @ ordinal/inst/doc/clm_article.R0000644000176200001440000003703414533322560016037 0ustar liggesusers### R code from vignette source 'clm_article.Rnw' ################################################### ### code chunk number 1: preliminaries ################################################### options(prompt = "R> ", continue = "+ ", width = 70, useFancyQuotes = FALSE) library("ordinal") library("xtable") ################################################### ### code chunk number 2: clm_article.Rnw:742-744 ################################################### clm_args <- gsub("function ", "clm", deparse(args(clm))) cat(paste(clm_args[-length(clm_args)], "\n")) ################################################### ### code chunk number 3: clm_article.Rnw:759-761 ################################################### cc_args <- gsub("function ", "clm.control", deparse(args(clm.control))) cat(paste(cc_args[-length(cc_args)], "\n")) ################################################### ### code chunk number 4: clm_article.Rnw:792-802 ################################################### ## data(wine) tab <- with(wine, table(temp:contact, rating)) mat <- cbind(rep(c("cold", "warm"), each = 2), rep(c("no", "yes"), 2), tab) colnames(mat) <- c("Temperature", "Contact", paste("~~", 1:5, sep = "")) xtab <- xtable(mat) print(xtab, only.contents = TRUE, include.rownames = FALSE, sanitize.text.function = function(x) x) ################################################### ### code chunk number 5: clm_article.Rnw:830-833 ################################################### library("ordinal") fm1 <- clm(rating ~ temp + contact, data = wine) summary(fm1) ################################################### ### code chunk number 6: clm_article.Rnw:884-885 ################################################### anova(fm1, type = "III") ################################################### ### code chunk number 7: clm_article.Rnw:889-891 ################################################### fm2 <- clm(rating ~ temp, data = wine) anova(fm2, fm1) ################################################### ### code chunk number 8: clm_article.Rnw:897-898 ################################################### drop1(fm1, test = "Chi") ################################################### ### code chunk number 9: clm_article.Rnw:903-905 ################################################### fm0 <- clm(rating ~ 1, data = wine) add1(fm0, scope = ~ temp + contact, test = "Chi") ################################################### ### code chunk number 10: clm_article.Rnw:909-910 ################################################### confint(fm1) ################################################### ### code chunk number 11: clm_article.Rnw:945-947 ################################################### fm.nom <- clm(rating ~ temp, nominal = ~ contact, 
data = wine) summary(fm.nom) ################################################### ### code chunk number 12: clm_article.Rnw:977-978 ################################################### fm.nom$Theta ################################################### ### code chunk number 13: clm_article.Rnw:987-988 ################################################### anova(fm1, fm.nom) ################################################### ### code chunk number 14: clm_article.Rnw:999-1000 ################################################### fm.nom2 <- clm(rating ~ temp + contact, nominal = ~ contact, data = wine) ################################################### ### code chunk number 15: clm_article.Rnw:1003-1004 ################################################### fm.nom2 ################################################### ### code chunk number 16: clm_article.Rnw:1008-1009 ################################################### nominal_test(fm1) ################################################### ### code chunk number 17: clm_article.Rnw:1028-1030 ################################################### fm.sca <- clm(rating ~ temp + contact, scale = ~ temp, data = wine) summary(fm.sca) ################################################### ### code chunk number 18: clm_article.Rnw:1035-1036 ################################################### scale_test(fm1) ################################################### ### code chunk number 19: clm_article.Rnw:1059-1062 ################################################### fm.equi <- clm(rating ~ temp + contact, data = wine, threshold = "equidistant") summary(fm.equi) ################################################### ### code chunk number 20: clm_article.Rnw:1069-1070 ################################################### drop(fm.equi$tJac %*% coef(fm.equi)[c("threshold.1", "spacing")]) ################################################### ### code chunk number 21: clm_article.Rnw:1077-1078 ################################################### mean(diff(coef(fm1)[1:4])) ################################################### ### code chunk number 22: clm_article.Rnw:1084-1085 ################################################### anova(fm1, fm.equi) ################################################### ### code chunk number 23: clm_article.Rnw:1107-1108 ################################################### with(soup, table(PROD, PRODID)) ################################################### ### code chunk number 24: clm_article.Rnw:1112-1115 ################################################### fm_binorm <- clm(SURENESS ~ PRODID, scale = ~ PROD, data = soup, link="probit") summary(fm_binorm) ################################################### ### code chunk number 25: clm_article.Rnw:1118-1120 ################################################### fm_nom <- clm(SURENESS ~ PRODID, nominal = ~ PROD, data = soup, link="probit") ################################################### ### code chunk number 26: clm_article.Rnw:1124-1126 ################################################### fm_location <- update(fm_binorm, scale = ~ 1) anova(fm_location, fm_binorm, fm_nom) ################################################### ### code chunk number 27: clm_article.Rnw:1131-1136 ################################################### fm_cll_scale <- clm(SURENESS ~ PRODID, scale = ~ PROD, data = soup, link="cloglog") fm_cll <- clm(SURENESS ~ PRODID, data = soup, link="cloglog") anova(fm_cll, fm_cll_scale, fm_binorm) ################################################### ### code chunk number 28: clm_article.Rnw:1140-1142 
################################################### fm_loggamma <- clm(SURENESS ~ PRODID, data = soup, link="log-gamma") summary(fm_loggamma) ################################################### ### code chunk number 29: profileLikelihood ################################################### pr1 <- profile(fm1, alpha = 1e-4) plot(pr1) ################################################### ### code chunk number 30: prof1 ################################################### plot(pr1, which.par = 1) ################################################### ### code chunk number 31: prof2 ################################################### plot(pr1, which.par = 2) ################################################### ### code chunk number 32: clm_article.Rnw:1201-1204 ################################################### slice.fm1 <- slice(fm1, lambda = 5) par(mfrow = c(2, 3)) plot(slice.fm1) ################################################### ### code chunk number 33: slice11 ################################################### plot(slice.fm1, parm = 1) ################################################### ### code chunk number 34: slice12 ################################################### plot(slice.fm1, parm = 2) ################################################### ### code chunk number 35: slice13 ################################################### plot(slice.fm1, parm = 3) ################################################### ### code chunk number 36: slice14 ################################################### plot(slice.fm1, parm = 4) ################################################### ### code chunk number 37: slice15 ################################################### plot(slice.fm1, parm = 5) ################################################### ### code chunk number 38: slice16 ################################################### plot(slice.fm1, parm = 6) ################################################### ### code chunk number 39: slice2 ################################################### slice2.fm1 <- slice(fm1, parm = 4:5, lambda = 1e-5) par(mfrow = c(1, 2)) plot(slice2.fm1) ################################################### ### code chunk number 40: slice24 ################################################### plot(slice2.fm1, parm = 1) ################################################### ### code chunk number 41: slice25 ################################################### plot(slice2.fm1, parm = 2) ################################################### ### code chunk number 42: clm_article.Rnw:1265-1266 ################################################### convergence(fm1) ################################################### ### code chunk number 43: clm_article.Rnw:1292-1293 ################################################### head(pred <- predict(fm1, newdata = subset(wine, select = -rating))$fit) ################################################### ### code chunk number 44: clm_article.Rnw:1297-1300 ################################################### stopifnot(isTRUE(all.equal(fitted(fm1), t(pred)[ t(col(pred) == wine$rating)])), isTRUE(all.equal(fitted(fm1), predict(fm1, newdata = wine)$fit))) ################################################### ### code chunk number 45: clm_article.Rnw:1303-1307 ################################################### newData <- expand.grid(temp = levels(wine$temp), contact = levels(wine$contact)) cbind(newData, round(predict(fm1, newdata = newData)$fit, 3), "class" = predict(fm1, newdata = newData, type = "class")$fit) ################################################### ### code 
chunk number 46: clm_article.Rnw:1310-1311 ################################################### head(apply(pred, 1, function(x) round(weighted.mean(1:5, x)))) ################################################### ### code chunk number 47: clm_article.Rnw:1314-1318 ################################################### p1 <- apply(predict(fm1, newdata = subset(wine, select=-rating))$fit, 1, function(x) round(weighted.mean(1:5, x))) p2 <- as.numeric(as.character(predict(fm1, type = "class")$fit)) stopifnot(isTRUE(all.equal(p1, p2, check.attributes = FALSE))) ################################################### ### code chunk number 48: clm_article.Rnw:1323-1325 ################################################### predictions <- predict(fm1, se.fit = TRUE, interval = TRUE) head(do.call("cbind", predictions)) ################################################### ### code chunk number 49: clm_article.Rnw:1361-1367 ################################################### wine <- within(wine, { rating_comb3 <- factor(rating, labels = c("1", "2-4", "2-4", "2-4", "5")) }) ftable(rating_comb3 ~ temp, data = wine) fm.comb3 <- clm(rating_comb3 ~ temp, data = wine) summary(fm.comb3) ################################################### ### code chunk number 50: clm_article.Rnw:1372-1374 ################################################### fm.comb3_b <- clm(rating_comb3 ~ 1, data = wine) anova(fm.comb3, fm.comb3_b) ################################################### ### code chunk number 51: clm_article.Rnw:1379-1381 ################################################### fm.nom2 <- clm(rating ~ contact, nominal = ~ temp, data = wine) summary(fm.nom2) ################################################### ### code chunk number 52: clm_article.Rnw:1392-1394 ################################################### fm.soup <- clm(SURENESS ~ PRODID * DAY, data = soup) summary(fm.soup) ################################################### ### code chunk number 53: clm_article.Rnw:1397-1398 ################################################### with(soup, table(DAY, PRODID)) ################################################### ### code chunk number 54: clm_article.Rnw:1409-1415 ################################################### wine <- within(wine, { rating_comb2 <- factor(rating, labels = c("1-2", "1-2", "3-5", "3-5", "3-5")) }) ftable(rating_comb2 ~ contact, data = wine) fm.comb2 <- clm(rating_comb2 ~ contact, scale = ~ contact, data = wine) summary(fm.comb2) ################################################### ### code chunk number 55: clm_article.Rnw:1418-1432 ################################################### ## Example with unidentified parameters with 3 response categories ## not shown in paper: wine <- within(wine, { rating_comb3b <- rating levels(rating_comb3b) <- c("1-2", "1-2", "3", "4-5", "4-5") }) wine$rating_comb3b[1] <- "4-5" # Remove the zero here to avoid inf MLE ftable(rating_comb3b ~ temp + contact, data = wine) fm.comb3_c <- clm(rating_comb3b ~ contact * temp, scale = ~contact * temp, nominal = ~contact, data = wine) summary(fm.comb3_c) convergence(fm.comb3_c) ################################################### ### code chunk number 56: clm_article.Rnw:1441-1443 ################################################### rho <- update(fm1, doFit=FALSE) names(rho) ################################################### ### code chunk number 57: clm_article.Rnw:1446-1448 ################################################### rho$clm.nll(rho) c(rho$clm.grad(rho)) ################################################### ### code chunk number 58: 
clm_article.Rnw:1451-1453
###################################################
rho$clm.nll(rho, par = coef(fm1))
print(c(rho$clm.grad(rho)), digits = 3)

###################################################
### code chunk number 59: clm_article.Rnw:1458-1468
###################################################
nll <- function(par, envir) {
  envir$par <- par
  envir$clm.nll(envir)
}
grad <- function(par, envir) {
  envir$par <- par
  envir$clm.nll(envir)
  envir$clm.grad(envir)
}
nlminb(rho$par, nll, grad, upper = c(rep(Inf, 4), 2, 2), envir = rho)$par

###################################################
### code chunk number 60: clm_article.Rnw:1477-1482
###################################################
artery <- data.frame(disease = factor(rep(0:4, 2), ordered = TRUE),
                     smoker = factor(rep(c("no", "yes"), each = 5)),
                     freq = c(334, 99, 117, 159, 30,
                              350, 307, 345, 481, 67))
addmargins(xtabs(freq ~ smoker + disease, data = artery), margin = 2)

###################################################
### code chunk number 61: clm_article.Rnw:1486-1488
###################################################
fm <- clm(disease ~ smoker, weights = freq, data = artery)
exp(fm$beta)

###################################################
### code chunk number 62: clm_article.Rnw:1493-1496
###################################################
fm.nom <- clm(disease ~ 1, nominal = ~ smoker, weights = freq,
              data = artery, sign.nominal = "negative")
coef(fm.nom)[5:8]

###################################################
### code chunk number 63: clm_article.Rnw:1499-1500
###################################################
coef(fm.lm <- lm(I(coef(fm.nom)[5:8]) ~ I(0:3)))

###################################################
### code chunk number 64: clm_article.Rnw:1503-1510
###################################################
nll2 <- function(par, envir) {
  envir$par <- c(par[1:4], par[5] + par[6] * (0:3))
  envir$clm.nll(envir)
}
start <- unname(c(coef(fm.nom)[1:4], coef(fm.lm)))
fit <- nlminb(start, nll2, envir = update(fm.nom, doFit = FALSE))
round(fit$par[5:6], 2)
ordinal/inst/doc/clmm2_tutorial.pdf0000644000176200001440000031233414533322576017074 0ustar liggesusers
% Y{vdDR6o5=0 o!U&jr/ߣ&l[a;iPSck-G|D¿=/<@ղZ"ىElmi,dM>pg\ &u.d8?zp%fYP߄zFUS$Rx%Σ{I;* .^{2O!8-Y}0IpIp'zPu+;;.I}5CI< "R#~#CX~$µ<Ff u4VРhkxxu("OvC8A6 *.f+gx ܸNVjӎ-[nRVq2 n 'Q a;ϑEG[NҮQ3XW^s,Pg'^vg !5UlW/$+Y sF0!kxy`9|Kx)I<֔ 3K <})CiÎ*.KAiRV ԦmZ ռHe5ѣwXW/8~_;Q#,JfVybaH8cr&5#7Eiendstream endobj 123 0 obj << /Filter /FlateDecode /Length 3625 >> stream xZoϳЗ}){he^owF $@LC"Ef:2vovfvg@s}mw{BH/;䚋^+{aGP ޽o//BIѻpz'doM'A hb hm{F7:yOp[1h gsb/0r玮Tv0d=уZ~pe hi p 4Dޱ:彷FZӉE\f {Y'B8M 纬A TpCxzƠ uI8(˄n d }c}t`ڊ@+ع-k0;ב; OЈk:ˌazRft'9dpXTYq[t # :v4U0hB#Dd$ -$le,^tlT'%Yu* R5^4BA^Gt = H^B\ӁCV ew(ALAr@{J HiRjN~Uv*T eJᨁ*; Tr4PI@Ig|; Iv*&|}u[դnj)cŨD2AIqGNAT Hީ ҅݀ I;m&Nd $Tt4V=U r$8.l ygKѼsKhƲ p* E}? ըٯ\įQ_k\o'nmb[]W䛫9HRPMĭ [GīQsbjKVH1ӦJ֫Q(j/W[+>jmB%TU%P -{r >Lcxدxu`LMa}NLq Tqk϶ Ra[>$^4[ݸ;Ww?0jlbw㛌bZ&*B&42~woHBC@S ^a գ.zq+6!T}s{v IK؅fn6z:DԹ7QΑ"ZDb10+wդPh~u)'1>[@,gjR3gR3ϠvnG{nv1}/pAQd b]aM+mh6:oyn<·`e%X8;\>-X`Kw60:ոJ0x\]$=rO>Ý, t(x3G]"%|>Gp0u,B`]"Z0XdA7[_ _֡b:ۯ]{x8ynE&I"!iAy/saD|<AЇSnbyF#@q/ 99rC%gA)~LPt^?]%d/GfkRfXNEHVZ rS*`|3JB4AϐmMRD"),*z&j$o fUBtZCe!s[-r}2D!֬ӝ!ѻk75oР we݈Zg=r1"&Ycb *ECa 09T/ z^k.%ftFH$BTf ґȹA% /*Q]s7>RHe9!px+}& p#%gTF1^,tOY:9œ<"3+HF~,girY+qfJd"J+!oQOqKnsvIۃCZ7>aMԍTtŒs;hSw|i;慢TE[`o"㽴B>_^%rlGȧrc p%&3K69RC_9ELO!{K~aNbid웅Բ pDJ=@D(0)NR")jA .!'T&Rs~~n4(KsP(qL.np%EiŽvh$BSe)/l(0({lWC gsb?C((CZ$1DsɍEX8^hw01Phw~ 3 x~W64(Ji(`q;b*ө S4ݭ =f$m"zcs}7$JEP^PB_BA vUPWfygCe+8(B]S(k8jDԮsLD=5ye\26-BGz*__$B?U{Qf}qnSwbi51WQ~&հL/4E=-evϗr>)g%gY!C~NAr%M.+;ȉ>ڣLdwI<@4U_WlqG;SRTI!r/ML'&C ,P4t쎛h, Ci;wEO0Փ68W;RFRH; #C;C"EfRZJJe5}Ii sȖBD,^cgOT2EI&!dJ34{ 3743!G&à2R#7qD%qn|D){ h|T \fճ8Iz6l+]2"< IT&嗖>> S!9z8g~/\b#endstream endobj 124 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 269 >> stream xcd`ab`ddds 4T~H3a!s<<, w }=D19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMU;ML:)槤10002&FF|aӂg2ً/)鮒󂭺hQy?헰ξk<<@/\endstream endobj 125 0 obj << /Filter /FlateDecode /Length 2411 >> stream xXop WtX r{hs`h9RHڱ~q%+h ٙfv?NNrwoޫɶ'ۋ~?+`bb2Sbru{҉1L3mjwqM~lٜ#IMz|Z uAw9,vʦyrkBM]%.*~%a=1 / 34T]ƚG_g9d_rC~g*x䆲 ]f7W\P)׋W40ɥ{a&=Uɋ)Jpxh|ANNfZ7'H I۵/gZz|&cˡCqrbA8^D)ץ6>m!c kf Q'D@"o%rLa\||pCHnӜ]j@W5e 6>dvoEfŊ 'Γή>\pM#FX;Z!nUبWo?(w<-YP=z!豗.7RN^`&_"DJo/( ( q!s`xtP'A.-cb|Ƌk'tE<7̪B"3XrY{= I" ʪ<5U3hFhIF2-`%cL3~`'Tky;ka H> cZ^Ҹtfsv1 -C @oV[V@Sowt;_ќDұ9@zt7>.- o<  +{|:q>]\IWMTvβHιl%\۲,Wc;fI}#H*'t"v5ԹT(Fg8Q' WB9͍>bE Q `zC;⋣]% j1s^hG?Qr$ Jo~&)h<7ye?TK2-I+ BTyAo^̴>n/d&he!&Dd)w v4^f, QYGz .e=y)Y 1:hD\TVQ0|inCoeޥ8B@b\~y O tx:T')mKcL_zBCe{v.=BR-?8޵8wG$ܴ=eSMp~?ۻUȧOn+]Y ^Wo`VlX8v݃Y> `wRcst<1#;^f=I@}  uZ ?tn|~NlNShfOsCD5;V8/ɂIMD`NpH=~iCݮXj;yCOܤ0,=yÝGdoM*-/#]#dB>'u=nJw5JjɚX{N?JwIOOa:HdIPH(ؽ.Ό:cȍ&ƁK}sr:}*Z'R91Y峛%wa& gpW5. xh 0^ف*vg8;?5F. XI6Ctn=S]';Q3UXT]ծqV Z ;*cQA=kwq+{܁$Wm0 f VI Bn׏z).˨K>We2ʂ@W-c9*:3?+{6`\drRIawaq}B>tA;:5MV?% a) m% wFXIw3 wu 24HD+* 8,"?Yn2 = G'v@MИ5NԜ^xC?x4pU9l&9 ]]D8endstream endobj 126 0 obj << /Filter /FlateDecode /Length 1841 >> stream xXMo7_腗+fI? -Pдz0zPlU`٩~>.YY[AHJv*hLVєVΪLÂA1H}(gwRƦYi[?*[ZzϽ$陷Փчф0 e*Jos>bBo|h8|٪k(L(qc,9rфk)>uu=v(5 w?ėgj|週VZw:-0fNPC*Zi ,P+*-SPj =Bz.$4‹JeD_sZxFp)^t410:/#ƒ,-&g,UM溢& wvױ"L2yV mcY>Bw X@+qbUJ ƉT)'cVJ, Ή{was" NkiO?<EugѼu`DZn1FuH2Eq+1՗̓{ j  2(6.+qIeºUL7'FL ,E*>($8bSE%<,>CüB%N }A:: Yn7U!.tZUU eaiVeaC5 ڕB lJvZhrEht'QS"DrٙbQX Dsb|~L 48b|Oa.@v/SH*5= ?O?J8QL#{h㢐 (#pEh ׏QFV3o{i-oA̛̜N(֡o;&ъQh P_~j]!ߕnNI C8zL?w˛!U< ^w1?0eJd*!endstream endobj 127 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1392 >> stream xUTiPGYVakEfG [QJ &BY AEY"DCR! 
ordinal/inst/doc/clmm2_tutorial.R0000644000176200001440000002164714533322572016524 0ustar liggesusers
### R code from vignette source 'clmm2_tutorial.Rnw'

###################################################
### code chunk number 1: Initialize
###################################################

## Load common packages, functions and set settings:
library(ordinal)
library(xtable)
## RUN <- FALSE #redo computations and write .RData files
## Change options:
op <- options() ## To be able to reset settings
options("digits" = 7)
options(help_type = "html")
## options("width" = 75)
options("SweaveHooks" = list(fig=function() par(mar=c(4,4,.5,0)+.5)))
options(continue=" ")


###################################################
### code chunk number 2: clmm2_tutorial.Rnw:152-155
###################################################
data(wine)
head(wine)
str(wine)


###################################################
### code chunk number 3: clmm2_tutorial.Rnw:176-190
###################################################
data(wine)
temp.contact.bottle <- with(wine, temp:contact:bottle)[drop=TRUE]
tab <- xtabs(as.numeric(rating) ~ temp.contact.bottle + judge,
             data=wine)
class(tab) <- "matrix"
attr(tab, "call") <- NULL
mat <- cbind(rep(c("cold", "warm"), each = 4),
             rep(rep(c("no", "yes"), each=2), 2),
             1:8, tab)
colnames(mat) <- c("Temperature", "Contact", "Bottle", 1:9)
xtab <- xtable(mat)
print(xtab, only.contents=TRUE, include.rownames=FALSE,
      sanitize.text.function = function(x) x)


###################################################
### code chunk number 4: clmm2_tutorial.Rnw:217-219
###################################################
fm1 <- clmm2(rating ~ temp + contact, random=judge, data=wine)
fm1


###################################################
### code chunk number 5: clmm2_tutorial.Rnw:226-229
###################################################
fm2 <- clmm2(rating ~ temp + contact, random=judge, data=wine,
             Hess=TRUE, nAGQ=10)
summary(fm2)
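## A minimal sketch (not from the generated vignette code): cumulative
## odds ratios for both fixed effects at once. It assumes 'fm2' from the
## chunk above and relies on its 'beta' component, the same component
## the prediction chunks further below use.
exp(fm2$beta)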
###################################################
### code chunk number 6: clmm2_tutorial.Rnw:265-266
###################################################
exp(coef(fm2)[5])


###################################################
### code chunk number 7: clmm2_tutorial.Rnw:274-276
###################################################
fm3 <- clmm2(rating ~ temp, random=judge, data=wine, nAGQ=10)
anova(fm3, fm2)


###################################################
### code chunk number 8: clmm2_tutorial.Rnw:282-284
###################################################
fm4 <- clm2(rating ~ temp + contact, data=wine)
anova(fm4, fm2)


###################################################
### code chunk number 9: clmm2_tutorial.Rnw:295-297
###################################################
pr2 <- profile(fm2, range=c(.1, 4), nSteps=30, trace=0)
confint(pr2)


###################################################
### code chunk number 10: profilePlot
###################################################
getOption("SweaveHooks")[["fig"]]()
plot(pr2)


###################################################
### code chunk number 11: profileFig
###################################################
getOption("SweaveHooks")[["fig"]]()
plot(pr2)


###################################################
### code chunk number 12: ranefPlot
###################################################
getOption("SweaveHooks")[["fig"]]()
ci <- fm2$ranef + qnorm(0.975) * sqrt(fm2$condVar) %o% c(-1, 1)
ord.re <- order(fm2$ranef)
ci <- ci[order(fm2$ranef),]
plot(1:9, fm2$ranef[ord.re], axes=FALSE, ylim=range(ci),
     xlab="Judge", ylab="Judge effect")
axis(1, at=1:9, labels = ord.re)
axis(2)
for(i in 1:9) segments(i, ci[i,1], i, ci[i, 2])
abline(h = 0, lty=2)


###################################################
### code chunk number 13: clmm2_tutorial.Rnw:348-349
###################################################
getOption("SweaveHooks")[["fig"]]()
ci <- fm2$ranef + qnorm(0.975) * sqrt(fm2$condVar) %o% c(-1, 1)
ord.re <- order(fm2$ranef)
ci <- ci[order(fm2$ranef),]
plot(1:9, fm2$ranef[ord.re], axes=FALSE, ylim=range(ci),
     xlab="Judge", ylab="Judge effect")
axis(1, at=1:9, labels = ord.re)
axis(2)
for(i in 1:9) segments(i, ci[i,1], i, ci[i, 2])
abline(h = 0, lty=2)


###################################################
### code chunk number 14: clmm2_tutorial.Rnw:361-362
###################################################
head(cbind(wine, fitted(fm2)))


###################################################
### code chunk number 15: clmm2_tutorial.Rnw:367-368
###################################################
head(cbind(wine, pred=predict(fm2, newdata=wine)))


###################################################
### code chunk number 16: clmm2_tutorial.Rnw:386-388
###################################################
plogis(fm2$Theta[3] - fm2$beta[2]) -
    plogis(fm2$Theta[2] - fm2$beta[2])


###################################################
### code chunk number 17: clmm2_tutorial.Rnw:396-397
###################################################
qnorm(0.95) * c(-1, 1) * fm2$stDev


###################################################
### code chunk number 18: clmm2_tutorial.Rnw:402-410
###################################################
pred <- function(eta, theta, cat = 1:(length(theta)+1),
                 inv.link = plogis)
{
    Theta <- c(-1e3, theta, 1e3)
    sapply(cat, function(j)
        inv.link(Theta[j+1] - eta) - inv.link(Theta[j] - eta) )
}
pred(qnorm(0.05) * fm2$stDev, fm2$Theta)
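## A quick sanity check for the pred() helper above (a sketch, not from
## the generated vignette code): the category probabilities should sum
## to one for any value of the linear predictor, since the differences
## of cumulative probabilities telescope between the extreme
## pseudo-thresholds -1e3 and 1e3.
stopifnot(isTRUE(all.equal(sum(pred(0, fm2$Theta)), 1)))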
###################################################
### code chunk number 19: clmm2_tutorial.Rnw:416-434
###################################################
mat <- expand.grid(judge = qnorm(0.95) * c(-1, 0, 1) * fm2$stDev,
                   contact = c(0, fm2$beta[2]),
                   temp = c(0, fm2$beta[1]))
pred.mat <- pred(eta=rowSums(mat), theta=fm2$Theta)
lab <- paste("contact=", rep(levels(wine$contact), 2), ", ",
             "temp=", rep(levels(wine$temp), each=2), sep="")
par(mfrow=c(2, 2))
for(k in c(1, 4, 7, 10)) {
    plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
         xlab="Bitterness rating scale", axes=FALSE,
         ylab="Probability", main=lab[ceiling(k/3)], las=1)
    axis(1); axis(2)
    lines(1:5, pred.mat[k+1, ], lty=1)
    lines(1:5, pred.mat[k+2, ], lty=3)
    legend("topright",
           c("avg. judge", "5th %-tile judge", "95th %-tile judge"),
           lty=1:3, bty="n")
}


###################################################
### code chunk number 20: clmm2_tutorial.Rnw:439-449
###################################################
getOption("SweaveHooks")[["fig"]]()
k <- 1
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
     xlab="Bitterness rating scale", axes=FALSE,
     ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright",
       c("avg. judge", "5th %-tile judge", "95th %-tile judge"),
       lty=1:3, bty="n")


###################################################
### code chunk number 21: clmm2_tutorial.Rnw:451-461
###################################################
getOption("SweaveHooks")[["fig"]]()
k <- 4
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
     xlab="Bitterness rating scale", axes=FALSE,
     ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright",
       c("avg. judge", "5th %-tile judge", "95th %-tile judge"),
       lty=1:3, bty="n")


###################################################
### code chunk number 22: clmm2_tutorial.Rnw:463-473
###################################################
getOption("SweaveHooks")[["fig"]]()
k <- 7
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
     xlab="Bitterness rating scale", axes=FALSE,
     ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright",
       c("avg. judge", "5th %-tile judge", "95th %-tile judge"),
       lty=1:3, bty="n")


###################################################
### code chunk number 23: clmm2_tutorial.Rnw:475-485
###################################################
getOption("SweaveHooks")[["fig"]]()
k <- 10
plot(1:5, pred.mat[k,], lty=2, type = "l", ylim=c(0,1),
     xlab="Bitterness rating scale", axes=FALSE,
     ylab="Probability", main=lab[ceiling(k/3)], las=1)
axis(1); axis(2)
lines(1:5, pred.mat[k+1, ], lty=1)
lines(1:5, pred.mat[k+2, ], lty=3)
legend("topright",
       c("avg. judge", "5th %-tile judge", "95th %-tile judge"),
       lty=1:3, bty="n")
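## The same check applied row-wise to the prediction matrix built in
## code chunk 19 (a sketch, not from the generated vignette code):
## every row of pred.mat should be a probability distribution over the
## five rating categories. unname() drops the row names that rowSums()
## carries over from expand.grid().
stopifnot(isTRUE(all.equal(unname(rowSums(pred.mat)),
                           rep(1, nrow(pred.mat)))))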
###################################################
### code chunk number 24: clmm2_tutorial.Rnw:495-496
###################################################
exp(2*qnorm(0.95) * fm2$stDev)


###################################################
### code chunk number 25: clmm2_tutorial.Rnw:502-503
###################################################
exp(2*qnorm(0.75) * fm2$stDev)


###################################################
### code chunk number 26: misc (eval = FALSE)
###################################################
##


ordinal/inst/CITATION0000755000176200001440000000071314531705120014016 0ustar liggesusers
year <- sub(".*(2[[:digit:]]{3})-.*", "\\1", meta$Date)
vers <- paste0("R package version ", meta$Version)

bibentry(
    'Manual',
    title = 'ordinal---Regression Models for Ordinal Data',
    author = person("Rune H. B.", "Christensen",
                    comment = c(ORCID = "0000-0002-4494-3399")),
    header = "To cite 'ordinal' in publications use:",
    year = year,
    note = vers,
    url = "https://CRAN.R-project.org/package=ordinal"
)
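## A minimal usage sketch, assuming the package is installed: this is
## how R renders the entry defined in the CITATION file above.
citation("ordinal")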