---- robustbase/NAMESPACE ----
useDynLib(robustbase, .registration=TRUE)

if(FALSE) {##MM
    stopifnot(require(codetoolsBioC), require(robustbase))
    findExternalDeps("robustbase")
}

importFrom("grDevices", dev.interactive, extendrange)
importFrom("graphics", abline, axis, box, legend, lines, matplot, mtext,
           panel.smooth, par, plot, points, strheight, text, title)
importFrom("stats", .lm.fit, aggregate, alias, as.formula, binomial,
           confint.lm, dummy.coef.lm, # want to register these as S3 {confint}{lmrob} and {dummy.coef}{..}
           coef, cor, cov, cov.wt, cov2cor, delete.response, deviance, dnorm,
           dpois, family, fitted, fivenum, formula, gaussian, glm, glm.fit,
           hatvalues, integrate, is.empty.model, lm.fit, lm.wfit, mad,
           mahalanobis, median, model.frame, model.matrix, model.matrix.lm,
           model.offset, model.response, model.weights, na.fail, na.omit,
           na.pass, napredict, naprint, naresid, nlminb, nls, nls.control,
           nobs, optim, optimize, pbinom, pchisq, pf, pgamma, pnorm, poisson,
           ppois, predict, printCoefmat, pt, qchisq, qnorm, qpois, qqline,
           qqnorm, qt, quantile, resid, residuals, residuals.lm, setNames,
           splinefun, symnum, terms, uniroot, var, vcov, weights,
           .checkMFClasses, .getXlevels,
           ## S3 generics (*not* shown yet by findExternalDeps() above):
           anova, case.names, confint, dummy.coef, logLik, profile, variable.names
           )
## ^^^^ MASS has a bit more; take it as example

if(getRversion() >= "3.3.0") {
    importFrom("stats", sigma)
} else {
    export(sigma)
    S3method(sigma, nls)
}

importFrom("utils", globalVariables, str)

## MASS has many rlm S3 methods; should we use some of them - for rnls() ?!
## we also currently use MASS::cov.rob(); but probably only temporarily

importFrom("methods", is, new, setClass, setMethod, slot, "slot<-",
           ## needed, e.g. for 'robustlmm' to work w/o warning:
           as, "as<-", coerce, "coerce<-")
importFrom("DEoptimR", JDEoptim)

## Functions defined in this package
export(Sn, Qn, Qn.old,
       s_Sn, s_Qn, s_mad, s_IQR,
       summarizeRobWeights,
       wgt.himedian, h.alpha.n,
       covMcd, .MCDcons, .MCDcnp2, .MCDcnp2.rew, .MCDsingularityMsg,
       robMD, mahalanobisD, # <- still "internal"
       ltsReg,
       tolEllipsePlot, ## RENAME ?!? :
       covPlot, ltsPlot, ## NO!  ddplot, distplot, chi2qqplot
       rrcov.control,## << RENAME --- FIXME
       huberM, huberize,
       colMedians, rowMedians,
       covOGK, covGK, hard.rejection, scaleTau2,
       covComed, ## comedian, COM, -- not yet
       smoothWgt, .wgtFUN.covMcd, .wgtFUN.covComed,
       psiFunc, huberPsi, hampelPsi,
       ## Not yet:
       ## tukeyPsi, # = biweight / bisquare
       tukeyChi, tukeyPsi1, # TODO deprecate: see ./TODO
       Mpsi, Mchi, Mwgt, MrhoInf,
       .Mpsi, .Mchi, .Mwgt, .MrhoInf, .Mwgt.psi1,
       .Mchi.tuning.default, .Mpsi.tuning.default, .regularize.Mpsi,
       .Mchi.tuning.defaults, .Mpsi.tuning.defaults,
       .psi2ipsi, .psi.const,
       .psi.ggw.findc, .psi.lqq.findc,
       lmrob, lmrob.fit, lmrob.fit.MM,
       lmrob..M..fit, lmrob..D..fit,
       lmrob.S, lmrob.lar, lmrob.M.S,
       lmrob.control, .lmrob.hat, # was lmrob.leverages()
       splitFrame, outlierStats,
       mc, # Mia Huber's medcouple
       lmc, rmc, # left and right mc, robust measures of tail weight
       adjbox, adjboxStats,
       adjOutlyingness, fullRank,
       glmrob, nlrob, nlrob.control
     , glmrobMqle.control, glmrobBY.control, glmrobMT.control
     , BYlogreg ## R/BYlogreg.R  FIXME: add to glmrob() and "deprecate"
     ## , glmrobMT ## R/MTestimador2.R
     , estimethod
       ## related to detMCD() and to be used in rrcov etc:
     , r6pack, doScale
     , rankMM, classPC, .signflip
       )

## S3 methods for ``our own'' S3 generics:
S3method(ltsReg, default)
S3method(ltsReg, formula)
S3method(adjbox, default)
S3method(adjbox, formula)

## Register all the methods for S3 generics elsewhere
## in case namespace is loaded but not currently attached.
S3method(anova, lmrob)
S3method(anova, glmrob)
S3method(alias, lmrob)
S3method(case.names, lmrob)
S3method(confint, lmrob, confint.lm)## -> R bug (see below) & in R/lmrob.R
S3method(confint, nlrob)
S3method(dummy.coef, lmrob, dummy.coef.lm)# (R bug ...)
S3method(estimethod, nlrob)
S3method(family, lmrob)
S3method(hatvalues, lmrob)
S3method(kappa, lmrob)
S3method(labels, lmrob)
S3method(model.matrix, lmrob)
S3method(nobs, lmrob)
S3method(nobs, lmrob.S, nobs.lmrob)# use the same as "lmrob"
S3method(nobs, mcd)
S3method(residuals, lmrob)
S3method(residuals, lmrob.S)
S3method(variable.names, lmrob)
S3method(print, lmrobCtrl)
S3method(update, lmrobCtrl)
## R Bug {fixed in svn rev 84463}: needs within.list in our NS:
S3method(within, lmrobCtrl, within.list)
S3method(weights, glmrob)
S3method(weights, lmrob)
S3method(weights, lmrob.S, weights.lmrob)# use the same as "lmrob"
S3method(weights, nlrob, weights.lmrob)# use the same as "lmrob"
S3method(formula, nlrob)
S3method(fitted, nlrob)
S3method(plot, lts)
S3method(plot, mcd)
S3method(plot, lmrob)
S3method(predict, lmrob)
S3method(predict, glmrob)
S3method(predict, nlrob)
S3method(print, glmrob)
S3method(print, lts)
S3method(print, mcd)
S3method(print, nlrob)
S3method(print, lmrob)
S3method(print, lmrob.S)
S3method(print, summary.glmrob)
S3method(print, summary.lmrob)
S3method(print, summary.nlrob)
S3method(print, summary.lts)
S3method(qr, lmrob)
## lmrob uses residuals.default
S3method(residuals, nlrob)
S3method(residuals, glmrob)
S3method(summary, glmrob)
S3method(summary, lmrob)
S3method(summary, lts)
S3method(summary, mcd)
S3method(summary, nlrob)
S3method(sigma, lmrob)
S3method(sigma, nlrob)
S3method(vcov, glmrob)
S3method(vcov, lmrob)
S3method(vcov, nlrob)
S3method(print, summary.mcd)

## S4 Generics {only those defined in this package}:
export( chgDefaults )

exportClasses(
    "functionX",   # function(x, ): VECTORIZED in 'x'
    "functionXal", # a functional: function of tuning par.s only
    "psi_func"     # containing rho(), psi(), psi'(), ...
)
exportMethods(chgDefaults, plot)

---- robustbase/demo/determinMCD.R ----
library(robustbase)
source(system.file("xtraR/test_MCD.R", package = "robustbase"))#-> doMCDdata()

##' This version of domcd() runs *both* "Fast" and "deterministic" MCD
##' @title covMcd() "workhorse" function -- *passed* to and from doMCDdata()
##' @param x data set: n x p numeric matrix
##' @param xname "promise" which will be substituted() and printed
##' @param nrep number of repetitions: only sensible for *timing*
##' @param time
##' @param short
##' @param full
##' @param lname optional:
##' @param seed optional:
##' @param trace optional:
domcd.2 <- function(x, xname, nrep=1,
                    do.exact = NULL, # <- smart default, globally customizable
                    time  = get("time",  parent.frame()), # compromise
                    short = get("short", parent.frame()), # compromise
                    full  = get("full",  parent.frame()), # compromise
                    lname=20, seed=123, trace=FALSE)
{
    if(short && full) stop("you should not set both 'full' and 'short' to TRUE")
    force(xname)# => evaluate when it is a data(<>, ..) call
    n <- dim(x)[1]
    p <- dim(x)[2]
    metha <- "FastMCD"
    methb <- "detMCD"
    if(is.null(do.exact)) {
        nLarge <- if(exists("nLarge", mode="numeric")) get("nLarge", mode="numeric") else 5000
        do.exact <- choose(n, p+1L) < nLarge
    }
    set.seed(seed); mcda <- covMcd(x, trace=trace)
    set.seed(seed); mcdb <- covMcd(x, nsamp="deterministic", trace=trace)
    if(do.exact) {
        methX <- "exactMCD"
        set.seed(seed); mcdX <- covMcd(x, nsamp="exact", trace=trace)
    }
    mkRes <- function(mcd) sprintf("%3d %3d %3d %12.6f\n", n,p, mcd$quan, mcd$crit)
    xresa <- mkRes(mcda)
    xresb <- mkRes(mcdb)
    if(do.exact) xresX <- mkRes(mcdX)
    if(time) {
        tim1 <- function(meth) sprintf("%10.3f\n", system.time(repMCD(x, nrep, meth))[1]/nrep)
        xresa <- paste(xresa, tim1(metha))
        xresb <- paste(xresb, tim1(methb))
        if(do.exact) xresX <- paste(xresX, tim1(methX))
    }
    if(full) {
        header <- get("header", parent.frame())
        header(time)
    }
    ## lname: must fit to header():
    x.meth <- paste(xname, format(c(metha, methb, if(do.exact) methX)))
    cat(sprintf("%*s", lname, x.meth[1]), xresa)
    cat(sprintf("%*s", lname, x.meth[2]), xresb)
    if(do.exact) cat(sprintf("%*s", lname, x.meth[3]), xresX)
    cat("Best subsamples: \n")
    cat(sprintf(" %10s: ", metha)); print(mcda$best)
    if(identical(mcdb$best, mcda$best))
        cat(sprintf(" %s is the same as %s\n", methb, metha))
    else {
        cat(sprintf(" %10s: ", methb)); print(mcdb$best)
        cat(sprintf(" Difference %s - %s:", methb, metha))
        print(setdiff(mcdb$best, mcda$best))
    }
    if(do.exact) {
        if(identical(mcda$best, mcdX$best))
            cat(sprintf(" %s is the same as %s\n", methX, metha))
        else if(identical(mcdb$best, mcdX$best))
            cat(sprintf(" %s is the same as %s\n", methX, methb))
        else {
            cat(sprintf(" %10s: ", methX)); print(mcdX$best)
        }
    }
    if(!short) {
        cat("Details about", metha,": ")
        ibad <- which(mcda$wt==0)
        names(ibad) <- NULL
        nbad <- length(ibad)
        cat("Outliers: ",nbad,"\n")
        if(nbad > 0) print(ibad)
        if(full){
            cat("-------------\n")
            print(mcda)
        }
        cat("--------------------------------------------------------\n")
    }
}

doMCDdata(domcd = domcd.2)
warnings() ## in one example n < 2 * p ..
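## Not part of the original demo: a minimal hedged sketch of the three covMcd()
## search strategies that domcd.2() above compares, on toy data ('X.toy') small
## enough that nsamp = "exact" (all subsets of size p+1) is feasible.
set.seed(17)
X.toy <- matrix(rnorm(20 * 2), ncol = 2)
c.fast <- covMcd(X.toy)                          # default FAST-MCD subsampling
c.det  <- covMcd(X.toy, nsamp = "deterministic") # DetMCD starting subsets
c.ex   <- covMcd(X.toy, nsamp = "exact")         # enumerate all elemental subsets
c(fast = c.fast$crit, det = c.det$crit, exact = c.ex$crit)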
###' Test the exact fit property of CovMcd --------------------------------

##' generate "exact fit" data
d.exact <- function(seed, p=2) {
    stopifnot(p >= 1)
    set.seed(seed)
    n1 <- 45
    x1 <- matrix(rnorm(p*n1), nrow=n1, ncol=p)
    x1[,p] <- x1[,p] + 3
    n2 <- 55
    m2 <- 3
    x <- rbind(x1, cbind(matrix(rnorm((p-1)*n2), n2, p-1), rep(m2,n2)))
    colnames(x) <- paste0("X", 1:p)
    x
}

plot (d.exact(  18, p=2))
pairs(d.exact(1234, p=3), gap=0.1)

for(p in c(2,4)) for(sid in c(2, 4, 18, 1234)) {
    cat("\nseed = ",sid,"; p = ",p,":\n")
    d.x <- d.exact(sid, p=p)
    d2 <- covMcd(d.x)
    ## Gave error {for p=2, seeds 2, 4, 18 also on 64-bit}:
    ## At line 729 of file rffastmcd.f
    ## Fortran runtime error: Index '6' of dimension 1 of array 'z' above upper bound of 4
    print(d2)
    if(FALSE) ## FIXME fails when calling eigen() in "r6pack()"
        d2. <- covMcd(d.x, nsamp = "deterministic", scalefn = Qn)
    stopifnot(d2$singularity$kind == "on.hyperplane")
}
## TODO: also get examples of other singularity$kind's

---- robustbase/demo/00Index ----
determinMCD    Compute (and compare) the deterministic MCD for "standard datasets"

---- robustbase/ChangeLog ----
NOTE: All newer changes are in ./inst/NEWS.Rd --> `news(package = "robustbase")`
---- ---------------

2014-12-12 Martin Maechler
* R/mc.R (mc): fix for limit case where x[] contains +/-Inf: pass +/- Inf to C
* src/mc.c (mc_C_d): and replace them by 'Large' = +/- DBL_MAX/4

2014-12-11 Martin Maechler
* man/adjOutlyingness.Rd: fix another old FIXME:
* R/adjoutlyingness.R (adjOutlyingness): cupper=0, clower=0 now computes the classical ("symmetric") outlyingness W/O mc() call.

2014-12-03 Martin Maechler
* DESCRIPTION (Version): 0.92-3
* R/comedian.R (covComed): new from Maria Anna, tweaked by Valentin and modified considerably by MM.
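A small, hedged illustration (not from the package sources) of the two user-visible points above: mc() now accepts data containing +/-Inf, and adjOutlyingness(*, clower=0, cupper=0) computes the classical, non-skewness-adjusted outlyingness. It assumes a robustbase version with these fixes is attached.

    library(robustbase)
    mc(c(rnorm(100), Inf, -Inf))        # medcouple; infinite values no longer break it
    X <- matrix(rnorm(200), ncol = 2)
    str(adjOutlyingness(X, clower = 0, cupper = 0))  # classical ("symmetric") outlyingness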
2014-11-22 Martin Maechler * DESCRIPTION (Version): 0.92-2, released to CRAN on 2014-11-22 2014-11-18 Martin Maechler * DESCRIPTION (Version): 0.92-1, released to CRAN on 2014-11-18 2014-10-24 Martin Maechler and 2014-10-19 Valentin Todorov * DESCRIPTION (Version): 0.92-1 * R/covMcd.R, detmcd.R: added Deterministic MCD as an option to covMcd (nsamp="deterministic"), function .detMcd() * tests/tmcdd.R: new tests, for covMcd() with nsamp="deterministic" * tests/tmcdd.Rout.save: new tests, for covMcd() with nsamp="deterministic" * inst/xtraR/test_MCD.R: modified to test covMcd() with nsamp="deterministic" * NAMESPACE: export the functions r6pack() and doScale() to be used in rrcov for the deterministic S- and MM-estimates * src/rffastmcd and init.c, R/covMcd: fix a bug in nsamp="exact" 2014-10-17 Martin Maechler * R/adjoutlyingness.R (adjOutlyingness): reverse the defaults of 'clower' and 'cupper' and fix +/- swap; see new note in man/adjOutlyingness.Rd * man/adjOutlyingness.Rd: the wrong defaults came from *.ssc file from Antwerpen 2014-06-30 Martin Maechler * src/rllarsbi.f (rlLARSbi): got rid of warnings -- and many GOTOs 2014-06-11 Martin Maechler * DESCRIPTION (Version): 0.92-0 * R/nlrob.R (.nls.get.start, nlrob): now works with indexed vector parameters -- FIXME: at ETH have testing code + example data -- unfinished!-- -- on nb-mm3 not 2014-04-30 Martin Maechler * DESCRIPTION (Version): 0.91-1, released to CRAN on 2014-05-01 2014-04-24 Martin Maechler * DESCRIPTION (Version, Date): 0.91-0, released to CRAN 2014-04-24 2014-01-30 Martin Maechler * DESCRIPTION (Version): 0.90-1, released to CRAN on 2014-01-30 2013-05-22 Martin Maechler * R/lmrob.MM.R (Mpsi, Mchi, Mwgt): now all work via .Call(): is 9 x faster for n ~ 4000, and a few NAs. 2013-05-18 Martin Maechler * R/lmrob.MM.R (MrhoInf): new, providing rho(Inf), i.e., the scaling constant (rho |--> rho~ = chi) * R/lmrob.MM.R (Mpsi, Mchi, Mwgt): renamed, exported and documented, from former hidden lmrob.psifun(), lmrob.chifun(), lmrob.wgtfun(). * TODO: now has an extensive section on psi/rho/chi etc 2013-03-27 Martin Maechler * DESCRIPTION (Version): 0.9-8, released to CRAN on 2013-06-14 2013-03-06 Martin Maechler * DESCRIPTION (Version): 0.9-7, released to CRAN on 2013-03-06 2013-03-01 Manuel Koller * R/lmrob.R (lmrob): Adding weights and offset arguments. 2013-02-26 Manuel Koller * R/lmrob.R (class lmrob): Objects of class lmrob now store the robustness weights in $rweights (used to be in $weights). * R/lmrob.R (weights.lmrob): specialized weights() function for lmrob objects. Returns prior weights by default. Robustness weights are available via the "type" argument. 2013-02-14 Manuel Koller * R/lmrob.R (lmrob): setting class "lmrob" also for unconverged initial estimates. * R/lmrob.R (print.summary.lmrob, print.lmrob, print.lmrob.S): improved handling of unconverged estimates and exact fits. 2013-02-07 Manuel Koller * R/lmrob.R (lmrob): compatibility to lm: 'assign': labels of terms used for fitting. * R/lmrob.R (summary.lmrob): compatibility to summary.lm for: 'df': degrees of freedom, a 3-vector (p, n-p, p*), the last being the number of non-aliased coefficients (used to be just (n-p)). 'aliased': named logical vector showing if the original coefficients are aliased (was not returned before). 
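The psi-function accessors exported above (2013-05-18/22 entries) can be called directly; a hedged sketch, assuming robustbase >= 0.9-8 and the usual 95%-efficiency bisquare tuning constant:

    library(robustbase)
    x <- seq(-6, 6, by = 2)
    Mpsi(x, cc = 4.685, psi = "bisquare")   # psi(x)
    Mwgt(x, cc = 4.685, psi = "bisquare")   # psi(x)/x, the robustness weights
    Mchi(x, cc = 4.685, psi = "bisquare")   # rho(x) standardized to [0, 1]
    MrhoInf(cc = 4.685, psi = "bisquare")   # rho(Inf), the scaling constant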
2012-09-11 Manuel Koller
* DESCRIPTION (Version): 0.9-4, released to CRAN on 2012-09-11

2012-05-14 Martin Maechler
* R/lmrob.MM.R (.vcov.avar1): fix typo in "posdefify"

2012-03-05 Martin Maechler
* R/lmrob.MM.R (.vcov.avar1): "posdefify" FIXME (negative eigen values)

2012-02-27 Martin Maechler
* DESCRIPTION (Version): 0.8-1-1, released to CRAN on 2012-03-02

2012-02-24 Manuel Koller
* R/lmrob.R (lmrob): added init argument: string, function or list.
* R/lmrob.MM.R (lmrob.fit): added init argument.
* man/lmrob.Rd: Added documentation about init argument.
* man/lmrob.fit.Rd: see above.

2012-02-22 Martin Maechler
* R/nlrob.R (print.summary.nlrob): use full call instead of just formula. --> shows non-default psi()

2012-02-19 Martin Maechler
* R/nlrob.R (nlrob): now use nls(...., weights = *, ..), no longer need hack "put everything to the RHS".

2011-12-23 Martin Maechler
* R/nlrob.R (nlrob): for zero weights 'w', the residuals accidentally were NaN.

2011-12-12 Manuel Koller
* DESCRIPTION (Version): 0.8-1
* src/mc.c (mc_C_d): fixed bug in iteration; see svn log -r 272 & 274

2011-12-09 Martin Maechler
* R/adjbox.R (adjboxStats): fix the sign-error thinko for the case mc < 0.
* man/adjboxStats.Rd: document; test the reflection invariance now.

2011-12-08 Martin Maechler
* R/adjbox.R (adjbox.default): new 'doReflect' argument; default 'TRUE' ensures that 'adjbox(x)' behaves symmetrically in 'x'.
* DESCRIPTION (Version): 0.8-0, released to CRAN on 2011-12-09

2011-10-24 Martin Maechler with Andreas in train to Fribourg
* R/glmrob.R (residuals.glmrob): provide a version of residuals.glm() which works
* NAMESPACE: residuals.glmrob

2011-10-11 Manuel Koller
* DESCRIPTION (Version): 0.7-8, released to CRAN on 2011-10-26, r270
* man/lmrob.Rd: See also extended with lmrob..M..fit.
* man/lmrob..M..fit.Rd: Extended example with function lmrob.custom.
* R/lmrob.MM.R (lmrob.tau): moving calculation of h to the inside of the function, so that obj$qr is not required.
* R/ltsPlot.R (ltsPlot, myqqplot): removed superfluous title().

2011-10-11 Martin Maechler
* src/rfltsreg.f: add 'implicit none' and declare everything; new gfortran gave (correct) warnings.
* R/nlrob.R, man/nlrob.Rd: finally fix "fitted.values"
* tests/nlrob-tst.R (new): start more testing of nlrob()!

2011-08-09 Manuel Koller
* R/plot.lmrob.R (plot.lmrob): Fixing bug reported by Andreas Papritz. is.null(x$x) is always FALSE since there is always x$xlevels present.
* inst/doc/lmrob_simulation.Rnw (f.gen): load(file) was missing the proper path.

2011-05-24 Manuel Koller
* DESCRIPTION (Version): 0.7-6, released to CRAN today
* inst/doc/lmrob_simulation.Rnw: cleaned up chunk headers.
* inst/doc/Makefile: added Makefile that runs qpdf after texi2pdf.

2011-05-23 Manuel Koller
* DESCRIPTION (Version, Date): 0.7-5
* man/lmrob..D..fit.Rd: updated reference to Koller and Stahel 2011 (now with volume, issue and page numbers).
* man/lmrob.control.Rd: updated reference.
* man/lmrob.Rd: update reference.
* inst/doc/lmrob_simulation.bib: updated reference.
* inst/doc/estimating.functions.R (robustness.weights.lmrob.S): fixed typo.

2011-03-17 Manuel Koller
* DESCRIPTION (Version, Date): 0.7-4
* inst/doc/lmrob_simulation.Rnw: Fixed problem with paths in source(), save() and load().

2011-03-08 Manuel Koller
* DESCRIPTION (Version, Suggests): 0.7-3 and added xtable and ggplot2 to Suggests.
* inst/doc/lmrob_simulation.Rnw: re- or moved dependencies that are not required or just needed for data generation.
* inst/doc/error.distributions.R: removed dependency on skewt.
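The adjbox() facilities touched in the 2011-12-08/09 entries above can be sketched as follows (hedged; skewed toy data, current robustbase):

    library(robustbase)
    set.seed(3)
    x <- rlnorm(100)            # right-skewed sample
    boxplot.stats(x)$out        # classical rule: many spurious "outliers"
    adjboxStats(x)$out          # medcouple-adjusted whiskers flag far fewer
    adjbox(x)                   # the corresponding skewness-adjusted boxplot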
2011-02-10 Manuel Koller
* DESCRIPTION (Version): Version: 0.7-2
* inst/doc/lmrob_simulation.Rnw: updated title.
* inst/doc/lmrob_simulation.bib: updated references.
* man/lmrob.control.Rd: as above.
* man/lmrob.Rd: as above.

2011-01-28 Martin Maechler
* R/glmrobMqle.R (Huberprop2, ...),
* R/psi-rho-funs.R (huberPsi): replace pmin2/pmax2 by pmin.int()/pmax.int().

2011-01-26 Manuel Koller
* R/psi-rho-funs.R (psiFunc, chgDefaults): default arguments are now also set for the E... slots
* tests/psi-rho-etc.R: updated comments
* tests/psi-rho-etc.Rout.save: updated accordingly

2011-01-20 Manuel Koller
Updated reference to Koller and Stahel 2011 paper.
* man/lmrob.control.Rd: updated reference. KS2010 -> KS2011.
* man/lmrob.Rd: see above
* inst/doc/lmrob_simulation.bib: updated ks2011 reference.
* inst/doc/lmrob_simulation.Rnw: see above
* R/lmrob.MM.R (lmrob.control): setting argument: KS2011 instead of KS2010

2011-01-18 Manuel Koller
* tests/psi-rho-etc.R: added tests to document bug in psi_func class and pmin2/pmax2 functions.
* tests/psi-rho-etc.Rout.save: results as they should be
* inst/doc/lmrob_simulation.Rnw: added table with tuning constants used in simulation.

2010-12-04 Martin Maechler
* R/qnsn.R (Qn): even better finite sample correction, from an analysis of a Qn simulation. (Qn.old): provide, if needed for back-compatibility.

2010-12-02 Valentin Todorov
* R/ltsReg.R, man/ltsReg.Rd, tests/LTS-specials.R: the raw weights 'raw.weights' added to the returned object
* R/ltsReg.R, src/rfltsreg.f, man/ltsReg.Rd: options nsamp="exact" and nsamp="best" fixed; Added 'trace' parameter

2010-12-02 Martin Maechler
* tests/mc-strict.R (adjOutlyingness): adapt tests to the fixed mc() code; using some tolerance... still need to be tested on Win/Mac/..
* R/nlrob.R (summary.nlrob): do not compute 'se' if not converged; update help page; (print.summary.nlrob): print "non-convergence"
* man/summary.nlrob.Rd, man/predict.lmrob.Rd, ..: update and trivial changes.
* R/qnsn.R (Qn): fixed the consistency constant (thanks to Peter Ruckdeschel), slightly adapting the finite sample factors for n = 2:9.

2010-12-02 Manuel Koller
* DESCRIPTION (Version): 0.6-9
* R/mc.c: fixed several bugs, consistent use of eps[0] and eps[1] to avoid numerical problems, fixed memory corruption bug that caused segfaults from time to time.
* tests/mc-etc.R: see below
* tests/mc-etc.Rout.save: see below
* tests/mc-strict: updated tests and results
* man/mc.Rd: see below
* R/mc.R (mc.default): updated eps1 and eps2 defaults.
* inst/doc/lmrob_simulation.Rnw: minor cosmetic changes
* inst/doc/graphics.functions.R: added some helper functions to ease working with color palettes used by ggplot2.

2006-10-27 Andreas Ruckstuhl and 2010-10-28 Martin Maechler
* DESCRIPTION (Version): 0.6-8
* R/lmrobPredict.R, R/glmrobPredict.R: new more careful prediction methods, now documented in
* man/predict.lmrob.Rd, man/predict.glmrob.Rd: and no longer in
* man/summary.lmrob.Rd:

2010-10-13 Manuel Koller
* DESCRIPTION (Version): 0.6-7
* R/lmrob.MM.R (lmrob.psifun, lmrob.rhofun, lmrob.wgtfun): as.double caused input to lose dimension attribute, fixed.
* inst/doc/lmrob_simulation.Rnw: Updated vignette title. Enhanced aggregation procedure.
* inst/doc/lmrob_simulation.Bib: Fixed entry KS2010, no more trouble with bibtex.
* inst/doc/aggr_results.Rdata: updated simulation results. n = 400 missing for now.
* inst/doc/error.distributions.R: added cskt distribution: centered skewed t-distribution.
* inst/doc/simulation.results.R: small changes in proclist generation. 2010-10-11 Manuel Koller * DESCRIPTION (Version): 0.6-6 (Depends): Depends on R 2.9.0 (use of grepl) * man/lmrob.Rd: Design Adapted Scale estimate renamed to Design Adaptive Scale estimate * man/lmrob.fit.Rd: ditto * man/lmrob..D..fit.Rd: ditto 2010-10-08 Manuel Koller * DESCRIPTION (Version): 0.6-5 * inst/doc: added directory. * inst/doc/lmrob_simulation.Rnw: vignette containing simulation study of Koller and Stahel (2010). * inst/doc/lmrob_simulation.bib: bibtex file for vignette * inst/doc/graphics.functions.R: R code used in vignette, graphics helper functions. * inst/doc/error.distributions.R: R code used in vignette, custom distribution functions. * inst/doc/simulation.functions.R: R code used in vignette, functions used in simulations. * inst/doc/estimating.functions.R: R code used in vignette, extra estimating functions. * inst/doc/asymptotic.max.bias.Rdata: Cache of calculations, to speed up vignette making. * inst/doc/aggr_results.Rdata: Cache of calculations, to speed up vignette making. * .Rbuildignore: Omitting temporary Sweave output files from the build. * src/init.c: added function to process simulation output (used in vignette). * src/lmrob.c (R_calc_fitted): ditto * src/robustbase.h: ditto 2010-10-08 Manuel Koller * DESCRIPTION (Version): 0.6-4 * inst/CITATION: Added my name. * man/lmrob.control.Rd: Added more information to "setting"-Argument. * man/lmrob.Rd: ditto. Added example for "setting" argument. * R/lmrob.R (lmrob): Moved call of lmrob.control to function code. * R/lmrob.MM.R (lmrob.control): Changed setting KS2010: uses lqq instead of ggw. Fixed bug when using unknown setting. * R/lmrob.MM.R (lmrob.fit): Check for initial estimator. Issues warning if neq S. 2010-08-13 Martin Maechler * DESCRIPTION (Version): 0.6-3 * R/lmrob.MM.R (lmrob.psifun, ..., ghq): cosmetic changes; cleaner * src/lmrob.c (rho, psi, psip, ...): cleaner; partly faster; also ok for x = +-Inf.... * tests/lmrob-psifns.R: test psi(), rho(), etc -- also nice plots: tests/rob-psifns.pdf after .. check. 2010-08-13 Manuel Koller * R/lmrob.MM.R: changed the naming of lgw to the more accurate lqq "linear, quadratic, quadratic" (by construction of psi prime). * tests/lmrob-psifuns.R: changed the naming of lgw to the more accurate lqq * man/lmrob.control.Rd: ditto * man/summary.lmrob.Rd: updated documentation to account for "cov" argument of vcov. * R/lmrob.R (lmrob): warning and check for method argument improved. 2010-08-05 Manuel Koller * DESCRIPTION (Version): 0.6-2 * R/lmrob.MM.R (lmrob.const): replaced lmrob.ggw.const by lmrob.const, which now supports ggw and lgw. (lmrob.control, lmrob.lgw.findc, lmrob.conv.cc, lmrob.psi2ipsi): support for lgw psi function. (lmrob.efficiency, lmrob.bp): functions to calculate the efficiency and breakdown point of an M-estimator. (lmrob.tau, lmrob.tau.fast.coefs): Updated constants in all.equal check for Hampel psi function. Updated constants for all the supported psi functions. (lmrob..D..fit): switched order of updating covariance matrix and updating control$method. This caused the correction factors to be set incorrectly in some cases. * src/lmrob.c (normcnst, rho_lin, psi_lin, psip_lin, wgt_lin) (rho, psi, psip): Added support lgw psi function. * man/lmrob.control.Rd: updated to account for lgw. 
* tests/lmrob-psifns.R: added tests lgw psi function * tests/lmrob-psifns.Rout.save: updated accordingly * tests/lmrob-methods.Rout.save: updated, since constants for calculation of taus changed slightly * R/lmrob.R (vcov.lmrob): added possibility to calculate covariance matrix with another estimation method. 2010-07-12 Manuel Koller * DESCRIPTION (Version): 0.6-1 * src/lmrob.c (normcnst, rho_ggw, psi_ggw_vec, psi_ggw, psip_ggw) (wgt_ggw): Added support for custom constants for psi ggw. * R/lmrob.MM.R (.vcov.w): Modified constants for ggw psi function. (lmrob.control): Added method to calculate constants for ggw psi function. (lmrob.conv.cc, lmrob.ggw.mx, lmrob.ggw.ms, lmrob.ggw.ac) (lmrob.ggw.bp, lmrob.ggw.finda, lmrob.ggw.findc, lmrob.ggw.const): Methods to calculate constants for ggw psi. (lwgt): control argument optional. * tests/lmrob-psifns.R: added tests for custom tuning constants * tests/lmrob-psifns.Rout.save: added tests for custom tuning constants * experi-psi-rho-funs.R: added ggwPsi function, standardized s.t. Dpsi(0) = 1. Putting this into R/psi-rho-funs.R results in an error while "preparing for lazy-loading". 2010-06-25 Manuel Koller * DESCRIPTION (Version): 0.6-0 * NAMESPACE: dropped lmrob.control.sfs * R/lmrob.R (lmrob): "method" argument * man/lmrob.Rd: "method" argument * R/lmrob.MM.R (lmrob.control, lmrob.control.sfs): argument setting="KS2010" replaces lmrob.control.sfs. Setting a method involving a D step, sets default psi to ggw. * man/lmrob.control.Rd: "setting" argument * tests/lmrob-methods.R: "methods" and "psi" argument * tests/lmrob-methods.Rout.save: call output in summary changed * tests/lmrob-psifns.Rout.save: call output in summary changed * man/lmrob.control.Rd: Documentation details for tuning constants and alternative constants for .85 efficiency at the normal. 2010-06-24 Manuel Koller * DESCRIPTION (Version): 0.5-1.1 * tests/lmrob-psifns.R: added Hampel psi function example and updated coefficients of ggw. * src/lmrob.c: replaced "gws" with "ggw" in function names, "gwgt" with "welsh", replaced pow() with R_pow(), implemented support for coefficient vectors. Fixed problem with "ggw" on 32-bit machines. * R/lmrob.MM.R: changed the way coefficients for psi functions are handled: coefficient vectors are now supported. * R/lmrob.MM.R (lmrob..M..fit): class attribute for init entry in lmrob..M..fit was missing. 2010-06-23 Manuel Koller * tests/lmrob-methods.R: tests for compatibility between manual construction of different methods with specifying method argument in lmrob. * tests/lmrob-methods.Rout.save: expected results * tests/lmrob-psifns.R: tests for the support of different psi functions. * tests/lmrob-psifns.Rout.save: expected results * tests/lmrob-data.Rout.save: updated results to reflect slightly modified summary output (more options) * man/lmrob.Rd: updated to reflect changes in options * man/lmrob..D..fit.Rd: added documentation * man/lmrob..M..fit.Rd: updated example * man/lmrob.control.Rd: updated to reflect changes in options * man/lmrob.fit.Rd: updated from lmrob.fit.MM to reflect changes in options * man/lmrob.fit.MM.Rd: moved to lmrob.fit.Rd * man/lmrob.S.Rd: coef -> coefficients in example * R/anova.lmrob.R (length.tl, anovaLmrobPair, anovaLmrobList): Added checks to ensure soundness of result for methods other than "MM". 
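A hedged sketch of how the psi / setting machinery described in the surrounding entries is selected from lmrob() (assuming a current robustbase and its 'coleman' data set; 'setting' and 'psi' are passed on to lmrob.control()):

    library(robustbase)
    data(coleman)
    m.MM  <- lmrob(Y ~ ., data = coleman)                     # default: MM, bisquare psi
    m.KS  <- lmrob(Y ~ ., data = coleman, setting = "KS2011") # SMDM estimate with lqq psi
    m.ham <- lmrob(Y ~ ., data = coleman, psi = "hampel")
    rbind(MM = coef(m.MM), KS2011 = coef(m.KS), hampel = coef(m.ham))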
2010-06-21 Manuel Koller * R/lmrob.MM.R (lmrob.control, lmrob.control.sfs, lmrob.fit.MM) (.vcov.w, .vcov.avar1, lmrob..M..fit, lmrob.S, lmrob..D..fit) (lmrob.kappa, lmrob.tau, lmrob.hatmatrix, lmrob.leverages) (lmrob.psi2ipsi, lmrob.psifun, lmrob.chifun, lmrob.wgtfun) (residuals.lmrob.S, lmrob.E, ghq): Support for psi functions other than bisquare as well as SMDM estimates. * R/lmrob.R (lmrob): Support for psi functions other than bisquare as well as SMDM estimates. Replaced lmrob.fit.MM by lmrob.fit. * src/robustbase.h: declarations of new functions in lmrob.c * src/init.c: definitions of new functions in lmrob.c * src/lmrob.c: added support for psi functions other than bisquare, including vectorized accessor functions ("R_psifun", "R_chifun" and "R_wgtfun"). "wgt" is now always used to calculate robustness weights. Function to calculate scale estimate iteratively ("R_find_D_scale"). * NAMESPACE: added lmrob.fit, lmrob..D..fit and lmrob.control.sfs to export. * DESCRIPTION: added my name to the list. 2010-04-01 Martin Maechler * DESCRIPTION (Version): 0.5-1 2009-11-19 Martin Maechler * R/covMcd.R (.fastmcd): nLarge = 100'000 (instead of previous 5000), for now. nsamp <= integer.max is asserted now. * R/covMcd.R (covMcd): nmini = 300 is now an optional argument. * R/rrcov.control.R (rrcov.control): 'nmini = 300' ... * R/ltsReg.R (ltsReg.default): .fastmcd(..., nmini = 300) * src/rffastmcd.f (rffastmcd): new argument nmini 2009-11-18 Martin Maechler * DESCRIPTION (Version): 0.5-0-1, released to CRAN * tests/mc-strict.R: don't use the longmemo example for now. (platform dependence !) 2009-11-11 Martin Maechler * tests/glmrob-1.R: add moe explicit glmrob() tests; including the simple puzzling 1-outlier problem. * R/glmrobMqle.R: do use getRversion() ! 2009-06-27 Martin Maechler * data/wagnerGrowth.rda: add the "wagner data" (continuous + categorical) * man/wagnerGrowth.Rd: ditto 2009-06-05 Martin Maechler * R/glmrob.R (glmrob): add the option start = "lmrobMM", using a *robust* start for {essentially} glm(). * man/glmrob.Rd: add an example for that <<<<<______________ FIXME 2009-06-04 Martin Maechler * DESCRIPTION (Version): 0.5-0; *not* released * tests/glmrob-1.R: more glmrob() examples 2009-06-03 Martin Maechler * R/glmrobMqle.R (mFormat): make 'trace=TRUE' also depend on getOption("digits"). * R/glmrobMqle.R, R/glmrob.R: changes from Andreas Ruckstuhl, to support family = "Gamma". Unfortunately, these at first also very slightly change binomial, poisson. ==> few small changes by MM. * R/glmrob.R (glmrob): allow y (in "y ~ ...") to be *factor* 2009-01-17 Martin Maechler * DESCRIPTION (Version): 0.4-5 : bug-fixing release * tests/tmcd.R: add a regression test for the bug * src/rffastmcd.f: rfrdraw() w/o 'seed' * src/rfltsreg.f: ditto 2009-01-10 Martin Maechler * src/rf-common.f (rfrangen): drop unused 'seed' argument (rfrdraw): ditto * src/rffastmcd.f: get rid of TABs (gfortran -Wall complains) * man/exAM.Rd, man/possumDiv.Rd, ...: Rd_parse fixes 2008-11-28 Martin Maechler * R/OGK.R (covOGK): also return n.iter and weight.fn. * man/covOGK.Rd: note about the "silly" weight.fn 2008-10-15 Martin Maechler * R/lmrob.MM.R (lmrob.S): fix error message s/larger/smaller/ thanks to Keith Ponting. 2008-10-01 Martin Maechler * src/wgt_himed_templ.h: fix leading comment 2008-08-29 Martin Maechler * DESCRIPTION (Version): 0.4-3 for CRAN release * tests/mc-strict.R: define 'isMac' and tweak the tests; thanks to reports from Rory Winston. 
2008-08-09 Martin Maechler * NAMESPACE: import stats::cov {since others hide it!} 2008-08-05 Martin Maechler * R/huber.R (huberM): replace `s' by 's' in errors and warnings. * R/qnsn.R, R/plot.lmrob.R, R/ltsReg.R, R/ltsPlot.R: ditto 2008-08-04 Martin Maechler , really from Valentin Todorov * R/ltsReg.R (ltReg.default): if(mcd) call covMcd() with correct alpha * R/covMcd.R (covMcd): drop (n-1)/n correction, as cov.wt() contains that per default (in R, not in S-plus!). * R/tolEllipse.R (tolEllipsePlot): no text() for id.n == 0 * DESCRIPTION (Version): 0.4-2 released to CRAN. 2008-08-04 Martin Maechler * src/wgt_himed.c: renamed wgt_himed.c_templ to * wgt_himed_templ.h: 2008-08-02 Martin Maechler * DESCRIPTION (Version): 0.4-1 ready for release to CRAN. * R/ltsReg.R (ltsReg.default): don't add artificial "Y" y-name * src/lmrob.c (rwls): first call to sum_rho() is only needed for tracing (was used for lambda iterations). * R/glmrob.R (glmrob), * R/glmrobMqle.R (glmrobMqle): introduce 'trace' option to trace the robustness iterations {*not* part of control: does *not* influence result} * man/CrohnD.Rd: new data example robust poisson regression * data/CrohnD.rda: 2008-01-25 Martin Maechler * R/covMcd.R (.fastmcd): fix nsamp="exact" to use "all" * src/rffastmcd.f: krep=0 <==> nsamp="exact" : all := TRUE; new argument i_trace; and use intpr() and dblepr() * man/covMcd.Rd: pass 'trace' to .fastmcd() and Fortran * src/rf-common.f (rfncomb): give "error" message for very large 'comb' * tests/tmcd.R: add test for nsmap = "exact" 2008-01-05 Martin Maechler * man/ambientNOxCH.Rd: new data set from René Locher, * data/ambientNOxCH.rda: showing some lmrob-nonconvergence 2007-12-22 Martin Maechler * src/lmrob.c (rwls): if (trace_lev >= 3) show beta vector. 2007-12-13 Martin Maechler * man/summarizeRobWeights.Rd: add toy example * R/lmrob.R (summarizeRobWeights): cosmetic change; notably finishing line when weights were practically 0/1. * tests/MCD-specials.Rout.save {adapt from change of 11-07} 2007-11-07 Martin Maechler * R/covMcd.R (covMcd): if we have singularity, also say so, even if trace is FALSE. * R/covMcd.R (singularityMsg): for "on.hyperplane", concatenate long coefficient vector. 2007-10-25 Martin Maechler * man/pulpfiber.Rd: New data set (p=8 = 4 + 4) * data/pulpfiber.tab: from "Robust Multivariate Regression (2004)". * tests/lmrob-ex12.R: use versions of predict[.lmrob]() 2007-10-24 Martin Maechler * man/condroz.Rd: set latin1 encoding (and fix typo) * R/lmrob.R (predict.lmrob): define predict() and model.matrix() methods, working via "lm" methods. * NAMESPACE: export vcov() * man/summary.lmrob.Rd: some docu of new methods 2007-08-02 Martin Maechler * R/adjoutlyingness.R (adjOutlyingness): small improvements, getting rid of loops; fix an obvious typo (in original code). Leave away the Inf and NaN that from abs(Y[] - m) / ( tup | tlo) 2007-07-21 Martin Maechler * DESCRIPTION (Author): add Tobias * src/rmc.c (h_kern): revert to absolute test; many more experiments * tests/mc-etc.R (x3): a smallish "extreme" case of "non-convergence" 2007-07-20 Martin Maechler * tests/mc-etc.R: new testing of mc() and related * src/rmc.c (mc_C_d): changed work[] and weight[] to 0-indexing; this is hopefully the last bug ... * R/adjbox.R (adjbox.formula): use adjbox(), not boxplot()! 
* man/adjboxStats.Rd: added 2007-07-20 Tobias Verbeke * R/adjbox.R: Skewness-adjusted boxplot ported from Matlab LibRA * man/adjbox.Rd: * data/los.rda, man/los.Rd: * data/condroz.rda, man/condroz.Rd: two dataset related to medcouple 2007-07-19 Martin Maechler * R/adjoutlyingness.R: new (also from the Antwerpen MC collection) * DESCRIPTION (Depends): R >= 2.3.1, so we can use * NAMESPACE (useDynLib): .registration = TRUE * R/*.R (.C, .Fortran): now can use name variable instead of string and drop 'PACKAGE = ".."' 2007-07-18 Tobias Verbeke and Martin Maechler * src/rmc.c, src/robustbase.h (rmc): code for medcouple (MC); needs debugging: infinite loops and segmentation faults * R/mc.R (mc): new mc() for MedCouple 2007-07-16 Martin Maechler * DESCRIPTION (Version): 0.4-0 - definitely made progress * R/plot.lmrob.R (plot.lmrob): recompute robust Mahalanobis distances and cache them with the object. 2007-07-10 Martin Maechler * R/lmrob.R (summarizeRobWeights): new defaults; work but warn when 'eps' and 'eps1' lead to weights both close to 0 and 1. * R/covPlot.R: don't warn for which = "all" * man/covPlot.Rd: example 2007-06-28 Martin Maechler * data/kootenay.tab: add "famous" data set * man/kootenay.Rd: * data/cushny.R: add the other "famous" simple data set * man/cushny.Rd: 2007-06-20 Martin Maechler * DESCRIPTION (Version): 0.2-8 ready to be released * src/lmrob.c (rwls): patch from Matias Salibian: Don't do any "lambda iterations" anymore, they are remnants from old "experiments". 2007-06-19 Valentin Todorov * R/covMcd.R: usage of simulated finite sample correction factors fixed: * - case p=1 fixed * - simulated corrections used only when the Pison et.al. * formula is definitely wrong (negative or very large) 2007-06-09 Martin Maechler * tests/lmrob-data.R, *.Rout.save: new consistency tests for lmrob() * R/biweight-funs.R (tukeyPsi1, tukeyChi): renamed from lmrob.Psi() and lmrob.Chi(); also renamed * man/tukeyPsi1.Rd, man/tukeyChi.Rd: updated, also mentioning "psiFunc" * src/lmrob.c (rwls): now controlling *relative* error in MM iterations, i.e., convergence happens when ||b1 - b2|| < eps * ||b1||. The above 'eps' is now part of lmrob.control() instead of hard-wired in C code. * R/lmrob.MM.R (lmrob.MM): new argument 'trace.lev' * src/robustbase.h (R_lmrob_MM): add trace_lev argument 2007-06-08 Martin Maechler * src/lmrob.c (refine_fast_s): slight change in warning() [make message nicer for future R >= 2.5.1] 2007-04-21 Martin Maechler * R/ltsReg.R: replace 'quan' by 'h' internally * R/covMcd.R: ditto * R/covMcd.R (h.alpha.n): renamed from "quan.f"() and now exported: * NAMESPACE: and hence * man/h.alpha.n.Rd: documented, and linked to from here * man/ltsReg.Rd, man/covMcd.Rd: 2007-04-18 Valentin Todorov * R/covMcd.R: use simulated finite sample correction factors {FIXME!} 2007-04-11 Martin Maechler * R/ltsReg.R (ltsReg.default): 'ans$X' now has "intercept first" to match coefficient vector. 
* R/ltsReg.R (summary.lts): no need to reorder 'R' (cholesky) anymore 2007-04-02 Valentin Todorov * R/covMcd.R: the (repeated) calculation of the consistency * correction factor for the raw and reweighted MCD covariance * replaced by a call to a function MCDcons() 2007-03-30 Valentin Todorov * R/covMcd (print.mcd): solve a conflict with fastmcd() in package robust: both return an object of class "mcd" 2007-03-28 Martin Maechler * R/covMcd.R (.fastmcd): subsample size myk <- p+1 (was 'p') 2007-03-27 Martin Maechler * R/covMcd.R: cosmetic changes; comments * src/rffastmcd.f: ditto * tests/tmcd.R: added very small sample (n < 2p) examples. 2007-03-26 Martin Maechler * R/ltsReg.R (print.summary.lts): signif.stars: instead of FALSE, use same default as for lm(.). * man/summary.lts.Rd 2007-03-26 Martin Maechler * R/covMcd.R (covMcd): be more tolerant about small n, notably n < 2p, now only requiring n >= p + 2 * src/rf-common.f (rfnbreak): really unused * src/rfltsreg.f, src/rffastmcd.f: comment use of rfnbreak() 2007-03-24 Martin Maechler * R/covMcd.R (covMcd): ans$raw.cov should be matrix even for p==1 2007-03-21 Martin Maechler * R/ltsReg.R (ltsReg.default): fix long-standing ("rrcov") bug of wrong coefficient order in ltsReg(x,y, intercept=FALSE). * tests/LTS-specials.R: add regression-test 2007-02-08 Martin Maechler * R/covMcd.R (singularityMsg): moved the remaining cases out of covMcd(). 2007-01-24 Martin Maechler * R/covMcd.R (covMcd): save 'singularity' info as list, and don't append it as string to 'method'. (singularityMsg): new utility used for printing singularity of MCD (print.mcd): using strwrap() instead of "\n" such that print() obeys options("width"). 2006-12-28 Martin Maechler * DESCRIPTION (Version): 0.2-7 released to CRAN * src/rfltsreg.f: get rid of warnings, notably some extraneous args. * src/rffastmcd.f: ditto * inst/test_MCD.R (mortality, mort3): add example {near singular} * tests/tmcd.R: test "near singular" example, using new tolSolve: * R/rrcov.control.R (rrcov.control): new tolSolve = 1e-14 * R/covMcd.R (covMcd): use 'tolSolve' instead of 1e-10 for mahalanobis' solve(*, tol). 2006-12-21 Valentin Todorov * R/ltsPlot.R (ltsPlot): for "rqq", use *standardized* residuals 2006-10-20 Martin Maechler * src/rfltsreg.f (rfltsreg): more comments; and minor cleanups * R/ltsReg.R (.fastlts): slightly simplified 'nsamp' checking and setting * DESCRIPTION (LazyData): yes * man/heart.Rd: mention survivals' "heart" data 2006-10-18 Martin Maechler * src/rfltsreg.f: less "if() l1,l2,l3" ; better indenting; etc worked pretty hard on C translation but that still seg.faults (!) 2006-10-04 Martin Maechler * INDEX: added a "manual" INDEX which has all the data sets at the end. * R/OGK.R (covOGK): more default arguments: n.iter = 2, and weight.fn = hard.rejection * R/OGK.R (s_mad, s_IQR): more "scale functions" for (O)GK * tests/OGK-ex.R: added examples for new s_*() scales; * tests/OGK-ex.Rout.save: new 2006-10-03 Martin Maechler * R/qnsn.R (s_Qn, sSn): new (trivial) wrapper functions for easier use in covOGK(). * man/Sn.Rd, man/Qn.Rd: ditto * man/covOGK.Rd: example with the above 2006-09-30 Martin Maechler * DESCRIPTION (Version): 0.2-6 * R/covPlot.R (covPlot): added 'labels.id', 'cex.id' and 'label.pos' arguments, "parallel" to plot.lm() and improved the labeling accordingly. 
* man/covPlot.Rd: 'ask=TRUE' gives problems in R <= 2.3.1

2006-09-29 Martin Maechler
* DESCRIPTION (Version): 0.2-5
* data/radarImage.rda: add 'radarImage' data set from MMY-book
* man/radarImage.Rd: ditto
* data/toxicity.rda: added 'toxicity' data set (from MMY)
* man/toxicity.Rd: ditto
* src/rf-common.f (rfishsort): swap integer declaration order

2006-09-23 Martin Maechler
* man/covPlot.Rd: document covPlot() as well, since that is more generally usable.
* src/rf-common.f (rfrangen): declare unifrnd() as double prec.; also in other places; now using 'implicit none' to catch such things more easily.
* R/tolEllipse.R (tolEllipsePlot): classic=TRUE: do not prepare a side-by-side plot, since we *over*plot. Rename 2nd argument from 'mcd' to 'm.cov', since it really only needs to have a mean ('center') and covariance component.

2006-09-05 Martin Maechler
* DESCRIPTION (Version): 0.2-3 uploaded to CRAN
* tests/tlts.R: rename test functions and move to new file
* inst/test_LTS.R:
* tests/tmcd.R: rename test functions and move to new file; do not 'time' them, since we want to use *.Rout.save files.
* inst/test_MCD.R:
* src/Makevars: add long overdue $(FLIBS)

2006-09-04 Martin Maechler
* R/ltsReg.R (ltsReg.default): add argument name: sort(*, partial = )
* R/covMcd.R (covMcd): ditto

2006-06-27 Martin Maechler
* DESCRIPTION (Depends): no longer depend on "MASS" which is suggested.
* NAMESPACE, man/summarizeRobWeights.Rd: export and document summarizeRobWeights().
* R/lmrob.R (summarizeRobWeights): improvement (singular/plural etc)
* R/ltsReg.R (ltsReg.formula): add 'subset' etc; and follow guidelines in developer.r-project.org/model-fitting-functions.txt.
* tests/tmcd.R: typo; add timing comparison
* R/glmrob.R (glmrob): family = "gaussian" now dispatches to lmrob()

2006-06-24 Martin Maechler
* R/glmrob.R (print.summary.glmrob): as for lmrob(), now use summarizeRobWeights() and printControl().
* R/lmrob.R (printControl): more flexible
* man/glmrob.Rd: update examples with 'weights.on.x'

2006-06-23 Andreas Ruckstuhl
* R/glmrobMqle.R (wts_HiiDist): *row*Sums()!

2006-06-13 Martin Maechler
* DESCRIPTION (Version): 0.2-2
* src/Makevars (PKG_LIBS): need this (in particular for Windows)

2006-06-08 Martin Maechler
* src/lmrob.c (refine_fast_s): made sure 'conv *is* called as *Rboolean

2006-06-07 Martin Maechler
* src/lmrob.c (refine_fast_s): print warning in case of non-convergence
* DESCRIPTION (Version): 0.2-1 - pre-release
* R/lmrob.MM.R (lmrob.control): decrease refine.tol "back" to 1e-7
* man/lmrob.control.Rd: ditto
* src/lmrob.c (rho_biwgt): fix embarrassing sign typo/thinko

2006-06-06 Martin Maechler
* DESCRIPTION (Version): 0.2-0 "pre-released" to Andreas & Matias
* src/lmrob.c (rwls): return *max_it = #{iterations used}
* R/lmrob.MM.R (lmrob.MM): return #{iterations} back to R level
* R/lmrob.R (summary.lmrob): and make sure they are printed.

2006-06-05 Andreas Ruckstuhl
* R/anova.lmrob.R (anova.lmrob): new, for model comparison both "Wald" and "Deviance" tests.
* man/anova.lmrob.Rd: docu + example

2006-05-31 Martin Maechler
* R/covMcd.R (covMcd): use R's RNG
* R/covMcd.R (print.summary.mcd): is new; also print.mvd and summary.* are now here, and improved.
* src/rfltsreg.f (rfltsreg): use R's RNG * src/rffastmcd.f (rffastmcd): ditto * src/rf-common.f (rfrangen): use unifrnd(), and comment out the previous uniran() * src/R-rng4ftn.c: use R's RNGs also for Fortran, in 2006-05-29 Martin Maechler * src/lmrob.c: use R's unif_rand() instead of C's rand() * R/lmrob.MM.R: set/save R's .Random.seed, possible from 'seed' 2006-05-29 Martin Maechler * DESCRIPTION: version 0.1-7; the last one with C's rand() * R/lmrob.MM.R: it's "$seed" from init.S , not M(M)-estimate 2006-04-25 Martin Maechler * R/biweight-funs.R (lmrob.Psi): unification, rescaling, such that Psi'(0) = 1 2006-04-22 Martin Maechler * R/lmrob.MM.R (lmrob.S): new (control) option 'best.r.s = 2'; was hardcoded in C * src/lmrob.c (R_lmrob_S): 'best_r' is now argument; further, seed_rand is passed to the fast_s*() sub functions, so we can call them from R {and decide ourselves "if fast". * src/rf-common.f: new file for functions identical in rffastcmd.f and rfltsreg.f. 2006-04-21 Martin Maechler * data/NOxEmissions.Rd: add the large dataset from René Locher * man/NOxEmissions.Rd: 2006-04-21 Martin Maechler * R/lmrob.MM.R (lmrob.MM): return robustness weights as 'wt' 2006-04-18 Martin Maechler * R/lmrob.MM.R (lmrob.control): change default for 'compute.rd' to FALSE ==> robust (Mahalanobis) distances are *not* computed by default. This prevents singularity errors which happen frequently as soon as X contains (large) factors (with few levels). 2006-04-15 Martin Maechler * man/pension.Rd: add plots and simple 1st aid trafo (!) * R/glmrobMqle.R (glmrobMqle): weights.on.x = "hat" couldn't have worked(!) - still needs work __TODO__ * R/lmrob.MM.R (lmrob.S, lmrob.MM): simplified computations achieving ~ 10% faster execution for a (n,p) = (500,20) example. 2006-04-01 Martin Maechler * src/lmrob.c: new lmrob() code from Matias' roblm package * R/lmrob.*.R: renamed s/roblm/lmrob/ but also refactored * man/lmrob.*.Rd: and added a bit * TODO: section on lmrob() 2006-03-20 Martin Maechler * DESCRIPTION (Version): 0.1-4 * data/vaso.rda: one value, vaso[32,2], had a typo; it was '0.3' but should have been '0.03' 2006-03-16 Martin Maechler * R/OGK.R (scaleTau2): add consistency correction (for OGK). * man/scaleTau2.Rd: added * R/glmrobMqle.R: ni=0 special casing (not finished yet). * tests/binom-ni-small.R: more testing of ni=0 2006-03-14 Martin Maechler * TODO, Done, DESCRIPTION: updated, ready for release 2006-03-14 Andreas Ruckstuhl * R/anova-glmrob.R (anova.glmrob): new function * man/anova.glmrob.Rd: and documentation, replacing previous modsel.*() 2006-02-24 Martin Maechler * R/print.lts.R: moved to this file (and simplified slightly) to * R/ltsReg.R (ltsReg): more cleanup; fix (y ~ 1) and (y ~ 0) properly 2006-02-23 Martin Maechler * tests/LTS-specials.R: new file 2006-02-22 Martin Maechler * tests/MCD-specials.R: new tests * tests/huber-etc.R: tests for huberM() moved from pkg 'sfsmisc' 2006-02-21 Martin Maechler * R/glmrob.R (summary.glmrob): "Std. Error" (with blank!) 2006-02-18 Martin Maechler * R/glmrobMqle.R (glmrobMqle): make it work for ncoef == 0 * tests/glmrob-specials.R: new tests * R/glmrob.R (print.summary.glmrob): the same as print.glmrob + more 2006-02-17 Martin Maechler * R/glmrob.R (vcov.glmrob): added (print.glmrob): fix typo * tests/binom-ni-small.R: add from Martin's old "robGLM1" package * R/glmrob.R (glmrob): '...' passed to glmrob.control(...) 
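A minimal hedged sketch of the glmrob() usage these 2006 entries introduce (synthetic Poisson data, not from the package tests):

    library(robustbase)
    set.seed(7)
    x <- rnorm(100)
    y <- rpois(100, exp(0.5 + 0.8 * x))
    y[1:3] <- y[1:3] + 30                      # contaminate a few counts
    cbind(glm    = coef(glm   (y ~ x, family = poisson)),
          glmrob = coef(glmrob(y ~ x, family = poisson)))  # robust fit barely moves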
2006-02-17 Valentin Todorov * FIXED - .fastmcd and .fastlts no more return everything * fixed problems in ltsReg in case of location model Y~1 (i.e. x is missing in ltsReg.default()) * ltsReg & covMcd - added options 'best' and 'exact' for nsamp * ltsReg & covMcd - added parameter for controlling the small sample correction factors - use.correction * rrcov.control - added parameter for controlling the small sample correction factors - use.correction * ltsReg & covMcd - output of the consistency and the small sample correction factors 2006-02-09 Martin Maechler * DESCRIPTION (Version): 0.1-2 ready for CRAN "baby release" * man/vaso.Rd: fix longstanding typo: con*s*triction * R/glmrob.R (glmrob): Oops! the *.control() function must only have one "." and must be *called* 2006-02-08 Martin Maechler * R/glmrob.R,......: added 'glmrob' and 'nlrob' from Andreas Ruckstuhl 2006-02-02 Martin Maechler * R/covMcd.R: all moved from 'rrcov' after Valentin's * R/ltsReg.R: "green light" .......... 2006-01-25 Martin Maechler * DESCRIPTION (Version): 0.0-1 "prerelease" ready * man/psi_func-class.Rd: version "checked in" and ready for pre-release * man/chgDefaults-methods.Rd: ditto 2006-01-17 Martin Maechler * NAMESPACE: added name space * R/OGK.R (covOGK): added this; even though, the default scale estimate is *not* consistent * tests/tests-OGK.R: minimal 'test' 2006-01-16 Martin Maechler * man/starsCYG.Rd: clean up of documentation for the Rousseeuw data * man/aircraft.Rd: from Valentin * .... --------------- all these are for those things from 'rrcov' ------------------- 2005-12-28 Valentin Todorov * 0.2-11 * added more data sets from Rousseeuw & Leroy: telef, lactic, pension, pilot, cloud, education, airmay * fixed codoc discrepancies in the data sets stars and wood * ltsReg & covMcd - added control parameter for the small sample correction factors * ltsReg & covMcd - output of the consistency and the small sample correction factors 2005-10-24 Valentin Todorov * 0.2-10 * minor corrections in the help of covPlot * fixed bug in covPlot in case of class=TRUE * tolellipse - both robust and classical ellipse are superimposed * added directory inst/bm containing benchmarks comparing covMcd and ltsReg to the corresponding functions in MASS, S-PLUS and Matlab 2005-10-17 Valentin Todorov * 0.2-9 * minor corrections in the help of covMcd * minor correction in ltsReg (false corrected to FALSE) * covMcd - fixed the limitation on the number of variables <= 50 * ltsReg - fixed the limitation on the number of variables <= 50 * added function summary.mcd which prints (additionally to the output of print.mcd) the correlation matrix (if requested), the eigenvalues of the covariance or correlation matrix and the robust distances. 
* added control object for the estimation options rrcov.control and used in covMcd and ltsReg

2005-09-20 Valentin Todorov
* 0.2-8
* ltsReg: added formula interface
* ltsReg: added generic functions summary.lts and print.summary.lts
* ltsReg: fixed a problem with reordering of the coefficients even in case without intercept

2005-04-16 Valentin Todorov
* 0.2-7
* ltsReg: fixed a bug related to nsamp -> it was hard-coded = 500 in Fortran
* ltsPlot: default for id.n changed - instead of 3, now it is the number of identified outliers
* ltsPlot: help enhanced
* covMcd, covPlot, tolellipse: tol.inv parameter changed to tol, according to the change in mahalanobis() in 2.1.0

2004-12-26 Valentin Todorov
* 0.2-6
* a bug in ltsReg (rsquared) fixed
* fixed CRLF line endings in FORTRAN sources
* fixed a problem in covMcd: in case of p=1 and cov=[0], the cov matrix was a double instead of a matrix, which resulted in errors in subsequent calls (e.g. determinant(mcd$cov) expects a matrix)
* fixed a problem in ltsReg when p==1 and Intercept==FALSE - the vectors of coefficients ans$coefficients and ans$raw.coefficients were of size 2
* error handling added in ltsReg in case of scale=0

2004-09-16 Valentin Todorov
* 0.2-5
* several errors in doc fixed (ltsPlot.Rd, covPlot.Rd, aircraft.Rd)

2004-09-16 Valentin Todorov
* 0.2-4
* added Regression Diagnostic Plots - function ltsPlot()
* ...added Normal QQ Plot of the residuals
* ...added Standardized Residuals versus index plot
* ...added Standardized Residuals versus fitted values plot
* ...added Regression diagnostic plot
* ltsReg: the response variable Y added to the result object
* covMcd: fixed a bug related to nsamp -> it was hard-coded = 500 in Fortran
* covMcd: fixed a bug - in case of alpha=1
* added S3 methods plot.mcd and plot.lts
* the S3 methods print.mcd and print.lts moved to separate R files
* added the stars data set (Hertzsprung-Russell diagram)

2004-07-13 Valentin Todorov
* 0.2-3
* improved documentation of the datasets
* added datasets aircraft and delivery
* added Covariance Plots - function covPlot()
* ...added Distance Plot - function distplot()
* ...added Distance-Distance Plot - function ddplot()
* ...added Chisquare QQ-Plot - function chi2qqplot()
* ...added Tolerance Ellipse Plot - function tolellipse()
* added function print.lts (for ltsReg result); included in the test tlts.R
* added function print.mcd (for covMcd result); included in the test tmcd.R

2004-06-26 Valentin Todorov
* 0.2-2
* fixed bug in Fortran: rfltsreg.f, xrfnbreak()
* Depends >= 1.8 (it was >= 1.9, because of the bug above)
* Parameter, controlling whether to perform intercept adjustment at each step added to ltsReg and its default value set to FALSE

---- robustbase/.Rinstignore ----
inst/doc/Makefile

---- robustbase/data/ ----
yS$sOX|ţŐ|vo7[`0KܵƆUi<= O򔓧.@ ظos_S҇riÖ,5͘ԡD\N1yrobustbase/data/salinity.rda0000644000176200001440000000074611754203337015674 0ustar liggesusers r0b```b`@& `d`aʼn9y% I b40pBPZ g,0p Pq:->M(_ uPӂC͓U|  է PZMց2yA" PX q>H3z`~ČbBy3,El|`9mwpt0sS/JCI }fy" _h:%By:%a$s9I/oWJL96(` u]\V^ gp9 44@쥏L@7M K?x$6G@QFKhG'.8J~bKM-2d0,c(1݌b0AĒD"h9`ւ4S Ɲrobustbase/data/phosphor.rda0000644000176200001440000000044511754203337015676 0ustar liggesusers r0b```b`@& `d`a@\, ͙ 0 F;pA +A*P:քҪPysIy0t& _8XA>n MXAUb vb bY bU b#@匑sb vb[ 6b_ X(H,v'+k^bnj1!T`f^~Q:d&e& rJMII,JI,IK+Z(\f1wL @?lʶrobustbase/data/heart.rda0000644000176200001440000000037011754203337015134 0ustar liggesusers r0b```b`@& `d`aҬE% @Hy\88Aig(;@hm9vҾ 0qOЎPs^Pڐ>ETs|S(|f/ Vb# Vb bT\ՀX=sS p A2R33J`rd{rNj^zI990s`\)%ziE@+Дs R@2robustbase/data/foodstamp.rda0000644000176200001440000000125112137052541016017 0ustar liggesusersA_뢫`(`&b(,L/ff x** &b 8~E$Mwu/cï}ԫ6O)MDđ~RJZl=kvD2 2,0mi~IryB|,myq6Η A,rw\8f5FzIKV')>Ҭ+ͬtI$4*,'Dir)I:;Qq6[K>Ecrrm(N𷱝y7"诂C__-@O{ejEƮWd|yEr{C`<[Ћ ϓk> bY2 %π{#~)֑z~^'9G>ZQؙSC֗3j+]ĭF3bj9ӣdbuԧ,k\@UufQR~C ,(軔+)T}r}_~1t#O"752c|>З Z5k=y/gF9~Rx~ yZ݁874Vݬ\ tyPFmg~3K {ɮ_1w robustbase/data/radarImage.rda0000644000176200001440000004025311754203337016071 0ustar liggesusers xWu$i xc[dْeki3kYd2*$@)J 冡a,Z|aPX!P S(4ko{@?&qyy~ka7V޲휖sN:G{=ԣ9%LL׌ devO8L3X\Ÿ5frumh:빮纁뷚jk;>ϫ:=Qϫ:IWy>=D=+D=3{wbj=k:A=Vp]g#v1wb;EyeiWzurO><Ϲ-/6w,EƎ|NṄc|_;}ϣϕ7=\/~wqse':nη12gk^y<{ǹ+ϟx>XL}ޱ39%Oci=N/VY/^Eo]~ ,Z?4gpHZ~+h,Ҹ,Z?4g"ba k?>~+o~,VX9ֺcw?}=Q.}O<ŏpgv n^v'OPOpO^ ktsŸ,ѿ۶;g۽}>~ڎpW|8/ ?Σ?kSe>՝~+v/Gn#{*k}T{4]/?}W۵}/{]jokռVE.yo:sZ^v4]}KUzlACk_'?f< [Xe9Mr.Kw ^z#~~}]zؿw\ڼJ e>W=HwZx\{U-L=Knμgŧ[Fu鵌Z4Ƽ~gsoH~ ʥ_Ml}knzS"ԛ>$yF' h>sPzk̿SAsߪui׵Eܣv(胫%g$;D|`֪uWicM|M:5;sZVQLV[)>|2LOo;&,>Zү?ۋ^/_)>ƷqK_kи,h9no^i[?0Fz# }:Y5N:g#_M?oWnU#zX>uI Z%zFWܻYϤM{Ї-VMgeٱB䶓qűBVNJJբO{-N+d 9xmXvΆkKt`"Х zw&<'d_m:__~GdZ׆Լ y;_k&Uctߡ`g7ľ/;2 zAl 3(:&Jic9TH {:dl{ UV~fy8z>+ǒV>m@6-zUqT*&e_mCc//|Tv_Մ;yގڴ@x٥:Чq _O#裪IOuםݖNYӺ͛4Yh;r37B뜵sk>iأ&=zCleg_'ycs~?>qH DmpSp#o$}2etK?٤)KET}I8E8u{IDvS t2α7oc7Zi-%p%$?A^oÞ/z3u=}Xz9a&oaOᷚ_p?/?Zểz~|@#?҈Fnl}t_hE& _71Ǚ3I/ID6XD^A^hceOjOm\ U,_O`/uv:%sM9=d{0Ì^{ۄ}pmZo,5mN;<iƿA/g_Ηl_S؝tֱgG$6o{7~/n$5GSO7|\uFI3ⷋσ׀=e'A;yY8H;}[o-_ h6IeثȻ^~w)pz(4| G5 c4W~~@W}HtE_<ώOGK/5o>3_8=U7U^Lo/T8ȣ͌{\1]=7)B|'fPfnc/6jC=j`o2~s -}?BnGGkd 8 8ZZ:Սz?Sğb7!xGE q?3acL I#S4 }_%> :$r_{/t7@S`>/FFdk7h_yHm+z!UޯF/Z5|zEOn0xauFO6ފcI+/ZG}_p+g깹CQ%!xV%T١aU0~ v>>8xy b(~H:=Ynb|M ?OUW~X~NL zWtqǧ^OqG^zv-t"cCm|u#EjA?86pA//\Q& ɭKCl Csw}o[pK'y= rt~nǺGwj}|6%qqo:q0$r|m<-}eر@?H- qv(n<.M}ApI5#aBu-?/u%q-8Yx{By ?H﫮w >x5#9QIbc=Mۻ_eӷw9^4v]NwbwgbŁm6=˞b'q]:[qNޭ;|?mEp8e[>cw&9kГ.ޢO\z 'q|iTv>a7|{=%=6R?Xa1o"^MWIBOʉtwয়&]B3ol/%b<ߓ^"?aeo׉LC+Zř;u] }.Az>>/Tce/Ƿ xu:ċ}~AM~~d!.:4>?\O ?}ee?uj/ge 2fA2?/>ۯ|؇OQsK#lͣ}Ս _6H ["ݒ+N#gqp4 뵮K_~G{eGl`OD75s쥍 =; 7w`/C=JTBϼ;H%oNjvWZӬWՃ\bGɧb'&|v:ڣo^Bz>zqm ~WnR5nG`❶<{!J/6=!{֍7S҇|J[@c/[[ÌW8uQ.(+gUOcGnܻ[b-w[9 Ğ cIz?qȨ?)K|=ҧ%<: ߆ߋh/[-t1Rғǭ]텞>>Ix_"^W("=*etK$|`'3{5]eqt1N(=mz*{?D=ap]S y~oAAnv/`apA}Fan'^ׇ8\zi|WrcSqb?THO{^g"Il҅rR?~Υ6-a>!4:re/KdB~=҃Y&Y$~Gу$YĹcʛj|avmc&.(*}=S^ys>naÏC-p6'\f1ggf&.s "Mtj':akI/υɧ v*ͶAv:Wzև$wP@BKce?A>!nlm!yN/ (\v+˿^1N_xkXuCLw>8Fu趃8 œmiր)_ڰ Cm^ O9aŌB4KH0ocݤ v6xCߧ\lBy?D́ߪV}@rۀEoXuG/1+6_ gʻ5wi^'U_OS?x.At1#_;GvW߭xj>:5諐?]a>ĩ"~mɳF~kGcz_NA!?G_M+C~k ,o$mCq8rk,?|qod6k|߅8rCR[6'b;7^+O3!X'H!uJW,]H}ܘO>Nr$e# Px_#}U~_gsgKg`/|K~gIpFwg)~ n *ĕgڷp_>mgQ ij2y>!ܷ<94S3Zq;[Lc'TW pC񂀻CJa72 Lf嗍zKE_CK/&ݬ(wB=| w1{vsPu9l%[>nw~ѺTR7Bf#?ǎn}-ac#uAW/U^A<67z~Ϧ6J\bGSOOѿ@<bǯ@ϡ6`bIYcN*WF4zi} ?x@&Q|#? 
"K>~i:IaMssw3_;h={ĵ=_?PW@z.QO/ElD.+Y>:m:Njk U=]IeK[Bg>m/vx0z-?6;ǎ#y qba&= 6sCfކ~no qIu{֋l.z'ףWs]&D/Ğ%ƻ_O/$?= ~LK{/S⍙CV:]1O/ .c_`9EiF=FC[_&޸;u.M4o"-ؑ#yn'iO,8[O˔aRz5>+K])K ^$~gr=KX7t^>Y]2u !i s™7r. A;GjrEB#A|l; vQwUD^Kl"R7_zc}"_+}W8"Gvh+9w䫚4 |XD٧J?/˿kx~j.I8 EҺt]@ќ3鱔~NaG1Q~cY̗:ڰ܃>CyԃSea$fG7^~/=]Եx3Ԩ9 u׭Uj[W[v_<^:FV*+~B?X%b\>*=Lg{Ruь; 6"=`w[~!cvyu`]Nk /S>Nߣk'k~3~7ocص}蛰ϻA׃>-N ~p ;z^2!7ߟ6FcBڡJ[_,\쇄3Pc_:rr{눜ȏR?ξs)x=/#hlS}Oz|pŭg >~T6-I㑛EY=佮'VJr/j>]e¹jA6w!,z-|Mp ɱ?ą+{_c3F]k4rsQ>&|jeq/wQ\xqzMk}.~@쳓'M@Cey}Kp~e.q]YuLE v nɥŰἴ@y-x0v^CҳxB&8:>!y 6^pwPF]f4ziqFoCϰڑ|#> FoPt7ןj%ҧvi,ІFQ]艐'܋^n:Pתшƛ>^ ~D4˨{5p6SP2>Ƣ1B~Yzo=䓜?Aᰏ'/x(??z=yip~ ,QO?L?48ǃ 8C8tt]u0~LU.-;dj߄-SQ*=*ɫw+%d3;-n_qmo6=ac  Ky=x H]^⟁¾&Q%]B8F.DĽsEv>W/W}]۬u./e*\V^:ط!?˾[dA.P([_ؿj]!E DVg຾sI9N{~Jn<ÝdOݾ;0 }Q'+y?fs86{bЇ},3=PuDriAgjCP//8:_#uq`ȿ|L;:ҢyxH3U&I/S;u@o'>YϬ!a8ۓGLK{? /Q EyRPK˜WBp>@ka>\z 8ˉWsKz_ov/Jkg}>J9+Fݍ87y;>xZP|!/K+t'_؉pI).u,S_/ YoXrZ>@ F߂,?gSaߌ`|o0OragG}JF[>:u5~ uU`6>>PFkw^zs A쪳y 3עO:Qs.KX!\׾ >bU!~Ӎy-\/V?OCǃĻ_}+yC؃Y+;,}/G#I~O؃-^xMrjZvhuY {<y9is.W߼\xȇy94 {Ͽ/־BǟF%s\!q+_,`+йeğɟ:Ub =usŅo2Ǵ+- }8r@|}%8e>\ڪ]{y~9増n:Z bd>=uݡ)?ETm ?Ġc| *Kv}o‘)~N6.6=?ON'h>R~W?%C?sAY?J}%Wb߄#ga̪Q|҅|v//(N㊷eX+zͰọ:&Ϝc1ݵZ+m'86=/ q5ut~5އ^P()u&=i ;=;#`q둛&n5mŲ׶-Y(~i]=y Z]tq3{TuS&p߆oPw8bpTUIXNܦyzGTzqC-'~?5b _E[vˠ[ YL!ʾR)Z;}@I3B?ԙVO>g 'cw?e믦1ܽTx7*2Y!Βd^㌞[MozN,8;CM9VqE؇Y~ډ> [GYIZYO y| m^僣(/(d%ԣSSٖ#d &r3-չ^Wf*.e¾𻗓_zW {5qoݖUb7I{%s ݵп{xx~F7S}Z?TEw-yfJpdùM~:pMu4{kxg n PNz;$9d߶;_X| s:mwXx^ 9▜g)Ι9'lu&,J>uHnM>x% M fJ0=?O~:f-~zw7YMiS>trNxa_:/kcNC| ^BtZf[ ywѾa/G7ha^|swAl^\coU?_8( KWOsj^CULݗ)yͪs.U_d+rB_sqaײ!nLw.?`瑧ݲ_E8g_ʠѳa_Jp`5~;[tRi clBt 0jgP/^?~ODzR࿾< )@:^_S 8੫QɋwG#ԅ9 YPgUG< d |]F^#F 1s~|/*$?\CEoLG7-)oiwr{#q(*ؽBE( ukM$}p~9~εl Ipx">GPmK$^wumV!%.8.rs|w38s(3%0.z:˲!yIޢ ?{.*eį GM6gM|9+ރ!:F=zޗ}`QóeO|8> *#^vUio9$_-q֨mg\0}=NemޗN("UWg^B嚵NˉÏӿ閝)cv`ĺ'WN9@ ?g$*iss9)"9ح7ׁC'u߶ or&=<z$/G`G.Y&[;wx@ EM,}~ʯ&* ? ql>٠yGsCuOعj+?݆%7C^M‰sgʏU79zJ m_=ϛWi|nu,GϬDcn=b Wc+N<[uɡ =u==r+jK+ H?pq<4I#KA͊ x/t+!|ܓ/|lZj]׆`}b;8 h3u$i,{Sy~Ces:! ރGؽR?:k<7Ox?V;g/g:5Z䡒̫F<@~yNC承Үku^ܷxOjݫfƟ$.2L6[IR|C']!g_hR= 9EG7f bwύ>+C--|\X~nZT 7L=D#r^@o~ƱQ#;9[ q$j| 6ZE.@2TLrL|慊'xҟZ՝Ƚ+^]oc%ĝ~Ok!Ợ/(oMؗbd_^L!Ⱥ$r=q5Fɲ?`A$#?⇦vϺX} G}<^B3γTYBmٷזz$fWb֯L[9μo"OyHio.\aM؟pp=oϧZ+WR%{ANE/&>=" sO㉉|8Yu\שЇ=Fmu+w_o8| ^?^E ބsy9"7g}rhOCCץ ō}ٍ*.!@__9` c{=7׬Jy:B\pNOjX=)?ʪ/Q?m1u™LԀcs/{z.^+^ ľ~xߴ?yXj1&yOcv'/[T),=!m˨x V1$8 u7qxx$}R-b+U=mBARu/~Jg _D>BɫzVG~Ht?:6N?Q|/$L V:ԣuzG3% |-})gߒmO==qhKx;uwog&&|xNlϖ飿?z¿robustbase/data/starsCYG.rda0000644000176200001440000000065111754203337015532 0ustar liggesuserseS;LA]nNmI&l!\p$&PSk555ZXQSc,vvf̛O|Z9*c,9y&5KԛQ7D'cIWxo8*eek>惽1827;3xʓQOhۄ/&p&|6yW`'.2X?MoJ ;4 78cSOLʦu -Z;JiC>Ju~5Mr4'%h's9u- GMbur̳C/(4 > M{OIץ2?]x[?ǯsjkhռi\w㱗0XrZWa7,;2m DX.R}ɼrobustbase/data/aircraft.rda0000644000176200001440000000106311754203337015624 0ustar liggesusers]S=hSQ晚:$h Ƙц3XRS>Rb5w 8 ݄(TAG x>;pιc+9b0`y0x! ]ZoⱇG'BRJkܾ#z'=AR>V^U~DAq͊?/Q2/oIp%٧q7U=C+ xסzQVէGDBKH@ 8#@0:Yd c @~ ?a`s=VƁkD,xtsnh~N,'?95'G97`6+]ƥ8o>pf}G_3K/M:0'-\~/E ԡxϯy}%ϩQKKYԳ(yήq&37$/ oE >2cDUЗso=t["OYW/Ͱq> csk;eNZ3M nej6ԍn@ܜ!ΰh|c-?@ robustbase/data/CrohnD.rda0000644000176200001440000000324111045055460015201 0ustar liggesusers[PUc匾4ZBR:c1hKek&+"*^RǮ㳥iNLC/v!gz2{=g]4z?~sTɘ!p gC8]8!tC EBF`fn˄( '(?BT6` Z6M4xς( j"P !@s@9<(D,x AB,FL0ƂqB,M ~p )ĉ;=Okځv/@Ww'݉zw]+U^M.tAK`ZH!$xΩ'CĵZʾr웱Fc1){ﰭ%n64\uLaߴ6i )\35H0F{. 
robustbase/data/ambientNOxCH.rda
robustbase/data/steamUse.rda
robustbase/data/epilepsy.rda
robustbase/data/cushny.R
cushny <- c(0, 0.8, 1, 1.2, 1.3, 1.3, 1.4, 1.8, 2.4, 4.6)
robustbase/data/wood.rda
robustbase/data/education.rda
robustbase/data/SiegelsEx.R
SiegelsEx <- data.frame(x = c(-4:3, 12), y = c(rep(0,6), -5,5,1))
robustbase/data/delivery.rda
robustbase/data/NOxEmissions.rda
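The two small scripts above (cushny.R, SiegelsEx.R) and the .rda files listed here ship as datasets of the package, so with robustbase installed they can be loaded with data(). A brief sketch (output not shown); the particular summary functions chosen here are only an illustration:

library(robustbase)
data(cushny)                   # defined in data/cushny.R above
data(SiegelsEx)                # defined in data/SiegelsEx.R above
median(cushny); mad(cushny)    # classical location and scale
huberM(cushny)$mu              # robust Huber M-estimate of location
Qn(cushny)                     # robust scale estimate
plot(y ~ x, data = SiegelsEx)  # Siegel's exact-fit example data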
[binary payload of compressed R data (*.rda) archives omitted -- not recoverable as text]
robustbase/data/exAM.tab0000644000176200001440000000016610353034413014655 0ustar liggesusers
 x y
1 3 2
2 4 4
3 5 6
4 7 7
5 7 8
6 5 8
7 4 8
8 5 9
9 6 11
10 8 13
11 8 15
12 14 6
robustbase/data/hbk.rda0000644000176200001440000000134311754203337014576 0ustar liggesusers
[binary .rda payload omitted]
robustbase/data/Animals2.tab0000644000176200001440000000267110353034413015474 0ustar liggesusers
"body" "brain"
"Mountain beaver" 1.35 8.1
"Cow" 465 423
"Grey wolf" 36.33 119.5
"Goat" 27.66 115
"Guinea pig" 1.04 5.5
"Dipliodocus" 11700 50
"Asian elephant" 2547 4603
"Donkey" 187.1 419
"Horse" 521 655
"Potar monkey" 10 115
"Cat" 3.3 25.6
"Giraffe" 529 680
"Gorilla" 207 406
"Human" 62 1320
"African elephant" 6654 5712
"Triceratops" 9400 70
"Rhesus monkey" 6.8 179
"Kangaroo" 35 56
"Golden hamster" 0.12 1
"Mouse" 0.023 0.4
"Rabbit" 2.5 12.1
"Sheep" 55.5 175
"Jaguar" 100 157
"Chimpanzee" 52.16 440
"Rat" 0.28 1.9
"Brachiosaurus" 87000 154.5
"Mole" 0.122 3
"Pig" 192 180
"Artic fox" 3.385 44.5
"Owl monkey" 0.48 15.5
"Roe deer" 14.83 98.2
"Verbet" 4.19 58
"Chinchilla" 0.425 6.4
"Ground squirrel" 0.101 4
"Artic ground squirrel" 0.92 5.7
"African giant pouched rat" 1 6.6
"Lesser short-tailed shrew" 0.005 0.14
"Star-nosed mole" 0.06 1
"Nine-banded armadillo" 3.5 10.8
"Tree hyrax" 2 12.3
"N.A. opossum" 1.7 6.3
"Big brown bat" 0.023 0.3
"European hedgehog" 0.785 3.5
"Galago" 0.2 5
"Genet" 1.41 17.5
"Grey seal" 85 325
"Rock hyrax-a" 0.75 12.3
"Water opossum" 3.5 3.9
"Yellow-bellied marmot" 4.05 17
"Little brown bat" 0.01 0.25
"Slow loris" 1.4 12.5
"Okapi" 250 490
"Baboon" 10.55 179.5
"Desert hedgehog" 0.55 2.4
"Giant armadillo" 60 81
"Rock hyrax-b" 3.6 21
"Raccoon" 4.288 39.2
"E. American mole" 0.075 1.2
"Musk shrew" 0.048 0.33
"Echidna" 3 25
"Brazilian tapir" 160 169
"Tenrec" 0.9 2.6
"Phalanger" 1.62 11.4
"Tree shrew" 0.104 2.5
"Red fox" 4.235 50.4
robustbase/data/bushfire.rda0000644000176200001440000000075111754203337015643 0ustar liggesusers
[binary .rda payload omitted]
[further binary .rda archives with garbled headers omitted]
robustbase/data/pilot.rda0000644000176200001440000000031711754203337015161 0ustar liggesusers
[binary .rda payload omitted]
robustbase/data/condroz.rda0000644000176200001440000000247311754203337015515 0ustar liggesusers
[binary .rda payload omitted]
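[Note: the *.rda archives listed here are binary R data files.  As a minimal sketch (not part of the archive itself), in an R session such data sets are loaded by name, e.g.]
  data(hbk,      package = "robustbase")  # regression data with known outliers
  data(Animals2, package = "robustbase")  # body and brain weights
  str(hbk)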
robustbase/data/carrots.rda0000644000176200001440000000054011754203337015505 0ustar liggesusers
[binary .rda payload omitted]
robustbase/data/cloud.rda0000644000176200001440000000037311754203337015142 0ustar liggesusers
[binary .rda payload omitted]
robustbase/data/coleman.rda0000644000176200001440000000120011754203337015440 0ustar liggesusers
[binary .rda payload omitted]
robustbase/data/alcohol.rda0000644000176200001440000000254711754203337015460 0ustar liggesusers
[binary .rda payload omitted]
robustbase/data/possumDiv.rda0000644000176200001440000000333011754203337016021 0ustar liggesusers
[binary .rda payload omitted]
robustbase/data/kootenay.tab0000644000176200001440000000032510647075756015676 0ustar liggesusers
 Libby Newgate
1931 27.1 19.7
1932 20.9 18.0
1933 33.4 26.1
1934 77.6 15.7
1935 37.0 26.1
1936 21.6 19.9
1937 17.6 15.7
1938 35.1 27.6
1939 32.6 24.9
1940 26.0 23.4
1941 27.6 23.1
1942 38.7 31.3
1943 27.8 23.8
robustbase/man/0000755000176200001440000000000014555212560013203 5ustar liggesusers
robustbase/man/nlrob-algos.Rd0000644000176200001440000001571413441022761015714 0ustar liggesusers
\name{nlrob-algorithms}
\alias{nlrob.algorithms}
\alias{nlrob.MM}
\alias{nlrob.tau}
\alias{nlrob.CM}
\alias{nlrob.mtl}
\title{MM-, Tau-, CM-, and MTL- Estimators for Nonlinear Robust Regression}
\description{
  \describe{
    \item{"MM":}{Compute an MM-estimator for nonlinear robust (constrained) regression.}
    \item{"tau":}{Compute a Tau-estimator for nonlinear robust (constrained) regression.}
    \item{"CM":}{Compute a \dQuote{Constrained M} (=: CM) estimator for
nonlinear robust (constrained) regression.} \item{"MTL":}{Compute a \dQuote{Maximum Trimmed Likelihood} (=: MTL) estimator for nonlinear robust (constrained) regression.} } } \usage{ ## You can *not* call the nlrob(*, method = ) like this ==> see help(nlrob) ## ------- ===== ------------------------------------------ nlrob.MM(formula, data, lower, upper, tol = 1e-06, psi = c("bisquare", "lqq", "optimal", "hampel"), init = c("S", "lts"), ctrl = nlrob.control("MM", psi = psi, init = init, fnscale = NULL, tuning.chi.scale = .psi.conv.cc(psi, .Mchi.tuning.defaults[[psi]]), tuning.psi.M = .psi.conv.cc(psi, .Mpsi.tuning.defaults[[psi]]), optim.control = list(), optArgs = list(...)), ...) nlrob.tau(formula, data, lower, upper, tol = 1e-06, psi = c("bisquare", "optimal"), ctrl = nlrob.control("tau", psi = psi, fnscale = NULL, tuning.chi.scale = NULL, tuning.chi.tau = NULL, optArgs = list(...)), ...) nlrob.CM(formula, data, lower, upper, tol = 1e-06, psi = c("bisquare", "lqq", "welsh", "optimal", "hampel", "ggw"), ctrl = nlrob.control("CM", psi = psi, fnscale = NULL, tuning.chi = NULL, optArgs = list(...)), ...) nlrob.mtl(formula, data, lower, upper, tol = 1e-06, ctrl = nlrob.control("mtl", cutoff = 2.5, optArgs = list(...)), ...) } \arguments{ \item{formula}{nonlinear regression \code{\link{formula}}, using both variable names from \code{data} and parameter names from either \code{lower} or \code{upper}.} \item{data}{data to be used, a \code{\link{data.frame}}} \item{lower, upper}{bounds aka \dQuote{box constraints} for all the parameters, in the case "CM" and "mtl" these must include the error standard deviation as \code{"sigma"}, see \code{\link{nlrob}()} about its \code{\link{names}}, etc. Note that one of these two must be a properly \dQuote{named}, e.g., \code{names(lower)} being a \code{\link{character}} vector of parameter names (used in \code{formula} above). } \item{tol}{numerical convergence tolerance.} \item{psi, init}{see \code{\link{nlrob.control}}.} \item{ctrl}{a \code{\link{list}}, typically the result of a call to \code{\link{nlrob.control}}.} \item{tuning.psi.M}{..}% FIXME \item{optim.control}{..}% FIXME \item{optArgs}{a \code{\link{list}} of optional arguments for optimization, e.g., \code{trace = TRUE}, passed to to the optimizer, which currently must be \code{\link[DEoptimR]{JDEoptim}(.)}.} \item{...}{alternative way to pass the \code{optArgs} above.} } \value{ an \R object of \code{\link{class}} \code{"nlrob."}, basically a list with components %% FIXME } \details{ Copyright 2013, Eduardo L. T. Conceicao. Available under the GPL (>= 2) Currently, all four methods use \code{\link[DEoptimR]{JDEoptim}()} from \CRANpkg{DEoptimR}, which subsamples using \code{\link{sample}()}. From \R version 3.6.0, \code{\link{sample}} depends on \code{\link{RNGkind}(*, sample.kind)}, such that exact reproducibility of results from \R versions 3.5.3 and earlier requires setting \code{\link{RNGversion}("3.5.0")}. In any case, do use \code{\link{set.seed}()} additionally for reproducibility! } \author{ Eduardo L. T. Conceicao; compatibility (to \code{\link{nlrob}}) tweaks and generalizations, inference, by Martin Maechler. } \source{ For \code{"MTL"}: Maronna, Ricardo A., Martin, R. Douglas, and Yohai, Victor J. (2006). \emph{Robust Statistics: Theory and Methods} Wiley, Chichester, p. 133. } \references{ \describe{ \item{"MM":}{ Yohai, V.J. (1987) High breakdown-point and high efficiency robust estimates for regression. \emph{The Annals of Statistics} \bold{15}, 642--656. 
} \item{"tau":}{ Yohai, V.J., and Zamar, R.H. (1988). High breakdown-point estimates of regression by means of the minimization of an efficient scale. \emph{Journal of the American Statistical Association} \bold{83}, 406--413. } \item{"CM":}{ Mendes, B.V.M., and Tyler, D.E. (1996) Constrained M-estimation for regression. In: \emph{Robust Statistics, Data Analysis and Computer Intensive Methods}, Lecture Notes in Statistics 109, Springer, New York, 299--320. %% not yet -- e.g. tuning constants for Welsh: %% Edlund, O. and Ekblom, H. (2005) %% Computing the constrained M-estimates for regression. %% Computational Statistics Data Analysis \bold{49}(1): 19--32. } \item{"MTL":}{ Hadi, Ali S., and Luceno, Alberto (1997). Maximum trimmed likelihood estimators: a unified approach, examples, and algorithms. Computational Statistics & Data Analysis \bold{25}, 251--272. Gervini, Daniel, and Yohai, Victor J. (2002). A class of robust and fully efficient regression estimators. The Annals of Statistics \bold{30}, 583--616. } }%describe } \examples{%% for more, --> ../tests/nlregrob-tst.R DNase1 <- DNase[DNase$Run == 1,] form <- density ~ Asym/(1 + exp(( xmid -log(conc) )/scal )) pnms <- c("Asym", "xmid", "scal") set.seed(47) # as these by default use randomized optimization: fMM <- robustbase:::nlrob.MM(form, data = DNase1, lower = setNames(c(0,0,0), pnms), upper = 3, ## call to nlrob.control to pass 'optim.control': ctrl = nlrob.control("MM", optim.control = list(trace = 1), optArgs = list(trace = TRUE))) ## The same via nlrob() {recommended; same random seed to necessarily give the same}: set.seed(47) gMM <- nlrob(form, data = DNase1, method = "MM", lower = setNames(c(0,0,0), pnms), upper = 3, trace = TRUE) gMM summary(gMM) ## and they are the same {apart from 'call' and 'ctrl' and new stuff in gMM}: ni <- names(fMM); ni <- ni[is.na(match(ni, c("call","ctrl")))] stopifnot(all.equal(fMM[ni], gMM[ni])) \dontshow{ if(doExtras <- robustbase:::doExtras()) { gtau <- nlrob(form, data = DNase1, method = "tau", lower = setNames(c(0,0,0), pnms), upper = 3, trace = TRUE) ## these two have "sigma" also as parameter : psNms <- c(pnms, "sigma") gCM <- nlrob(form, data = DNase1, method = "CM", lower = setNames(c(0,0,0,0), psNms), upper = 3, trace = TRUE) gmtl <- nlrob(form, data = DNase1, method = "mtl", lower = setNames(c(0,0,0,0), psNms), upper = 3, trace = TRUE) stopifnot(identical(sapply(list(gMM, gCM, gmtl), estimethod), c("MM", "CM", "mtl"))) }% doExtras }% dontshow } \keyword{robust} \keyword{regression} \keyword{nonlinear} robustbase/man/lmrob.M.S.Rd0000644000176200001440000000715014432641147015205 0ustar liggesusers\name{lmrob.M.S} \alias{lmrob.M.S} \title{ M-S regression estimators } \description{ Computes an M-S-estimator for linear regression using the \dQuote{M-S} algorithm. } \usage{ lmrob.M.S(x, y, control, mf, split = splitFrame(mf, x, control$split.type)) } \arguments{ \item{x}{numeric matrix (a \code{\link{model.matrix}}) of the predictors.} \item{y}{numeric vector for the response } \item{control}{\code{\link{list}} as returned by \code{\link{lmrob.control}}.} \item{mf}{a model frame as returned by \code{\link{model.frame}}.} \item{split}{(optional) list as returned by \code{\link{splitFrame}}.} } \details{ This function is used by \code{\link{lmrob}} and not intended to be used on its own (because an M-S-estimator has too low efficiency \sQuote{on its own}). An M-S estimator is a combination of an S-estimator for the continuous variables and an L1-estimator (i.e. 
an M-estimator with \eqn{\psi(t) = sign(t)}) for the categorical variables.  The S-estimator is estimated using a subsampling algorithm.

  If the model includes interactions between categorical (\code{\link{factor}}) and continuous variables, the subsampling algorithm might fail.  In this case, one can choose to assign the interaction to the categorical side of variables rather than to the continuous side.  This can be accomplished via the control argument \code{split.type} or by specifying \code{split}, see \code{\link{splitFrame}}.

  Note that the return status \code{converged} does not refer to the actual convergence status.  The algorithm used does not guarantee convergence and thus true convergence is almost never reached.  This is, however, not a problem if the estimate is only used as the initial estimate of an MM or SMDM estimate.

  The algorithm sometimes produces the warning message \dQuote{Skipping design matrix equilibration (dgeequ): row ?? is exactly zero.}.  This is just an artifact of the algorithm and can be ignored safely.
}
\value{
  A list with components
  \item{coefficients}{numeric vector (length \eqn{p}) of M-S-regression coefficient estimates.}
  \item{scale}{the M-S-scale residual estimate}
  \item{residuals}{numeric vector (length \eqn{n}) of the residuals.}
  \item{rweights}{numeric vector (length \eqn{n}) of the robustness weights.}
  \item{control}{the same list as the \code{control} argument.}
  \item{converged}{Convergence status (always \code{TRUE}), needed for \code{\link{lmrob.fit}}.}
  \item{descent.cov}{\code{\link{logical}} with the true \code{m_s_descent} convergence status.}
}
\references{
  Maronna, R. A., and Yohai, V. J. (2000).
  Robust regression with both continuous and categorical predictors.
  \emph{Journal of Statistical Planning and Inference} \bold{89}, 197--214.
}
\author{ Manuel Koller }
\seealso{
  \code{\link{lmrob}}; for a description of the available split types, see \code{\link{splitFrame}}.

  \code{\link[robust]{lmRob}} in package \CRANpkg{robust} uses a version of the M-S algorithm automatically when the formula contains factors.  Our version, however, follows Maronna and Yohai (2000) more closely.
}
\examples{
data(education)
education <- within(education, Region <- factor(Region))
flm <- lm(Y ~ Region + X1 + X2 + X3, education)
x <- model.matrix(flm)
y <- education$Y # == model.response(model.frame(flm))
set.seed(17)
f.MS <- lmrob.M.S(x, y, control = lmrob.control(), mf = model.frame(flm))
## The typical use of the "M-S" estimator -- as initial estimate :
fmMS <- lmrob(Y ~ Region + X1 + X2 + X3, education, init = "M-S")
}
\keyword{ M-S }
\keyword{ robust }
\keyword{ regression }
robustbase/man/summary.lts.Rd0000644000176200001440000000657110607131625015775 0ustar liggesusers
\name{summary.lts}
\alias{summary.lts}
\alias{print.summary.lts}
\title{Summary Method for LTS objects}
\usage{
\method{summary}{lts}(object, correlation = FALSE, \dots)
\method{print}{summary.lts}(x, digits = max(3, getOption("digits") - 3),
      signif.stars = getOption("show.signif.stars"), \dots)
}
\arguments{
  \item{object}{an object of class \code{"lts"}, usually, a result of a call to \code{\link{ltsReg}}.}
  \item{correlation}{logical; if \code{TRUE}, the correlation matrix of the estimated parameters is returned and printed.}
  \item{x}{an object of class \code{"summary.lts"}, usually, a result of a call to \code{summary.lts}.}
  \item{digits}{the number of significant digits to use when printing.}
  \item{signif.stars}{logical indicating if \dQuote{significance stars} should be printed, see \code{\link{printCoefmat}}.}
  \item{\dots}{further arguments passed to or from other methods.}
}
\description{
  \code{summary} method for class \code{"lts"}.
}
\details{
  These functions compute and print summary statistics for weighted least squares estimates with weights based on LTS estimates.  Therefore the statistics are similar to those for LS but all terms are multiplied by the corresponding weight.

  Correlations are printed to two decimal places: to see the actual correlations print \code{summary(object)$correlation} directly.
}
\value{
  The function \code{summary.lts} computes and returns a list of summary statistics of the fitted linear model given in \code{object}, using the components of this object (list elements).
  \item{residuals}{the residuals - a vector like the response \code{y} containing the residuals from the weighted least squares regression.}
  \item{coefficients}{a \eqn{p \times 4}{p x 4} matrix with columns for the estimated coefficient, its standard error, t-statistic and corresponding (two-sided) p-value.
}
  \item{sigma}{the estimated scale of the reweighted residuals
    \deqn{\hat\sigma^2 = \frac{1}{n-p}\sum_i{R_i^2},}{%
          sigma^2 = 1/(n-p) Sum(R[i]^2),}
    where \eqn{R_i}{R[i]} is the \eqn{i}-th residual, \code{residuals[i]}.}
  \item{df}{degrees of freedom, a 3-vector \eqn{(p, n-p, p*)}, the last being the number of non-aliased coefficients.}
  \item{fstatistic}{(for models including non-intercept terms) a 3-vector with the value of the F-statistic with its numerator and denominator degrees of freedom.}
  \item{r.squared}{\eqn{R^2}, the \dQuote{fraction of variance explained by the model},
    \deqn{R^2 = 1 - \frac{\sum_i{R_i^2}}{\sum_i(y_i- y^*)^2},}{%
          R^2 = 1 - Sum(R[i]^2) / Sum((y[i]- y*)^2),}
    where \eqn{y^*}{y*} is the mean of \eqn{y_i}{y[i]} if there is an intercept and zero otherwise.}
  \item{adj.r.squared}{the above \eqn{R^2} statistic \dQuote{\emph{adjusted}}, penalizing for higher \eqn{p}.}
  \item{cov.unscaled}{a \eqn{p \times p}{p x p} matrix of (unscaled) covariances of the \eqn{\hat\beta_j}{coef[j]}, \eqn{j=1, \dots, p}.}
  \item{correlation}{the correlation matrix corresponding to the above \code{cov.unscaled}, if \code{correlation = TRUE} is specified.}
}
\seealso{
  \code{\link{ltsReg}}; the generic \code{\link{summary}}.
}
\examples{
data(Animals2)
ltsA <- ltsReg(log(brain) ~ log(body), data = Animals2)
(slts <- summary(ltsA))
## non-default options for printing the summary:
print(slts, digits = 5, signif.stars = FALSE)
}
\keyword{regression}
\keyword{robust}
robustbase/man/summary.glmrob.Rd0000644000176200001440000000475411721663343016453 0ustar liggesusers
\name{summary.glmrob}
\alias{summary.glmrob}
\alias{vcov.glmrob}
\alias{print.summary.glmrob}
\title{Summarizing Robust Fits of Generalized Linear Models}
\description{
  The \code{summary} method for class \code{"\link{glmrob}"} summarizes robust fits of (currently only discrete) generalized linear models.
}
\usage{
\method{summary}{glmrob}(object, correlation = FALSE, symbolic.cor = FALSE, \dots)
\method{vcov}{glmrob}(object, \dots)
\method{print}{summary.glmrob}(x, digits = max(3, getOption("digits") - 3),
      symbolic.cor = x$symbolic.cor,
      signif.stars = getOption("show.signif.stars"), \dots)
}
\arguments{
  \item{object}{an object of class \code{"glmrob"}, usually, a result of a call to \code{\link{glmrob}}.}
  \item{correlation}{logical; if \code{TRUE}, the correlation matrix of the estimated parameters is returned and printed.}
  \item{symbolic.cor}{logical. If \code{TRUE}, print the correlations in a symbolic form (see \code{\link{symnum}}) rather than as numbers.}
  \item{\dots}{further arguments passed to or from other methods.}
  \item{x}{an object of class \code{"summary.glmrob"}.}
  \item{digits}{the number of digits to use for printing.}
  \item{signif.stars}{logical indicating if the P-values should be visualized by so called \dQuote{significance stars}.}
}
\details{
  \code{\link{summary.glmrob}} returns an object of \code{\link{class}} \code{"summary.glmrob"}.  Its \code{\link{print}()} method tries to be smart about formatting the coefficients, standard errors, etc., and gives \dQuote{significance stars} if \code{signif.stars} is \code{TRUE} (as per default when \code{\link{options}} were not changed).
}
\value{
  The function \code{\link{summary.glmrob}} computes and returns a list of summary statistics of the robustly fitted linear model given in \code{object}.  The following elements are in the list:
  \item{...}{ FIXME }
  %% MM thinks that summary should possibly just *add* to the original
  %% object.
} %%\references{ ~put references to the literature/web site here ~ } \author{Andreas Ruckstuhl} %%\note{ ~~further notes~~ } %% \seealso{\code{\link{glmrob}}; the generic \code{\link{summary}} and also \code{\link[stats]{summary.glm}}. } \examples{ data(epilepsy) Rmod <- glmrob(Ysum ~ Age10 + Base4*Trt, family = poisson, data = epilepsy, method= "Mqle") ss <- summary(Rmod) ss ## calls print.summary.glmrob() str(ss) ## internal STRucture of summary object } \keyword{regression} \keyword{nonlinear} \keyword{robust} robustbase/man/phosphor.Rd0000644000176200001440000000166513312375575015352 0ustar liggesusers\name{phosphor} \alias{phosphor} \docType{data} \title{Phosphorus Content Data} \description{ This dataset investigates the effect from inorganic and organic Phosphorus in the soil upon the phosphorus content of the corn grown in this soil, from Prescott (1975). } \usage{data(phosphor, package="robustbase")} \format{ A data frame with 18 observations on the following 3 variables. \describe{ \item{\code{inorg}}{Inorganic soil Phosphorus} \item{\code{organic}}{Organic soil Phosphorus} \item{\code{plant}}{Plant Phosphorus content} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection.} Wiley, p.156, table 24. } \examples{ data(phosphor) plot(phosphor) summary(lm.phosphor <- lm(plant ~ ., data = phosphor)) summary(lts.phosphor <- ltsReg(plant ~ ., data = phosphor)) phosphor.x <- data.matrix(phosphor[, 1:2]) cPh <- covMcd(phosphor.x) plot(cPh, "dd") } \keyword{datasets} robustbase/man/radarImage.Rd0000644000176200001440000000426414430460426015532 0ustar liggesusers\name{radarImage} \alias{radarImage} \docType{data} \title{Satellite Radar Image Data from near Munich} \description{ The data were supplied by A. Frery. They are a part of a synthetic aperture satellite radar image corresponding to a suburb of Munich. Provided are coordinates and values corresponding to three frequency bands for each of 1573 pixels. } \usage{data(radarImage, package="robustbase")} \format{ A data frame with 1573 observations on the following 5 variables. 
\describe{ \item{\code{X.coord}}{a numeric vector} \item{\code{Y.coord}}{a numeric vector} \item{\code{Band.1}}{a numeric vector} \item{\code{Band.2}}{a numeric vector} \item{\code{Band.3}}{a numeric vector} } } % \details{ % } \source{ The website accompanying the MMY-book: \url{https://www.wiley.com/legacy/wileychi/robust_statistics/} } \examples{ data(radarImage) plot(Y.coord ~ X.coord, data = radarImage) ## The 8 "clear" outliers (see also below) ii8 <- c(1548:1549, 1553:1554, 1565:1566, 1570:1571) outF <- 1+(seq_len(nrow(radarImage)) \%in\% ii8) pairs(radarImage[, 3:5], main = "radarImage (n = 1573)", col = outF, pch=outF) ## Finding outliers ----------------------------------------- set.seed(1) system.time(cc.ri <- covMcd(radarImage))# ~ 0.1 sec ## check for covMcd() consistency: iiO <- as.integer( c(262, 450:451, 480:481, 509, 535, 542, 597, 643, 669, 697, 803:804, 832:834, 862:864, 892, 989, 1123, 1145, 1223:1224, 1232:1233, 1249:1250, 1267, 1303, 1347, 1357, 1375, 1411, 1419:1420, 1443, 1453, 1504, 1510:1512, 1518:1521, 1525:1526, 1543:1544, 1546:1555, 1557:1558, 1561:1562, 1564:1566, 1569:1571, 1573)) length(iiO) # 73 -- other seeds sometimes give 72, rarely 71 "outliers" table(isO <- cc.ri$mcd.wt == 0) # 2023-05: 118 stopifnot(exprs = { ## identical(iiO, which(isO)) -- TRUE before 2023-05 covMcd() change ii8 \%in\% which(isO) # ii8 is subset of isO identical(ii8, which(cc.ri$mah > 200)) length(intersect(cc.ri$best, iiO)) == 0 }) cc <- c(adjustcolor("black", 0.4), adjustcolor("tomato", 0.8)) pairs(radarImage, main = "radarImage (n = 1573) + Outliers", gap=0, col = cc[1+isO], pch = c(1,8)[1+isO], cex = 0.8) } \keyword{datasets} robustbase/man/splitFrame.Rd0000644000176200001440000000543413626040715015605 0ustar liggesusers\name{splitFrame} \alias{splitFrame} \title{ Split Continuous and Categorical Predictors } \description{ Splits the design matrix into categorical and continuous predictors. Categorical variables are variables that are \code{\link{factor}}s, \code{\link{ordered}} factors, \emph{or} \code{\link{character}}. } \usage{ splitFrame(mf, x = model.matrix(mt, mf), type = c("f","fi", "fii")) } \arguments{ \item{mf}{model frame (as returned by \code{\link{model.frame}}).} \item{x}{(optional) design matrix, defaulting to the derived \code{\link{model.matrix}}.} \item{type}{a character string specifying the split type (see details).} } \details{ Which split type is used can be controlled with the setting \code{split.type} in \code{\link{lmrob.control}}. There are three split types. The only differences between the types are how interactions between categorical and continuous variables are handled. The extra types of splitting can be used to avoid \emph{Too many singular resamples} errors. Type \code{"f"}, the default, assigns only the intercept, categorical and interactions of categorical variables to \code{x1}. Interactions of categorical and continuous variables are assigned to \code{x2}. Type \code{"fi"} assigns also interactions between categorical and continuous variables to \code{x1}. Type \code{"fii"} assigns not only interactions between categorical and continuous variables to \code{x1}, but also the (corresponding) continuous variables themselves. } \value{ A list that includes the following components: \item{x1 }{design matrix containing only categorical variables} \item{x1.idx }{logical vectors of the variables considered categorical in the original design matrix} \item{x2 }{design matrix containing the continuous variables} } \references{ Maronna, R. 
A., and Yohai, V. J. (2000).
  Robust regression with both continuous and categorical predictors.
  \emph{Journal of Statistical Planning and Inference} \bold{89}, 197--214.
}
\author{ Manuel Koller }
\seealso{
  \code{\link{lmrob.M.S}}
}
\examples{
data(education)
education <- within(education, Region <- factor(Region))
educaCh   <- within(education, Region <- as.character(Region))
## no interactions -- same split for all types:
fm1 <- lm(Y ~ Region + X1 + X2 + X3, education)
fmC <- lm(Y ~ Region + X1 + X2 + X3, educaCh )
splt <- splitFrame(fm1$model) ; str(splt)
splC <- splitFrame(fmC$model)
stopifnot(identical(splt, splC))
## with interactions:
fm2 <- lm(Y ~ Region:X1:X2 + X1*X2, education)
s1 <- splitFrame(fm2$model, type="f" )
s2 <- splitFrame(fm2$model, type="fi" )
s3 <- splitFrame(fm2$model, type="fii")
cbind(s1$x1.idx, s2$x1.idx, s3$x1.idx)
rbind(p.x1 = c(ncol(s1$x1), ncol(s2$x1), ncol(s3$x1)),
      p.x2 = c(ncol(s1$x2), ncol(s2$x2), ncol(s3$x2)))
}
robustbase/man/steamUse.Rd0000644000176200001440000000622313312375575015271 0ustar liggesusers
\name{steamUse}
\title{Steam Usage Data (Excerpt)}
\alias{steamUse}
\docType{data}
\encoding{utf8}
\description{
  The monthly use of steam (\code{Steam}) in a factory may be modeled and described as a function of the operating days per month (\code{Operating.Days}) and mean outside temperature per month (\code{Temperature}).
}
\usage{data("steamUse", package="robustbase")}
\format{
  A data frame with 25 observations on the following 9 variables.
  \describe{
    \item{\code{Steam}:}{regression response \eqn{Y}, the pounds of steam used monthly.}
    \item{\code{fattyAcid}:}{pounds of Real Fatty Acid in storage per month.}
    \item{\code{glycerine}:}{pounds of crude glycerine made.}
    \item{\code{wind}:}{average wind velocity in miles per hour (a numeric vector).}
    \item{\code{days}:}{an integer vector with number of days of that month, i.e., in \eqn{28..31}.}
    \item{\code{op.days}:}{the number of operating days for the given month (integer).}
    \item{\code{freeze.d}:}{the number of days below 32 degrees Fahrenheit (\eqn{= 0}\enc{°C}{'C} (C=Celsius) \eqn{=} freezing temperature of water).}
    \item{\code{temperature}:}{a numeric vector of average outside temperature in Fahrenheit (F).}
    \item{\code{startups}:}{the number of startups (of production in that month).}
  }
}
\details{
  No further information is given in Draper and Smith about the place and exact years of the measurements, though some educated guesses should be possible, see the examples.
}
\source{
  Data from Draper and Smith, 1st ed, 1966; appendix A.

  A version of this has been used in teaching at SfS ETH Zurich, since at least 1996,
  \url{https://stat.ethz.ch/Teaching/Datasets/NDK/dsteam.dat}

  The package \CRANpkg{aprean3} contains all data sets from the 3rd edition of Draper and Smith (1998), and this data set with variable names \code{x1 .. x10} (\code{x9} being \code{wind^2}, hence extraneous).
}
\references{
  Draper and Smith (1981)
  Applied Regression Analysis (2nd ed., p.
615 ff) } \examples{ \dontrun{ if(require("aprean3")) { # show how 'steamUse' is related to 'dsa01a' stm <- dsa01a names(stm) <- c("Steam", "fattyAcid", "glycerine", "wind", "days", "op.days", "freeze.d", "temperature", "wind.2", "startups") ## prove that wind.2 is wind^2, "traditionally" rounded to 1 digit: stopifnot(all.equal(floor(0.5 + 10*stm[,"wind"]^2)/10, stm[,"wind.2"], tol = 1e-14)) ## hence drop it steamUse <- stm[, names(stm) != "wind.2"] } }% dont data(steamUse) str(steamUse) ## Looking at this, cbind(M=rep_len(month.abb, 25), steamUse[,5:8, drop=FALSE]) ## one will conjecture that these were 25 months, Jan--Jan in a row, ## starting in a leap year (perhaps 1960 ?). plot(steamUse) summary(fm1 <- lmrob(Steam ~ temperature + op.days, data=steamUse)) ## diagnoses 2 outliers: month of July, maybe company-wide summer vacations %% no longer visible: summary(fmF <- lmrob(Steam ~ ., data=steamUse)) ## KS2014 alone seems not robust enough: summary(fm.14 <- lmrob(Steam ~ temperature + op.days, data=steamUse, setting="KS2014")) pairs(Steam ~ temperature+op.days, steamUse) } \keyword{datasets} robustbase/man/covMcd.Rd0000644000176200001440000002654713710054521014714 0ustar liggesusers\name{covMcd} \title{Robust Location and Scatter Estimation via MCD} \alias{covMcd} \alias{print.mcd} \alias{.MCDcons} \alias{.MCDcnp2} \alias{.MCDcnp2.rew} \alias{.MCDsingularityMsg} \alias{.wgtFUN.covMcd} % \concept{High breakdown point} \description{ Compute the Minimum Covariance Determinant (MCD) estimator, a robust multivariate location and scale estimate with a high breakdown point, via the \sQuote{Fast MCD} or \sQuote{Deterministic MCD} (\dQuote{DetMcd}) algorithm. } \usage{ covMcd(x, cor = FALSE, raw.only = FALSE, alpha =, nsamp =, nmini =, kmini =, scalefn =, maxcsteps =, initHsets = NULL, save.hsets = FALSE, names = TRUE, % full.h = save.hsets, seed =, tolSolve =, trace =, use.correction =, wgtFUN =, control = rrcov.control()) } \arguments{ \item{x}{a matrix or data frame.} \item{cor}{should the returned result include a correlation matrix? Default is \code{cor = FALSE}.} \item{raw.only}{should only the \dQuote{raw} estimate be returned, i.e., no (re)weighting step be performed; default is false.} \item{alpha}{numeric parameter controlling the size of the subsets over which the determinant is minimized; roughly \code{alpha*n}, (see \sQuote{Details} below) observations are used for computing the determinant. Allowed values are between 0.5 and 1 and the default is 0.5.} \item{nsamp}{number of subsets used for initial estimates or \code{"best"}, \code{"exact"}, or \code{"deterministic"}. Default is \code{nsamp = 500}. For \code{nsamp = "best"} exhaustive enumeration is done, as long as the number of trials does not exceed 100'000 (\code{= nLarge}). For \code{"exact"}, exhaustive enumeration will be attempted however many samples are needed. In this case a warning message may be displayed saying that the computation can take a very long time. For \code{"deterministic"}, the \emph{deterministic} MCD is computed; as proposed by Hubert et al. (2012) it starts from the \eqn{h} most central observations of \emph{six} (deterministic) estimators. } \item{nmini, kmini}{for \eqn{n \ge 2 \times n_0}{n >= 2 n_0}, \eqn{n_0 := \code{nmini}}, the algorithm splits the data into maximally \code{kmini} (by default 5) subsets, of size approximately, but at least \code{nmini}. When \code{nmini*kmini < n}, the initial search uses only a \emph{subsample} of size \code{nmini*kmini}. %% FIXME? 
-- more accurately ==> ../src/rffastmcd.f The original algorithm had \code{nmini = 300} and \code{kmini = 5} hard coded.} \item{scalefn}{for the deterministic MCD: \code{\link{function}} to compute a robust scale estimate or character string specifying a rule determining such a function. The default, currently \code{"hrv2012"}, uses the recommendation of Hubert, Rousseeuw and Verdonck (2012) who recommend \code{\link{Qn}} for \eqn{n < 1000} and \code{\link{scaleTau2}} for larger n. Alternatively, \code{scalefn = "v2014"}, uses that rule with cutoff \eqn{n = 5000}.} \item{maxcsteps}{maximal number of concentration steps in the deterministic MCD; should not be reached.} \item{initHsets}{NULL or a \eqn{K x h} integer matrix of initial subsets of observations of size \eqn{h} (specified by the indices in \code{1:n}).} \item{save.hsets}{(for deterministic MCD) logical indicating if the initial subsets should be returned as \code{initHsets}.} \item{names}{logical; if true (as by default), several parts of the result have a \code{\link{names}} or \code{\link{dimnames}} respectively, derived from data matrix \code{x}.} \item{seed}{initial seed for random generator, like \code{\link{.Random.seed}}, see \code{\link{rrcov.control}}.} \item{tolSolve}{numeric tolerance to be used for inversion (\code{\link{solve}}) of the covariance matrix in \code{\link{mahalanobis}}.} \item{trace}{logical (or integer) indicating if intermediate results should be printed; defaults to \code{FALSE}; values \eqn{\ge 2}{>= 2} also produce print from the internal (Fortran) code.} \item{use.correction}{ whether to use finite sample correction factors; defaults to \code{TRUE}.} \item{wgtFUN}{a character string or \code{\link{function}}, specifying how the weights for the reweighting step should be computed. Up to April 2013, the only option has been the original proposal in (1999), now specified by \code{wgtFUN = "01.original"} (or via \code{control}). Since \pkg{robustbase} version 0.92-3, Dec.2014, other predefined string options are available, though experimental, see the experimental \code{.wgtFUN.covMcd} object.} \item{control}{a list with estimation options - this includes those above provided in the function specification, see \code{\link{rrcov.control}} for the defaults. If \code{control} is supplied, the parameters from it will be used. If parameters are passed also in the invocation statement, they will override the corresponding elements of the control object.} } \details{ The minimum covariance determinant estimator of location and scatter implemented in \code{covMcd()} is similar to \R function \code{\link[MASS]{cov.mcd}()} in \CRANpkg{MASS}. The MCD method looks for the \eqn{h (> n/2)} (\eqn{h = h(\alpha,n,p) =} \code{\link{h.alpha.n}(alpha,n,p)}) observations (out of \eqn{n}) whose classical covariance matrix has the lowest possible determinant. The raw MCD estimate of location is then the average of these \eqn{h} points, whereas the raw MCD estimate of scatter is their covariance matrix, multiplied by a consistency factor (\code{.MCDcons(p, h/n)}) and (if \code{use.correction} is true) a finite sample correction factor (\code{.MCDcnp2(p, n, alpha)}), to make it consistent at the normal model and unbiased at small samples. Both rescaling factors (consistency and finite sample) are returned in the length-2 vector \code{raw.cnp2}. The implementation of \code{covMcd} uses the Fast MCD algorithm of Rousseeuw and Van Driessen (1999) to approximate the minimum covariance determinant estimator. 
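  As a minimal sketch (an illustration added here, using the \code{hbk} data shipped with this package; see the \emph{Examples} below for more), the quantities just described can be inspected directly from a fitted object:
  \preformatted{
    data(hbk)
    cc <- covMcd(data.matrix(hbk[, 1:3]))
    h  <- h.alpha.n(1/2, nrow(hbk), 3) # size of the subsets; equals length(cc$best)
    cc$raw.cnp2 # consistency and finite sample correction factors, raw estimate
    cc$cnp2     # the corresponding two factors of the reweighted estimate
  }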
Based on these raw MCD estimates, (unless argument \code{raw.only} is true), a reweighting step is performed, i.e., \code{V <- \link{cov.wt}(x,w)}, where \code{w} are weights determined by \dQuote{outlyingness} with respect to the scaled raw MCD. Again, a consistency factor and (if \code{use.correction} is true) a finite sample correction factor (\code{.MCDcnp2.rew(p, n, alpha)}) are applied. The reweighted covariance is typically considerably more efficient than the raw one, see Pison et al. (2002). The two rescaling factors for the reweighted estimates are returned in \code{cnp2}. Details for the computation of the finite sample correction factors can be found in Pison et al. (2002). } \author{Valentin Todorov \email{valentin.todorov@chello.at}, based on work written for S-plus by Peter Rousseeuw and Katrien van Driessen from University of Antwerp.% no E-mails for spam-protection Visibility of (formerly internal) tuning parameters, notably \code{wgtFUN()}: Martin Maechler } \value{ An object of class \code{"mcd"} which is basically a \code{\link{list}} with components \item{center}{the final estimate of location.} \item{cov}{the final estimate of scatter.} \item{cor}{the (final) estimate of the correlation matrix (only if \code{cor = TRUE}).} \item{crit}{the value of the criterion, i.e., the logarithm of the determinant. Previous to Nov.2014, it contained the determinant itself which can under- or overflow relatively easily.} \item{best}{the best subset found and used for computing the raw estimates, with \code{\link{length}(best) == quan = \link{h.alpha.n}(alpha,n,p)}.} \item{mah}{mahalanobis distances of the observations using the final estimate of the location and scatter.} \item{mcd.wt}{weights of the observations using the final estimate of the location and scatter.} \item{cnp2}{a vector of length two containing the consistency correction factor and the finite sample correction factor of the final estimate of the covariance matrix.} \item{raw.center}{the raw (not reweighted) estimate of location.} \item{raw.cov}{the raw (not reweighted) estimate of scatter.} \item{raw.mah}{mahalanobis distances of the observations based on the raw estimate of the location and scatter.} \item{raw.weights}{weights of the observations based on the raw estimate of the location and scatter.} \item{raw.cnp2}{a vector of length two containing the consistency correction factor and the finite sample correction factor of the raw estimate of the covariance matrix.} \item{X}{the input data as numeric matrix, without \code{\link{NA}}s.} \item{n.obs}{total number of observations.} \item{alpha}{the size of the subsets over which the determinant is minimized (the default is \eqn{(n+p+1)/2}).} \item{quan}{the number of observations, \eqn{h}, on which the MCD is based. If \code{quan} equals \code{n.obs}, the MCD is the classical covariance matrix.} \item{method}{character string naming the method (Minimum Covariance Determinant), starting with \code{"Deterministic"} when \code{nsamp="deterministic"}.} \item{iBest}{(for the deterministic MCD) contains indices from 1:6 denoting which of the (six) initial subsets lead to the best set found.} \item{n.csteps}{(for the deterministic MCD) for each of the initial subsets, the number of C-steps executed till convergence.} \item{call}{the call used (see \code{\link{match.call}}).} } \references{ Rousseeuw, P. J. and Leroy, A. M. (1987) \emph{Robust Regression and Outlier Detection.} Wiley. Rousseeuw, P. J. and van Driessen, K. 
(1999)
  A fast algorithm for the minimum covariance determinant estimator.
  \emph{Technometrics} \bold{41}, 212--223.

  Pison, G., Van Aelst, S., and Willems, G. (2002)
  Small Sample Corrections for LTS and MCD,
  \emph{Metrika} \bold{55}, 111--123.% ~/save/papers/robust-diverse/Pison_VanAelst_Willems.pdf

  Hubert, M., Rousseeuw, P. J. and Verdonck, T. (2012)
  A deterministic algorithm for robust location and scatter.
  Journal of Computational and Graphical Statistics \bold{21}, 618--637.
}
\seealso{
  \code{\link[MASS]{cov.mcd}} from package \CRANpkg{MASS};
  \code{\link{covOGK}} as cheaper alternative for larger dimensions.

  \code{\link[robustX]{BACON}} and \code{\link[robustX]{covNNC}}, from package \CRANpkg{robustX};
}
\examples{
data(hbk)
hbk.x <- data.matrix(hbk[, 1:3])
set.seed(17)
(cH <- covMcd(hbk.x))
cH0 <- covMcd(hbk.x, nsamp = "deterministic")
with(cH0, stopifnot(quan == 39,
     iBest == c(1:4,6), # 5 out of 6 gave the same
     identical(raw.weights, mcd.wt),
     identical(which(mcd.wt == 0), 1:14),
     all.equal(crit, -1.045500594135)))
## the following three statements are equivalent
c1 <- covMcd(hbk.x, alpha = 0.75)
c2 <- covMcd(hbk.x, control = rrcov.control(alpha = 0.75))
## direct specification overrides control one:
c3 <- covMcd(hbk.x, alpha = 0.75, control = rrcov.control(alpha=0.95))
c1
## Martin's smooth reweighting:
## List of experimental pre-specified wgtFUN() creators:
## Cutoffs may depend on (n, p, control$beta) :
str(.wgtFUN.covMcd)
cMM <- covMcd(hbk.x, wgtFUN = "sm1.adaptive")
ina <- which(names(cH) == "call")
all.equal(cMM[-ina], cH[-ina]) # *some* differences, not huge (same 'best'):
stopifnot(all.equal(cMM[-ina], cH[-ina], tol = 0.2))
}
\keyword{robust}
\keyword{multivariate}
robustbase/man/covOGK.Rd0000644000176200001440000001260411114012313014605 0ustar liggesusers
\name{covOGK}
\alias{covOGK}
\alias{covGK}
\alias{s_mad}
\alias{s_IQR}
\alias{hard.rejection}
%
\title{Orthogonalized Gnanadesikan-Kettenring (OGK) Covariance Matrix Estimation}
\description{
  Computes the orthogonalized pairwise covariance matrix estimate described in Maronna and Zamar (2002).  The pairwise proposal goes back to Gnanadesikan and Kettenring (1972).
}
\usage{
covOGK(X, n.iter = 2, sigmamu, rcov = covGK, weight.fn = hard.rejection,
       keep.data = FALSE, \dots)
covGK (x, y, scalefn = scaleTau2, \dots)
s_mad(x, mu.too = FALSE, na.rm = FALSE)
s_IQR(x, mu.too = FALSE, na.rm = FALSE)
}
\arguments{
  \item{X}{data in something that can be coerced into a numeric matrix.}
  \item{n.iter}{number of orthogonalization iterations.  Usually 1 or 2; values greater than 2 are unlikely to have any significant effect on the estimate (other than increasing the computing time).}
  \item{sigmamu, scalefn}{a function that computes univariate robust location and scale estimates.  By default it should return a single numeric value containing the robust scale (standard deviation) estimate.  When \code{mu.too} is true, \code{sigmamu()} should return a numeric vector of length 2 containing robust location and scale estimates.  See \code{\link{scaleTau2}}, \code{\link{s_Qn}}, \code{\link{s_Sn}}, \code{s_mad} or \code{s_IQR} for examples to be used as \code{sigmamu} argument.}
  \item{rcov}{function that computes a robust covariance estimate between two vectors.
The default, Gnanadesikan-Kettenring's \code{covGK}, is simply \eqn{(s^2(X+Y) - s^2(X-Y))/4} where \eqn{s()} is the scale estimate \code{sigmamu()}.}
  \item{weight.fn}{a function of the robust distances and the number of variables \eqn{p} to compute the weights used in the reweighting step.}
  \item{keep.data}{logical indicating if the (untransformed) data matrix \code{X} should be kept as part of the result.}
  \item{\dots}{additional arguments; for \code{covOGK} to be passed to \code{sigmamu()} and \code{weight.fn()}; for \code{covGK} passed to \code{scalefn}.}
  %%% covGK():
  \item{x,y}{numeric vectors of the same length, the covariance of which is sought in \code{covGK} (or the scale, in \code{s_mad} or \code{s_IQR}).}
  %%% s_mad(), s_IQR():
  \item{mu.too}{logical indicating if both location and scale should be returned or just the scale (when \code{mu.too=FALSE} as by default).}
  \item{na.rm}{if \code{TRUE} then \code{\link{NA}} values are stripped from \code{x} before computation takes place.}
}
\details{
  Typical default values for the \emph{function} arguments \code{sigmamu}, \code{rcov}, and \code{weight.fn}, are available as well, see the \emph{Examples} below, \bold{but} their names and calling sequences are still subject to discussion and may be changed in the future.

  The current default, \code{weight.fn = hard.rejection}, corresponds to the proposition in the literature, but Martin Maechler strongly believes that the hard threshold currently in use is too arbitrary, and further that \emph{soft} thresholding should be used instead, anyway.
}
\value{
  \code{covOGK()} currently returns a list with components
  \item{center}{robust location: numeric vector of length \eqn{p}.}
  \item{cov}{robust covariance matrix estimate: \eqn{p\times p}{p x p} matrix.}
  \item{wcenter, wcov}{re-\bold{w}eighted versions of \code{center} and \code{cov}.}
  \item{weights}{the robustness weights used.}
  \item{distances}{the mahalanobis distances computed using \code{center} and \code{cov}.}
  \dots\dots \cr
  \bold{but note that this might be radically changed to returning an S4 classed object!}

  \code{covGK()} is a trivial 1-line function returning the covariance estimate
  \deqn{\hat c(x,y) = \left(\hat \sigma(x+y)^2 - \hat \sigma(x-y)^2 \right)/4,%
       }{ c^(x,y) = [s^(x+y)^2 - s^(x-y)^2]/4,}%
  where \eqn{\hat \sigma(u)}{s^(u)} is the scale estimate of \eqn{u} specified by \code{scalefn}.

  \code{s_mad()}, and \code{s_IQR()} return the scale estimates \code{\link[stats]{mad}} or \code{\link[stats]{IQR}} respectively, where the \code{s_*} functions return a length-2 vector (mu, sig) when \code{mu.too = TRUE}, see also \code{\link{scaleTau2}}.
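  For illustration, a minimal sketch (added here) of the \code{covGK} identity stated above; any robust scale, e.g. \code{\link{Qn}}, can be plugged in as \code{scalefn}:
  \preformatted{
    set.seed(1)
    x <- rnorm(50); y <- x + rnorm(50)
    ## covGK(x, y, scalefn) is (scalefn(x+y)^2 - scalefn(x-y)^2) / 4 :
    all.equal(covGK(x, y, scalefn = Qn),
              (Qn(x + y)^2 - Qn(x - y)^2) / 4)
  }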
data(toxicity) cO1tox <- covOGK(toxicity, sigmamu = scaleTau2) cO2tox <- covOGK(toxicity, sigmamu = s_Qn) ## nice formatting of correlation matrices: as.dist(round(cov2cor(cO1tox$cov), 2)) as.dist(round(cov2cor(cO2tox$cov), 2)) ## "graphical" symnum(cov2cor(cO1tox$cov)) symnum(cov2cor(cO2tox$cov), legend=FALSE) } \keyword{robust} \keyword{multivariate} robustbase/man/functionXal-class.Rd0000644000176200001440000000212612114153713017061 0ustar liggesusers\name{functionXal-class} \docType{class} \alias{functionXal-class} \title{Class "functionXal" of Functionals (of Psi-like functions)} \description{ The class \code{"functionXal"} is a class of functionals (typically integrals) typically of \code{\linkS4class{functionX}} functions. Since the \code{functionX} functions typically also depend on tuning parameters, objects of this class (\code{"functionXal"}) are functions of these tuning parameters. } \section{Slots}{ \describe{ \item{\code{.Data}:}{Directly extends class \code{"function"}.} } } \section{Extends}{ Class \code{"function"}, from data part. Class \code{"OptionalFunction"}, by class \code{"function"}. Class \code{"PossibleMethod"}, by class \code{"function"}. } % \section{Methods}{ % No methods defined with class "functionXal" in the signature. % } \seealso{ \code{\link{psiFunc}()} and the class definitions of \code{\linkS4class{functionX}} and \code{\linkS4class{psi_func}} which has several \code{functionXal} slots. } % \examples{ % ##---- Should be DIRECTLY executable !! ---- % } \keyword{classes} robustbase/man/salinity.Rd0000644000176200001440000000565513710054521015332 0ustar liggesusers\name{salinity} \alias{salinity} \docType{data} \title{Salinity Data} \description{ This is a data set consisting of measurements of water salinity (i.e., its salt concentration) and river discharge taken in North Carolina's Pamlico Sound, recording some bi-weekly averages in March, April, and May from 1972 to 1977. This dataset was listed by Ruppert and Carroll (1980). In Carrol and Ruppert (1985) the physical background of the data is described. They indicated that observations 5 and 16 correspond to periods of very heavy discharge and showed that the discrepant observation 5 was masked by observations 3 and 16, i.e., only after deletion of these observations it was possible to identify the influential observation 5. This data set is a prime example of the \emph{masking effect}. } \usage{data(salinity, package="robustbase")} \format{ A data frame with 28 observations on the following 4 variables (in parentheses are the names used in the 1980 reference). \describe{ \item{\code{X1}:}{Lagged Salinity (\sQuote{SALLAG})} \item{\code{X2}:}{Trend (\sQuote{TREND})} \item{\code{X3}:}{Discharge (\sQuote{H2OFLOW})} \item{\code{Y}:}{Salinity (\sQuote{SALINITY})} } } \note{The \CRANpkg{boot} package contains another version of this salinity data set, also attributed to Ruppert and Carroll (1980), but with two clear transcription errors, see the examples. } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.82, table 5. Ruppert, D. and Carroll, R.J. (1980) Trimmed least squares estimation in the linear model. \emph{JASA} \bold{75}, 828--838; table 3, p.835. Carroll, R.J. and Ruppert, D. (1985) Transformations in regression: A robust analysis. \emph{Technometrics} \bold{27}, 1--12 } \examples{ data(salinity) summary(lm.sali <- lm(Y ~ . , data = salinity)) summary(rlm.sali <- MASS::rlm(Y ~ . , data = salinity)) summary(lts.sali <- ltsReg(Y ~ . 
, data = salinity)) salinity.x <- data.matrix(salinity[, 1:3]) c_sal <- covMcd(salinity.x) plot(c_sal, "tolEllipsePlot") ## Connection with boot package's version : if(requireNamespace("boot")) { ## 'always' print( head(boot.sal <- boot::salinity ) ) print( head(robb.sal <- salinity [, c(4, 1:3)]) ) # difference: has one digit more ## Otherwise the same ? dimnames(robb.sal) <- dimnames(boot.sal) ## apart from the 4th column, they are "identical": stopifnot( all.equal(boot.sal[, -4], robb.sal[, -4], tol = 1e-15) ) ## But the discharge ('X3', 'dis' or 'H2OFLOW') __differs__ in two places: plot(cbind(robustbase = robb.sal[,4], boot = boot.sal[,4])) abline(0,1, lwd=3, col=adjustcolor("red", 1/4)) D.sal <- robb.sal[,4] - boot.sal[,4] stem(robb.sal[,4] - boot.sal[,4]) which(abs(D.sal) > 0.01) ## 2 8 ## *two* typos (=> difference ~= 1) in the version of 'boot': obs. 2 & 8 !!! cbind(robb = robb.sal[,4], boot = boot.sal[,4], D.sal) }# boot } \keyword{datasets} robustbase/man/adjboxStats.Rd0000644000176200001440000000637313721410436015765 0ustar liggesusers\name{adjboxStats} \alias{adjboxStats} \title{Statistics for Skewness-adjusted Boxplots} \description{ Computes the \dQuote{statistics} for producing boxplots adjusted for skewed distributions as proposed in Hubert and Vandervieren (2008), see \code{\link{adjbox}}. } \usage{ adjboxStats(x, coef = 1.5, a = -4, b = 3, do.conf = TRUE, do.out = TRUE, \dots) } \arguments{ \item{x}{a numeric vector for which adjusted boxplot statistics are computed.} \item{coef}{number determining how far \sQuote{whiskers} extend out from the box, see \code{\link{boxplot.stats}}.} \item{a, b}{scaling factors multiplied by the medcouple \code{\link{mc}()} to determine outlier boundaries; see the references.} \item{do.conf,do.out}{logicals; if \code{FALSE}, the \code{conf} or \code{out} component respectively will be empty in the result.} \item{\dots}{further optional arguments to be passed to \code{\link{mc}()}, such as \code{doReflect}.} } \details{ Given the quartiles \eqn{Q_1}{Q1}, \eqn{Q_3}{Q3}, the interquartile range \eqn{\Delta Q := Q_3 - Q_1}{IQR := Q3-Q1}, and the medcouple \eqn{M :=}\code{mc(x)}, \eqn{c =}\code{coef}, the \dQuote{fence} is defined, for \eqn{M \ge 0} as \deqn{[Q_1 - c e^{a \cdot M}\Delta Q, Q_3 + c e^{b \cdot M}\Delta Q],% }{[Q1 - c*exp(a * M)*IQR, Q3 + c*exp(b * M)*IQR],} and for \eqn{M < 0} as \deqn{[Q_1 - c e^{-b \cdot M}\Delta Q, Q_3 + c e^{-a \cdot M}\Delta Q],% }{[Q1 - c*exp(-b * M)*IQR, Q3 + c*exp(-a * M)*IQR],} and all observations \code{x} outside the fence, the \dQuote{potential outliers}, are returned in \code{out}. Note that a typo in robustbase versions up to 0.7-8, for the (rare left-skewed) case where \link{mc}(x) < 0, led to a \dQuote{fence} not wide enough in the upper part, and hence \emph{fewer} outliers there. } \value{ A \code{\link{list}} with the components \item{stats}{a vector of length 5, containing the extreme of the lower whisker, the lower hinge, the median, the upper hinge and the extreme of the upper whisker.} \item{n}{the number of observations} \item{conf}{the lower and upper extremes of the \sQuote{notch} (\code{if(do.conf)}).
See \code{\link{boxplot.stats}}.} \item{fence}{length 2 vector of interval boundaries which define the non-outliers, and hence the whiskers of the plot.} \item{out}{the values of any data points which lie beyond the fence, and hence beyond the extremes of the whiskers.} } \author{R Core Development Team (\code{\link{boxplot.stats}}); adapted by Tobias Verbeke and Martin Maechler.} \note{The code only slightly modifies the code of \R's \code{\link{boxplot.stats}}. } \seealso{\code{\link{adjbox}()}, also for references, the function which mainly uses this one; further \code{\link{boxplot.stats}}. } \examples{ data(condroz) adjboxStats(ccA <- condroz[,"Ca"]) adjboxStats(ccA, doReflect = TRUE)# small difference in fence ## Test reflection invariance [was not ok, up to and including robustbase_0.7-8] a1 <- adjboxStats( ccA, doReflect = TRUE) a2 <- adjboxStats(-ccA, doReflect = TRUE) nm1 <- c("stats", "conf", "fence") stopifnot(all.equal( a1[nm1], lapply(a2[nm1], function(u) rev(-u))), all.equal(a1[["out"]], -a2[["out"]])) } \keyword{robust} \keyword{univar} robustbase/man/coleman.Rd0000644000176200001440000000263113312375575015120 0ustar liggesusers\name{coleman} \alias{coleman} \docType{data} \title{Coleman Data Set} \description{ Contains information on 20 schools from the Mid-Atlantic and New England States, drawn from a population studied by Coleman et al. (1966). Mosteller and Tukey (1977) analyze this sample consisting of measurements on six different variables, one of which will be treated as a response. } \usage{data(coleman, package="robustbase")} \format{ A data frame with 20 observations on the following 6 variables. \describe{ \item{\code{salaryP}}{staff salaries per pupil} \item{\code{fatherWc}}{percent of white-collar fathers} \item{\code{sstatus}}{socioeconomic status composite deviation: means for family size, family intactness, father's education, mother's education, and home items} \item{\code{teacherSc}}{mean teacher's verbal test score} \item{\code{motherLev}}{mean mother's educational level, one unit is equal to two school years} \item{\code{Y}}{verbal mean test score (y, all sixth graders)} } } \author{Valentin Todorov} \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection} Wiley, p.79, table 2. } \examples{ data(coleman) pairs(coleman) summary( lm.coleman <- lm(Y ~ . , data = coleman)) summary(lts.coleman <- ltsReg(Y ~ . , data = coleman)) coleman.x <- data.matrix(coleman[, 1:6]) (Cc <- covMcd(coleman.x)) } \keyword{datasets} robustbase/man/colMedians.Rd0000644000176200001440000000603413710054521015544 0ustar liggesusers\name{colMedians} \title{Fast Row or Column-wise Medians of a Matrix} \alias{colMedians} \alias{rowMedians} \description{ Calculates the median for each row (column) of a matrix \code{x}. This is the same as, but more efficient than, \code{apply(x, MM, median)} for MM=1 or MM=2, respectively. } \usage{ colMedians(x, na.rm = FALSE, hasNA = TRUE, keep.names=TRUE) rowMedians(x, na.rm = FALSE, hasNA = TRUE, keep.names=TRUE) } \arguments{ \item{x}{a \code{\link{numeric}} \eqn{n \times p}{n x p} \code{\link{matrix}}.} \item{na.rm}{if \code{\link{TRUE}}, \code{\link{NA}}s are excluded first, otherwise not.} \item{hasNA}{logical indicating if \code{x} may contain \code{\link{NA}}s.
If set to \code{FALSE}, no internal NA handling is performed, which typically is faster.} \item{keep.names}{logical indicating if row or column names of \code{x} should become \code{\link{names}} of the result - as is the case for \code{\link{apply}(x, MM, median)}.} } \value{ a \code{\link{numeric}} vector of length \eqn{n} or \eqn{p}, respectively. } \section{Missing values}{ Missing values are excluded before calculating the medians \emph{unless} \code{hasNA} is false. Note that \code{na.rm} has no effect and is automatically false when \code{hasNA} is false, i.e., internally, before computations start, the following is executed: \preformatted{if (!hasNA) ## If there are no NAs, don't try to remove them narm <- FALSE} } \details{ The implementation of \code{rowMedians()} and \code{colMedians()} is optimized for both speed and memory. To avoid coercing to \code{\link{double}}s (and hence memory allocation), there is a special implementation for \code{\link{integer}} matrices. That is, if \code{x} is an \code{\link{integer}} \code{\link{matrix}}, then \code{rowMedians(as.double(x))} (\code{colMedians(as.double(x))}) would require three times the memory of \code{rowMedians(x)} (\code{colMedians(x)}), but all this is avoided. } \author{Henrik Bengtsson, Harris Jaffee, Martin Maechler} \seealso{ See \code{\link{wgt.himedian}()} for a weighted hi-median, and \code{\link[matrixStats]{colWeightedMedians}()} etc. from package \CRANpkg{matrixStats} for \emph{weighted} medians.\cr For mean estimates, see \code{rowMeans()} in \code{\link{colSums}}(). } \examples{ set.seed(1); n <- 234; p <- 543 # n*p = 127'062 x <- matrix(rnorm(n*p), n, p) x[sample(seq_along(x), size= n*p / 256)] <- NA R1 <- system.time(r1 <- rowMedians(x, na.rm=TRUE)) C1 <- system.time(y1 <- colMedians(x, na.rm=TRUE)) R2 <- system.time(r2 <- apply(x, 1, median, na.rm=TRUE)) C2 <- system.time(y2 <- apply(x, 2, median, na.rm=TRUE)) R2 / R1 # speedup factor: ~= 4 {platform dependent} C2 / C1 # speedup factor: ~= 5.8 {platform dependent} stopifnot(all.equal(y1, y2, tol=1e-15), all.equal(r1, r2, tol=1e-15)) (m <- cbind(x1=3, x2=c(4:1, 3:4,4))) stopifnot(colMedians(m) == 3, all.equal(colMeans(m), colMedians(m)),# <- including names ! all.equal(rowMeans(m), rowMedians(m))) } \keyword{array} \keyword{robust} \keyword{univar} robustbase/man/summary.mcd.Rd0000644000176200001440000000345410441335140015725 0ustar liggesusers\name{summary.mcd} \alias{summary.mcd} \alias{print.summary.mcd} \title{Summary Method for MCD objects} \usage{ \method{summary}{mcd}(object, \dots) \method{print}{summary.mcd}(x, digits = max(3, getOption("digits") - 3), print.gap = 2, \dots) } \arguments{ \item{object,x}{an object of class \code{"mcd"} (or \code{"summary.mcd"}); usually, a result of a call to \code{\link{covMcd}}.} \item{digits}{the number of significant digits to use when printing.} \item{print.gap}{number of horizontal spaces between numbers; see also \code{\link{print.default}}.} \item{\dots}{further arguments passed to or from other methods.} } \description{ \code{\link{summary}} method for class \code{"mcd"}. } \details{ \code{summary.mcd()}, the S3 method, simply returns an (S3) object of \code{\link{class} "summary.mcd"} for which there's a \code{\link{print}} method: \code{print.summary.mcd} prints summary statistics for the weighted covariance matrix and location estimates with weights based on MCD estimates.
While the function \code{\link{print.mcd}} prints only the robust estimates of the location and the covariance matrix, \code{print.summary.mcd} will print also the correlation matrix (if requested in the call to \code{covMcd} with \code{cor=TRUE}), the eigenvalues of the covariance or the correlation matrix and the robust (\dQuote{Mahalanobis}) distances. } \value{ \code{summary.mcd} returns an \code{summary.mcd} object, whereas the \code{print} methods returns its first argument via \code{\link{invisible}}, as all \code{print} methods do. } \seealso{ \code{\link{covMcd}}, \code{\link{summary}} } \examples{ data(Animals, package = "MASS") brain <- Animals[c(1:24, 26:25, 27:28),] lbrain <- log(brain) summary(cLB <- covMcd(lbrain)) } \keyword{multivariate} \keyword{robust} robustbase/man/huberize.Rd0000644000176200001440000000771214221264626015316 0ustar liggesusers\name{huberize} \alias{huberize} \title{Huberization -- Bringing Outliers In} \description{ Huberization (named after Peter Huber's M-estimation algorithm for location originally) replaces outlying values in a sample \code{x} by their respective boundary: when \eqn{x_j < c_1} it is replaced by \eqn{c_1} and when \eqn{x_j > c_2} it is replaced by \eqn{c_2}. Consequently, values inside the interval \eqn{[c_1, c_2]} remain unchanged. Here, \eqn{c_j = M \pm c\cdot s}{c1,c2 = M +/- c*s} where \eqn{s := s(x)} is the \emph{robust} scale estimate \code{\link{Qn}(x)} if that is positive, and by default, \eqn{M} is the robust huber estimate of location \eqn{\mu} (with tuning constant \eqn{k}). In the degenerate case where \code{\link{Qn}(x) == 0}, trimmed means of \code{abs(x - M)} are tried as scale estimate \eqn{s}, with decreasing trimming proportions specified by the decreasing \code{trim} vector. } \usage{ huberize(x, M = huberM(x, k = k)$mu, c = k, trim = (5:1)/16, k = 1.5, warn0 = getOption("verbose"), saveTrim = TRUE) } \arguments{ \item{x}{numeric vector which is to be huberized.} \item{M}{a number; defaulting to \code{\link{huberM}(x, k)}, the robust Huber M-estimator of location.} \item{c}{a positive number, the tuning constant for huberization of the sample \code{x}.} \item{trim}{a \emph{decreasing} vector of trimming proportions in \eqn{[0, 0.5]}, only used to trim the absolute deviations from \code{M} in case \code{\link{Qn}(x)} is zero. } \item{k}{used if \code{M} is not specified as huberization center \code{M}, and so, by default is taken as Huber's M-estimate \code{\link{huberM}(x, k)}.} \item{warn0}{\code{\link{logical}} indicating if a warning should be signalled in case \code{\link{Qn}(x)} is zero and the trimmed means for all trimming proportions \code{trim} are zero as well.} \item{saveTrim}{a \code{\link{logical}} indicating if the last tried \code{trim[j]} value should be stored if \code{\link{Qn}(x)} was zero.} } \details{ \itemize{ \item In regular cases, \code{s = \link{Qn}(x)} is positive and used to huberize values of \code{x} outside \code{[M - c*s, M + c*s]}. \item In degenerate cases where \code{\link{Qn}(x) == 0}, we search for an \eqn{s > 0} by trying the trimmed mean \code{s := mean(abs(x-M), trim = trim[j])} with less and less trimming (as the trimming proportions \code{trim[]} must decrease). If even the last, \code{trim[length(trim)]}, leads to \eqn{s = 0}, a warning is printed when \code{warn0} is true. 
} } \value{ a numeric vector as \code{x}; in case \code{\link{Qn}(x)} was zero and \code{saveTrim} is true, also containing the (last) \code{trim} proportion used (to compute the scale \eqn{s}) as attribute \code{"trim"} (see \code{\link{attr}()}, \code{\link{attributes}}). } \author{Martin Maechler} \note{ For the use in \code{\link{mc}()} and similar cases where mainly numerical stabilization is necessary, a large \code{c = 1e12} will lead to \emph{no} huberization, i.e., all \code{y == x} for \code{y <- huberize(x, c)} for typical non-degenerate samples. } \seealso{ \code{\link{huberM}} and \code{\link{mc}} which is now stabilized by default via something like \code{huberize(*, c=1e11)}. } \examples{ ## For non-degenerate data and large c, nothing is huberized, ## as there are *no* really extreme outliers : set.seed(101) x <- rnorm(1000) stopifnot(all.equal(x, huberize(x, c=100))) ## OTOH, the "extremes" are shrunken towards the boundaries for smaller c: xh <- huberize(x, c = 2) table(x != xh) ## 45 out of a 1000: table(xh[x != xh])# 26 on the left boundary -2.098 and 19 on the right = 2.081 ## vizualization: stripchart(x); text(0,1, "x {original}", pos=3); yh <- 0.9 stripchart(xh, at = yh, add=TRUE, col=2) text(0, yh, "huberize(x, c=2)", col=2, pos=1) arrows( x[x!=xh], 1, xh[x!=xh], yh, length=1/8, col=adjustcolor("pink", 1/2)) } \keyword{robust} \keyword{univar} robustbase/man/lmrob.fit.Rd0000644000176200001440000000511214430460510015355 0ustar liggesusers\name{lmrob.fit} \alias{lmrob.fit} \alias{lmrob.fit.MM} \title{ MM-type estimator for regression } \description{ Compute MM-type estimators of regression: An S-estimator is used as starting value, and an M-estimator with fixed scale and redescending psi-function is used from there. Optionally a D-step (Design Adaptive Scale estimate) as well as a second M-step is calculated. } \usage{ lmrob.fit(x, y, control, init = NULL, mf = NULL) } \arguments{ \item{x}{design matrix (\eqn{n \times p}{n x p}) typically including a column of \code{1}s for the intercept.} \item{y}{numeric response vector (of length \eqn{n}).} \item{control}{a list of control parameters as returned by \code{\link{lmrob.control}}, used for both the initial S-estimate and the subsequent M- and D-estimates.} \item{init}{optional \code{\link{list}} of initial estimates. See \emph{Details}.} \item{mf}{defunct.} } \details{This function is the basic fitting function for MM-type estimation, called by \code{\link{lmrob}} and typically not to be used on its own. If given, \code{init} must be a list of initial estimates containing at least the initial coefficients and scale as \code{coefficients} and \code{scale}. Otherwise it calls \code{\link{lmrob.S}(..)} and uses it as initial estimator. } \value{ A list with components \item{fitted.values}{\eqn{X \beta}{X beta}, i.e., \code{X \%*\% coefficients}.} \item{residuals}{the raw residuals, \code{y - fitted.values}} \item{rweights}{robustness weights derived from the final M-estimator residuals (even when not converged).} \item{rank}{} \item{degree.freedom}{\code{n - rank}}% more! \item{coefficients}{estimated regression coefficient vector} \item{scale}{the robustly estimated error standard deviation}% = final.MM$scale, \item{cov}{variance-covariance matrix of \code{coefficients}, if the RWLS iterations have converged (and \code{control$cov} is not \code{"none"}).} \item{control}{}% = control, \item{iter}{}% = final.MM$iter, <<<<<< also 'init.S' ! 
\item{converged}{logical indicating if the RWLS iterations have converged.} \item{init.S}{the whole initial S-estimator result, including its own \code{converged} flag, see \code{\link{lmrob.S}} (only for MM-estimates).} \item{init}{A similar list that contains the results of intermediate estimates (not for MM-estimates).} } \author{ Matias Salibian-Barrera, Martin Maechler and Manuel Koller} \seealso{ \code{\link{lmrob}}, \code{\link{lmrob..M..fit}}, \code{\link{lmrob..D..fit}}, \code{\link{lmrob.S}} } \keyword{robust} \keyword{regression} robustbase/man/CrohnD.Rd0000644000176200001440000000310613312375575014655 0ustar liggesusers\name{CrohnD} \encoding{utf8} \alias{CrohnD} \docType{data} \title{Crohn's Disease Adverse Events Data} \description{ Data set issued from a study of the adverse events of a drug on 117 patients affected by Crohn's disease (a chronic inflammatory disease of the intestines). } \usage{data(CrohnD, package="robustbase")} \format{ A data frame with 117 observations on the following 9 variables. \describe{ %% FIXME: leave these away -- or explain: -- they code patient sub-groups \item{\code{ID}}{the numeric patient IDs} \item{\code{nrAdvE}}{the number of adverse events} \item{\code{BMI}}{Body Mass Index, i.e., \eqn{weight[kg] / (height[m])^2}.} \item{\code{height}}{in cm} \item{\code{country}}{a factor with levels \code{0} and \code{1}} \item{\code{sex}}{the person's gender, a binary factor with levels \code{M} and \code{F}} \item{\code{age}}{in years, a numeric vector} \item{\code{weight}}{in kilograms, a numeric vector} \item{\code{treat}}{how CD was treated: a factor with levels \code{0}, \code{1} and \code{2}, meaning placebo, drug 1 and drug 2.} } } % \details{ % ~~ If necessary, more details than the __description__ above ~~ % } \source{ from the authors of the reference, with permission by the original data collecting agency. } \references{ Serigne N. Lô and Elvezio Ronchetti (2006). Robust Second Order Accurate Inference for Generalized Linear Models. Technical report, University of Geneva, Switzerland. } \examples{ data(CrohnD) str(CrohnD) with(CrohnD, ftable(table(sex,country, treat))) } \keyword{datasets} robustbase/man/smoothWgt.Rd0000644000176200001440000000465412441665126015475 0ustar liggesusers\name{smoothWgt} \alias{smoothWgt} \title{Smooth Weighting Function - Generalized Biweight}% The Biweight on a Stick \description{ \dQuote{The Biweight on a Stick} --- Compute a smooth (when \eqn{h > 0}) weight function typically for computing weights from large (robust) \dQuote{distances} using a piecewise polynomial function which in fact is a 2-parameter generalization of Tukey's 1-parameter \dQuote{biweight}. } \usage{ smoothWgt(x, c, h) } \arguments{ \item{x}{numeric vector of abscissa values} \item{c}{\dQuote{cutoff}, a typically positive number.} \item{h}{\dQuote{bandwidth}, a positive number.} } \details{ Let \eqn{w(x;c,h) := }\code{smoothWgt(x, c, h)}. Then, \deqn{%  "FIXME": rather use amsmath package \cases{.} w(x; c,h) := 0 \ \ \ \ \ \mathrm{if}\ |x| \ge c + h/2,}{% w(x; c,h) := 0 if |x| >= c + h/2,} \deqn{ w(x; c,h) := 1 \ \ \ \ \ \mathrm{if}\ |x| \le c - h/2,}{% w(x; c,h) := 1 if |x| <= c - h/2,} \deqn{ w(x; c,h) := \Bigl(1 - \bigl((|x| - (c-h/2))/h\bigr)^2\Bigr)^2 \ \mathrm{if}\ c-h/2 < |x| < c+h/2,}{% w(x; c,h) := (1 - ((|x| - (c-h/2))/h)^2)^2 if c-h/2 < |x| < c+h/2.} \code{smoothWgt()} is \emph{scale invariant} in the sense that \deqn{w(\sigma x; \sigma c, \sigma h) = w(x; c, h),}{% w(S x; S c, S h) = w(x; c, h),} when \eqn{\sigma > 0}{S > 0}.
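For illustration, the above definition can be transcribed literally into \R, using a hypothetical helper \code{swgtR()} -- a sketch based only on the formulas given here, \emph{not} necessarily the actual implementation of \code{smoothWgt()}: \preformatted{  swgtR <- function(x, c, h) {
    ## u is 0 at the inner edge |x| = c - h/2  and  1 at the outer edge |x| = c + h/2
    u <- (abs(x) - (c - h/2)) / h
    ## clamping u to [0, 1] gives w = 1 inside and w = 0 outside the transition zone
    (1 - pmin(1, pmax(0, u))^2)^2
  }
} Such a transcription should agree with \code{smoothWgt(x, c, h)} up to numerical error.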
} \value{ a numeric vector of the same length as \code{x} with weights between zero and one. Currently all \code{\link{attributes}} including \code{\link{dim}} and \code{\link{names}} are dropped. } %% \references{ TODO: Write a small vignette ! %% } \author{Martin Maechler} \seealso{ \code{\link{Mwgt}(.., psi = "bisquare")} of which \code{smoothWgt()} is a generalization, and \code{\link{Mwgt}(.., psi = "optimal")} which looks similar for larger \code{c} with its constant one part around zero, but also has only one parameter. } \examples{ ## a somewhat typical picture: curve(smoothWgt(x, c=3, h=1), -5,7, n = 1000) csW <- curve(smoothWgt(x, c=1/2, h=1), -2,2) # cutoff 1/2, bandwidth 1 ## Show that the above is the same as ## Tukey's "biweight" or "bi-square" weight function: bw <- function(x) pmax(0, (1 - x^2))^2 cbw <- curve(bw, col=adjustcolor(2, 1/2), lwd=2, add=TRUE) cMw <- curve(Mwgt(x,c=1,"biweight"), col=adjustcolor(3, 1/2), lwd=2, add=TRUE) stopifnot(## proving they are all the same: all.equal(csW, cbw, tol=1e-15), all.equal(csW, cMw, tol=1e-15)) } \keyword{arith} \keyword{robust} robustbase/man/outlierStats.Rd0000644000176200001440000001124714511464163016201 0ustar liggesusers\name{outlierStats} \alias{outlierStats} \title{Robust Regression Outlier Statistics} \description{ Simple statistics about observations with robustness weight of almost zero for models that include factor terms. The number of rejected observations and the mean robustness weights are computed for each level of each factor included in the model. } \usage{ outlierStats(object, x = object$x, control = object$control , epsw = control$eps.outlier , epsx = control$eps.x , warn.limit.reject = control$warn.limit.reject , warn.limit.meanrw = control$warn.limit.meanrw , shout = NA) } \arguments{ \item{object}{object of class \code{"lmrob"}, typically the result of a call to \code{\link{lmrob}}.} \item{x}{design matrix} \item{control}{list as returned by \code{\link{lmrob.control}()}.} \item{epsw}{limit on the robustness weight below which an observation is considered to be an outlier. Either a \code{numeric(1)} or a \code{\link{function}} that takes the number of observations as an argument.} \item{epsx}{limit on the absolute value of the elements of the design matrix below which an element is considered zero. Either a numeric(1) or a function that takes the maximum absolute value in the design matrix as an argument.} \item{warn.limit.reject}{limit of ratio \eqn{\#\mbox{rejected} / \#\mbox{obs in level}}{# rejected / # obs in level} above (\eqn{\geq}{>=}) which a warning is produced. Set to \code{NULL} to disable warning.} \item{warn.limit.meanrw}{limit of the mean robustness per factor level below which (\eqn{\leq}{<=}) a warning is produced. Set to \code{NULL} to disable warning.} \item{shout}{a \code{\link{logical}} (scalar) indicating if large \code{"Ratio"} or small \code{"Mean.RobWeight"} should lead to corresponding \code{\link{warning}()}s; cutoffs are determined by \code{warn.limit.reject} and \code{warn.limit.meanrw}, above. By default, \code{NA}; setting it to \code{FALSE} or \code{TRUE} disables or unconditionally enables \dQuote{shouting}.} } \details{ For models that include factors, the fast S-algorithm used by \code{\link{lmrob}} can produce \dQuote{bad} fits for some of the factor levels, especially if there are many levels with only a few observations. 
Such a \dQuote{bad} fit is characterized as a fit where most of the observations in a level of a factor are rejected, i.e., are assigned robustness weights of zero or nearly zero. We call such a fit a \dQuote{local exact fit}. If a local exact fit is detected, then we recommend to increase some of the control parameters of the \dQuote{fast S}-algorithm. As a first aid solution in such cases, one can use \code{setting="KS2014"}, see also \code{\link{lmrob.control}}. This function is called internally by \code{\link{lmrob}} to issue a warning if a local exact fit is detected. The output is available as \code{ostats} in objects of class \code{"lmrob"} (only if the statistic is computed). } \value{ A data frame for each column with any zero elements as well as an overall statistic. The data frame consist of the names of the coefficients in question, the number of non-zero observations in that level (\code{N.nonzero}), the number of rejected observations (\code{N.rejected}), the ratio of rejected observations to the number of observations in that level (\code{Ratio}) and the mean robustness weight of all the observations in the corresponding level (\code{Mean.RobWeight}). } \references{ Koller, M. and Stahel, W.A. (2017) Nonsingular subsampling for regression S estimators with categorical predictors, \emph{Computational Statistics} \bold{32}(2): 631--646. \doi{10.1007/s00180-016-0679-x} } \author{Manuel Koller}% 'share', tweaks: Martin Maechler \seealso{ \code{\link{lmrob.control}} for the default values of the control parameters; \code{\link{summarizeRobWeights}}. } \examples{ ## artificial data example data <- expand.grid(grp1 = letters[1:5], grp2 = letters[1:5], rep=1:3) set.seed(101) data$y <- c(rt(nrow(data), 1)) ## compute outlier statistics for all the estimators control <- lmrob.control(method = "SMDM", compute.outlier.stats = c("S", "MM", "SMD", "SMDM")) ## warning is only issued for some seeds set.seed(2) fit1 <- lmrob(y ~ grp1*grp2, data, control = control) ## do as suggested: fit2 <- lmrob(y ~ grp1*grp2, data, setting = "KS2014") ## the plot function should work for such models as well plot(fit1) \dontrun{ ## access statistics: fit1$ostats ## SMDM fit1$init$ostats ## SMD fit1$init$init$ostats ## SM fit1$init$init$init.S$ostats ## S }%dont } \keyword{robust} \keyword{regression} robustbase/man/huberM.Rd0000644000176200001440000000624513710054521014714 0ustar liggesusers\name{huberM} \alias{huberM} \title{Safe (generalized) Huber M-Estimator of Location} \concept{robust location} \description{ (Generalized) Huber M-estimator of location with MAD scale, being sensible also when the scale is zero where \code{\link[MASS]{huber}()} returns an error. } \usage{ huberM(x, k = 1.5, weights = NULL, tol = 1e-06, mu = if(is.null(weights)) median(x) else wgt.himedian(x, weights), s = if(is.null(weights)) mad(x, center=mu) else wgt.himedian(abs(x - mu), weights), se = FALSE, warn0scale = getOption("verbose")) } \arguments{ \item{x}{numeric vector.} \item{k}{positive factor; the algorithm winsorizes at \code{k} standard deviations.} \item{weights}{numeric vector of non-negative weights of same length as \code{x}, or \code{NULL}.} \item{tol}{convergence tolerance.} \item{mu}{initial location estimator.} \item{s}{scale estimator held constant through the iterations.} \item{se}{logical indicating if the standard error should be computed and returned (as \code{SE} component). 
Currently only available when \code{weights} is \code{NULL}.} \item{warn0scale}{logical; if true, and \code{s} is 0 and \code{length(x) > 1}, this will be warned about.} } \value{ list of location and scale parameters, and number of iterations used. \item{mu}{location estimate} \item{s}{the \code{s} argument, typically the \code{\link{mad}}.} \item{it}{the number of \dQuote{Huber iterations} used.} } \details{ Note that currently, when non-\code{NULL} \code{weights} are specified, the default for initial location \code{mu} and scale \code{s} is \code{\link{wgt.himedian}}, where strictly speaking a weighted \dQuote{non-hi} median should be used for consistency. Since \code{s} is not updated, the results slightly differ, see the examples below. When \code{se = TRUE}, the standard error is computed using the \eqn{\tau} correction factor but no finite sample correction. % and as if \code{s} was not estimated from the data. } \author{Martin Maechler, building on the MASS code mentioned.} \references{ Huber, P. J. (1981) \emph{Robust Statistics.} Wiley. } \seealso{ \code{\link[MASS]{hubers}} (and \code{huber}) in package \CRANpkg{MASS}; \code{\link{mad}}. } \examples{ huberM(c(1:9, 1000)) mad (c(1:9, 1000)) mad (rep(9, 100)) huberM(rep(9, 100)) ## When you have "binned" aka replicated observations: set.seed(7) x <- c(round(rnorm(1000),1), round(rnorm(50, m=10, sd = 10))) t.x <- table(x) # -> unique values and multiplicities x.uniq <- as.numeric(names(t.x)) ## == sort(unique(x)) x.mult <- unname(t.x) str(Hx <- huberM(x.uniq, weights = x.mult), digits = 7) str(Hx. <- huberM(x, s = Hx$s, se=TRUE), digits = 7) ## should be ~= Hx stopifnot(all.equal(Hx[-4], Hx.[-4])) str(Hx2 <- huberM(x, se=TRUE), digits = 7)## somewhat different, since 's' differs ## Confirm correctness of std.error : \donttest{ system.time( SS <- replicate(10000, vapply(huberM(rnorm(400), se=TRUE), as.double, 1.)) ) # ~ 12.2 seconds rbind(mean(SS["SE",]), sd(SS["mu",]))# both ~ 0.0508 stopifnot(all.equal(mean(SS["SE",]), sd ( SS["mu",]), tolerance= 0.002)) } } \keyword{univar} \keyword{robust} robustbase/man/r6pack.Rd0000644000176200001440000000476414511464163014673 0ustar liggesusers\name{r6pack} \alias{r6pack} \title{Robust Distance based observation orderings based on robust "Six pack"} \description{ Compute six initial robust estimators of multivariate location and \dQuote{scatter} (scale); then, for each, compute the distances \eqn{d_{ij}}{d_ij} and take the \code{h} (\eqn{h > n/2}) observations with smallest distances. Then compute the statistical distances based on these h observations. Return the indices of the observations sorted in increasing order. } \usage{ r6pack(x, h, full.h, scaled = TRUE, scalefn = rrcov.control()$scalefn) } \arguments{ \item{x}{n x p data matrix} \item{h}{integer, typically around (and slightly larger than) \eqn{n/2}.} \item{full.h}{logical specifying if the full (length n) observation ordering should be returned; otherwise only the first \code{h} are. 
For \code{.detmcd()}, \code{full.h=FALSE} is typical.} \item{scaled}{logical indicating if the data \code{x} is already scaled; if false, we apply \code{x <- doScale(x, median, scalefn)}.} \item{scalefn}{a \code{\link{function}(u)} to compute a robust univariate scale of \code{u}.} }% args \details{%% --> ../R/detmcd.R The six initial estimators are \enumerate{ \item Hyperbolic tangent of standardized data \item Spearman correlation matrix \item Tukey normal scores \item Spatial sign covariance matrix \item BACON \item Raw OGK estimate for scatter } } \references{ Hubert, M., Rousseeuw, P. J. and Verdonck, T. (2012) A deterministic algorithm for robust location and scatter. Journal of Computational and Graphical Statistics \bold{21}, 618--637. } \value{ a \eqn{h' \times 6}{h' x 6} \code{\link{matrix}} of observation indices, i.e., with values from \eqn{1,\dots,n}{1..n}. If \code{full.h} is true, \eqn{h' = n}, otherwise \eqn{h' = h}. } \author{Valentin Todorov, based on the original Matlab code by Tim Verdonck and Mia Hubert. Martin Maechler for tweaks (performance etc.), and \code{full.h}. } \seealso{ \code{\link{covMcd}(*, nsamp = "deterministic")}; \code{\link[rrcov]{CovSest}(*, nsamp = "sdet")} from package \CRANpkg{rrcov}. } \examples{ data(pulpfiber) dim(m.pulp <- data.matrix(pulpfiber)) # 62 x 8 dim(fr6 <- r6pack(m.pulp, h = 40, full.h= FALSE)) # h x 6 = 40 x 6 dim(fr6F <- r6pack(m.pulp, h = 40, full.h= TRUE )) # n x 6 = 62 x 6 stopifnot(identical(fr6, fr6F[1:40,])) \dontshow{ stopifnot(apply(fr6[1:10,], 2L, function(col) c(1,3,6,35,36,38) \%in\% col)) } } \keyword{robust} \keyword{multivariate} robustbase/man/heart.Rd0000644000176200001440000000317713710054521014576 0ustar liggesusers\name{heart} \alias{heart} \docType{data} \title{Heart Catheterization Data} \description{ This data set was analyzed by Weisberg (1980) and Chambers et al. (1983). A catheter is passed into a major vein or artery at the femoral region and moved into the heart. The proper length of the introduced catheter has to be guessed by the physician. The aim of the data set is to describe the relation between the catheter length and the patient's height (X1) and weight (X2). This data set is used to demonstrate the effects caused by collinearity. The correlation between height and weight is so high that either variable almost completely determines the other. } \usage{ data(heart) %> QA bug: would want: %> data(heart, package="robustbase") %> but that gives two warnings } \format{ A data frame with 12 observations on the following 3 variables. \describe{ \item{\code{height}}{Patient's height in inches} \item{\code{weight}}{Patient's weight in pounds} \item{\code{clength}}{Y: Catheter Length (in centimeters)} } } \note{There are other \code{heart} datasets in other \R packages, notably \CRANpkg{survival}, hence consider using \code{package = "robustbase"}, see the examples. } \source{ Weisberg (1980) Chambers et al. (1983) P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.103, table 13. } \examples{ data(heart, package="robustbase") heart.x <- data.matrix(heart[, 1:2]) # the X-variables plot(heart.x) covMcd(heart.x) summary( lm.heart <- lm(clength ~ . , data = heart)) summary(lts.heart <- ltsReg(clength ~ .
, data = heart)) } \keyword{datasets} robustbase/man/rrcov.control.Rd0000644000176200001440000000767212440116711016310 0ustar liggesusers\name{rrcov.control} \alias{rrcov.control} \title{Control Settings for covMcd and ltsReg} \description{ Auxiliary function for passing the estimation options as parameters to the estimation functions. } \usage{ rrcov.control(alpha = 1/2, method = c("covMcd", "covComed", "ltsReg"), nsamp = 500, nmini = 300, kmini = 5, seed = NULL, tolSolve = 1e-14, scalefn = "hrv2012", maxcsteps = 200, trace = FALSE, wgtFUN = "01.original", beta, use.correction = identical(wgtFUN, "01.original"), adjust = FALSE) } \arguments{ \item{alpha}{This parameter controls the size of the subsets over which the determinant is minimized, i.e., \code{alpha*n} observations are used for computing the determinant. Allowed values are between 0.5 and 1 and the default is 0.5. } \item{method}{a string specifying the \dQuote{main} function for which \code{rrcov.control()} is used. This currently only makes a difference to determine the default for \code{beta}.} \item{nsamp}{number of subsets used for initial estimates or \code{"best"} or \code{"exact"}. Default is \code{nsamp = 500}. If \code{nsamp="best"} exhaustive enumeration is done, as far as the number of trials do not exceed 5000. If \code{nsamp="exact"} exhaustive enumeration will be attempted however many samples are needed. In this case a warning message will be displayed saying that the computation can take a very long time. } \item{nmini, kmini}{for \code{\link{covMcd}}: For large \eqn{n}, the algorithm splits the data into maximally \eqn{kmini} subsets of targetted size \code{nmini}. See \code{\link{covMcd}} for more details.}%--- ./covMcd.Rd \item{seed}{initial seed for R's random number generator; see \code{\link{.Random.seed}} and the description of the \code{seed} argument in \code{\link{lmrob.control}}.} \item{tolSolve}{numeric tolerance to be used for inversion (\code{\link{solve}}) of the covariance matrix in \code{\link{mahalanobis}}.} \item{scalefn}{(for deterministic \code{\link{covMcd}()}:) a character string or \code{\link{function}} for computing a robust scale estimate. The current default \code{"hrv2012"} uses the recommendation of Hubert et al (2012); see \code{\link{covMcd}} for more.} \item{maxcsteps}{integer specifying the maximal number of concentration steps for the deterministic MCD.} \item{trace}{logical or integer indicating whether to print intermediate results. Default is \code{trace = FALSE}.} \item{wgtFUN}{a character string or \code{\link{function}}, specifying how the weights for the reweighting step should be computed, see \code{\link{ltsReg}}, \code{\link{covMcd}} or \code{\link{covComed}}, respectively. The default is specified by \code{"01.original"}, as the resulting weights are 0 or 1. Alternative string specifications need to match \code{names(.wgtFUN.covComed)} - which currently is experimental.} \item{beta}{a quantile, experimentally used for some of the prespecified \code{wgtFUN}s, see e.g., \code{\link{.wgtFUN.covMcd}} and \code{\link{.wgtFUN.covComed}}.} \item{use.correction}{whether to use finite sample correction factors. Defaults to \code{TRUE}.} \item{adjust}{(for \code{\link{ltsReg}()}:) whether to perform intercept adjustment at each step. Because this can be quite time consuming, the default is \code{adjust = FALSE}.} } \seealso{ For details, see the documentation about \code{\link{ltsReg}} and \code{\link{covMcd}}, respectively. 
} \value{ A list with components, as the parameters passed by the invocation } \author{Valentin Todorov} \examples{ data(Animals, package = "MASS") brain <- Animals[c(1:24, 26:25, 27:28),] data(hbk) hbk.x <- data.matrix(hbk[, 1:3]) ctrl <- rrcov.control(alpha=0.75, trace=TRUE) covMcd(hbk.x, control = ctrl) covMcd(log(brain), control = ctrl) } \keyword{robust} \keyword{multivariate} robustbase/man/rankMM.Rd0000644000176200001440000000243313710054521014652 0ustar liggesusers\name{rankMM} \title{Simple Matrix Rank} \alias{rankMM} \description{ Compute the rank of a matrix \code{A} in simple way, based on the SVD, \code{\link{svd}()}, and \dQuote{the same as Matlab}. } \usage{ rankMM(A, tol = NULL, sv = svd(A, 0, 0)$d) } \arguments{ \item{A}{a numerical matrix, maybe non-square. When \code{sv} is specified, only \code{dim(A)} is made use of.} \item{tol}{numerical tolerance (compared to singular values). By default, when \code{NULL}, the tolerance is determined from the maximal value of \code{sv} and the computer epsilon.} \item{sv}{vector of \emph{non-increasing} singular values of \code{A}, (to be passed if already known).} } \seealso{ There are more sophisticated proposals for computing the rank of a matrix; for a couple of those, see \code{\link[Matrix]{rankMatrix}} in the \CRANpkg{Matrix} package. } \value{ an integer from the set \code{0:min(dim(A))}. } \author{ Martin Maechler, Date: 7 Apr 2007 } \examples{ rankMM # - note the simple function definition hilbert <- function(n) { i <- seq_len(n); 1/outer(i - 1L, i, "+") } hilbert(4) H12 <- hilbert(12) rankMM(H12) # 11 - numerically more realistic rankMM(H12, tol=0) # -> 12 ## explanation : round(log10(svd(H12, 0,0)$d), 1) } \keyword{algebra} \keyword{array} robustbase/man/NOxEmissions.Rd0000644000176200001440000000371313312375575016102 0ustar liggesusers\name{NOxEmissions} \alias{NOxEmissions} \docType{data} \encoding{utf8} \title{NOx Air Pollution Data} \description{ A typical medium sized environmental data set with hourly measurements of \eqn{NOx} pollution content in the ambient air. } \usage{data(NOxEmissions, package="robustbase")} \format{ A data frame with 8088 observations on the following 4 variables. \describe{ \item{\code{julday}}{day number, a factor with levels \code{373} \dots \code{730}, typically with 24 hourly measurements.} \item{\code{LNOx}}{\eqn{\log} of hourly mean of NOx concentration in ambient air [ppb] next to a highly frequented motorway.} \item{\code{LNOxEm}}{\eqn{\log} of hourly sum of NOx emission of cars on this motorway in arbitrary units.} \item{\code{sqrtWS}}{Square root of wind speed [m/s].} } } \details{ The original data set had more observations, but with missing values. Here, all cases with missing values were omitted (\code{\link{na.omit}(.)}), and then only those were retained that belonged to days with at least 20 (fully) observed hourly measurements. } \source{ René Locher (at ZHAW, Switzerland). %% E-mail to R-SIG-robust mailing list, on 2006-04-20. } \seealso{another NOx dataset, \code{\link{ambientNOxCH}}. } % \references{ % ~~ possibly secondary sources and usages ~~ % } \examples{ data(NOxEmissions) plot(LNOx ~ LNOxEm, data = NOxEmissions, cex = 0.25, col = "gray30") \dontrun{## these take too much time -- ## p = 340 ==> already Least Squares is not fast (lmNOx <- lm(LNOx ~ . ,data = NOxEmissions)) plot(lmNOx) #-> indication of 1 outlier M.NOx <- MASS::rlm(LNOx ~ . , data = NOxEmissions) ## M-estimation works ## whereas MM-estimation fails: try(MM.NOx <- MASS::rlm(LNOx ~ . 
, data = NOxEmissions, method = "MM")) ## namely because S-estimation fails: try(lts.NOx <- ltsReg(LNOx ~ . , data = NOxEmissions)) try(lmR.NOx <- lmrob (LNOx ~ . , data = NOxEmissions)) }% don't run } \keyword{datasets} robustbase/man/robustbase-internal.Rd0000644000176200001440000000341013710054464017452 0ustar liggesusers\name{robustbase-internals} \title{Internal Functions of Package 'robustbase'} %% Formality: everything in \usage{} "needs" \alias{} \alias{internals} % \alias{print.glmrob} \alias{glmrobMqle} \alias{glmrobMqleDiffQuasiDevB} \alias{glmrobMqleDiffQuasiDevPois} % \alias{robMD} \alias{mahalanobisD} % \alias{doScale} \alias{.signflip} % \description{ These functions are for internal use \emph{or} not yet documented. } \usage{ \method{print}{glmrob}(x, digits = max(3, getOption("digits") - 3), ...) %% MM: These are not *needed*, since they are namespace-hidden %% -- OTOH, it shows what we might want to export {and then document} glmrobMqle(X, y, weights = NULL, start = NULL, offset = NULL, family, weights.on.x = "none", control = glmrobMqle.control(), intercept = TRUE, trace = FALSE) glmrobMqleDiffQuasiDevB (mu, mu0, y, ni, w.x, phi, tcc) glmrobMqleDiffQuasiDevPois(mu, mu0, y, ni, w.x, phi, tcc) %%--------From here on, are exported (and MM would want minimal doc, evt.) ------ robMD(x, intercept, wqr, ...) mahalanobisD(x, center, sd) ## Utilities currently for the deterministic MCD only: ## subject to change / be renamed ? doScale(x, center, scale) .signflip(loadings) } % glmrobMqleEpsiB (mu, Vmu, ni, H, K, tcc) % glmrobMqleEpsiPois (mu, Vmu, ni, H, K, tcc) % glmrobMqleEpsiSB (mu, Vmu, ni, H, K, tcc) % glmrobMqleEpsiSPois (mu, Vmu, ni, H, K, tcc) % glmrobMqleEpsi2B (mu, Vmu, ni, H, K, tcc) % glmrobMqleEpsi2Pois (mu, Vmu, ni, H, K, tcc) %% For modsel.* aka anova.glmrob(); later for nlrob() only; %% eliminated, 2013-07-26 {for robustbase 0.9-11}, MM : %% psi.bisquare(u, c = 4.685, deriv = 0, rho = FALSE) %% psi.hampel(u, a = 2, b = 4, c = 8, deriv = 0, rho = FALSE) %% psi.huber(u, k = 1.345, deriv = 0, rho = FALSE) \keyword{internal} robustbase/man/lmrob.Rd0000644000176200001440000003111414430460510014575 0ustar liggesusers\title{MM-type Estimators for Linear Regression} \name{lmrob} \encoding{utf8} \alias{lmrob} % "Link to here", even those are not exported: \alias{.vcov.avar1} \alias{.vcov.w} \description{ Computes fast MM-type estimators for linear (regression) models. } \usage{ lmrob(formula, data, subset, weights, na.action, method = "MM", model = TRUE, x = !control$compute.rd, y = FALSE, singular.ok = TRUE, contrasts = NULL, offset = NULL, control = NULL, init = NULL, ...) } \arguments{ \item{formula}{a symbolic description of the model to be fit. See \code{\link{lm}} and \code{\link{formula}} for more details.} \item{data}{an optional data frame, list or environment (or object coercible by \code{\link{as.data.frame}} to a data frame) containing the variables in the model. If not found in \code{data}, the variables are taken from \code{environment(formula)}, typically the environment from which \code{lmrob} is called.} \item{subset}{an optional vector specifying a subset of observations to be used in the fitting process.} \item{weights}{an optional vector of weights to be used in the fitting process (in addition to the robustness weights computed in the fitting process).} \item{na.action}{a function which indicates what should happen when the data contain \code{NA}s. 
The default is set by the \code{na.action} setting of \code{\link{options}}, and is \code{\link{na.fail}} if that is unset. The \dQuote{factory-fresh} default is \code{\link{na.omit}}. Another possible value is \code{NULL}, no action. Value \code{\link{na.exclude}} can be useful.} \item{method}{string specifying the estimator-chain. \code{MM} is interpreted as \code{SM}. See \emph{Details}, notably the currently recommended \code{setting = "KS2014"}.} \item{model, x, y}{logicals. If \code{TRUE} the corresponding components of the fit (the model frame, the model matrix, the response) are returned.} \item{singular.ok}{logical. If \code{FALSE} (the default in S but not in \R) a singular fit is an error.} \item{contrasts}{an optional list. See the \code{contrasts.arg} of \code{\link{model.matrix.default}}.} \item{offset}{this can be used to specify an \emph{a priori} known component to be included in the linear predictor during fitting. An \code{\link{offset}} term can be included in the formula instead or as well, and if both are specified their sum is used.} \item{control}{a \code{\link{list}} specifying control parameters; use the function \code{\link{lmrob.control}(.)} and see its help page.} \item{init}{an optional argument to specify or supply the initial estimate. See \emph{Details}.} \item{\dots}{additional arguments can be used to specify control parameters directly instead of (but not in addition to!) via \code{control}.} } \details{ \describe{ \item{Overview:}{ This function computes an MM-type regression estimator as described in Yohai (1987) and Koller and Stahel (2011). By default it uses a bi-square redescending score function, and it returns a highly robust and highly efficient estimator (with 50\% breakdown point and 95\% asymptotic efficiency for normal errors). The computation is carried out by a call to \code{\link{lmrob.fit}()}. The argument \code{setting} of \code{\link{lmrob.control}} is provided to set alternative defaults as suggested in Koller and Stahel (2011) (\code{setting="KS2011"}; now do use its extension \code{setting="KS2014"}). For further details, see \code{\link{lmrob.control}}. } \item{Initial Estimator \code{init}:}{ The initial estimator may be specified using the argument \code{init}. This can either be \itemize{ \item a \emph{string} used to specify built in internal estimators (currently \code{"S"} and \code{"M-S"}, see \emph{See also} below); \item a \code{\link{function}} taking arguments \code{x, y, control, mf} (where \code{mf} stands for \code{model.frame}) and returning a \code{\link{list}} containing at least the initial coefficients as component \code{"coefficients"} and the initial scale estimate as \code{"scale"}. \item Or a \code{\link{list}} giving the initial coefficients and scale as components \code{"coefficients"} and \code{"scale"}. See also \emph{Examples}. } Note that when \code{init} is a function or list, the \code{method} argument must \emph{not} contain the initial estimator, e.g., use \code{MDM} instead of \code{SMDM}. The default, equivalent to \code{init = "S"}, uses as initial estimator an S-estimator (Rousseeuw and Yohai, 1984) which is computed using the Fast-S algorithm of Salibian-Barrera and Yohai (2006), calling \code{\link{lmrob.S}()}. That function, since March 2012, by default uses \emph{nonsingular} subsampling which makes the Fast-S algorithm feasible for categorical data as well, see Koller (2012). 
Note that convergence problems may still show up as warnings, e.g., \preformatted{ S refinements did not converge (to refine.tol=1e-07) in 200 (= k.max) steps } and often can simply be remedied by increasing (i.e. weakening) \code{refine.tol} or increasing the allowed number of iterations \code{k.max}, see \code{\link{lmrob.control}}. } \item{Method \code{method}:}{ The following chain of estimates is customizable via the \code{method} argument. % of \code{\link{lmrob.control}}. There are currently two types of estimates available, \describe{ \item{\code{"M"}:}{corresponds to the standard M-regression estimate.} \item{\code{"D"}:}{stands for the Design Adaptive Scale estimate as proposed in Koller and Stahel (2011).} } The \code{method} argument takes a string that specifies the estimates to be calculated as a chain. Setting \code{method='SMDM'} will result in an intial S-estimate, followed by an M-estimate, a Design Adaptive Scale estimate and a final M-step. For methods involving a \code{D}-step, the default value of \code{psi} (see \code{\link{lmrob.control}}) is changed to \code{"lqq"}. By default, standard errors are computed using the formulas of Croux, Dhaene and Hoorelbeke (2003) (\code{\link{lmrob.control}} option \code{cov=".vcov.avar1"}). This method, however, works only for MM-estimates (i.e., \code{method = "MM"} or \code{ = "SM"}). For other \code{method} arguments, the covariance matrix estimate used is based on the asymptotic normality of the estimated coefficients (\code{cov=".vcov.w"}) as described in Koller and Stahel (2011). The var-cov computation can be skipped by \code{cov = "none"} and (re)done later by e.g., \code{vcov(, cov = ".vcov.w")}. As of robustbase version 0.91-0 (April 2014), the computation of robust standard errors for \code{method="SMDM"} has been changed. 
The old behaviour can be restored by setting the control parameter \code{cov.corrfact = "tauold"}.%% FIXME: regr.test for that } }%end {describe} } \value{ An object of class \code{lmrob}; a list including the following components: \item{coefficients}{The estimate of the coefficient vector} \item{scale}{The scale as used in the M estimator.} \item{residuals}{Residuals associated with the estimator.} %loss \item{converged}{\code{TRUE} if the IRWLS iterations have converged.} \item{iter}{number of IRWLS iterations} \item{rweights}{the \dQuote{robustness weights} \eqn{\psi(r_i/S) / (r_i/S)}.} \item{fitted.values}{Fitted values associated with the estimator.} %control \item{init.S}{The \code{\link{list}} returned by \code{\link{lmrob.S}()} or \code{\link{lmrob.M.S}()} (for MM-estimates, i.e., \code{method="MM"} or \code{"SM"} only)} \item{init}{A similar list that contains the results of intermediate estimates (\emph{not} for MM-estimates).} %qr \item{rank}{the numeric rank of the fitted linear model.} \item{cov}{The estimated covariance matrix of the regression coefficients} \item{df.residual}{the residual degrees of freedom.} %degree.freedom \item{weights}{the specified weights (missing if none were used).} \item{na.action}{(where relevant) information returned by \code{\link{model.frame}} on the special handling of \code{NA}s.} \item{offset}{the offset used (missing if none were used).} \item{contrasts}{(only where relevant) the contrasts used.} \item{xlevels}{(only where relevant) a record of the levels of the factors used in fitting.} \item{call}{the matched call.} \item{terms}{the \code{terms} object used.} %assign \item{model}{if requested (the default), the model frame used.} \item{x}{if requested, the model matrix used.} \item{y}{if requested, the response used.} In addition, non-null fits will have components \code{assign}, and \code{qr} relating to the linear fit, for use by extractor functions such as \code{summary}. } \references{ Croux, C., Dhaene, G. and Hoorelbeke, D. (2003) \emph{Robust standard errors for robust estimators}, Discussion Papers Series 03.16, K.U. Leuven, CES. Koller, M. (2012) Nonsingular subsampling for S-estimators with categorical predictors, \emph{ArXiv e-prints} \url{https://arxiv.org/abs/1208.5595}; extended version published as Koller and Stahel (2017), see \code{\link{lmrob.control}}. Koller, M. and Stahel, W.A. (2011) Sharpening Wald-type inference in robust regression for small samples. \emph{Computational Statistics & Data Analysis} \bold{55}(8), 2504--2515. Maronna, R. A., and Yohai, V. J. (2000) Robust regression with both continuous and categorical predictors. \emph{Journal of Statistical Planning and Inference} \bold{89}, 197--214. Rousseeuw, P.J. and Yohai, V.J. (1984) Robust regression by means of S-estimators, In \emph{Robust and Nonlinear Time Series}, J. Franke, W. Härdle and R. D. Martin (eds.). Lectures Notes in Statistics 26, 256--272, Springer Verlag, New York. Salibian-Barrera, M. and Yohai, V.J. (2006) A fast algorithm for S-regression estimates, \emph{Journal of Computational and Graphical Statistics} \bold{15}(2), 414--427. \doi{10.1198/106186006X113629} Yohai, V.J. (1987) High breakdown-point and high efficiency estimates for regression. \emph{The Annals of Statistics} \bold{15}, 642--65. Yohai, V., Stahel, W.~A. and Zamar, R. 
(1991) A procedure for robust estimation and inference in linear regression; in Stahel and Weisberg (eds), \emph{Directions in Robust Statistics and Diagnostics}, Part II, Springer, New York, 365--374; \doi{10.1007/978-1-4612-4444-8_20}. } \author{(mainly:) Matias Salibian-Barrera and Manuel Koller} \seealso{ \code{\link{lmrob.control}}; for the algorithms \code{\link{lmrob.S}}, \code{\link{lmrob.M.S}} and \code{\link{lmrob.fit}}; and for methods, \code{\link{summary.lmrob}}, for the extra \dQuote{statistics}, notably \eqn{R^2} (\dQuote{R squared}); \code{\link{predict.lmrob}}, \code{\link{print.lmrob}}, \code{\link{plot.lmrob}}, and \code{\link{weights.lmrob}}. } \examples{ data(coleman) set.seed(0) ## Default for a very long time: summary( m1 <- lmrob(Y ~ ., data=coleman) ) ## Nowadays **strongly recommended** for routine use: summary(m2 <- lmrob(Y ~ ., data=coleman, setting = "KS2014") ) ## ------------------ plot(residuals(m2) ~ weights(m2, type="robustness")) ##-> weights.lmrob() abline(h=0, lty=3) data(starsCYG, package = "robustbase") ## Plot simple data and fitted lines plot(starsCYG) lmST <- lm(log.light ~ log.Te, data = starsCYG) (RlmST <- lmrob(log.light ~ log.Te, data = starsCYG)) abline(lmST, col = "red") abline(RlmST, col = "blue") ## --> Least Sq.:/ negative slope \\ robust: slope ~= 2.2 % checked in ../tests/lmrob-data.R summary(RlmST) # -> 4 outliers; rest perfect vcov(RlmST) stopifnot(all.equal(fitted(RlmST), predict(RlmST, newdata = starsCYG), tol = 1e-14)) ## FIXME: setting = "KS2011" or setting = "KS2014" **FAIL** here ##--- 'init' argument ----------------------------------- ## 1) string set.seed(0) m3 <- lmrob(Y ~ ., data=coleman, init = "S") stopifnot(all.equal(m1[-18], m3[-18])) ## 2) function initFun <- function(x, y, control, ...) { # no 'mf' needed init.S <- lmrob.S(x, y, control) list(coefficients=init.S$coef, scale = init.S$scale) } set.seed(0) m4 <- lmrob(Y ~ ., data=coleman, method = "M", init = initFun) ## list m5 <- lmrob(Y ~ ., data=coleman, method = "M", init = list(coefficients = m3$init$coef, scale = m3$scale)) stopifnot(all.equal(m4[-17], m5[-17])) } \keyword{robust} \keyword{regression} robustbase/man/scaleTau2.Rd0000644000176200001440000001111314123440375015310 0ustar liggesusers\name{scaleTau2} \alias{scaleTau2} \title{Robust Tau-Estimate of Scale} \description{ Computes the robust \eqn{\tau}-estimate of univariate scale, as proposed by Maronna and Zamar (2002); improved by a consistency factor, %% FIXME: TODO: add a finite sample correction by Martin Maechler %% (currently have 'n-2' but can even be better !!!! 
} \usage{%--> ../R/OGK.R <<< scaleTau2(x, c1 = 4.5, c2 = 3.0, na.rm = FALSE, consistency = TRUE, mu0 = median(x), sigma0 = median(x.), mu.too = FALSE, iter = 1, tol.iter = 1e-7) } \arguments{ \item{x}{numeric vector} \item{c1,c2}{non-negative numbers, specifying cutoff values for the biweighting of the mean and the rho function, respectively.} \item{na.rm}{a logical value indicating whether \code{NA} values should be stripped before the computation proceeds.} \item{consistency}{logical indicating if the consistency correction factor (for the scale) should be applied.} \item{mu0}{the initial location estimate \eqn{\mu_0}{mu0}, defaulting to the \code{\link{median}}.} \item{sigma0}{the initial scale estimate \eqn{s_0}{s0}, defaulting to the MAD; may be set to a positive value when the MAD is zero.} \item{mu.too}{logical indicating if both location and scale should be returned or just the scale (when \code{mu.too=FALSE} as by default).} \item{iter}{positive integer or logical indicating if and how many iterations should be done. The default, \code{iter = 1}, computes the \dQuote{traditional} tau-estimate of scale.} \item{tol.iter}{if \code{iter} is true, or \code{iter > 1}, stop the iterations when \eqn{|s_n - s_o| \le \epsilon s_n}, where \eqn{\epsilon :=}\code{tol.iter}, and \eqn{s_o} and \eqn{s_n} are the previous and current estimates of \eqn{\sigma}.} } \details{ First, \eqn{s_0}{s0} := MAD, i.e., the equivalent of \code{\link{mad}(x, constant=1)} is computed. Robustness weights \eqn{w_i := w_{c1}((x_i - med(X))/ s_0)} are computed, where \eqn{w_c(u) = max(0, (1 - (u/c)^2)^2)}. The robust location measure is defined as \eqn{\mu(X) := (\sum_i w_i x_i)/(\sum_i w_i)}, and the robust \eqn{\tau (tau)}{tau}-estimate is \eqn{s(X)^2 := s_0^2 * (1/n) \sum_i \rho_{c2}((x_i - \mu(X))/s_0)}, where \eqn{\rho_c(u) = min(c^2, u^2)}. \cr When \code{iter=TRUE} or \code{iter > 1}, the above estimate is \emph{iterated} in a fixpoint iteration, setting \eqn{s_0} to the current estimate \eqn{s(X)} and iterating until the number of iterations is larger than \code{iter} or the fixpoint is found in the sense that \eqn{|s_n - s_o| \le \epsilon s_n} with \eqn{\epsilon :=}\code{tol.iter} (see the \code{tol.iter} argument above). \cr \code{scaleTau2(*, consistency=FALSE)} returns \eqn{s(X)}, whereas this value is divided by its asymptotic limit when \code{consistency = TRUE} as by default. Note that for \code{n = length(x) == 2}, all equivariant scale estimates are proportional, and specifically, \code{scaleTau2(x, consistency=FALSE) == mad(x, constant=1)}. See also the reference. } \value{ numeric vector of length one (if \code{mu.too} is \code{FALSE} as by default) or two (when \code{mu.too = TRUE}) with robust scale or (location,scale) estimators \eqn{\hat\sigma(x)}{s^(x)} or \eqn{(\hat\mu(x),\hat\sigma(x))}{(m^(x), s^(x))}. } \references{ Maronna, R.A. and Zamar, R.H. (2002) Robust estimates of location and dispersion of high-dimensional datasets; \emph{Technometrics} \bold{44}(4), 307--317. % MM: ~/save/papers/robust-diverse/Maronna-Zamar-OGK_2002.pdf Yohai, V.J., and Zamar, R.H. (1988). High breakdown-point estimates of regression by means of the minimization of an efficient scale. \emph{Journal of the American Statistical Association} \bold{83}, 406--413. % MM: ~/save/papers/robust-diverse/Yohai-Zamar-tau_JASA1988.pdf } \author{Original by Kjell Konis with substantial modifications by Martin Maechler. } \seealso{\code{\link{Sn}}, \code{\link{Qn}}, \code{\link{mad}}; further \code{\link{covOGK}} for which \code{scaleTau2} was designed.
} \examples{ x <- c(1:7, 1000) sd(x) # non-robust std.deviation scaleTau2(x) scaleTau2(x, mu.too = TRUE) (sI <- scaleTau2(c(x,Inf), mu.too = TRUE)) (sIN <- scaleTau2(c(x,Inf,NA), mu.too = TRUE, na.rm=TRUE)) stopifnot({ identical(sI, sIN) all.equal(scaleTau2(c(x, 999), mu.too = TRUE), sIN, tol = 1e-15) }) if(doExtras <- robustbase:::doExtras()) { set.seed(11) ## show how much faster this is, compared to Qn x <- sample(c(rnorm(1e6), rt(5e5, df=3))) (system.time(Qx <- Qn(x))) ## 2.04 [2017-09, lynne] (system.time(S2x <- scaleTau2(x))) ## 0.25 (ditto) cbind(Qn = Qx, sTau2 = S2x) }## Qn sTau2 ## 1.072556 1.071258 } \keyword{robust} \keyword{univar} robustbase/man/lmrob.control.Rd0000644000176200001440000004057114511464163016273 0ustar liggesusers\name{lmrob.control} \title{Tuning Parameters for lmrob() and Auxiliaries} \encoding{utf8} \alias{lmrob.control} \alias{update.lmrobCtrl} \alias{.Mchi.tuning.default} \alias{.Mpsi.tuning.default} \alias{.Mchi.tuning.defaults} \alias{.Mpsi.tuning.defaults} \description{ Tuning parameters for \code{\link{lmrob}}, the MM-type regression estimator and the associated S-, M- and D-estimators. Using \code{setting="KS2011"} sets the defaults as suggested by Koller and Stahel (2011) and analogously for \code{"KS2014"}. The \code{.M*.default} \code{\link{function}}s and \code{.M*.defaults} \code{\link{list}}s contain default tuning parameters for all the predefined \eqn{\psi}{psi} functions, see also \code{\link{Mpsi}}, etc. } \usage{ lmrob.control(setting, seed = NULL, nResample = 500, tuning.chi = NULL, bb = 0.5, tuning.psi = NULL, max.it = 50, groups = 5, n.group = 400, k.fast.s = 1, best.r.s = 2, k.max = 200, maxit.scale = 200, k.m_s = 20, refine.tol = 1e-7, rel.tol = 1e-7, scale.tol = 1e-10, solve.tol = 1e-7, zero.tol = 1e-10, trace.lev = 0, mts = 1000, subsampling = c("nonsingular", "simple"), compute.rd = FALSE, method = "MM", psi = "bisquare", numpoints = 10, cov = NULL, split.type = c("f", "fi", "fii"), fast.s.large.n = 2000, # only for outlierStats() : eps.outlier = function(nobs) 0.1 / nobs, eps.x = function(maxx) .Machine$double.eps^(.75)*maxx, compute.outlier.stats = method, warn.limit.reject = 0.5, warn.limit.meanrw = 0.5, \dots) \method{update}{lmrobCtrl}(object, \dots) .Mchi.tuning.defaults .Mchi.tuning.default(psi) .Mpsi.tuning.defaults .Mpsi.tuning.default(psi) } \arguments{ \item{setting}{a string specifying alternative default values. Leave empty for the defaults or use \code{"KS2011"} or \code{"KS2014"} for the defaults suggested by Koller and Stahel (2011, 2017). See \emph{Details}.} \item{seed}{\code{NULL} or an integer vector compatible with \code{\link{.Random.seed}}: the seed to be used for random re-sampling used in obtaining candidates for the initial S-estimator. The current value of \code{.Random.seed} will be preserved if \code{seed} is set, i.e. non-\code{NULL}; otherwise, as by default, \code{.Random.seed} will be used and modified as usual from calls to \code{\link{runif}()} etc. } \item{nResample}{number of re-sampling candidates to be used to find the initial S-estimator. Currently defaults to 500 which works well in most situations (see references).} \item{tuning.chi}{tuning constant vector for the S-estimator. If \code{NULL}, as by default, sensible defaults are set (depending on \code{psi}) to yield a 50\% breakdown estimator. See \emph{Details}.} \item{bb}{expected value under the normal model of the \dQuote{chi} (rather \eqn{\rho (rho)}{rho}) function with tuning constant equal to \code{tuning.chi}. 
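  More precisely, the S-scale estimate \eqn{\hat{\sigma}}{sigma^} is
  (essentially) determined as the solution of
  \eqn{\frac{1}{n} \sum_{i=1}^n \chi(r_i/\hat{\sigma}) = b}{(1/n) sum_i chi(r_i/sigma^) = b},
  where \eqn{b} is \code{bb}; the default \code{bb = 0.5} corresponds to the
  maximal breakdown point.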
This is used to compute the S-estimator.} \item{tuning.psi}{tuning constant vector for the redescending M-estimator. If \code{NULL}, as by default, this is set (depending on \code{psi}) to yield an estimator with asymptotic efficiency of 95\% for normal errors. See \emph{Details}.} \item{max.it}{integer specifying the maximum number of IRWLS iterations.} \item{groups}{(for the fast-S algorithm): Number of random subsets to use when the data set is large.} \item{n.group}{(for the fast-S algorithm): Size of each of the \code{groups} above. Note that this must be at least \eqn{p}.} \item{k.fast.s}{(for the fast-S algorithm): Number of local improvement steps (\dQuote{\emph{I-steps}}) for each re-sampling candidate.} \item{k.m_s}{(for the M-S algorithm): specifies after how many unsuccessful refinement steps the algorithm stops.} \item{best.r.s}{(for the fast-S algorithm): Number of of best candidates to be iterated further (i.e., \dQuote{\emph{\bold{r}efined}}); is denoted \eqn{t} in Salibian-Barrera & Yohai(2006).} \item{k.max}{(for the fast-S algorithm): maximal number of refinement steps for the \dQuote{fully} iterated best candidates.} \item{maxit.scale}{integer specifying the maximum number of C level \code{find_scale()} iterations (in fast-S and M-S algorithms).} \item{refine.tol}{(for the fast-S algorithm): relative convergence tolerance for the fully iterated best candidates.} \item{rel.tol}{(for the RWLS iterations of the MM algorithm): relative convergence tolerance for the parameter vector.} \item{scale.tol}{(for the scale estimation iterations of the S algorithm): relative convergence tolerance for the \code{scale} \eqn{\sigma(.)}.} \item{solve.tol}{(for the S algorithm): relative tolerance for inversion. Hence, this corresponds to \code{\link{solve.default}()}'s \code{tol}.} \item{zero.tol}{for checking 0-residuals in the S algorithm, non-negative number \eqn{\epsilon_z}{ez} such that \eqn{\{i; \left|\tilde{R}_i\right| \le \epsilon_z\}}{{i; |R~[i]| <= ez}} correspond to \eqn{0}-residuals, where \eqn{\tilde{R}_i}{R~[i]} are standardized residuals, \eqn{\tilde{R}_i = R_i/s_y}{R~[i] = R[i]/sy} and \eqn{s_y = \frac{1}{n} \sum_{i=1}^n \left|y_i\right|}{% sy = ave_i |y[i]| = 1/n sum(i=1..n, |y[i]|)}.} \item{trace.lev}{integer indicating if the progress of the MM-algorithm and the fast-S algorithms, see \code{\link{lmrob.S}}, should be traced (increasingly); default \code{trace.lev = 0} does no tracing.} \item{mts}{maximum number of samples to try in subsampling algorithm.} \item{subsampling}{type of subsampling to be used, a string: \code{"simple"} for simple subsampling (default prior to version 0.9), \code{"nonsingular"} for nonsingular subsampling. See also \code{\link{lmrob.S}}.} \item{compute.rd}{logical indicating if robust distances (based on the MCD robust covariance estimator \code{\link{covMcd}}) are to be computed for the robust diagnostic plots. This may take some time to finish, particularly for large data sets, and can lead to singularity problems when there are \code{\link{factor}} explanatory variables (with many levels, or levels with \dQuote{few} observations). Hence, is \code{FALSE} by default.} \item{method}{string specifying the estimator-chain. \code{MM} is interpreted as \code{SM}. See \emph{Details} of \code{\link{lmrob}} for a description of the possible values.} \item{psi}{string specifying the type \eqn{\psi}-function used. See \emph{Details} of \code{\link{lmrob}}. 
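  The pre-defined redescenders are \code{"bisquare"}, \code{"welsh"},
  \code{"ggw"}, \code{"lqq"}, \code{"optimal"} and \code{"hampel"},
  cf. the table of default tuning constants in \emph{Details} below.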
Defaults to \code{"bisquare"} for S and MM-estimates, otherwise \code{"lqq"}.} \item{numpoints}{number of points used in Gauss quadrature.} \item{cov}{function or string with function name to be used to calculate covariance matrix estimate. The default is \code{if(method \%in\% c('SM', 'MM')) ".vcov.avar1" else ".vcov.w"}. See \emph{Details} of \code{\link{lmrob}}.} \item{split.type}{determines how categorical and continuous variables are split. See \code{\link{splitFrame}}.} \item{fast.s.large.n}{minimum number of observations required to switch from ordinary \dQuote{fast S} algorithm to an efficient \dQuote{large n} strategy.} \item{eps.outlier}{limit on the robustness weight below which an observation is considered to be an outlier. Either a \code{numeric(1)} or a function that takes the number of observations as an argument. Used only in \code{\link{summary.lmrob}} and \code{\link{outlierStats}}.} \item{eps.x}{limit on the absolute value of the elements of the design matrix below which an element is considered zero. Either a \code{numeric(1)} or a function that takes the maximum absolute value in the design matrix as an argument.} \item{compute.outlier.stats}{vector of \code{\link{character}} strings, each valid to be used as \code{method} argument. Used to specify for which estimators outlier statistics (and warnings) should be produced. Set to empty (\code{NULL} or \code{character(0)}) if none are required. \cr Note that the default is \code{method} which by default is either \code{"MM"}, \code{"SM"}, or \code{"SMDM"}; hence using \code{compute.outlier.stats = "S"} provides \code{\link{outlierStats}()} to a \code{\link{lmrob.S}()} result.} \item{warn.limit.reject}{limit of ratio \eqn{\#\mbox{rejected} / \#\mbox{obs in level}}{# rejected / # obs in level} above (\eqn{\geq}{>=}) which a warning is produced. Set to \code{NULL} to disable warning.} \item{warn.limit.meanrw}{limit of the mean robustness per factor level below which (\eqn{\leq}{<=}) a warning is produced. Set to \code{NULL} to disable warning.} \item{object}{an \code{"lmrobCtrl"} object, as resulting from a \code{lmrob.control(*)} or an \code{update(, *)} call.} \item{\dots}{for \describe{ \item{\code{lmrob.control()}:}{further arguments to be added as \code{\link{list}} components to the result, e.g., those to be used in \code{.vcov.w()}.} \item{\code{update(object, *)}:}{(named) components from \code{object}, to be \emph{modified}, \bold{not} \code{setting = *}.} }} } \value{ \code{.Mchi.tuning.default(psi)} and \code{.Mpsi.tuning.default(psi)} return a short \code{\link{numeric}} vector of tuning constants which are defaults for the corresponding psi-function, see the \emph{Details}. They are based on the named \code{\link{list}}s \code{.Mchi.tuning.defaults} and \code{.Mpsi.tuning.defaults}, respectively. \code{lmrob.control()} returns a named \code{\link{list}} with over twenty components, corresponding to the arguments, where \code{tuning.psi} and \code{tuning.chi} are typically computed, as \code{.Mpsi.tuning.default(psi)} or \code{.Mchi.tuning.default(psi)}, respectively. It is of \code{\link{class}} \code{"lmrobCtrl"} and we provide \code{print()}, \code{\link{update}()} and \code{\link{within}} methods. \code{update(, ....)} does \emph{not} allow a \code{setting="<...>"} in \code{....}. } \details{The option \code{setting="KS2011"} alters the default arguments. They are changed to \code{method = "SMDM"}, \code{psi = "lqq"}, \code{max.it = 500}, \code{k.max = 2000}, \code{cov = ".vcov.w"}. 
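  In other words, specifying \code{setting = "KS2011"} should essentially be
  equivalent (apart from the stored \code{setting} component itself) to
  spelling these arguments out explicitly, e.g.,
\preformatted{
  lmrob.control(method = "SMDM", psi = "lqq", max.it = 500,
                k.max = 2000, cov = ".vcov.w")
}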
The defaults of all the remaining arguments are not changed. The option \code{setting="KS2014"} builds upon \code{setting="KS2011"}. More arguments are changed to \code{best.r.s = 20, k.fast.s = 2, nResample = 1000}. This setting should produce more stable estimates for designs with \code{\link{factor}}s. By default, and in \code{.Mpsi.tuning.default()} and \code{.Mchi.tuning.default()}, \code{tuning.chi} and \code{tuning.psi} are set to yield an MM-estimate with breakdown point \eqn{0.5} and efficiency of 95\% at the normal. If numeric \code{tuning.chi} or \code{tuning.psi} are specified, say \code{cc}, for \code{psi = "ggw"} or \code{"lqq"}, \code{\link{.psi.const}(cc, psi)} is used, see its help page. To get the defaults, e.g., \code{.Mpsi.tuning.default(psi)} is equivalent to but more efficient than the formerly widely used \code{lmrob.control(psi = psi)$tuning.psi}. These defaults are: \tabular{rll}{ \code{psi} \tab\code{tuning.chi} \tab\code{tuning.psi} \cr \code{bisquare}\tab\code{1.54764} \tab\code{4.685061} \cr \code{welsh} \tab\code{0.5773502} \tab\code{2.11} \cr \code{ggw} \tab\code{c(-0.5, 1.5, NA, 0.5)} \tab\code{c(-0.5, 1.5, 0.95, NA)} \cr \code{lqq} \tab\code{c(-0.5, 1.5, NA, 0.5)} \tab\code{c(-0.5, 1.5, 0.95, NA)} \cr \code{optimal} \tab\code{0.4047} \tab\code{1.060158} \cr \code{hampel} \tab\code{c(1.5, 3.5, 8)*0.2119163} \tab\code{c(1.5, 3.5, 8)*0.9014} } The values for the tuning constant for the \code{ggw} and \code{lqq} psi functions are specified differently here by a vector with four elements: minimal slope, b (controlling the bend at the maximum of the curve), efficiency, breakdown point. Use \code{NA} for an unspecified value of either efficiency or breakdown point, see examples in the tables (above and below). For these table examples, the respective \dQuote{inner constants} are stored precomputed, see \code{\link{.psi.lqq.findc}} for more. The constants for the \code{"hampel"} psi function are chosen to have a redescending slope of \eqn{-1/3}. Constants for a slope of \eqn{-1/2} would be \tabular{rll}{ \code{psi} \tab\code{tuning.chi} \tab\code{tuning.psi} \cr \code{"hampel"}\tab\code{c(2, 4, 8) * 0.1981319} \tab\code{c(2, 4, 8) * 0.690794} } Alternative coefficients for an efficiency of 85\% at the normal are given in the table below. \tabular{rl}{ \code{psi} \tab\code{tuning.psi} \cr \code{bisquare} \tab\code{3.443689} \cr \code{welsh} \tab\code{1.456} \cr \code{ggw}, \code{lqq}\tab\code{c(-0.5, 1.5, 0.85, NA)} \cr \code{optimal} \tab\code{0.8684} \cr \code{hampel} (-1/3) \tab\code{c(1.5, 3.5, 8)* 0.5704545} \cr \code{hampel} (-1/2) \tab\code{c( 2, 4, 8) * 0.4769578} } } \references{ Koller, M. and Stahel, W.A. (2011) Sharpening Wald-type inference in robust regression for small samples. \emph{Computational Statistics & Data Analysis} \bold{55}(8), 2504--2515. Koller, M. and Stahel, W.A. (2017) Nonsingular subsampling for regression S estimators with categorical predictors, \emph{Computational Statistics} \bold{32}(2): 631--646. \doi{10.1007/s00180-016-0679-x}. Referred as \code{"KS2014"} everywhere in \pkg{robustbase}; A shorter first version, Koller (2012) has been available from \url{https://arxiv.org/abs/1208.5595}. } \author{Matias Salibian-Barrera, Martin Maechler and Manuel Koller} \seealso{ \code{\link{Mpsi}}, etc, for the (fast!) psi function computations; \code{\link{lmrob}}, also for references and examples. 
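}
\note{
  A small sketch (an illustration only, not from the package's own examples)
  of specifying \code{"ggw"}/\code{"lqq"} tuning constants via the 4-element
  form (minimal slope, b, efficiency, breakdown point) described in
  \emph{Details}; the \dQuote{inner} constants are then found via
  \code{\link{.psi.const}()}, i.e., \code{\link{.psi.lqq.findc}()} or
  \code{\link{.psi.ggw.findc}()}:
\preformatted{
  ## ask for efficiency 0.90 (instead of the default 0.95) with "lqq":
  ctl90 <- lmrob.control(psi = "lqq", tuning.psi = c(-0.5, 1.5, 0.90, NA))
  ## the corresponding inner constants, computed directly:
  .psi.const(c(-0.5, 1.5, 0.90, NA), "lqq")
}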
} \examples{ ## Show the default settings: str(lmrob.control()) ## Artificial data for a simple "robust t test": set.seed(17) y <- y0 <- rnorm(200) y[sample(200,20)] <- 100*rnorm(20) gr <- as.factor(rbinom(200, 1, prob = 1/8)) lmrob(y0 ~ 0+gr) ## Use Koller & Stahel(2011)'s recommendation but a larger 'max.it': str(ctrl <- lmrob.control("KS2011", max.it = 1000)) str(.Mpsi.tuning.defaults) stopifnot(identical(.Mpsi.tuning.defaults, sapply(names(.Mpsi.tuning.defaults), .Mpsi.tuning.default))) ## Containing (names!) all our (pre-defined) redescenders: str(.Mchi.tuning.defaults) ## Difference between settings: Cdef <- lmrob.control() C11 <- lmrob.control("KS2011") C14 <- lmrob.control("KS2014") str(C14) ## Differences: diffD <- names(which(!mapply(identical, Cdef,C11, ignore.environment=TRUE))) diffC <- names(which(!mapply(identical, C11, C14, ignore.environment=TRUE))) ## KS2011 vs KS2014: Apart from `setting` itself, they only differ in three places: cbind(KS11 = unlist(C11[diffC[-1]]), KS14 = unlist(C14[diffC[-1]])) ## KS11 KS14 ## nResample 500 1000 ## best.r.s 2 20 ## k.fast.s 1 2 ## default vs KS2011: a bit more: setting + 8 str2simpLang <- function(x) { r <- if(is.null(x)) quote((NULL)) else str2lang(deparse(x)) if(is.call(r)) format(r) else r } cbind(deflt= lapply(Cdef[diffD], str2simpLang), KS11 = lapply(C11 [diffD], str2simpLang)) ## update()ing a lmrob.control() , e.g., C14mod <- update(C14, trace.lev = 2) # the same as C14m.d <- C14; C14m.d$trace.lev <- 2 stopifnot(identical(C14mod, C14m.d)) ## changing psi --> updates tuning.{psi,chi}: C14mp <- update(C14, psi = "hampel", seed=101) ## updating 'method' is "smart" : C.SMDM <- update(Cdef, method="SMDM") all.equal(Cdef, C.SMDM) # changed also psi, tuning.{psi,chi} and cov ! chgd <- c("method", "psi", "tuning.chi", "tuning.psi", "cov") str(Cdef [chgd]) str(C.SMDM[chgd]) C14m <- update(C14, method="SMM") (ae <- all.equal(C14, C14mp))# changed tuning.psi & tuning.chi, too stopifnot(exprs = { identical(C14, update(C14, method="SMDM")) # no change! identical(c("psi", "seed", "tuning.chi", "tuning.psi"), sort(gsub("[^.[:alpha:]]", "", sub(":.*", "", sub("^Component ", "", ae))))) identical(C14m, local({C <- C14; C$method <- "SMM"; C})) }) ## try( update(C14, setting="KS2011") ) #--> Error: .. not allowed \dontshow{tools::assertError(update(C14, setting="KS2011"))} } \keyword{robust} \keyword{regression} robustbase/man/covPlot.Rd0000644000176200001440000001265413434014060015116 0ustar liggesusers\name{plot.mcd} \alias{covPlot} \alias{plot.mcd} %NO \alias{ddplot} %NO \alias{distplot} %NO \alias{chi2qqplot} %NO \alias{ellipse} \title{Robust Distance Plots} \description{ Shows the Mahalanobis distances based on robust and classical estimates of the location and the covariance matrix in different plots. 
The following plots are available: \itemize{ \item index plot of the robust and mahalanobis distances \item distance-distance plot \item Chisquare QQ-plot of the robust and mahalanobis distances \item plot of the tolerance ellipses (robust and classic) \item Scree plot - Eigenvalues comparison plot } } \usage{ \method{plot}{mcd}(x, which = c("all", "dd", "distance", "qqchi2", "tolEllipsePlot", "screeplot"), classic = FALSE, ask = (which[1] == "all" && dev.interactive()), cutoff, id.n, labels.id = rownames(x$X), cex.id = 0.75, label.pos = c(4,2), tol = 1e-7, \dots) covPlot(x, which = c("all", "dd", "distance", "qqchi2", "tolEllipsePlot", "screeplot"), classic = FALSE, ask = (which[1] == "all" && dev.interactive()), m.cov = covMcd(x), cutoff = NULL, id.n, labels.id = rownames(x), cex.id = 0.75, label.pos = c(4,2), tol = 1e-07, \dots) %% ddplot(x, \dots) %% distplot(x, \dots) %% chi2qqplot(x, \dots) %% ellipse(x, \dots) } \arguments{ \item{x}{For the \code{plot()} method, a \code{mcd} object, typically result of \code{\link{covMcd}}.\cr For \code{covPlot()}, the numeric data matrix such as the \code{X} component as returned from \code{\link{covMcd}}.} \item{which}{string indicating which plot to show. See the \emph{Details} section for a description of the options. Defaults to \code{"all"}.}. \item{classic}{whether to plot the classical distances too. Defaults to \code{FALSE}.}. \item{ask}{logical indicating if the user should be \emph{ask}ed before each plot, see \code{\link{par}(ask=.)}. Defaults to \code{which == "all" && \link{dev.interactive}()}. } \item{cutoff}{the cutoff value for the distances.} \item{id.n}{number of observations to be identified by a label. If not supplied, the number of observations with distance larger than \code{cutoff} is used.} \item{labels.id}{vector of labels, from which the labels for extreme points will be chosen. \code{NULL} uses observation numbers.} \item{cex.id}{magnification of point labels.} \item{label.pos}{positioning of labels, for the left half and right half of the graph respectively (used as \code{\link{text}(.., pos=*)}).} \item{tol}{tolerance to be used for computing the inverse, see \code{\link{solve}}. Defaults to \code{tol = 1e-7}.} \item{m.cov}{an object similar to those of class \code{"mcd"}; however only its components \code{center} and \code{cov} will be used. If missing, the MCD will be computed (via \code{\link{covMcd}()}).} \item{\dots}{other parameters to be passed through to plotting functions.} } \details{ These functions produce several plots based on the robust and classical location and covariance matrix. Which of them to select is specified by the attribute \code{which}. The \code{plot} method for \code{"mcd"} objects is calling \code{covPlot()} directly, whereas \code{covPlot()} should also be useful for plotting other (robust) covariance estimates. The possible options are: \describe{ \item{\code{distance}}{index plot of the robust distances} \item{\code{dd}}{distance-distance plot} \item{\code{qqchi2}}{a qq-plot of the robust distances versus the quantiles of the chi-squared distribution} \item{\code{tolEllipsePlot}}{a tolerance ellipse plot, via \code{\link{tolEllipsePlot}()}} \item{\code{screeplot}}{an eigenvalues comparison plot - screeplot} } The Distance-Distance Plot, introduced by Rousseeuw and van Zomeren (1990), displays the robust distances versus the classical Mahalanobis distances. The dashed line is the set of points where the robust distance is equal to the classical distance. 
The horizontal and vertical lines are drawn at values equal to the cutoff which defaults to square root of the 97.5\% quantile of a chi-squared distribution with p degrees of freedom. Points beyond these lines can be considered outliers. } %\value{} \references{ P. J. Rousseeuw and van Zomeren, B. C. (1990). Unmasking Multivariate Outliers and Leverage Points. \emph{Journal of the American Statistical Association} \bold{85}, 633--639. P. J. Rousseeuw and K. van Driessen (1999) A fast algorithm for the minimum covariance determinant estimator. \emph{Technometrics} \bold{41}, 212--223. } \seealso{ \code{\link{tolEllipsePlot}} } \examples{ data(Animals, package ="MASS") brain <- Animals[c(1:24, 26:25, 27:28),] mcd <- covMcd(log(brain)) plot(mcd, which = "distance", classic = TRUE)# 2 plots plot(mcd, which = "dd") plot(mcd, which = "tolEllipsePlot", classic = TRUE) op <- par(mfrow = c(2,3)) plot(mcd) ## -> which = "all" (5 plots) par(op) ## same plots for another robust Cov estimate: data(hbk) hbk.x <- data.matrix(hbk[, 1:3]) cOGK <- covOGK(hbk.x, n.iter = 2, sigmamu = scaleTau2, weight.fn = hard.rejection) covPlot(hbk.x, m.cov = cOGK, classic = TRUE) % %% this "\dont*{} case is needed for R <= 2.3.1 : % \dontrun{ covPlot(hbk.x, m.cov = cOGK, classic = TRUE, ask= TRUE)} % \dontshow{covPlot(hbk.x, m.cov = cOGK, classic = TRUE, ask= FALSE)} } \keyword{hplot} \keyword{robust} \keyword{multivariate} robustbase/man/ambientNOxCH.Rd0000754000176200001440000001472713312375575015774 0ustar liggesusers\name{ambientNOxCH} \alias{ambientNOxCH} \docType{data} \encoding{utf8} \title{ Daily Means of NOx (mono-nitrogen oxides) in air} \description{ This dataset contains daily means (from midnight to midnight) of NOx, i.e., mono-nitrogen oxides, in [ppb] at 13 sites in central Switzerland and Aarau for the year 2004. } \usage{data(ambientNOxCH, package="robustbase")} \format{ A data frame with 366 observations on the following 14 variables. \describe{ \item{\code{date}}{date of day, of class \code{"Date"}.} \item{\code{ad}}{Site is located north of Altdorf 100 meters east of motorway A2, on an open field at the beginning of a more than 2000m deep valley (690.175, 193.55; 438; inLuft)} \item{\code{ba}}{Site is located in the centre of the little town of Baden in a residential area. Baden has 34'000 inhabitants and is situated on the swiss plateau (666.075, 257.972; 377; inLuft).} \item{\code{ef}}{Site is located 6 km south of altdorf and 800 m north of the village of Erstfeld. The motorway A2 passes 5 m west of the measuring site. Over 8 million vehicles have passed Erstfeld in 2004 where 13\% of the counts were attributed to trucks (691.43, 187.69; 457; MFM-U).} \item{\code{la}}{Site is located on a wooded hill in a rural area called Laegern, about 190 m above Baden, which is about 5 km away (669.8, 259; 690; NABEL).} \item{\code{lu}}{Site is located in the center of town of Lucerne, which has 57'000 inhabitants (666.19, 211.975; 460; inLuft).} \item{\code{re}}{Site is located 1 km west of Reiden on the Swiss plateau. 
The motorway A2 passes 5 m west of the measuring site (639.56, 232.11; 462; MFM-U).} \item{\code{ri}}{Site is located at Rigi Seebodenalp, 649 m above the lake of Lucerne on an alp with half a dozen small houses (677.9, 213.5; 1030; NABEL).} \item{\code{se}}{Site is located in Sedel next to town of Lucerne 35m above and 250m south of motorway A14 from Zug to Lucerne on a low hill with free 360° panorama (665.5, 213.41; 484; inLuft).} \item{\code{si}}{Site is located at the border of a small industrial area in Sisseln, 300 m east of a main road (640.725, 266.25; 305; inLuft).} \item{\code{st}}{Site is located at the south east border of Stans with 7'000 inhabitants (670.85, 201.025; 438; inLuft).} \item{\code{su}}{Site is located in the center of Suhr (8700 inhabitants), 10 m from the main road (648.49, 246.985; 403; inLuft).} \item{\code{sz}}{Site is located in Schwyz (14'200 inhabitants) near a shopping center (691.92, 208.03; 470; inLuft).} \item{\code{zg}}{Site is located in the centre of Zug with 22'000 inhabitants, 24 m from the main road (681.625, 224.625; 420; inLuft).} } } \details{ The 13 sites are part of one of the three air quality monitoring networks: inLuft (regional authorities of central Switzerland and canton Aargau) \cr NABEL (Swiss federal network) \cr MFM-U (Monitoring flankierende Massnahmen Umwelt), special Swiss federal network along transit motorways A2 and A13 from Germany to Italy through Switzerland \cr The information within the brackets means: Swiss coordinates km east, km north; m above sea level; network When the measuring sites are exposed to the same atmospheric condition and when there is no singular emission event at any site, \code{log(mean(NOx) of a specific day at each site)} is a linear function of \code{log(yearly.mean(NOx) at the corresponding site)}. The offset and the slope of the straight line reflects the atmospheric conditions at this specific day. During winter time, often an inversion prevents the emissions from being diluted vertically, so that there evolve two separate atmospheric compartements: One below the inversion boundary with polluted air and one above with relatively clean air. In our example below, Rigi Seebodenalp is above the inversion boundary between December 10th and 12th. } \source{ http://www.in-luft.ch/ \cr http://www.empa.ch/plugin/template/empa/*/6794 \cr http://www.bafu.admin.ch/umweltbeobachtung/02272/02280 } \seealso{another NOx dataset, \code{\link{NOxEmissions}}. 
} \examples{ data(ambientNOxCH) str (ambientNOxCH) yearly <- log(colMeans(ambientNOxCH[,-1], na.rm=TRUE)) xlim <- range(yearly) lNOx <- log(ambientNOxCH[, -1]) days <- ambientNOxCH[, "date"] ## Subset of 9 days starting at April 4: idays <- seq(which(ambientNOxCH$date=="2004-12-04"), length=9) ylim <- range(lNOx[idays,],na.rm=TRUE) op <- par(mfrow=c(3,3),mar=rep(1,4), oma = c(0,0,2,0)) for (id in idays) { daily <- unlist(lNOx[id,]) plot(NA, xlim=xlim,ylim=ylim, ann=FALSE, type = "n") abline(0:1, col="light gray") abline(lmrob(daily~yearly, na.action=na.exclude), col="red", lwd=2) text(yearly, daily, names(yearly), col="blue") mtext(days[id], side=1, line=-1.2, cex=.75, adj=.98) } mtext("Daily ~ Yearly log( NOx mean values ) at 13 Swiss locations", outer=TRUE) par(op) ## do all 366 regressions: Least Squares and Robust: LS <- lapply(1:nrow(ambientNOxCH), function(id) lm(unlist(lNOx[id,]) ~ yearly, na.action = na.exclude)) R <- lapply(1:nrow(ambientNOxCH), function(id) lmrob(unlist(lNOx[id,]) ~ yearly, na.action = na.exclude)) ## currently 4 warnings about non-convergence; ## which ones? days[notOk <- ! sapply(R, `[[`, "converged") ] ## "2004-01-10" "2004-05-12" "2004-05-16" "2004-11-16" ## first problematic case: daily <- unlist(lNOx[which(notOk)[1],]) plot(daily ~ yearly, main = paste("lmrob() non-convergent:",days[notOk[1]])) rr <- lmrob(daily ~ yearly, na.action = na.exclude, control = lmrob.control(trace=3, max.it = 100)) ##-> 53 iter. ## Look at all coefficients: R.cf <- t(sapply(R, coef)) C.cf <- t(sapply(LS, coef)) plot(C.cf, xlim=range(C.cf[,1],R.cf[,1]), ylim=range(C.cf[,2],R.cf[,2])) mD1 <- rowMeans(abs(C.cf - R.cf)) lrg <- mD1 > quantile(mD1, 0.80) arrows(C.cf[lrg,1], C.cf[lrg,2], R.cf[lrg,1], R.cf[lrg,2], length=.1, col="light gray") points(R.cf, col=2) ## All robustness weights aW <- t(sapply(R, weights, type="robustness")) colnames(aW) <- names(yearly) summary(aW) sort(colSums(aW < 0.05, na.rm = TRUE)) # how often "clear outlier": # lu st zg ba se sz su si re la ef ad ri # 0 0 0 1 1 1 2 3 4 10 14 17 48 lattice::levelplot(aW, asp=1/2, main="Robustness weights", xlab= "day", ylab= "site") } \keyword{datasets} robustbase/man/alcohol.Rd0000644000176200001440000000310313713564014015107 0ustar liggesusers\name{alcohol} \alias{alcohol} \docType{data} \title{Alcohol Solubility in Water Data} \description{ The solubility of alcohols in water is important in understanding alcohol transport in living organisms. This dataset from (Romanelli et al., 2001) contains physicochemical characteristics of 44 aliphatic alcohols. The aim of the experiment was the prediction of the solubility on the basis of molecular descriptors. } \usage{data(alcohol, package="robustbase")} \format{ A data frame with 44 observations on the following 7 numeric variables. \describe{ \item{\code{SAG}}{solvent accessible surface-bounded molecular volume.} \item{\code{V}}{volume} \item{\code{logPC}}{Log(PC); PC = octanol-water partitions coefficient} \item{\code{P}}{polarizability} \item{\code{RM}}{molar refractivity} \item{\code{Mass}}{the mass} \item{\code{logSolubility}}{ln(Solubility), the response.} } } % \details{ % } \source{ The website accompanying the MMY-book: \url{https://www.wiley.com/legacy/wileychi/robust_statistics/} } \references{ %% FIXME: I don't think this is the correct reference % Romanelli, J.R., Kelly, J.J. and Litwein, D.E.M (2001) % Hand-assisted laparoscopic surgery in the United States: An overview % \emph{Seminars in Laparoscopic Surgery} \bold{8} 96--103. Maronna, R.A., Martin, R.D. 
and Yohai, V.J. (2006) \emph{Robust Statistics, Theory and Methods}, Wiley. } \examples{ data(alcohol) ## version of data set with trivial names, as s.alcohol <- alcohol names(s.alcohol) <- paste("Col", 1:7, sep='') } \keyword{datasets} robustbase/man/delivery.Rd0000644000176200001440000000165613312375575015333 0ustar liggesusers\name{delivery} \alias{delivery} \docType{data} \title{Delivery Time Data} \description{ Delivery Time Data, from Montgomery and Peck (1982). The aim is to explain the time required to service a vending machine (Y) by means of the number of products stocked (X1) and the distance walked by the route driver (X2). } \usage{data(delivery, package="robustbase")} \format{ A data frame with 25 observations on the following 3 variables. \describe{ \item{\code{n.prod}}{Number of Products} \item{\code{distance}}{Distance} \item{\code{delTime}}{Delivery time} } } \source{ Montgomery and Peck (1982, p.116) } \references{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, page 155, table 23. } \examples{ data(delivery) summary(lm.deli <- lm(delTime ~ ., data = delivery)) delivery.x <- as.matrix(delivery[, 1:2]) c_deli <- covMcd(delivery.x) c_deli } \keyword{datasets} robustbase/man/lmc-rmc.Rd0000644000176200001440000000605014233260125015016 0ustar liggesusers\name{lmc} \alias{lmc} \alias{rmc} \title{Left and Right Medcouple, Robust Measures of Tail Weight} \description{ Compute the left and right \sQuote{medcouple}, \emph{robust} estimators of tail weight, in some sense robust versions of the kurtosis, the very unrobust centralized 4th moment. } \usage{ lmc(x, mx = median(x, na.rm=na.rm), na.rm = FALSE, doReflect = FALSE, ...) rmc(x, mx = median(x, na.rm=na.rm), na.rm = FALSE, doReflect = FALSE, ...) } \arguments{ \item{x}{a numeric vector} \item{mx}{number, the \dQuote{center} of \code{x} wrt which the left and right parts of \code{x} are defined: \preformatted{ lmc(x, mx, *) := mc(x[x <= mx], *) rmc(x, mx, *) := mc(x[x >= mx], *)} } \item{na.rm}{logical indicating how missing values (\code{\link{NA}}s) should be dealt with.} \item{doReflect}{logical indicating if \code{\link{mc}} should also be computed on the \emph{reflected} sample \code{-x}. Setting \code{doReflect=TRUE} makes sense for mathematical strictness reasons, as the internal MC computes the himedian() which can differ slightly from the median. Note that \code{\link{mc}()}'s own default is true iff \code{length(x) <= 100}. } \item{\dots}{further arguments to \code{\link{mc}()}, see its help page.} } \value{ each a number (unless \code{\dots} contains \code{full.result = TRUE}). } \references{ Brys, G., Hubert, M. and Struyf, A. (2006). Robust measures of tail weight, \emph{Computational Statistics and Data Analysis} \bold{50(3)}, 733--759. and those in \sQuote{References} of \code{\link{mc}}. 
} \examples{ mc(1:5) # 0 for a symmetric sample lmc(1:5) # 0 rmc(1:5) # 0 x1 <- c(1, 2, 7, 9, 10) mc(x1) # = -1/3 c( lmc( x1), lmc( x1, doReflect=TRUE))# 0 -1/3 c( rmc( x1), rmc( x1, doReflect=TRUE))# -1/3 -1/6 c(-rmc(-x1), -rmc(-x1, doReflect=TRUE)) # 2/3 1/3 data(cushny) lmc(cushny) # 0.2 rmc(cushny) # 0.45 isSym_LRmc <- function(x, tol = 1e-14) all.equal(lmc(-x, doReflect=TRUE), rmc( x, doReflect=TRUE), tolerance = tol) sym <- c(-20, -5, -2:2, 5, 20) stopifnot(exprs = { lmc(sym) == 0.5 rmc(sym) == 0.5 isSym_LRmc(cushny) isSym_LRmc(x1) }) ## Susceptibility to large outliers: ## "Sensitivity Curve" := empirical influence function dX10 <- function(X) c(1:5,7,10,15,25, X) # generate skewed size-10 with 'X' x <- c(26:40, 45, 50, 60, 75, 100) (lmc10N <- vapply(x, function(X) lmc(dX10(X)), 1)) (rmc10N <- vapply(x, function(X) rmc(dX10(X)), 1)) cols <- adjustcolor(2:3, 3/4) plot(x, lmc10N, type="o", cex=1/2, main = "lmc & rmc( c(1:5,7,10,15,25, X) )", xlab=quote(X), log="x", col=cols[1]) lines(x, rmc10N, col=cols[2], lwd=3) legend("top", paste0(c("lmc", "rmc"), "(X)"), col=cols, lty=1, lwd=c(1,3), pch = c(1, NA), bty="n") n <- length(x) stopifnot(exprs = { all.equal(current = lmc10N, target = rep(0, n)) all.equal(current = rmc10N, target = c(3/19, 1/5, 5/21, 3/11, 7/23, rep(1/3, n-5))) ## and it stays stable with outlier X --> oo : lmc(dX10(1e300)) == 0 rmc(dX10(1e300)) == rmc10N[6] }) } \keyword{robust} \keyword{univar} robustbase/man/tolEllipsePlot.Rd0000644000176200001440000000425413710054521016443 0ustar liggesusers\name{tolEllipsePlot} \alias{tolEllipsePlot} \title{Tolerance Ellipse Plot} \description{ Plots the 0.975 tolerance ellipse of the bivariate data set \code{x}. The ellipse is defined by those data points whose distance is equal to the squareroot of the 0.975 chisquare quantile with 2 degrees of freedom. } \usage{ tolEllipsePlot(x, m.cov = covMcd(x), cutoff = NULL, id.n = NULL, classic = FALSE, tol = 1e-07, xlab = "", ylab = "", main = "Tolerance ellipse (97.5\%)", txt.leg = c("robust", "classical"), col.leg = c("red", "blue"), lty.leg = c("solid","dashed")) } \arguments{ \item{x}{a two dimensional matrix or data frame. } \item{m.cov}{an object similar to those of class \code{"mcd"}; however only its components \code{center} and \code{cov} will be used. If missing, the MCD will be computed (via \code{\link{covMcd}()}).} \item{cutoff}{numeric distance needed to flag data points outside the ellipse.} \item{id.n}{number of observations to be identified by a label. If not supplied, the number of observations with distance larger than \code{cutoff} is used.} \item{classic}{whether to plot the classical distances as well, \code{FALSE} by default.} \item{tol}{tolerance to be used for computing the inverse, see \code{\link{solve}}. Defaults to \code{1e-7}.} \item{xlab, ylab, main}{passed to \code{\link{plot.default}}.} \item{txt.leg, col.leg, lty.leg}{character vectors of length 2 for the legend, only used if \code{classic = TRUE}.} } \author{Peter Filzmoser, Valentin Todorov and Martin Maechler} %\details{} %\value{} %\references{ } \seealso{ \code{\link{covPlot}} which calls \code{tolEllipsePlot()} when desired. \code{\link[cluster]{ellipsoidhull}} and \code{\link[cluster]{predict.ellipsoid}} from package \CRANpkg{cluster}. 
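}
\note{
  The 97.5\% ellipse described above corresponds to the distance
  \code{sqrt(qchisq(0.975, df = 2))}, i.e., approximately \eqn{2.72}.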
} \examples{ data(hbk) hbk.x <- data.matrix(hbk[, 1:3]) mcd <- covMcd(hbk.x) # compute mcd in advance ## must be a 2-dimensional data set: take the first two columns : tolEllipsePlot(hbk.x[,1:2]) ## an "impressive" example: data(telef) tolEllipsePlot(telef, classic=TRUE) } \keyword{hplot} \keyword{robust} robustbase/man/starsCYG.Rd0000644000176200001440000000304313312375575015177 0ustar liggesusers\name{starsCYG} \alias{starsCYG} \docType{data} \title{Hertzsprung-Russell Diagram Data of Star Cluster CYG OB1} \description{ Data for the Hertzsprung-Russell Diagram of the Star Cluster CYG OB1, which contains 47 stars in the direction of Cygnus, from C.Doom. The first variable is the logarithm of the effective temperature at the surface of the star (Te) and the second one is the logarithm of its light intencity (\eqn{L/L_0}). In the Hertzsprung-Russell diagram, which is the scatterplot of these data points, where the log temperature is plotted from left to right, two groups of points are seen:\cr the majority which tend to follow a steep band and four stars in the upper corner. In the astronomy the 43 stars are said to lie on the main sequence and the four remaining stars are called \dQuote{giants} (the points 11, 20, 30, 34). } \usage{data(starsCYG, package="robustbase")} \format{ A data frame with 47 observations on the following 2 variables \describe{ \item{\code{log.Te}}{Logarithm of the effective temperature at the surface of the star (Te).} \item{\code{log.light}}{Logarithm of its light intencity (\eqn{L/L_0})} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.27, table 3. } \examples{ data(starsCYG) plot(starsCYG) cst <- covMcd(starsCYG) lm.stars <- lm(log.light ~ log.Te, data = starsCYG) summary(lm.stars) plot(lm.stars) lts.stars <- ltsReg(log.light ~ log.Te, data = starsCYG) plot(lts.stars) } \keyword{datasets} robustbase/man/epilepsy.Rd0000644000176200001440000000516013312375575015334 0ustar liggesusers\name{epilepsy} \alias{epilepsy} \docType{data} \title{Epilepsy Attacks Data Set} \description{Data from a clinical trial of 59 patients with epilepsy (Breslow, 1996) in order to illustrate diagnostic techniques in Poisson regression. } \usage{data(epilepsy, package="robustbase")} \format{ A data frame with 59 observations on the following 11 variables. \describe{ \item{\code{ID}}{Patient identification number} \item{\code{Y1}}{Number of epilepsy attacks patients have during the first follow-up period} \item{\code{Y2}}{Number of epilepsy attacks patients have during the second follow-up period} \item{\code{Y3}}{Number of epilepsy attacks patients have during the third follow-up period} \item{\code{Y4}}{Number of epilepsy attacks patients have during the forth follow-up period} \item{\code{Base}}{Number of epileptic attacks recorded during 8 week period prior to randomization} \item{\code{Age}}{Age of the patients} \item{\code{Trt}}{a factor with levels \code{placebo} \code{progabide} indicating whether the anti-epilepsy drug Progabide has been applied or not} \item{\code{Ysum}}{Total number of epilepsy attacks patients have during the four follow-up periods } \item{\code{Age10}}{Age of the patients devided by 10} \item{\code{Base4}}{Variable \code{Base} devided by 4} } } \details{Thall and Vail reported data from a clinical trial of 59 patients with epilepsy, 31 of whom were randomized to receive the anti-epilepsy drug Progabide and 28 of whom received a placebo. 
Baseline data consisted of the patient's age and the number of epileptic seizures recorded during 8 week period prior to randomization. The response consisted of counts of seizures occuring during the four consecutive follow-up periods of two weeks each. } \source{ Thall, P.F. and Vail S.C. (1990) Some covariance models for longitudinal count data with overdispersion. \emph{Biometrics} \bold{46}, 657--671. } \references{ Diggle, P.J., Liang, K.Y., and Zeger, S.L. (1994) \emph{Analysis of Longitudinal Data}; Clarendon Press. Breslow N. E. (1996) Generalized linear models: Checking assumptions and strengthening conclusions. \emph{Statistica Applicata} \bold{8}, 23--41. } \examples{ data(epilepsy) str(epilepsy) pairs(epilepsy[,c("Ysum","Base4","Trt","Age10")]) Efit1 <- glm(Ysum ~ Age10 + Base4*Trt, family=poisson, data=epilepsy) summary(Efit1) ## Robust Fit : Efit2 <- glmrob(Ysum ~ Age10 + Base4*Trt, family=poisson, data=epilepsy, method = "Mqle", tcc=1.2, maxit=100) summary(Efit2) } \keyword{datasets} robustbase/man/lactic.Rd0000644000176200001440000000141313312375575014736 0ustar liggesusers\name{lactic} \alias{lactic} %% FIXME: shorter \title{Lactic Acid Concentration Measurement Data} \description{ Data on the Calibration of an Instrument that Measures Lactic Acid Concentration in Blood, from Afifi and Azen (1979) - comparing the true concentration X with the measured value Y. } \usage{data(lactic, package="robustbase")} \format{ A data frame with 20 observations on the following 2 variables. \describe{ \item{\code{X}}{True Concentration} \item{\code{Y}}{Instrument} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.62, table 10. } \examples{ data(lactic) summary(lm.lactic <- lm(Y ~., data=lactic)) %% summary(lts.lactic <- ltsReg(Y ~., data=lactic)) } \keyword{datasets} robustbase/man/wgt.himedian.Rd0000644000176200001440000000145113710054521016042 0ustar liggesusers\name{wgt.himedian} \alias{wgt.himedian} \title{Weighted Hi-Median} \description{ Compute the weighted Hi-Median of \code{x}. } \usage{ wgt.himedian(x, weights = rep(1, n)) } \arguments{ \item{x}{numeric vector} \item{weights}{numeric vector of weights; of the same length as \code{x}.} } % \value{ % a number % } %\author{Martin Maechler} \note{ this is rather a by-product of the code used in \code{\link{Sn}} and \code{\link{Qn}}. We currently plan to replace it with more general weighted quantiles. } \seealso{\code{\link{median}}; %%..... also \code{\link[Hmisc]{wtd.quantile}} from package \CRANpkg{Hmisc}. } \examples{ x <- c(1:6, 20) median(x) ## 4 stopifnot(all.equal(4, wgt.himedian(x)), all.equal(6, wgt.himedian(x, c(rep(1,6), 5)))) } \keyword{univar} \keyword{robust} robustbase/man/predict.lmrob.Rd0000644000176200001440000001207712471655756016262 0ustar liggesusers\name{predict.lmrob} \alias{predict.lmrob} \title{Predict method for Robust Linear Model ("lmrob") Fits} \description{ Predicted values based on robust linear model object. } \usage{ \method{predict}{lmrob}(object, newdata, se.fit = FALSE, scale = NULL, df = NULL, interval = c("none", "confidence", "prediction"), level = 0.95, type = c("response", "terms"), terms = NULL, na.action = na.pass, pred.var = res.var/weights, weights = 1, ...) } \arguments{ %% the following is +- copy-pasted from predict.lm.Rd: \item{object}{object of class inheriting from \code{"lmrob"}} \item{newdata}{an optional data frame in which to look for variables with which to predict. 
If omitted, the fitted values are used.} \item{se.fit}{a switch indicating if standard errors are required.} \item{scale}{scale parameter for std.err. calculation} \item{df}{degrees of freedom for scale} \item{interval}{type of interval calculation.} \item{level}{tolerance/confidence level} \item{type}{Type of prediction (response or model term).} \item{terms}{if \code{type="terms"}, which terms (default is all terms)} \item{na.action}{function determining what should be done with missing values in \code{newdata}. The default is to predict \code{NA}.} \item{pred.var}{the variance(s) for future observations to be assumed for prediction intervals. See \sQuote{Details}.} \item{weights}{variance weights for prediction. This can be a numeric vector or a one-sided model formula. In the latter case, it is interpreted as an expression evaluated in \code{newdata}} \item{\dots}{further arguments passed to or from other methods.} } \details{ Note that this \code{lmrob} method for \code{\link{predict}} is closely modeled after the method for \code{lm()}, \code{\link{predict.lm}}, maybe see there for caveats with missing value treatment. %% Also lifted from predict.lm.Rd : The prediction intervals are for a single observation at each case in \code{newdata} (or by default, the data used for the fit) with error variance(s) \code{pred.var}. This can be a multiple of \code{res.var}, the estimated value of \eqn{\sigma^2}: the default is to assume that future observations have the same error variance as those used for fitting. If \code{weights} is supplied, the inverse of this is used as a scale factor. For a weighted fit, if the prediction is for the original data frame, \code{weights} defaults to the weights used for the model fit, with a warning since it might not be the intended result. If the fit was weighted and \code{newdata} is given, the default is to assume constant prediction variance, with a warning. } \value{ %% the following is +- copy-pasted from predict.lm.Rd: \code{predict.lmrob} produces a vector of predictions or a matrix of predictions and bounds with column names \code{fit}, \code{lwr}, and \code{upr} if \code{interval} is set. If \code{se.fit} is \code{TRUE}, a list with the following components is returned: \item{fit}{vector or matrix as above} \item{se.fit}{standard error of predicted means} \item{residual.scale}{residual standard deviations} \item{df}{degrees of freedom for residual} } % \references{ % } \author{Andreas Ruckstuhl} \seealso{ \code{\link{lmrob}} and the (non-robust) traditional \code{\link{predict.lm}} method. } \examples{ ## Predictions --- artificial example -- closely following example(predict.lm) set.seed(5) n <- length(x <- sort(c(round(rnorm(25), 1), 20))) y <- x + rnorm(n) iO <- c(sample(n-1, 3), n) y[iO] <- y[iO] + 10*rcauchy(iO) p.ex <- function(...) 
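## helper plot: data with the true line, the outliers 'iO' highlighted,
## and both the least-squares and the robust lmrob() fitted lines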
{ plot(y ~ x, ...); abline(0,1, col="sky blue") points(y ~ x, subset=iO, col="red", pch=2) abline(lm (y ~ x), col = "gray40") abline(lmrob(y ~ x), col = "forest green") legend("topleft", c("true", "Least Squares", "robust"), col = c("sky blue", "gray40", "forest green"), lwd=1.5, bty="n") } p.ex() fm <- lmrob(y ~ x) predict(fm) new <- data.frame(x = seq(-3, 10, 0.25)) str(predict(fm, new, se.fit = TRUE)) pred.w.plim <- predict(fm, new, interval = "prediction") pred.w.clim <- predict(fm, new, interval = "confidence") pmat <- cbind(pred.w.clim, pred.w.plim[,-1]) matlines(new$x, pmat, lty = c(1,2,2,3,3))# add to first plot ## show zoom-in region : rect(xleft = -3, ybottom = -20, xright = 10, ytop = 40, lty = 3, border="orange4") ## now zoom in : p.ex(xlim = c(-3,10), ylim = c(-20, 40)) matlines(new$x, pmat, lty = c(1,2,2,3,3)) box(lty = 3, col="orange4", lwd=3) legend("bottom", c("fit", "lwr CI", "upr CI", "lwr Pred.I", "upr Pred.I"), col = 1:5, lty=c(1,2,2,3,3), bty="n") ## Prediction intervals, special cases ## The first three of these throw warnings w <- 1 + x^2 fit <- lmrob(y ~ x) wfit <- lmrob(y ~ x, weights = w) predict(fit, interval = "prediction") predict(wfit, interval = "prediction") predict(wfit, new, interval = "prediction") predict(wfit, new, interval = "prediction", weights = (new$x)^2) -> p.w2 p.w2 stopifnot(identical(p.w2, ## the same as using formula: predict(wfit, new, interval = "prediction", weights = ~x^2))) } \keyword{robust} \keyword{regression} robustbase/man/psi_func-class.Rd0000644000176200001440000000503113310673404016376 0ustar liggesusers\name{psi_func-class} \docType{class} \alias{psi_func-class} \title{Class of "Psi Functions" for M-Estimation} %% 'psi_func' cannot be used in title! \description{ The class \code{"psi_func"} is used to store \eqn{\psi \ (psi)}{psi} functions for M-estimation. In particular, an object of the class contains \eqn{\rho(x) \ (\code{rho})}{rho(x)}, its derivative \eqn{\psi(x) \ (psi)}{psi(x)}, the weight function \eqn{\psi(x)/x}, and first derivative of \eqn{\psi}, \code{Dpsi = } \eqn{\psi'(x)}. } \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("psi_func", ...)}, but preferably by \code{\link{psiFunc}(...)}. } \section{Slots}{ \describe{ \item{\code{rho}:}{the \eqn{\rho()}{rho()} function, an object of class \code{"functionX"}. 
This is used to formulate the objective function; \eqn{\rho()} can be regarded as generalized negative log-likelihood.} \item{\code{psi}:}{\eqn{\psi()}{psi()} is the derivative of \eqn{\rho}, \eqn{\psi(x) = \frac{d}{dx} \rho(x)}{psi(x) = d/dx rho(x)}; also of class \code{"functionX"}.} \item{\code{wgt}:}{The weight function \eqn{\psi(x)/x}, of class \code{"functionX"}.} \item{\code{Dpsi}:}{the derivative of \eqn{\psi}, \eqn{Dpsi(x) = psi'(x)}; of class \code{"functionX"}.} \item{\code{Dwgt}:}{the derivative of the weight function, of class \code{"functionX"}, is generated automatically if \code{\link{psiFunc}} constructor is used.} \item{\code{tDefs}:}{\emph{named} numeric vector of \bold{t}uning parameter \bold{Def}ault values.} %%% FIXME : Replace these by *methods* \item{\code{Erho}:}{A function of class \code{"functionXal"} for computing \eqn{E[\rho(X)]} when \eqn{X} is standard normal \eqn{\mathcal{N}(0,1)}{N(0,1)}.} \item{\code{Epsi2}:}{A function of class \code{"functionXal"} for computing \eqn{E[\psi^2(X)]} when \eqn{X} is standard normal.} \item{\code{EDpsi}:}{A function of class \code{"functionXal"} for computing \eqn{E[\psi'(X)]} when \eqn{X} is standard normal.} \item{\code{name}:}{Name of \eqn{\psi}{psi}-function used for printing.} \item{\code{xtras}:}{Potentially further information.} } } \section{Methods}{ Currently, only \code{\link{chgDefaults}()}, \code{\link[=plot-methods]{plot}()} and \code{show()}. } \author{Martin Maechler} \seealso{ \code{\link{psiFunc}}. } \examples{ str(huberPsi, give.attr = FALSE) plot(hampelPsi)# calling the plot method (nicely showing "all" !) } \keyword{classes} \keyword{robust} robustbase/man/lmrob.lar.Rd0000644000176200001440000000377213710236747015400 0ustar liggesusers\name{lmrob.lar} \alias{lmrob.lar} \title{Least Absolute Residuals / L1 Regression} \description{ To compute least absolute residuals (LAR) or \dQuote{L1} regression, \code{lmrob.lar} implements the routine L1 in Barrodale and Roberts (1974), which is based on the simplex method of linear programming. It is a copy of \code{lmRob.lar} (in early 2012) from the \CRANpkg{robust} package. } \usage{ lmrob.lar(x, y, control, \dots) } \arguments{ \item{x}{numeric matrix for the predictors.} \item{y}{numeric vector for the response.} \item{control}{\code{\link{list}} as returned by \code{\link{lmrob.control}()} .} \item{\dots}{(unused but needed when called as \code{init(x,y,ctrl, mf)} from \code{\link{lmrob}()})} } \details{ This method is used for computing the M-S estimate and typically not to be used on its own. A description of the Fortran subroutines used can be found in Marazzi (1993). In the book, the main method is named \code{RILARS}. } \value{ A list that includes the following components: \item{coef }{The L1-estimate of the coefficient vector} \item{scale }{The residual scale estimate (mad)} \item{resid }{The residuals} \item{iter }{The number of iterations required by the simplex algorithm} \item{status }{Return status (0: optimal, but non unique solution, 1: optimal unique solution)} \item{converged }{Convergence status (always \code{TRUE}), needed for \code{\link{lmrob.fit}}.} } \references{ Marazzi, A. (1993). \emph{Algorithms, routines, and S functions for robust statistics}. Wadsworth & Brooks/Cole, Pacific Grove, CA. } \author{ Manuel Koller } \seealso{ \code{\link[quantreg]{rq}} from CRAN package \CRANpkg{quantreg}. } \examples{ data(stackloss) X <- model.matrix(stack.loss ~ . 
, data = stackloss) y <- stack.loss (fm.L1 <- lmrob.lar(X, y)) with(fm.L1, stopifnot(converged , status == 1L , all.equal(scale, 1.5291576438) , sum(abs(residuals) < 1e-15) == 4 # p=4 exactly fitted obs. )) } \keyword{ L1 } \keyword{ regression } robustbase/man/carrots.Rd0000644000176200001440000000342113312375575015155 0ustar liggesusers\name{carrots} \alias{carrots} \docType{data} \title{Insect Damages on Carrots} \description{ The damage carrots data set from Phelps (1982) was used by McCullagh and Nelder (1989) in order to illustrate diagnostic techniques because of the presence of an outlier. In a soil experiment trial with three blocks, eight levels of insecticide were applied and the carrots were tested for insect damage. } \usage{data(carrots, package="robustbase")} \format{ A data frame with 24 observations on the following 4 variables. \describe{ \item{success}{ integer giving the number of carrots with insect damage.} \item{total}{ integer giving the total number of carrots per experimental unit.} \item{logdose}{a numeric vector giving log(dose) values (eight different levels only).} \item{block}{factor with levels \code{B1} to \code{B3}} } } \source{ Phelps, K. (1982). Use of the complementary log-log function to describe doseresponse relationships in insecticide evaluation field trials. \cr In R. Gilchrist (Ed.), \emph{Lecture Notes in Statistics, No. 14. GLIM.82: Proceedings of the International Conference on Generalized Linear Models}; Springer-Verlag. } \references{ McCullagh P. and Nelder, J. A. (1989) \emph{Generalized Linear Models.} London: Chapman and Hall. Eva Cantoni and Elvezio Ronchetti (2001); JASA, and \cr Eva Cantoni (2004); JSS, see \code{\link{glmrob}} } \examples{ data(carrots) str(carrots) plot(success/total ~ logdose, data = carrots, col = as.integer(block)) coplot(success/total ~ logdose | block, data = carrots) ## Classical glm Cfit0 <- glm(cbind(success, total-success) ~ logdose + block, data=carrots, family=binomial) summary(Cfit0) ## Robust Fit (see help(glmrob)) .... } \keyword{datasets} robustbase/man/psiFindc.Rd0000644000176200001440000001247213177452122015236 0ustar liggesusers\name{psi.findc} \title{Find Tuning Constant(s) for "lqq" and "ggw" Psi Functions} \alias{.psi.ggw.findc}% --> ../R/lmrob.MM.R \alias{.psi.lqq.findc} \alias{.psi.const} \description{ Find psi function tuning constant sets for \code{"LQQ"} and \code{"GGW"} psi (\eqn{\psi}) functions by specifying largest descent (minimal slope), efficiency and or breakdown point. \code{.psi.const()} is called from \code{\link{lmrob.control}()} to set the tuning constants for psi and chi for \code{"LQQ"} and \code{"GGW"} psi. Unless the specified tuning constants are from fixed small set where the computations are stored precomputed, \code{.psi.const()} calls the corresponding \code{.psi..findc()}. } \usage{ .psi.ggw.findc(ms, b, eff = NA, bp = NA, subdivisions = 100L, rel.tol = .Machine$double.eps^0.25, abs.tol = rel.tol, tol = .Machine$double.eps^0.25, ms.tol = tol/64, maxiter = 1000) .psi.lqq.findc(ms, b.c, eff = NA, bp = NA, interval = c(0.1, 4), subdivisions = 100L, rel.tol = .Machine$double.eps^0.25, abs.tol = rel.tol, tol = .Machine$double.eps^0.25, maxiter = 1000) .psi.const(cc, psi) } \arguments{ \item{ms}{number, the minimal slope, typically negative.} \item{b, b.c}{number, specifying \eqn{b} or \eqn{b/c} for \code{"ggw"} or \code{"lqq"} respectively.} \item{eff}{a number (or \code{\link{NA}}), the desired \emph{efficiency}, in \eqn{[0,1]} of the estimator. 
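  This is the asymptotic efficiency of the resulting estimator at the
  normal model, as for \code{tuning.psi} in \code{\link{lmrob.control}()}.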
If \code{NA}, \code{bp} must be specified as valid number.} \item{bp}{a number (or \code{\link{NA}}), the desired \emph{\bold{b}reakdown \bold{p}oint} of the estimator, in \eqn{[0,1]}.} \item{interval}{for finding \eqn{c} via \code{\link{uniroot}()}.} \item{subdivisions}{passed to \code{\link{integrate}()}.} \item{rel.tol, abs.tol}{relative and absolute tolerance for \code{\link{integrate}()}.} \item{tol}{relative tolerance for \code{\link{uniroot}()}.} \item{ms.tol}{relative tolerance for the internal \code{.psi.ggw.finda()}, eventually passed to \code{\link{optimize}} inside (internal) \code{.psi.ggw.mxs()}.} \item{maxiter}{maximal number of iterations for \code{\link{uniroot}()}.} %% for .psi.const() : \item{cc}{(for \code{.psi.const()}:) numeric vector of length 4, containing all constants \code{c(ms, b*, eff, bp)}, where \code{b* = b} for \code{"ggw"} and \code{b* = b.c} for \code{"lqq"}, and one of \code{(eff, bp)} is \code{NA}.} \item{psi}{a string, either \code{"ggw"} or \code{"lqq"}.} } \details{ For some important special cases, the result of \code{.psi.*.findc()} are stored precomputed for efficiency reasons. These cases are (the defaults for \code{tuning.chi} and \code{tuning.psi} respectively in \code{\link{lmrob.control}()}s result, \tabular{ll}{ \code{tuning.chi} \tab\code{tuning.psi} \cr \code{c(-0.5, 1.5, NA, 0.5)} \tab\code{c(-0.5, 1.5, 0.95, NA)} } and for \code{"ggw"} additionally, these four cases: \tabular{ll}{ \code{tuning.chi} \tab\code{tuning.psi} \cr {} \tab\code{c(-0.5, 1.5, 0.85, NA)} \cr \code{c(-0.5, 1, NA, 0.5)} \tab\code{c(-0.5, 1, 0.95, NA)} \cr {} \tab\code{c(-0.5, 1, 0.85, NA)} } %tab Note that for \code{"ggw"}, exactly these \eqn{2+4 = 6} cases also allow fast \eqn{\rho} and \eqn{\chi} (aka \eqn{\tilde\rho(\cdot)}{rho~(.)}, see \code{\link{Mchi}}), function evaluations. For all other tuning constant settings, rho() evaluations are based on numerical integration via \R's own \code{Rdqags()} C function (part of R's official API). } \value{ a \code{\link{numeric}} vector of constants, for \code{"lqq"} or \code{"ggw"} psi functions, respectively: \describe{ \item{\code{"lqq"}:}{\eqn{(b, c, s) = (b/c * c, c, s = 1 - min_slope)},} \item{\code{"ggw"}:}{\eqn{(0, a, b, c, \rho(\infty))}{% (0, a, b, c, rho(Inf))}.} } \code{.psi.const(cc, psi)} returns the argument \code{cc} with the above constant vectors as attribute \code{"constants"}, in the case of \code{psi = "lqq"} in all cases (since \pkg{robustbase} version >= 0.93), for \code{psi = "ggw"} only in the non-standard cases. } \references{ See the vignette about %% ../vignettes/psi_functions.Rnw : \dQuote{\eqn{\psi}{psi}-Functions Available in Robustbase}. } \seealso{\code{\link{Mpsi}()} etc for the psi function definitions; \code{\link{.Mpsi.tuning.defaults}}, etc, for tuning constants' defaults for \code{\link{lmrob}()}. } \author{Manuel Koller (original) and Martin Maechler (arguments, export, docs). 
} \examples{ (c.ge95 <- .psi.ggw.findc(ms = -0.5, b = 1.5, eff = 0.95)) (c.ge90 <- .psi.ggw.findc(ms = -0.5, b = 1.5, eff = 0.90)) (c.gb50 <- .psi.ggw.findc(ms = -0.5, b = 1.5, bp = 0.50)) stopifnot(all.equal(c.ge95, c(0, 1.386362, 1.5, 1.0628199, 4.7773893), tol = 1e-5), all.equal(c.ge90, c(0, 1.0282811, 1.5, 0.87086259, 3.2075233), tol = 1e-5), all.equal(c.gb50, c(0, 0.20367394, 1.5, 0.29591308, 0.37033962),tol = 1e-5)) (cl.e.95 <- .psi.lqq.findc(ms = -0.5, b.c = 1.5, eff = .95)) (cl.b.50 <- .psi.lqq.findc(ms = -0.5, b.c = 1.5, bp = .50)) stopifnot(all.equal(cl.e.95, c(1.4734061, 0.98227073, 1.5), tol = 1e-5), all.equal(cl.b.50, c(0.40154568, 0.26769712, 1.5), tol = 1e-5)) } \keyword{utilities} robustbase/man/nlrob.control.Rd0000644000176200001440000000434614511464163016274 0ustar liggesusers\name{nlrob.control} \alias{nlrob.control} \title{Control Nonlinear Robust Regression Algorithms} \description{ Allow the user to specify details for the different nonlinear robust regression algorithms in \code{\link{nlrob}}. } \usage{ nlrob.control(method, psi = c("bisquare", "lqq", "welsh", "optimal", "hampel", "ggw"), init = c("S", "lts"), optimizer = "JDEoptim", optArgs = list(), ...) } \arguments{ \item{method}{\code{\link{character}} string specifying the method} \item{psi}{string specifying the psi-function which defines the estimator.} \item{init}{for some methods, currently, \code{"MM"} only, a string specifying the initial estimator. } \item{optimizer}{currently only \code{"JDEoptim"} from package \CRANpkg{DEoptimR}.} \item{optArgs}{ a \code{\link{list}} of optional arguments to the optimizer. Currently, that is \code{\link[DEoptimR]{JDEoptim}} from package \CRANpkg{DEoptimR}. } \item{\dots}{optional arguments depending on \code{method}, such as \code{fnscale}, \code{tuning.chi} or both \code{tuning.chi.tau} and \code{tuning.chi.scale}; for \code{method = "MM"} also \code{optim.control} to be passed to the \code{\link{optim}(.., hessian=TRUE)} call. %% Internally, \code{nlrob.control()} will choose (or check) defaults for the psi/rho/chi related tuning parameters, also depending on the \code{method} chosen; see e.g., the \sQuote{Examples}. } } %% Code in >>> ../R/nlregrob.R <<< %% \value{ a \code{\link{list}} with several named components. The contents depend quite a bit on the \code{method}. } %\author{Martin Maechler} \seealso{ \code{\link{nlrob}}; for some details, \code{\link{nlrob.algorithms}}. } \examples{ ## Show how the different 'method's have different smart defaults : str(nlrob.control("MM")) str(nlrob.control("MM", psi = "hampel"))# -> other tuning.psi.M and tuning.chi.scale defaults str(nlrob.control("MM", psi = "lqq", tol = 1e-10))# other tuning.psi.M & tuning.chi.scale defaults str(nlrob.control("tau")) str(nlrob.control("tau",psi= "lqq")) str(nlrob.control("CM")) # tuning.chi undefined, unneeded str(nlrob.control("CM", psi= "optimal")) str(nlrob.control("mtl")) } \keyword{utilities} robustbase/man/weights.lmrob.Rd0000644000176200001440000000240012343540277016254 0ustar liggesusers\name{weights.lmrob} \title{Extract Robustness and Model Weights} \alias{weights.lmrob} \alias{weights.glmrob} \description{ \code{weights()} extracts robustness weights or fitting (or prior) weights from a \code{lmrob} or \code{glmrob} object. } \usage{ \method{weights}{lmrob}(object, type = c("prior", "robustness"), ...) 
} \arguments{ \item{object}{ an object of class \code{"lmrob"} or \code{"glmrob"}, typically the result of a call to \code{\link{lmrob}}, or \code{\link{glmrob}}, respectively.} \item{type}{the type of weights to be returned. Either \code{"prior"} (default), or \code{"robustness"}.} \item{\dots}{not used currently.} } \details{ The \dQuote{prior weights} correspond to the weights specified using the \dQuote{weights} argument when calling \code{lmrob}. The \dQuote{robustness weights} are the weights assigned by the M-estimator of regression, \eqn{\psi(r_i/S) / (r_i/S)}. The robust coefficient estimate then numerically corresponds to a weighted least squares fit using the product of both types of weights as weights. } \value{ Weights extracted from the object \code{object}. } \author{Manuel Koller and Martin Maechler.} \seealso{ \code{\link{lmrob}}, \code{\link{glmrob}} and \code{\link{weights}} } robustbase/man/pulpfiber.Rd0000644000176200001440000000446513312375575015475 0ustar liggesusers\name{pulpfiber} \alias{pulpfiber} \encoding{utf-8} \docType{data} \title{Pulp Fiber and Paper Data} \description{ Measurements of aspects of pulp fibers and the paper produced from them. Four properties of each are measured in sixty-two samples. } \usage{data(pulpfiber, package="robustbase")} \format{ A data frame with 62 observations on the following 8 variables. \describe{ \item{\code{X1}}{numeric vector of arithmetic fiber length} \item{\code{X2}}{numeric vector of long fiber fraction} \item{\code{X3}}{numeric vector of fine fiber fraction} \item{\code{X4}}{numeric vector of zero span tensile} % \item{\ }{\ }% ------------- white space -- \item{\code{Y1}}{numeric vector of breaking length} \item{\code{Y2}}{numeric vector of elastic modulus} \item{\code{Y3}}{numeric vector of stress at failure} \item{\code{Y4}}{numeric vector of burst strength} } } \details{ Cited from the reference article: \emph{The dataset contains measurements of properties of pulp fibers and the paper made from them. The aim is to investigate relations between pulp fiber properties and the resulting paper properties. The dataset contains \eqn{n = 62} measurements of the following four pulp fiber characteristics: arithmetic fiber length, long fiber fraction, fine fiber fraction, and zero span tensile. The four paper properties that have been measured are breaking length, elastic modulus, stress at failure, and burst strength.} The goal is to predict the \eqn{q = 4} paper properties from the \eqn{p = 4} fiber characteristics. } \source{ Rousseeuw, P. J., Van Aelst, S., Van Driessen, K., and Agulló, J. (2004) Robust multivariate regression; \emph{Technometrics} \bold{46}, 293--305. Till 2016 available from \code{http://users.ugent.be/~svaelst/data/pulpfiber.txt} } \author{port to \R and this help page: Martin Maechler } \references{ Lee, J. (1992) \emph{Relationships Between Properties of Pulp-Fibre and Paper}, unpublished doctoral thesis, U. Toronto, Faculty of Forestry. } \examples{ data(pulpfiber) str(pulpfiber) pairs(pulpfiber, gap=.1) ## 2 blocks of 4 ..
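## Illustrative sketch (an added assumption, not from the cited reference): the
## 'Details' goal is to predict the paper properties Y* from the fiber properties X*;
## e.g., a robust MM-regression for the first paper property:
summary(lmrob(Y1 ~ X1 + X2 + X3 + X4, data = pulpfiber))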
c1 <- cov(pulpfiber) cR <- covMcd(pulpfiber) ## how different are they: the robust estimate shows the high correlations more clearly: symnum(cov2cor(c1)) symnum(cov2cor(cR$cov)) } \keyword{datasets} robustbase/man/summarizeRobWeights.Rd0000644000176200001440000000317012137052541017500 0ustar liggesusers\name{summarizeRobWeights} \alias{summarizeRobWeights} \title{Print a Nice "summary" of Robustness Weights} \description{ Print a nice \dQuote{summary} of a numeric vector of robustness weights. Observations with weights around zero are marked as outliers. } \usage{ summarizeRobWeights(w, digits = getOption("digits"), header = "Robustness weights:", eps = 0.1 / length(w), eps1 = 1e-3, \dots) } \arguments{ \item{w}{numeric vector of robustness weights.} \item{digits}{digits to be used for \code{\link{print}}ing.} \item{header}{string to be printed as header line.} \item{eps}{numeric tolerance \eqn{\epsilon}{eps}: values of \code{w} with \eqn{\left|w_i\right| < \epsilon/n}{|w[i]| < eps/n} are said to be outliers.} \item{eps1}{numeric tolerance: values of \code{w} with \eqn{\left|1 - w_i\right| < eps1}{|1 - w[i]| < eps1} are said to have weight \sQuote{\code{~= 1}}.} \item{\dots}{potential further arguments, passed to \code{\link{print}()}.} } \seealso{ The \code{\link{summary}} methods for \code{\link{lmrob}} and \code{\link{glmrob}} make use of \code{summarizeRobWeights()}. Our methods for \code{\link{weights}()}, \code{\link{weights.lmrob}(*, type="robustness")} and \code{\link{weights.glmrob}(*, type="robustness")}. } \value{ none; the function is used for its side effect of printing. } \author{Martin Maechler} \examples{ w <- c(1,1,1,1,0,1,1,1,1,0,1,1,.9999,.99999, .5,.6,1e-12) summarizeRobWeights(w) # two outside ~= {0,1} summarizeRobWeights(w, eps1 = 5e-5)# now three outside {0,1} ## See the summary() outputs } \keyword{utilities} robustbase/man/bushfire.Rd0000644000176200001440000000126613312375575015314 0ustar liggesusers\name{bushfire} \alias{bushfire} \docType{data} \title{ Campbell Bushfire Data } \description{ This data set was used by Campbell (1984) to locate bushfire scars. The dataset contains satellite measurements on five frequency bands, corresponding to each of 38 pixels. } \usage{data(bushfire, package="robustbase")} \format{ A data frame with 38 observations on 5 variables. % \describe{} } %\Note{} \source{ Maronna, R.A. and Yohai, V.J. (1995) The Behavior of the Stahel-Donoho Robust Multivariate Estimator. \emph{Journal of the American Statistical Association} \bold{90}, 330--341. } %\seealso{} \examples{ data(bushfire) plot(bushfire) covMcd(bushfire) } \keyword{datasets} robustbase/man/SiegelsEx.Rd0000644000176200001440000000175413312375575015377 0ustar liggesusers\name{SiegelsEx} \alias{SiegelsEx} \docType{data} \title{Siegel's Exact Fit Example Data} \description{ A small counterexample data set devised by Andrew Siegel. Six (out of nine) data points lie on the line \eqn{y = 0} such that some robust regression estimators exhibit the \dQuote{\emph{exact fit}} property. } \usage{data(SiegelsEx, package="robustbase")} \format{ A data frame with 9 observations on the following 2 variables. \describe{ \item{\code{x}}{a numeric vector} \item{\code{y}}{a numeric vector} } } \source{ Emerson and Hoaglin (1983, p.139) } \references{ Peter J. Rousseeuw and Annick M.
Leroy (1987) \emph{Robust Regression and Outlier Detection} Wiley, p.60--61 } \examples{ data(SiegelsEx) plot(SiegelsEx, main = "Siegel's example for 'exact fit'") abline( lm(y ~ x, data = SiegelsEx)) abline(MASS::lqs(y ~ x, data = SiegelsEx, method = "lms"), col = 2) legend("topright", leg = c("lm", "LMS"), col=1:2, lwd=1, inset = 1/20) } \keyword{datasets} robustbase/man/fullRank.Rd0000644000176200001440000000432313170062205015245 0ustar liggesusers\name{fullRank} \alias{fullRank} \title{Remove Columns (or Rows) From a Matrix to Make It Full Rank} \description{ From the QR decomposition with pivoting (\code{\link{qr}(x, tol)} if \eqn{n \ge p}), if the matrix is not of full rank, the corresponding columns (\eqn{n \ge p}{n >= p}) or rows (\eqn{n < p}) are omitted to form a full rank matrix. } \usage{% -> ../R/adjoutlyingness.R fullRank(x, tol = 1e-7, qrx = qr(x, tol=tol)) } \arguments{ \item{x}{a numeric matrix of dimension \eqn{n \times p}{n * p}, or a similar object for which \code{\link{qr}()} works.} \item{tol}{tolerance for determining rank (deficiency). Currently it is simply passed to \code{\link{qr}}.} \item{qrx}{optionally may be used to pass a \code{\link{qr}(x, ..)}; only used when \code{p <= n}.} } \value{ a version of the matrix \code{x}, with fewer columns or rows if \code{x}'s rank was smaller than \code{min(n,p)}. If \code{x} is of full rank, it is returned unchanged. } \author{Martin Maechler} \note{ This is useful for robustness algorithms that rely on \eqn{X} matrices of full rank, e.g., \code{\link{adjOutlyingness}}. This also works for numeric data frames and whenever \code{qr()} works correctly. } \seealso{ \code{\link{qr}}; for more sophisticated rank determination, \code{\link[Matrix]{rankMatrix}} from package \CRANpkg{Matrix}. } \examples{ stopifnot(identical(fullRank(wood), wood)) ## More sophisticated and delicate dim(T <- tcrossprod(data.matrix(toxicity))) # 38 x 38 dim(T. <- fullRank(T)) # 38 x 10 if(requireNamespace("Matrix")) { rMmeths <- eval(formals(Matrix::rankMatrix)$method) rT. <- sapply(rMmeths, function(.m.) Matrix::rankMatrix(T., method = .m.)) print(rT.) # "qr" (= "qrLinpack"): 13, others rather 10 } dim(T.2 <- fullRank(T, tol = 1e-15))# 38 x 18 dim(T.3 <- fullRank(T, tol = 1e-12))# 38 x 13 dim(T.3 <- fullRank(T, tol = 1e-10))# 38 x 13 dim(T.3 <- fullRank(T, tol = 1e-8 ))# 38 x 12 dim(T.) # default from above 38 x 10 dim(T.3 <- fullRank(T, tol = 1e-5 ))# 38 x 10 -- still plot(svd(T, 0,0)$d, log="y", main = "singular values of T", yaxt="n") axis(2, at=10^(-14:5), las=1) ## pretty clearly indicates that rank 10 is "correct" here. } \keyword{algebra} \keyword{array} robustbase/man/milk.Rd0000644000176200001440000000421213312375575014433 0ustar liggesusers\name{milk} \alias{milk} \docType{data} \title{Daudin's Milk Composition Data} \description{ Daudin et al. (1988) give 8 readings on the composition of 86 containers of milk. They speak about 85 observations, but this can be explained by the fact that observations 63 and 64 are identical (as noted by Rocke (1996)). The data set was used for analysing the stability of principal component analysis by the bootstrap method. In the same context, but using high breakdown point robust PCA, these data were analysed by Todorov et al. (1994). Atkinson (1994) used these data to illustrate the forward search algorithm for identifying multiple outliers.
} \usage{data(milk, package="robustbase")} \format{ A data frame with 86 observations on the following 8 variables, all but the first measure units in \emph{grams / liter}. \describe{ \item{\code{X1}}{density} \item{\code{X2}}{fat content} \item{\code{X3}}{protein content} \item{\code{X4}}{casein content} \item{\code{X5}}{cheese dry substance measured in the factory} \item{\code{X6}}{cheese dry substance measured in the laboratory} \item{\code{X7}}{milk dry substance} \item{\code{X8}}{cheese product} } } \source{ Daudin, J.J. Duby, C. and Trecourt, P. (1988) Stability of Principal Component Analysis Studied by the Bootstrap Method; \emph{Statistics} \bold{19}, 241--258. } \references{ Todorov, V., Neyko, N., Neytchev, P. (1994) Stability of High Breakdown Point Robust PCA, in \emph{Short Communications, COMPSTAT'94}; Physica Verlag, Heidelberg. Atkinson, A.C. (1994) Fast Very Robust Methods for the Detection of Multiple Outliers. \emph{J. Amer. Statist. Assoc.} \bold{89} 1329--1339. Rocke, D. M. and Woodruff, D. L. (1996) Identification of Outliers in Multivariate Data; \emph{J. Amer. Statist. Assoc.} \bold{91} (435), 1047--1061. } \examples{ data(milk) (c.milk <- covMcd(milk)) summarizeRobWeights(c.milk $ mcd.wt)# 19..20 outliers umilk <- unique(milk) # dropping obs.64 (== obs.63) summary(cumilk <- covMcd(umilk, nsamp = "deterministic")) # 20 outliers %%not yet ## the best 'crit' we've seen was } \keyword{datasets} robustbase/man/condroz.Rd0000644000176200001440000000245713312375575015166 0ustar liggesusers\name{condroz} \alias{condroz} \encoding{utf8} \docType{data} \title{ Condroz Data } \description{ Dataset with pH-value and Calcium content in soil samples, collected in different communities of the Condroz region in Belgium. The data pertain to a subset of 428 samples with a pH-value between 7.0 and 7.5. } \usage{data(condroz, package="robustbase")} \format{ A data frame with 428 observations on the following 2 variables. \describe{ \item{\code{Ca}}{Calcium content of the soil sample} \item{\code{pH}}{pH value of the soil sample} } } \details{ For more information on the dataset, cf. Goegebeur et al. (2005). } \source{ Hubert and Vandervieren (2006), p. 10. This dataset is also studied in Vandewalle et al. (2004). } \references{ See also those for \code{\link{adjbox}}. Goegebeur, Y., Planchon, V., Beirlant, J., Oger, R. (2005). Quality Assesment of Pedochemical Data Using Extreme Value Methodology, Journal of Applied Science, 5, p. 1092-1102. Vandewalle, B., Beirlant, J., Hubert, M. (2004). A robust estimator of the tail index based on an exponential regression model, in Hubert, M., Pison G., Struyf, A. and S. Van Aelst, ed., Theory and Applications of Recent Robust Methods, Birkhäuser, Basel, p. 367-376. } \examples{ adjbox(condroz$Ca) } \keyword{datasets} robustbase/man/hbk.Rd0000644000176200001440000000254013710054521014230 0ustar liggesusers\name{hbk} \alias{hbk} \docType{data} \title{Hawkins, Bradu, Kass's Artificial Data} \description{ Artificial Data Set generated by Hawkins, Bradu, and Kass (1984). The data set consists of 75 observations in four dimensions (one response and three explanatory variables). It provides a good example of the masking effect. The first 14 observations are outliers, created in two groups: 1--10 and 11--14. Only observations 12, 13 and 14 appear as outliers when using classical methods, but can be easily unmasked using robust distances computed by, e.g., MCD - covMcd(). 
} \usage{data(hbk, package="robustbase")} \format{ A data frame with 75 observations on 4 variables, where the last variable is the dependent one. \describe{ \item{X1}{x[,1]} \item{X2}{x[,2]} \item{X3}{x[,3]} \item{Y}{y} } } \note{ This data set is also available in package \CRANpkg{wle} as \code{artificial}. } \source{ Hawkins, D.M., Bradu, D., and Kass, G.V. (1984) Location of several outliers in multiple regression data using elemental sets. \emph{Technometrics} \bold{26}, 197--208. P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.94. } \examples{ data(hbk) plot(hbk) summary(lm.hbk <- lm(Y ~ ., data = hbk)) hbk.x <- data.matrix(hbk[, 1:3]) (cHBK <- covMcd(hbk.x)) } \keyword{datasets} robustbase/man/ltsPlot.Rd0000644000176200001440000001022213434014060015116 0ustar liggesusers\name{plot.lts} \alias{plot.lts} \alias{ltsPlot} \title{Robust LTS Regression Diagnostic Plots} \description{ Four plots (selectable by \code{which}) are currently provided: \enumerate{ \item a plot of the standardized residuals versus their index, \item a plot of the standardized residuals versus fitted values, \item a Normal Q-Q plot of the standardized residuals, and \item a regression diagnostic plot (standardized residuals versus robust distances of the predictor variables). } } \usage{ \method{plot}{lts}(x, which = c("all","rqq","rindex","rfit","rdiag"), classic=FALSE, ask = (which[1] == "all" && dev.interactive()), id.n, \dots) % ltsPlot(x, which = c("all","rqq","rindex","rfit","rdiag"), classic=FALSE, ask=(which=="all" && dev.interactive()), id.n, \dots) } \arguments{ \item{x}{a \code{lts} object, typically result of \code{ltsReg}.} \item{which}{string indicating which plot to show. See the \emph{Details} section for a description of the options. Defaults to \code{"all"}.}. \item{classic}{whether to plot the classical distances too. Default is \code{FALSE}.}. \item{ask}{logical indicating if the user should be \emph{ask}ed before each plot, see \code{\link{par}(ask=.)}. Defaults to \code{which == "all" && \link{dev.interactive}()}. } \item{id.n}{number of observations to be identified by a label starting with the most extreme. Default is the number of identified outliers (can be different for the different plots - see Details).} \item{\dots}{other parameters to be passed through to plotting functions.} } \details{ This function produces several plots based on the robust and classical regression estimates. Which of them to select is specified by the attribute \code{which}. The possible options are: \describe{ \item{\code{rqq}:}{Normal Q-Q plot of the standardized residuals;} \item{\code{rindex}:}{plot of the standardized residuals versus their index;} \item{\code{rfit}:}{plot of the standardized residuals versus fitted values;} \item{\code{rdiag}:}{regression diagnostic plot.} } The normal quantile plot produces a normal Q-Q plot of the standardized residuals. A line is drawn which passes through the first and third quantile. The \code{id.n} residuals with largest distances from this line are identified by labels (the observation number). The default for \code{id.n} is the number of regression outliers (lts.wt==0). In the Index plot and in the Fitted values plot the standardized residuals are displayed against the observation number or the fitted value respectively. A horizontal dashed line is drawn at 0 and two solid horizontal lines are located at +2.5 and -2.5. The id.n residuals with largest absolute values are identified by labels (the observation number). 
The default for id.n is the number regression outliers (lts.wt==0). The regression diagnostic plot, introduced by Rousseeuw and van Zomeren (1990), displays the standardized residuals versus robust distances. Following Rousseeuw and van Zomeren (1990), the horizontal dashed lines are located at +2.5 and -2.5 and the vertical line is located at the upper 0.975 percent point of the chi-squared distribution with p degrees of freedom. The id.n residuals with largest absolute values and/or largest robust Mahalanobis distances are identified by labels (the observation number). The default for id.n is the number of all outliers: regression outliers (lts.wt==0) + leverage (bad and good) points (RD > 0.975 percent point of the chi-squared distribution with p degrees of freedom). } %\value{} \references{ P. J. Rousseeuw and van Zomeren, B. C. (1990). Unmasking Multivariate Outliers and Leverage Points. \emph{Journal of the American Statistical Association} \bold{85}, 633--639. P. J. Rousseeuw and K. van Driessen (1999) A fast algorithm for the minimum covariance determinant estimator. \emph{Technometrics} \bold{41}, 212--223. } \seealso{ \code{\link{covPlot}} } \examples{ data(hbk) lts <- ltsReg(Y ~ ., data = hbk) lts plot(lts, which = "rqq") %% Improve: also show model 'call' } \keyword{hplot} \keyword{multivariate} robustbase/man/Qn.Rd0000644000176200001440000001514414123440375014053 0ustar liggesusers\name{Qn} \alias{Qn} \alias{Qn.old} \alias{s_Qn} % \title{Robust Location-Free Scale Estimate More Efficient than MAD} \description{ Compute the robust scale estimator \eqn{Q_n}{Qn}, an efficient alternative to the MAD. By default, \eqn{Q_n(x_1, \ldots, x_n)}{Qn(x1, .., xn)} is the \eqn{k}-th order statistic (a quantile) of the \code{choose(n, 2)} absolute differences \eqn{|x_i - x_j|}{abs(x[i] - x[j])}, (for \eqn{1 \le i < j \le n}{1 <= i < j <= n}), where by default (originally only possible value) \eqn{k = choose(n\%/\% 2 + 1, 2)} which is about the first quartile (25\% quantile) of these pairwise differences. See the references for more. } \usage{ Qn(x, constant = NULL, finite.corr = is.null(constant) && missing(k), na.rm = FALSE, k = choose(n \%/\% 2 + 1, 2), warn.finite.corr = TRUE) s_Qn(x, mu.too = FALSE, \dots) } \arguments{% >> ../R/qnsn.R << \item{x}{numeric vector of observations.} \item{constant}{number by which the result is multiplied; the default achieves consistency for normally distributed data. Note that until Nov. 2010, \dQuote{thanks} to a typo in the very first papers, a slightly wrong default constant, 2.2219, was used instead of the correct one which is equal to \code{1 / (sqrt(2) * qnorm(5/8))} (as mentioned already on p.1277, after (3.7) in Rousseeuw and Croux (1993)). If you need the old slightly off version for historical reproducibility, you can use \code{Qn.old()}. Note that the relative difference is only about 1 in 1000, and that the correction should not affect the finite sample corrections for \eqn{n \le 9}{n <= 9}. } \item{finite.corr}{logical indicating if the finite sample bias correction factor should be applied. Defaults to \code{TRUE} unless \code{constant} is specified. Note the for non-default \code{k}, the consistency \code{constant} already depends on \code{n} leading to \emph{some} finite sample correction, but no simulation-based small-\code{n} correction factors are available.} \item{na.rm}{logical specifying if missing values (\code{\link{NA}}) should be removed from \code{x} before further computation. 
If false as by default, and if there are \code{NA}s, i.e., \code{if(anyNA(x))}, the result will be \code{NA}.} \item{k}{integer, typically half of n, specifying the \dQuote{quantile}, i.e., rather the order statistic that \code{Qn()} should return; for the Qn() proper, this has been hard wired to \code{choose(n\%/\%2 +1, 2)}, i.e., \eqn{\lfloor\frac{n}{2}\rfloor +1}{floor(n/2) +1}. Choosing a large \code{k} is less robust but allows to get non-zero results in case the default \code{Qn()} is zero.} \item{warn.finite.corr}{logical indicating if a \code{\link{warning}} should be signalled when \code{k} is non-default, in which case specific small-\eqn{n} correction is not yet provided.} \item{mu.too}{logical indicating if the \code{\link[stats]{median}(x)} should also be returned for \code{s_Qn()}.} \item{\dots}{potentially further arguments for \code{s_Qn()} passed to \code{Qn()}.} } \value{ \code{Qn()} returns a number, the \eqn{Q_n}{Qn} robust scale estimator, scaled to be consistent for \eqn{\sigma^2} and i.i.d. Gaussian observations, optionally bias corrected for finite samples. \code{s_Qn(x, mu.too=TRUE)} returns a length-2 vector with location (\eqn{\mu}) and scale; this is typically only useful for \code{\link{covOGK}(*, sigmamu = s_Qn)}. } \details{ As the (default, consistency) constant needed to be corrected, the finite sample correction has been based on a much more extensive simulation, and on a 3rd or 4th degree polynomial model in \eqn{1/n} for odd or even n, respectively. } \references{ Rousseeuw, P.J. and Croux, C. (1993) Alternatives to the Median Absolute Deviation, \emph{Journal of the American Statistical Association} \bold{88}, 1273--1283. \doi{10.2307/2291267}% JSTOR % MM: ~/save/papers/Rousseeuw/93/R+Croux_MAD_Sn_Qn.pdf Christophe Croux and Peter J. Rousseeuw (1992) A class of high-breakdown scale estimators based on subranges , \emph{Communications in Statistics - Theory and Methods} \bold{21}, 1935--1951; \doi{10.1080/03610929208830889} Christophe Croux and Peter J. Rousseeuw (1992) Time-Efficient Algorithms for Two Highly Robust Estimators of Scale, \emph{Computational Statistics, Vol. 1}, ed. Dodge and Whittaker, Physica-Verlag Heidelberg, 411--428; available via Springer Link. % MM: ~/save/papers/robust-diverse/Croux-Rousseeuw-Timeff_Scale_1992.pdf %% no longer \url{http://win-www.uia.ac.be/u/statis/abstract/Timeff92.htm}. About the typo in the \code{constant}:\cr Christophe Croux (2010) Private e-mail, Fri Jul 16, w/ Subject \emph{Re: Slight inaccuracy of Qn implementation \dots\dots}. } \seealso{\code{\link[stats]{mad}} for the \sQuote{most robust} but much less efficient scale estimator; \code{\link{Sn}} for a similar faster but less efficient alternative. Finally, \code{\link{scaleTau2}} which some consider \dQuote{uniformly} better than Qn or competitors. } \author{Original Fortran code: Christophe Croux and Peter Rousseeuw \email{rousse@wins.uia.ac.be}. \cr Port to C and R: Martin Maechler, \email{maechler@R-project.org} } \examples{ set.seed(153) x <- sort(c(rnorm(80), rt(20, df = 1))) s_Qn(x, mu.too = TRUE) Qn(x, finite.corr = FALSE) ## A simple pure-R version of Qn() -- slow and memory-rich for large n: O(n^2) Qn0R <- function(x, k = choose(n \%/\% 2 + 1, 2)) { % n <- length(x <- sort(x)) if(n == 0) return(NA) else if(n == 1) return(0.) 
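## check 'k'; then, among all n*(n-1)/2 pairwise differences of the (sorted) x,
## the k-th smallest is returned, using a partial sort for speed: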
stopifnot(is.numeric(k), k == as.integer(k), 1 <= k, k <= n*(n-1)/2) m <- outer(x,x,"-")# abs not needed as x[] is sorted sort(m[lower.tri(m)], partial = k)[k] } (Qx1 <- Qn(x, constant=1)) # 0.5498463 ## the C-algorithm "rounds" to 'float' single precision .. stopifnot(all.equal(Qx1, Qn0R(x), tol = 1e-6)) (qn <- Qn(c(1:4, 10, Inf, NA), na.rm=TRUE)) stopifnot(is.finite(qn), all.equal(4.075672524, qn, tol=1e-10)) ## -- compute for different 'k' : n <- length(x) # = 100 here (k0 <- choose(floor(n/2) + 1, 2)) # 51*50/2 == 1275 stopifnot(identical(Qx1, Qn(x, constant=1, k=k0))) nn2 <- n*(n-1)/2 all.k <- 1:nn2 system.time(Qss <- sapply(all.k, function(k) Qn(x, 1, k=k))) system.time(Qs <- Qn (x, 1, k = all.k)) system.time(Qs0 <- Qn0R(x, k = all.k) ) stopifnot(exprs = { Qs[1] == min(diff(x)) Qs[nn2] == diff(range(x)) all.equal(Qs, Qss, tol = 1e-15) # even exactly all.equal(Qs0, Qs, tol = 1e-7) # see 2.68e-8, as Qn() C-code rounds to (float) }) plot(2:nn2, Qs[-1], type="b", log="y", main = "Qn(*, k), k = 2..n(n-1)/2") } \keyword{robust} \keyword{univar} robustbase/man/Sn.Rd0000644000176200001440000000463014123440375014053 0ustar liggesusers\name{Sn} \alias{Sn} \alias{s_Sn} % \title{Robust Location-Free Scale Estimate More Efficient than MAD} \description{ Compute the robust scale estimator \eqn{S_n}{Sn}, an efficient alternative to the MAD. } \usage{ Sn(x, constant = 1.1926, finite.corr = missing(constant), na.rm = FALSE) s_Sn(x, mu.too = FALSE, \dots) } \arguments{ \item{x}{numeric vector of observations.} \item{constant}{number by which the result is multiplied; the default achieves consistency for normally distributed data.} \item{finite.corr}{logical indicating if the finite sample bias correction factor should be applied. Defaults to \code{TRUE} unless \code{constant} is specified.} \item{na.rm}{logical specifying if missing values (\code{\link{NA}}) should be removed from \code{x} before further computation. If false as by default, and if there are \code{NA}s, i.e., \code{if(anyNA(x))}, the result will be \code{NA}.} \item{mu.too}{logical indicating if the \code{\link[stats]{median}(x)} should also be returned for \code{s_Sn()}.} \item{\dots}{potentially further arguments for \code{s_Sn()} passed to \code{Sn()}.} } \value{ \code{Sn()} returns a number, the \eqn{S_n}{Sn} robust scale estimator, scaled to be consistent for \eqn{\sigma^2} and i.i.d. Gaussian observations, optionally bias corrected for finite samples. \code{s_Sn(x, mu.too=TRUE)} returns a length-2 vector with location (\eqn{\mu}) and scale; this is typically only useful for \code{\link{covOGK}(*, sigmamu = s_Sn)}. } \details{ \eqn{S_n} is, up to its consistency \code{constant}, a median of medians of pairwise absolute differences, \eqn{S_n = c \cdot \mathrm{med}_i \{\mathrm{med}_j |x_i - x_j|\}}{Sn = c * med_i{ med_j |x[i] - x[j]| }}; see Rousseeuw and Croux (1993) for the precise definition (using \dQuote{low} and \dQuote{high} medians) and its properties. } \references{ Rousseeuw, P.J. and Croux, C. (1993) Alternatives to the Median Absolute Deviation, \emph{Journal of the American Statistical Association} \bold{88}, 1273--1283. } \seealso{\code{\link[stats]{mad}} for the \sQuote{most robust} but much less efficient scale estimator; \code{\link{Qn}} for a similar more efficient but slower alternative; \code{\link{scaleTau2}}. } \author{Original Fortran code: Christophe Croux and Peter Rousseeuw \email{rousse@wins.uia.ac.be}.
\cr Port to C and R: Martin Maechler, \email{maechler@R-project.org} } \examples{ x <- c(1:10, 100+1:9)# 9 outliers out of 19 Sn(x) Sn(x, c=1)# 9 Sn(x[1:18], c=1)# 9 set.seed(153) x <- sort(c(rnorm(80), rt(20, df = 1))) s_Sn(x, mu.too=TRUE) (s <- Sn(c(1:4, 10, Inf, NA), na.rm=TRUE)) stopifnot(is.finite(s), all.equal(3.5527554, s, tol=1e-10)) } \keyword{robust} \keyword{univar} robustbase/man/toxicity.Rd0000644000176200001440000000350713713564014015352 0ustar liggesusers\name{toxicity} \encoding{utf8} \alias{toxicity} \docType{data} \title{Toxicity of Carboxylic Acids Data} \description{ The aim of the experiment was to predict the toxicity of carboxylic acids on the basis of several molecular descriptors. } \usage{data(toxicity, package="robustbase")} \format{ A data frame with 38 observations on the following 10 variables which are attributes for carboxylic acids: \describe{ \item{\code{toxicity}}{aquatic toxicity, defined as \eqn{\log(IGC_{50}^{-1})}{log(IGC50^(-1))}; typically the \dQuote{response}.} \item{\code{logKow}}{\eqn{log Kow}, the partition coefficient} \item{\code{pKa}}{pKa: the dissociation constant} \item{\code{ELUMO}}{\bold{E}nergy of the \bold{l}owest \bold{u}noccupied \bold{m}olecular \bold{o}rbital} \item{\code{Ecarb}}{Electrotopological state of the \bold{carb}oxylic group} \item{\code{Emet}}{Electrotopological state of the \bold{met}hyl group} \item{\code{RM}}{Molar refractivity} \item{\code{IR}}{Refraction index} \item{\code{Ts}}{Surface tension} \item{\code{P}}{Polarizability} } } % \details{ % } \source{ The website accompanying the MMY-book: \url{https://www.wiley.com/legacy/wileychi/robust_statistics/} } \references{ Maguna, F.P., Núñez, M.B., Okulik, N.B. and Castro, E.A. (2003) Improved QSAR analysis of the toxicity of aliphatic carboxylic acids; \emph{Russian Journal of General Chemistry} \bold{73}, 1792--1798. } \examples{ data(toxicity) summary(toxicity) plot(toxicity) plot(toxicity ~ pKa, data = toxicity) ## robustly scale the data (to scale 1) using Qn (scQ.tox <- sapply(toxicity, Qn)) scTox <- scale(toxicity, center = FALSE, scale = scQ.tox) csT <- covOGK(scTox, n.iter = 2, sigmamu = s_Qn, weight.fn = hard.rejection) as.dist(round(cov2cor(csT$cov), 2)) } \keyword{datasets} robustbase/man/wood.Rd0000644000176200001440000000210613312375575014447 0ustar liggesusers\name{wood} \alias{wood} \docType{data} \title{Modified Data on Wood Specific Gravity} \description{ The original data are from Draper and Smith (1966) and were used to determine the influence of anatomical factors on wood specific gravity, with five explanatory variables and an intercept. These data were contaminated by replacing a few observations with outliers. } \usage{data(wood, package="robustbase")} \format{ A data frame with 20 observations on the following 6 variables. \describe{ \item{x1, x2, x3, x4, x5}{explanatory \dQuote{anatomical} wood variables.} \item{y}{wood specific gravity, the target variable.} } } \source{ Draper and Smith (1966, p.227) Peter J. Rousseeuw and Annick M. Leroy (1987) \emph{Robust Regression and Outlier Detection} Wiley, p.243, table 8. 
} \examples{ data(wood) plot(wood) summary( lm.wood <- lm(y ~ ., data = wood)) summary(rlm.wood <- MASS::rlm(y ~ ., data = wood)) summary(lts.wood <- ltsReg(y ~ ., data = wood)) wood.x <- as.matrix(wood)[,1:5] c_wood <- covMcd(wood.x) c_wood } \keyword{datasets} robustbase/man/M.psi.Rd0000644000176200001440000002020114410263765014456 0ustar liggesusers\name{Mpsi} \title{Psi / Chi / Wgt / Rho Functions for *M-Estimation} \alias{Mchi} \alias{Mpsi} \alias{Mwgt} \alias{MrhoInf} \alias{.Mchi} \alias{.Mpsi} \alias{.Mwgt} \alias{.Mwgt.psi1} \alias{.MrhoInf} \alias{.psi2ipsi} \alias{.regularize.Mpsi} \description{ Compute Psi / Chi / Wgt / Rho functions for M-estimation, i.e., including MM, etc. For definitions and details, please use the vignette \href{https://cran.r-project.org/package=robustbase/vignettes/psi_functions.pdf}{% \dQuote{\eqn{\psi}{psi}-Functions Available in Robustbase}}. \code{MrhoInf(x)} computes \eqn{\rho(\infty)}{rho(Inf)}, i.e., the normalizing or scaling constant for the transformation from \eqn{\rho(\cdot)}{rho(.)} to \eqn{\tilde\rho(\cdot)}{rho~(.)}, where the latter, aka as \eqn{\chi()}{chi()} fulfills \eqn{\tilde\rho(\infty) = 1}{rho~(Inf) = 1} which makes only sense for \dQuote{redescending} psi functions, i.e., not for \code{"huber"}. \code{Mwgt(x, *)} computes \eqn{\psi(x)/x} (fast and numerically accurately). } \usage{ Mpsi(x, cc, psi, deriv = 0) Mchi(x, cc, psi, deriv = 0) Mwgt(x, cc, psi) MrhoInf(cc, psi) .Mwgt.psi1(psi, cc = .Mpsi.tuning.default(psi)) .regularize.Mpsi(psi, redescending = TRUE) } \arguments{ \item{x}{numeric (\dQuote{abscissa} values) vector, possibly with \code{\link{attributes}} such as \code{\link{dim}} or \code{\link{names}}, etc. These are preserved for the \code{M*()} functions (but not the \code{.M()} ones).} \item{cc}{numeric tuning constant, for some \code{psi} of length \eqn{> 1}.} \item{psi}{a string specifying the psi / chi / rho / wgt function; either \code{"huber"}, or one of the same possible specifiers as for \code{psi} in \code{\link{lmrob.control}}, i.e. currently, \code{"bisquare"}, \code{"lqq"}, \code{"welsh"}, \code{"optimal"}, \code{"hampel"}, or \code{"ggw"}.} \item{deriv}{an integer, specifying the \emph{order} of derivative to consider; particularly, \code{Mpsi(x, *, deriv = -1)} is the principal function of \eqn{\psi()}{psi()}, typically denoted \eqn{\rho()}{rho()} in the literature. For some psi functions, currently \code{"huber"}, \code{"bisquare"}, \code{"hampel"}, and \code{"lqq"}, \code{deriv = 2} is implemented, for the other psi's only \eqn{d \in \{-1,0,1\}}{d in {-1,0,1\}}.}} \item{redescending}{logical indicating in \code{.regularize.Mpsi(psi,.)} if the \code{psi} function is redescending.} } \details{ Theoretically, \code{Mchi()} would not be needed explicitly as it can be computed from \code{Mpsi()} and \code{MrhoInf()}, namely, by \preformatted{Mchi(x, *, deriv = d) == Mpsi(x, *, deriv = d-1) / MrhoInf(*)} for \eqn{d = 0, 1, 2} (and \sQuote{*} containing \code{par, psi}, and equality is in the sense of \code{\link{all.equal}(x,y, tol)} with a small \code{tol}. Similarly, \code{Mwgt} would not be needed strictly, as it could be defined via \code{Mpsi}), but the explicit definition takes care of 0/0 and typically is of a more simple form. For experts, there are slightly even faster versions, \code{.Mpsi()}, \code{.Mwgt()}, etc. 
\code{.Mwgt.psi1()} mainly a utility for \code{\link{nlrob}()}, returns a \emph{\code{\link{function}}} with similar semantics as \code{\link[MASS]{psi.hampel}}, \code{\link[MASS]{psi.huber}}, or \code{\link[MASS]{psi.bisquare}} from package \CRANpkg{MASS}. Namely, a function with arguments \code{(x, deriv=0)}, which for \code{deriv=0} computes \code{Mwgt(x, cc, psi)} and otherwise computes \code{Mpsi(x, cc, psi, deriv=deriv)}. \code{.Mpsi()}, \code{.Mchi()}, \code{.Mwgt()}, and \code{.MrhoInf()} are low-level versions of \code{Mpsi()}, \code{Mchi()}, \code{Mwgt()}, and \code{MrhoInf()}, respectively, and \code{.psi2ipsi()} provides the psi-function integer codes needed for \code{ipsi} argument of the \code{.M*()} functions. For \code{psi = "ggw"}, the \eqn{\rho()}{rho()} function has no closed form and must be computed via numerical integration, apart from 6 special cases including the defaults, see the \sQuote{Details} in \code{help(\link{.psi.ggw.findc})}. \code{.Mpsi.regularize()} may (rarely) be used to regularize a psi function. } \value{ a numeric vector of the same length as \code{x}, with corresponding function (or derivative) values. } \references{ See the vignette about %% ../vignettes/psi_functions.Rnw : \dQuote{\eqn{\psi}{psi}-Functions Available in Robustbase}. } \author{ Manuel Koller, notably for the original C implementation; tweaks and speedup via \code{\link{.Call}} and \code{.M*()} etc by Martin Maechler. } \seealso{ \code{\link{psiFunc}} and the \code{\linkS4class{psi_func}} class, both of which provide considerably more on the \R side, but are less optimized for speed. \code{\link{.Mpsi.tuning.defaults}}, etc, for tuning constants' defaults for\code{lmrob()}, and \code{\link{.psi.ggw.findc}()} utilities to construct such constants' vectors. } \examples{ x <- seq(-5,7, by=1/8) matplot(x, cbind(Mpsi(x, 4, "biweight"), Mchi(x, 4, "biweight"), Mwgt(x, 4, "biweight")), type = "l") abline(h=0, v=0, lty=2, col=adjustcolor("gray", 0.6)) hampelPsi (ccHa <- hampelPsi @ xtras $ tuningP $ k) psHa <- hampelPsi@psi(x) % FIXME: interesting as long as hampelPsi does not use Mpsi(... "hampel") ! 
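## Illustrative sketch: numerically check the identity stated in 'Details',
## Mchi(x, cc, psi) == Mpsi(x, cc, psi, deriv = -1) / MrhoInf(cc, psi) :
stopifnot(all.equal(Mchi(x, cc = ccHa, psi = "hampel"),
                    Mpsi(x, cc = ccHa, psi = "hampel", deriv = -1) / MrhoInf(ccHa, "hampel")))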
## using Mpsi(): Mp.Ha <- Mpsi(x, cc = ccHa, psi = "hampel") stopifnot(all.equal(Mp.Ha, psHa, tolerance = 1e-15)) psi.huber <- .Mwgt.psi1("huber") if(getRversion() >= "3.0.0") stopifnot(identical(psi.huber, .Mwgt.psi1("huber", 1.345), ignore.env=TRUE)) curve(psi.huber(x), -3, 5, col=2, ylim = 0:1) curve(psi.huber(x, deriv=1), add=TRUE, col=3) ## and show that this is indeed the same as MASS::psi.huber() : x <- runif(256, -2,3) stopifnot(all.equal(psi.huber(x), MASS::psi.huber(x)), all.equal( psi.huber(x, deriv=1), as.numeric(MASS::psi.huber(x, deriv=1)))) ## and how to get MASS::psi.hampel(): psi.hampel <- .Mwgt.psi1("Hampel", c(2,4,8)) x <- runif(256, -4, 10) stopifnot(all.equal(psi.hampel(x), MASS::psi.hampel(x)), all.equal( psi.hampel(x, deriv=1), as.numeric(MASS::psi.hampel(x, deriv=1)))) ## "lqq" / "LQQ" and its tuning constants: ctl0 <- lmrob.control(psi = "lqq", tuning.psi=c(-0.5, 1.5, 0.95, NA)) ctl <- lmrob.control(psi = "lqq", tuning.psi=c(-0.5, 1.5, 0.90, NA)) ctl0$tuning.psi ## keeps the vector _and_ has "constants" attribute: ## [1] -0.50 1.50 0.95 NA ## attr(,"constants") ## [1] 1.4734061 0.9822707 1.5000000 ctl$tuning.psi ## ditto: ## [1] -0.5 1.5 0.9 NA \\ .."constants" 1.213726 0.809151 1.500000 stopifnot(all.equal(Mpsi(0:2, cc = ctl$tuning.psi, psi = ctl$psi), c(0, 0.977493, 1.1237), tol = 6e-6)) x <- seq(-4,8, by = 1/16) ## Show how you can use .Mpsi() equivalently to Mpsi() stopifnot(all.equal( Mpsi(x, cc = ctl$tuning.psi, psi = ctl$psi), .Mpsi(x, ccc = attr(ctl$tuning.psi, "constants"), ipsi = .psi2ipsi("lqq")))) stopifnot(all.equal( Mpsi(x, cc = ctl0$tuning.psi, psi = ctl0$psi, deriv=1), .Mpsi(x, ccc = attr(ctl0$tuning.psi, "constants"), ipsi = .psi2ipsi("lqq"), deriv=1))) ## M*() preserving attributes : x <- matrix(x, 32, 8, dimnames=list(paste0("r",1:32), col=letters[1:8])) comment(x) <- "a vector which is a matrix" px <- Mpsi(x, cc = ccHa, psi = "hampel") stopifnot(identical(attributes(x), attributes(px))) ## The "optimal" psi exists in two versions "in the literature": --- ## Maronna et al. 2006, 5.9.1, p.144f: psi.M2006 <- function(x, c = 0.013) sign(x) * pmax(0, abs(x) - c/dnorm(abs(x))) ## and the other is the one in robustbase from 'robust': via Mpsi(.., "optimal") ## Here are both for 95\% efficiency: (c106 <- .Mpsi.tuning.default("optimal")) c1 <- curve(Mpsi(x, cc = c106, psi="optimal"), -5, 7, n=1001) c2 <- curve(psi.M2006(x), add=TRUE, n=1001, col=adjustcolor(2,0.4), lwd=2) abline(0,1, v=0, h=0, lty=3) ## the two psi's are similar, but really quite different ## a zoom into Maronna et al's: c3 <- curve(psi.M2006(x), -.5, 1, n=1001); abline(h=0,v=0, lty=3);abline(0,1, lty=2) } \keyword{robust} robustbase/man/Animals2.Rd0000644000176200001440000000376613710054521015145 0ustar liggesusers\name{Animals2} \alias{Animals2} \title{Brain and Body Weights for 65 Species of Land Animals} \description{ A data frame with average brain and body weights for 62 species of land mammals and three others. Note that this is simply the union of \code{\link[MASS]{Animals}} and \code{\link[MASS]{mammals}}. } \usage{ Animals2 } \format{ \describe{ \item{\code{body}}{body weight in kg} \item{\code{brain}}{brain weight in g} } } \source{ Weisberg, S. (1985) \emph{Applied Linear Regression.} 2nd edition. Wiley, pp. 144--5. P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection.} Wiley, p. 57. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Fourth Edition. Springer.
} \note{ After loading the \CRANpkg{MASS} package, the data set is simply constructed by \code{Animals2 <- local({D <- rbind(Animals, mammals); unique(D[order(D$body,D$brain),])})}. Rousseeuw and Leroy (1987)'s \sQuote{brain} data is the same as \CRANpkg{MASS}'s \code{Animals} (with Rat and Brachiosaurus interchanged, see the example below). } \examples{ data(Animals2) ## Sensible Plot needs doubly logarithmic scale plot(Animals2, log = "xy") ## Regression example plot: plotbb <- function(bbdat) { d.name <- deparse(substitute(bbdat)) plot(log(brain) ~ log(body), data = bbdat, main = d.name) abline( lm(log(brain) ~ log(body), data = bbdat)) abline(MASS::rlm(log(brain) ~ log(body), data = bbdat), col = 2) legend("bottomright", leg = c("lm", "rlm"), col=1:2, lwd=1, inset = 1/20) } plotbb(bbdat = Animals2) ## The `same' plot for Rousseeuw's subset: data(Animals, package = "MASS") brain <- Animals[c(1:24, 26:25, 27:28),] plotbb(bbdat = brain) lbrain <- log(brain) plot(mahalanobis(lbrain, colMeans(lbrain), var(lbrain)), main = "Classical Mahalanobis Distances") mcd <- covMcd(lbrain) plot(mahalanobis(lbrain,mcd$center,mcd$cov), main = "Robust (MCD) Mahalanobis Distances") } \keyword{datasets} robustbase/man/nlrob.Rd0000644000176200001440000003370413710054521014606 0ustar liggesusers\name{nlrob} \title{Robust Fitting of Nonlinear Regression Models} \alias{nlrob} \alias{fitted.nlrob} \alias{residuals.nlrob} \alias{predict.nlrob} \alias{vcov.nlrob} \description{ \code{nlrob} fits a nonlinear regression model by robust methods. Per default, by an M-estimator, using iterated reweighted least squares (called \dQuote{IRLS} or also \dQuote{IWLS}). } \usage{ nlrob(formula, data, start, lower, upper, weights = NULL, na.action = na.fail, method = c("M", "MM", "tau", "CM", "mtl"), psi = .Mwgt.psi1("huber", cc=1.345), scale = NULL, test.vec = c("resid", "coef", "w"), maxit = 20, tol = 1e-06, acc, algorithm = "default", doCov = FALSE, model = FALSE, control = if(method == "M") nls.control() else nlrob.control(method, optArgs = list(trace=trace), ...), trace = FALSE, ...) \method{fitted}{nlrob}(object, ...) \method{residuals}{nlrob}(object, type = , ...)% FIXME: more 'type's + DOCU \method{predict}{nlrob}(object, newdata, ...) } \arguments{ \item{formula}{a nonlinear \code{\link{formula}} including variables and parameters of the model, such as \code{y ~ f(x, theta)} (cf. \code{\link{nls}}). (For some checks: if \eqn{f(.)} is linear, then we need parentheses, e.g., \code{y ~ (a + b * x)}; (note that \code{._nlrob.w} is not allowed as variable or parameter name)) %% FIXME in code -- long overdue, as nls() is more flexible *SINCE* R 2.2.1 %% Do not use \code{w} as variable or parameter name! %% FIXME: this should really no longer be needed ==> add a check } \item{data}{an optional data frame containing the variables in the model. If not found in \code{data}, the variables are taken from \code{environment(formula)}, typically the environment from which \code{nlrob} is called.} \item{start}{a named numeric vector of starting parameters estimates, only for \code{method = "M"}.} \item{lower, upper}{numeric vectors of lower and upper bounds; if needed, will be replicated to be as long as the longest of \code{start}, \code{lower} or \code{upper}. For (the default) \code{method = "M"}, if the bounds are unspecified all parameters are assumed to be unconstrained; also, for method \code{"M"}, bounds can only be used with the \code{"port"} algorithm. They are ignored, with a warning, in cases they have no effect. 
For all other methods, currently these bounds \emph{must} be specified as finite values, and one of them must have \code{\link{names}} matching the parameter names in \code{formula}. For methods \code{"CM"} and \code{"mtl"}, the bounds must \emph{additionally} have an entry named \code{"sigma"} as that is determined simultaneously in the same optimization, and hence its \code{lower} bound must not be negative. } \item{weights}{an optional vector of weights to be used in the fitting process (for intrinsic weights, not the weights \code{w} used in the iterative (robust) fit). I.e., \code{sum(w * e^2)} is minimized with \code{e} = residuals, \eqn{e_i = y_i - f(xreg_i, \theta)}{e[i] = y[i] - f(xreg[i], theta)}, where \eqn{f(x,\theta)}{f(x, theta)} is the nonlinear function, and \code{w} are the robust weights from \code{resid * weights}.} \item{na.action}{a function which indicates what should happen when the data contain \code{NA}s. The default action is for the procedure to fail. If NAs are present, use \code{na.exclude} to have residuals with \code{length == nrow(data) == length(w)}, where \code{w} are the weights used in the iterative robust loop. This is better if the explanatory variables in \code{formula} are time series (and so the NA location is important). For this reason, \code{na.omit}, which leads to omission of cases with missing values on any required variable, is not suitable here since the residuals length is different from \code{nrow(data) == length(w)}. } \item{method}{a character string specifying which method to use. The default is \code{"M"}, for historical and back-compatibility reasons. For the other methods, primarily see \code{\link{nlrob.algorithms}}. % nlrob-algos.Rd \describe{ \item{"M"}{Computes an M-estimator, using \code{\link{nls}(*, weights=*)} iteratively (hence, IRLS) with weights equal to \eqn{\psi(r_i) / r_i}, where \eqn{r_i} is the i-the residual from the previous fit.} \item{"MM"}{Computes an MM-estimator, starting from \code{init}, either "S" or "lts".}% more: FIXME \item{"tau"}{Computes a Tau-estimator.} \item{"CM"}{Computes a \dQuote{Constrained M} (=: CM) estimator.} \item{"mtl"}{Compute as \dQuote{Maximum Trimmed Likelihood} (=: MTL) estimator.} } Note that all methods but \code{"M"} are \dQuote{random}, hence typically to be preceded by \code{\link{set.seed}()} in usage, see also \code{\link{nlrob.algorithms}}. % nlrob-algos.Rd } \item{psi}{a function (possibly by name) of the form \code{g(x, 'tuning constant(s)', deriv)} that for \code{deriv=0} returns \eqn{\psi(x)/x}{psi(x)/x} and for \code{deriv=1} returns \eqn{\psi'(x)}{psi'(x)}. Note that tuning constants can \emph{not} be passed separately, but directly via the specification of \code{psi}, typically via a simple \code{\link{.Mwgt.psi1}()} call as per default. Note that this has been a deliberately non-backcompatible change for robustbase version 0.90-0 (summer 2013 -- early 2014). } \item{scale}{when not \code{NULL} (default), a positive number specifying a scale kept \emph{fixed} during the iterations (and returned as \code{Scale} component).} \item{test.vec}{character string specifying the convergence criterion. 
The relative change is tested for residuals with a value of \code{"resid"} (the default), for coefficients with \code{"coef"}, and for weights with \code{"w"}.} \item{maxit}{maximum number of iterations in the robust loop.} \item{tol}{non-negative convergence tolerance for the robust fit.} \item{acc}{previous name for \code{tol}, now deprecated.} \item{algorithm}{character string specifying the algorithm to use for \code{\link{nls}}, see there, only when \code{method = "M"}. The default algorithm is a Gauss-Newton algorithm.} \item{doCov}{a logical specifying if \code{nlrob()} should compute the asymptotic variance-covariance matrix (see \code{\link{vcov}}) already. This used to be hard-wired to \code{TRUE}; however, the default has been set to \code{FALSE}, as \code{\link{vcov}(obj)} and \code{\link{summary}(obj)} can easily compute it when needed.} \item{model}{a \code{\link{logical}} indicating if the \code{\link{model.frame}} should be returned as well.} \item{control}{an optional list of control settings. \describe{ \item{for \code{method = "M"}:}{settings for \code{\link{nls}()}. See \code{\link{nls.control}} for the names of the settable control values and their effect.} \item{for all \code{method}s but \code{"M"}:}{a list, typically resulting from \code{\link{nlrob.control}(method, *)}.} } } \item{trace}{logical value indicating if a \dQuote{trace} of the \code{nls} iteration progress should be printed. Default is \code{FALSE}. \cr If \code{TRUE}, in each robust iteration, the residual sum-of-squares and the parameter values are printed at the conclusion of each \code{nls} iteration. When the \code{"plinear"} algorithm is used, the conditional estimates of the linear parameters are printed after the nonlinear parameters.} \item{object}{an \R object of class \code{"nlrob"}, typically resulting from \code{nlrob(..)}.} \item{\dots}{for \code{nlrob}: only when \code{method} is \emph{not} \code{"M"}, optional arguments for \code{\link{nlrob.control}}; \cr for other functions: potentially optional arguments passed to the extractor methods.} \item{type}{a string specifying the \emph{type} of residuals desired. Currently, \code{"response"} and \code{"working"} are supported. %% FIXME: 1. document these (here) 2. write and support more types } \item{newdata}{a data frame (or list) with the same names as the original \code{data}, see e.g., \code{\link{predict.nls}}.} } \details{ For \code{method = "M"}, iterated reweighted least squares (\dQuote{IRLS} or \dQuote{IWLS}) is used, calling \code{\link{nls}(*, weights= .)} where \code{weights} \eqn{w_i} are proportional to \eqn{\psi(r_i/ \hat{\sigma})}{psi(r_i/ sig.)}. All other methods minimize differently, and work \bold{without} \code{\link{nls}}. See \link{nlrob.algorithms} % -> nlrob-algos.Rd for details. } \value{ \code{nlrob()} returns an object of S3 class \code{"nlrob"}, for \code{method = "M"} also inheriting from class \code{"nls"}, (see \code{\link{nls}}). It is a list with several components; they are not documented yet, as some of them will probably change. Instead, rather use \dQuote{accessor} methods, where possible: There are methods (at least) for the generic accessor functions \code{\link{summary}()}, \code{\link{coefficients}()} (aka \code{coef()}) \code{fitted.values()}, \code{residuals()}, \code{\link{sigma}()} and \code{\link{vcov}()}, the latter for the variance-covariance matrix of the estimated parameters, as returned by \code{coef()}, i.e., not including the variance of the errors. 
For \code{nlrob()} results, \code{\link{estimethod}()} returns the \dQuote{estimation method}, which coincides with the \code{method} argument used. \code{residuals(.)}, by default \code{type = "response"}, returns the residuals \eqn{e_i}, defined above as \eqn{e_i = Y_i - f_(x_i, \hat\theta)}{e[i] = Y[i] - f(x[i], theta^)}. These differ from the standardized or weighted residuals which, e.g., are assumed to be normally distributed, and a version of which is returned in \code{working.residuals} component. %% and another is working.residuals/Scale } \author{ \describe{ \item{\code{method = "M"}:}{ Andreas Ruckstuhl (inspired by \code{\link[MASS]{rlm}}() and \code{\link{nls}}()), in July 1994 for S-plus.\cr Christian Sangiorgio did the update to \R and corrected some errors, from June 2002 to January 2005, and Andreas contributed slight changes and the first methods in August 2005.} \item{\code{method = "MM"}, etc:}{Originally all by Eduardo L. T. Conceicao, see \code{\link{nlrob.algorithms}}:} % nlrob-algos.Rd } Since then, the help page, testing, more cleanup, new methods: Martin Maechler. } \note{ This function (with the only method \code{"M"}) used to be named \code{rnls} and has been in package \CRANpkg{sfsmisc} in the past, but been dropped there. } \seealso{ \code{\link{nls}}, \code{\link[MASS]{rlm}}. } \examples{ DNase1 <- DNase[ DNase$Run == 1, ] ## note that selfstarting models don't work yet % <<< FIXME !!! ##--- without conditional linearity --- ## classical fmNase1 <- nls( density ~ Asym/(1 + exp(( xmid - log(conc) )/scal ) ), data = DNase1, start = list( Asym = 3, xmid = 0, scal = 1 ), trace = TRUE ) summary( fmNase1 ) ## robust RmN1 <- nlrob( density ~ Asym/(1 + exp(( xmid - log(conc) )/scal ) ), data = DNase1, trace = TRUE, start = list( Asym = 3, xmid = 0, scal = 1 )) summary( RmN1 ) ##--- using conditional linearity --- ## classical fm2DNase1 <- nls( density ~ 1/(1 + exp(( xmid - log(conc) )/scal ) ), data = DNase1, start = c( xmid = 0, scal = 1 ), alg = "plinear", trace = TRUE ) summary( fm2DNase1 ) ## robust frm2DNase1 <- nlrob(density ~ 1/(1 + exp(( xmid - log(conc) )/scal ) ), data = DNase1, start = c( xmid = 0, scal = 1 ), alg = "plinear", trace = TRUE ) summary( frm2DNase1 ) ## Confidence for linear parameter is quite smaller than "Asym" above c1 <- coef(summary(RmN1)) c2 <- coef(summary(frm2DNase1)) rownames(c2)[rownames(c2) == ".lin"] <- "Asym" stopifnot(all.equal(c1[,1:2], c2[rownames(c1), 1:2], tol = 0.09)) # 0.07315 ### -- new examples -- "moderate outlier": DN2 <- DNase1 DN2[10,"density"] <- 2*DN2[10,"density"] fm3DN2 <- nls(density ~ Asym/(1 + exp(( xmid - log(conc) )/scal ) ), data = DN2, trace = TRUE, start = list( Asym = 3, xmid = 0, scal = 1 )) ## robust Rm3DN2 <- nlrob(density ~ Asym/(1 + exp(( xmid - log(conc) )/scal ) ), data = DN2, trace = TRUE, start = list( Asym = 3, xmid = 0, scal = 1 )) Rm3DN2 summary(Rm3DN2) # -> robustness weight of obs. 
10 ~= 0.037 confint(Rm3DN2, method = "Wald") stopifnot(identical(Rm3DN2$dataClasses, c(density = "numeric", conc = "numeric"))) ## utility function sfsmisc::lseq() : lseq <- function (from, to, length) 2^seq(log2(from), log2(to), length.out = length) ## predict() {and plot}: h.x <- lseq(min(DN2$conc), max(DN2$conc), length = 100) nDat <- data.frame(conc = h.x) h.p <- predict(fm3DN2, newdata = nDat)# classical h.rp <- predict(Rm3DN2, newdata = nDat)# robust plot(density ~ conc, data=DN2, log="x", main = format(formula(Rm3DN2))) lines(h.x, h.p, col="blue") lines(h.x, h.rp, col="magenta") legend("topleft", c("classical nls()", "robust nlrob()"), lwd = 1, col= c("blue", "magenta"), inset = 0.05) ## See ?nlrob.algorithms for examples \donttest{ DNase1 <- DNase[DNase$Run == 1,] form <- density ~ Asym/(1 + exp(( xmid -log(conc) )/scal )) gMM <- nlrob(form, data = DNase1, method = "MM", lower = c(Asym = 0, xmid = 0, scal = 0), upper = 3, trace = TRUE) ## "CM" (and "mtl") additionally need bounds for "sigma" : gCM <- nlrob(form, data = DNase1, method = "CM", lower = c(Asym = 0, xmid = 0, scal = 0, sigma = 0), upper = c(3,3,3, sigma = 0.8)) summary(gCM)# did fail; note it has NA NA NA (std.err, t val, P val) stopifnot(identical(Rm3DN2$dataClasses, gMM$dataClasses), identical( gCM$dataClasses, gMM$dataClasses)) }%not (always) tested } \keyword{robust} \keyword{regression} \keyword{nonlinear} robustbase/man/summary.lmrob.Rd0000644000176200001440000001220213167157117016302 0ustar liggesusers\name{summary.lmrob} \title{Summary Method for "lmrob" Objects} % \alias{summary.lmrob} \alias{hatvalues.lmrob} \alias{.lmrob.hat} \alias{vcov.lmrob} \alias{print.summary.lmrob} \alias{model.matrix.lmrob} % \description{ Summary method for \R object of class \code{"lmrob"} and \code{\link{print}} method for the summary object. Further, methods \code{\link{fitted}()}, \code{\link{residuals}()} work (via the default methods), and \code{\link{predict}()} (see \code{\link{predict.lmrob}}, \code{\link{vcov}()}, \code{\link{weights}()} (see \code{\link{weights.lmrob}}), \code{\link{model.matrix}()}, \code{\link{confint}()}, \code{\link{dummy.coef}()}, \code{\link{hatvalues}()}, etc., have explicitly defined \code{lmrob} methods. \code{.lmrob.hat()} is the lower level \dQuote{work horse} of the \code{hatvalues()} method. 
} \usage{% all source in ../R/lmrob.R <<< \method{summary}{lmrob}(object, correlation = FALSE, symbolic.cor = FALSE, \dots) \method{print}{summary.lmrob}(x, digits = max(3, getOption("digits") - 3), symbolic.cor= x$symbolic.cor, signif.stars = getOption("show.signif.stars"), showAlgo = TRUE, \dots) \method{vcov}{lmrob}(object, cov = object$control$cov, complete = TRUE, \dots) \method{model.matrix}{lmrob}(object, \dots) % not yet % .lmrob.hat(x, w = rep(1, NROW(x)), wqr = qr(sqrt(w) * x)) } \arguments{ \item{object}{an \R object of class \code{lmrob}, typically created by \code{\link{lmrob}}.} \item{correlation}{logical variable indicating whether to compute the correlation matrix of the estimated coefficients.} \item{symbolic.cor}{logical indicating whether to use symbols to display the above correlation matrix.} \item{x}{an \R object of class \code{summary.lmrob}, typically resulting from \code{summary(\link{lmrob}(..),..)}.} \item{digits}{number of digits for printing, see \code{digits} in \code{\link{options}}.} \item{signif.stars}{logical variable indicating whether to use stars to display different levels of significance in the individual t-tests.} \item{showAlgo}{optional \code{\link{logical}} indicating if the algorithmic parameters (as mostly inside the \code{control} part) should be shown.} \item{cov}{covariance estimation function to use, a \code{\link{function}} or \link{character} string naming the function; \pkg{robustbase} currently provides \code{".vcov.w"} and \code{".vcov.avar1"}, see \emph{Details} of \code{\link{lmrob}}. Particularly useful when \code{object} is the result of \code{lmrob(.., cov = "none")}, where \preformatted{ object$cov <- vcov(object, cov = ".vcov.w")} allows to \emph{update} the fitted object.} \item{complete}{(mainly for \R \code{>= 3.5.0}:)% ~/R/D/r-devel/R/src/library/stats/man/vcov.Rd \code{\link{logical}} indicating if the full variance-covariance matrix should be returned also in case of an over-determined system where some coefficients are undefined and \code{\link{coef}(.)} contains \code{NA}s correspondingly. When \code{complete = TRUE}, \code{vcov()} is compatible with \code{coef()} also in this singular case.} \item{\dots}{potentially more arguments passed to methods.} } \value{ \code{summary(object)} returns an object of S3 class \code{"summary.lmrob"}, basically a \code{\link{list}} with components "call", "terms", "residuals", "scale", "rweights", "converged", "iter", "control" all copied from \code{object}, and further components, partly for compatibility with \code{\link{summary.lm}}, \item{coefficients}{a \code{\link{matrix}} with columns \code{"Estimate"}, \code{"Std. Error"}, \code{"t value"}, and \code{"PR(>|t|)"}, where "Estimate" is identical to \code{\link{coef}(object)}. Note that \code{\link{coef}()} is slightly preferred to access this matrix.} \item{df}{degrees of freedom, in an \code{\link{lm}} compatible way.} \item{sigma}{identical to \code{\link{sigma}(object)}.} \item{aliased}{..}%FIXME \item{cov}{derived from \code{object$cov}.}% FIXME: say more \item{r.squared}{robust \dQuote{R squared} or \eqn{R^2}, a coefficient of determination: This is the consistency corrected robust coefficient of determination by Renaud and Victoria-Feser (2010).} \item{adj.r.squared}{an adjusted R squared, see \code{r.squared}.} } \references{ Renaud, O. and Victoria-Feser, M.-P. (2010). A robust coefficient of determination for regression, \emph{Journal of Statistical Planning and Inference} \bold{140}, 1852-1862. 
} \seealso{\code{\link{lmrob}}, \code{\link{predict.lmrob}}, \code{\link{weights.lmrob}}, \code{\link{summary.lm}}, \code{\link{print}}, \code{\link{summary}}. } \examples{ mod1 <- lmrob(stack.loss ~ ., data = stackloss) sa <- summary(mod1) # calls summary.lmrob(....) sa # dispatches to call print.summary.lmrob(....) ## correlation between estimated coefficients: cov2cor(vcov(mod1)) cbind(fit = fitted(mod1), resid = residuals(mod1), wgts= weights(mod1, type="robustness"), predict(mod1, interval="prediction")) data(heart) sm2 <- summary( m2 <- lmrob(clength ~ ., data = heart) ) sm2 } \keyword{robust} \keyword{regression} robustbase/man/tukeyPsi1.Rd0000644000176200001440000000611712152143522015365 0ustar liggesusers\name{tukeyPsi1} \alias{robustbase-deprecated} \alias{tukeyPsi1} \alias{tukeyChi} \title{Tukey's Bi-square Score (Psi) and "Chi" (Rho) Functions and Derivatives} \description{ These are \bold{deprecated}, replaced by \code{\link{Mchi}(*, psi="tukey")}, \code{\link{Mpsi}(*, psi="tukey")} \code{tukeyPsi1()} computes Tukey's bi-square score (psi) function, its first derivative or it's integral/\dQuote{principal function}. This is scaled such that \eqn{\psi'(0) = 1}, i.e., \eqn{\psi(x) \approx x}{psi(x) ~= x} around 0. \code{tukeyChi()} computes Tukey's bi-square loss function, \code{chi(x)} and its first two derivatives. Note that in the general context of \eqn{M}-estimators, these loss functions are called \eqn{\rho (rho)}{rho}-functions. } \usage{ tukeyPsi1(x, cc, deriv = 0) tukeyChi (x, cc, deriv = 0) } \arguments{ \item{x}{numeric vector.} \item{cc}{ tuning constant } \item{deriv}{integer in \eqn{\{-1,0,1,2\}} specifying the order of the derivative; the default, \code{deriv = 0} computes the psi-, or chi- ("rho"-)function.} } \value{ a numeric vector of the same length as \code{x}. } \note{\code{tukeyPsi1(x, d)} and \code{\link{tukeyChi}(x, d+1)} are just re-scaled versions of each other (for \code{d in -1:1}), i.e., \deqn{\chi^{(\nu)}(x, c) = (6/c^2) \psi^{(\nu-1)}(x,c),} for \eqn{\nu = 0,1,2}. We use the name \sQuote{tukeyPsi\bold{1}}, because \code{tukeyPsi} is reserved for a future \dQuote{Psi Function} class object, see \code{\link{psiFunc}}. } \seealso{ \code{\link{lmrob}} and \code{\link{Mpsi}}; further \code{\link{anova.lmrob}} which needs the \code{deriv = -1}. } \author{Matias Salibian-Barrera, Martin Maechler and Andreas Ruckstuhl} \examples{ \dontshow{oop <- options(warn = -1) # against the "deprecated" warnings} op <- par(mfrow = c(3,1), oma = c(0,0, 2, 0), mgp = c(1.5, 0.6, 0), mar= .1+c(3,4,3,2)) x <- seq(-2.5, 2.5, length = 201) cc <- 1.55 # as set by default in lmrob.control() plot. <- function(...) { plot(...); abline(h=0,v=0, col="gray", lty=3)} plot.(x, tukeyChi(x, cc), type = "l", col = 2) plot.(x, tukeyChi(x, cc, deriv = 1), type = "l", col = 2) plot.(x, tukeyChi(x, cc, deriv = 2), type = "l", col = 2) % \ is escape for Rd mtext(sprintf("tukeyChi(x, c = \%g, deriv), deriv = 0,1,2", cc), outer = TRUE, font = par("font.main"), cex = par("cex.main")) par(op) op <- par(mfrow = c(3,1), oma = c(0,0, 2, 0), mgp = c(1.5, 0.6, 0), mar= .1+c(3,4,1,1)) x <- seq(-5, 5, length = 201) cc <- 4.69 # as set by default in lmrob.control() plot. <- function(...) 
{ plot(..., asp = 1); abline(h=0,v=0, col="gray", lty=3)} plot.(x, tukeyPsi1(x, cc), type = "l", col = 2) abline(0:1, lty = 3, col = "light blue") plot.(x, tukeyPsi1(x, cc, deriv = -1), type = "l", col = 2) plot.(x, tukeyPsi1(x, cc, deriv = 1), type = "l", col = 2); abline(h=1,lty=3) % \ is escape for Rd mtext(sprintf("tukeyPsi1(x, c = \%g, deriv), deriv = 0, -1, 1", cc), outer = TRUE, font = par("font.main"), cex = par("cex.main")) par(op) \dontshow{options(oop)} } \keyword{robust} robustbase/man/chgDefaults-methods.Rd0000644000176200001440000000167212145665261017375 0ustar liggesusers\name{chgDefaults-methods} \title{Change Defaults (Parameters) of "Psi Function" Objects} \docType{methods} \alias{chgDefaults}% the generic \alias{chgDefaults-methods} \alias{chgDefaults,ANY-method} \alias{chgDefaults,psi_func-method} \description{ To modify an object of class \code{\linkS4class{psi_func}}, i.e. typically change the tuning parameters, the generic function \code{chgDefaults()} is called and works via the corresponding method. } \section{Methods}{ \describe{ \item{\code{object = "psi_func"}}{The method is used to change the default values for the tuning parameters, and returns an object of class \code{\linkS4class{psi_func}}, a copy of input \code{object} with the slot \code{tDefs} possibly changed;. } } } \seealso{ \code{\link{psiFunc}} } \examples{ ## Hampel's psi and rho: H.38 <- chgDefaults(hampelPsi, k = c(1.5, 3.5, 8)) H.38 plot(H.38) ## for more see ?psiFunc } \keyword{methods} robustbase/man/estimethod.Rd0000644000176200001440000000136512270435276015647 0ustar liggesusers\name{estimethod} \alias{estimethod} \title{Extract the Estimation Method 'Estimethod' from a Fitted Model} \description{ Extract the estimation method as a \code{\link{character}} string from a fitted model. } \usage{ estimethod(object, ...) } \arguments{ \item{object}{a fitted model.} \item{...}{additional, optional arguments. (None are used in our methods)} } \value{ a \code{\link{character}} string, the estimation method used. } \details{ This is a (S3) generic function for which we provide methods, currently for \code{\link{nlrob}} only.% TODO %% \code{\link{lmrob}}, %% \code{\link{glmrob}}, %% , and \code{\link{nls}}. } \seealso{ \code{\link{nlrob}}, and \code{\link{nlrob.MM}}, notably for examples. } \keyword{models} robustbase/man/airmay.Rd0000644000176200001440000000245113312375575014764 0ustar liggesusers\name{airmay} \alias{airmay} \docType{data} \title{Air Quality Data} \description{ Air Quality Data Set for May 1973, from Chambers et al. (1983). The whole data set consists of daily readings of air quality values from May 1, 1973 to September 30, 1973, but here are included only the values for May. This data set is an example of the special treatment of the missing values. } \usage{data(airmay, package="robustbase")} \format{ A data frame with 31 observations on the following 4 variables. \describe{ \item{\code{X1}}{Solar Radiation in Longleys in the frequency band 4000-7700 from 0800 to 1200 hours at Central Park} \item{\code{X2}}{Average windspeed (in miles per hour) between 7000 and 1000 hours at La Guardia Airport} \item{\code{X3}}{Maximum daily temperature (in degrees Fahrenheit) at La Guardia Airport} \item{\code{Y}}{Mean ozone concentration (in parts per billion) from 1300 to 1500 hours at Roosevelt Island} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.86, table 6. 
} \examples{ data(airmay) summary(lm.airmay <- lm(Y ~ ., data=airmay)) %%summary(lts.airmay <- ltsReg(Y ~ ., data=airmay)) airmay.x <- data.matrix(airmay[,1:3]) %%covMcd(airmay.x) } \keyword{datasets} robustbase/man/lmrob..M..fit.Rd0000644000176200001440000001010114430460510015676 0ustar liggesusers\name{lmrob..M..fit} \alias{lmrob..M..fit} \title{Compute M-estimators of regression} \description{ This function performs RWLS iterations to find an M-estimator of regression. When started from an S-estimated \code{beta.initial}, this results in an MM-estimator. } \usage{ lmrob..M..fit(x = obj$x, y = obj$y, beta.initial = obj$coef, scale = obj$scale, control = obj$control, obj, mf, method = obj$control$method) } \arguments{ \item{x}{design matrix (\eqn{n \times p}{n x p}) typically including a column of \code{1}s for the intercept.} \item{y}{numeric response vector (of length \eqn{n}).} \item{beta.initial}{numeric vector (of length \eqn{p}) of initial estimate. Usually the result of an S-regression estimator.} \item{scale}{robust residual scale estimate. Usually an S-scale estimator.} \item{control}{list of control parameters, as returned by \code{\link{lmrob.control}}. Currently, the components \code{c("max.it", "rel.tol","trace.lev", "psi", "tuning.psi", "mts", "subsampling")} are accessed.} \item{obj}{an optional \code{lmrob}-object. If specified, this is typically used to set values for the other arguments.} \item{mf}{defunct.} \item{method}{optional; the \code{method} used for \emph{obj} computation.} } \details{ This function is used by \code{\link{lmrob.fit}} (and \code{anova(, type = "Deviance")}) and typically not to be used on its own. } \value{A list with the following elements: \item{coef}{the M-estimator (or MM-estim.) of regression} \item{control}{the \code{control} list input used} \item{scale}{ The residual scale estimate} \item{seed }{ The random number generator seed} \item{converged}{ \code{TRUE} if the RWLS iterations converged, \code{FALSE} otherwise} } \references{ Yohai, 1987 } \seealso{ \code{\link{lmrob.fit}}, \code{\link{lmrob}}; \code{\link[MASS]{rlm}} from package \CRANpkg{MASS}. } \author{Matias Salibian-Barrera and Martin Maechler} \examples{ data(stackloss) X <- model.matrix(stack.loss ~ . , data = stackloss) y <- stack.loss ## Compute manual MM-estimate: ## 1) initial LTS: m0 <- ltsReg(X[,-1], y) ## 2) M-estimate started from LTS: m1 <- lmrob..M..fit(X, y, beta.initial = coef(m0), scale = m0$scale, method = "SM", control = lmrob.control(tuning.psi = 1.6, psi = 'bisquare')) ## no 'method' (nor 'obj'): m1. <- lmrob..M..fit(X, y, beta.initial = coef(m0), scale = m0$scale, control = m1$control) stopifnot(all.equal(m1, m1., tol = 1e-15)) # identical {call *not* stored!} cbind(m0$coef, m1$coef) ## the scale is kept fixed: stopifnot(identical(unname(m0$scale), m1$scale)) ## robustness weights: are r.s <- with(m1, residuals/scale) # scaled residuals m1.wts <- Mpsi(r.s, cc = 1.6, psi="tukey") / r.s summarizeRobWeights(m1.wts) ##--> outliers 1,3,4,13,21 which(m0$lts.wt == 0) # 1,3,4,21 but not 13 \dontshow{stopifnot(which(m0$lts.wt == 0) == c(1,3,4,21)) } ## Manually add M-step to SMD-estimate (=> equivalent to "SMDM"): m2 <- lmrob(stack.loss ~ ., data = stackloss, method = 'SMD') m3 <- lmrob..M..fit(obj = m2) ## Simple function that allows custom initial estimates ## (Deprecated; use init argument to lmrob() instead.) %% MM: why deprecated? 
lmrob.custom <- function(x, y, beta.initial, scale, terms) { ## initialize object obj <- list(control = lmrob.control("KS2011"), terms = terms) ## terms is needed for summary() ## M-step obj <- lmrob..M..fit(x, y, beta.initial, scale, obj = obj) ## D-step obj <- lmrob..D..fit(obj, x) ## Add some missing elements obj$cov <- TRUE ## enables calculation of cov matrix obj$p <- obj$qr$rank obj$degree.freedom <- length(y) - obj$p ## M-step obj <- lmrob..M..fit(x, y, obj=obj) obj$control$method <- ".MDM" obj } m4 <- lmrob.custom(X, y, m2$init$init.S$coef, m2$init$scale, m2$terms) stopifnot(all.equal(m4$coef, m3$coef)) ## Start from ltsReg: m5 <- ltsReg(stack.loss ~ ., data = stackloss) m6 <- lmrob.custom(m5$X, m5$Y, coef(m5), m5$scale, m5$terms) } \keyword{robust} \keyword{regression} robustbase/man/summary.nlrob.Rd0000644000176200001440000000307112270244535016302 0ustar liggesusers\name{summary.nlrob} \alias{summary.nlrob} \title{Summarizing Robust Fits of Nonlinear Regression Models } \description{ \code{summary} method for objects of class \code{"nlrob"}, i.e., \code{\link{nlrob}()} results. Currently it only works for \code{nlrob(*, method="M")}. } \usage{ \method{summary}{nlrob}(object, correlation = FALSE, symbolic.cor = FALSE, ...) } \arguments{ \item{object}{an object of class \code{"nlrob"}, usually, a result of a call to \code{\link{nlrob}}.} \item{correlation}{logical variable indicating whether to compute the correlation matrix of the estimated coefficients.} \item{symbolic.cor}{logical indicating whether to use symbols to display the above correlation matrix.} \item{\dots}{further arguments passed to or from other methods.} } \value{ The function \code{\link{summary.nlrob}} computes and returns an object of class \code{"summary.nlrob"} of summary statistics of the robustly fitted nonlinear model given in \code{object}. There is a print method, \code{print.summary.nlrob()}, which nicely formats the output. The result keeps a large part of \code{object}'s components such as \code{residuals}, \code{cov} or \code{w}, and additionally contains \item{coefficients}{the matrix of coefficients, standard errors and p-values.} \item{correlation}{if the \code{correlation} argument was true, the correlation matrix of the parameters.} %% maybe add more } \author{Andreas Ruckstuhl} \seealso{\code{\link{nlrob}()}, also for examples. } \keyword{regression} \keyword{nonlinear} \keyword{robust} robustbase/man/ltsReg.Rd0000644000176200001440000002345214436463173014746 0ustar liggesusers\name{ltsReg} \alias{ltsReg} \alias{ltsReg.default} \alias{ltsReg.formula} \alias{print.lts} \title{Least Trimmed Squares Robust (High Breakdown) Regression} \concept{High breakdown point} \description{ Carries out least trimmed squares (LTS) robust (high breakdown point) regression. } \usage{ ltsReg(x, \dots) \method{ltsReg}{formula}(formula, data, subset, weights, na.action, model = TRUE, x.ret = FALSE, y.ret = FALSE, contrasts = NULL, offset, \dots) \method{ltsReg}{default}(x, y, intercept = TRUE, alpha = , nsamp = , adjust = , mcd = TRUE, qr.out = FALSE, yname = NULL, seed = , trace = , use.correction = , wgtFUN = , control = rrcov.control(), \dots) } \arguments{ \item{formula}{a \code{\link{formula}} of the form \code{y ~ x1 + x2 + ...}.} \item{data}{data frame from which variables specified in \code{formula} are to be taken.} \item{subset}{an optional vector specifying a subset of observations to be used in the fitting process.} \item{weights}{an optional vector of weights to be used in the fitting process.
\bold{NOT USED YET}. %%% If specified, weighted least squares is used %%% with weights \code{weights} (that is, minimizing \code{sum(w*e^2)}); %%% otherwise ordinary least squares is used. } \item{na.action}{a function which indicates what should happen when the data contain \code{NA}s. The default is set by the \code{na.action} setting of \code{\link{options}}, and is \code{\link{na.fail}} if that is unset. The \dQuote{factory-fresh} default is \code{\link{na.omit}}. Another possible value is \code{NULL}, no action. Value \code{\link{na.exclude}} can be useful.} \item{model, x.ret, y.ret}{\code{\link{logical}}s indicating if the model frame, the model matrix and the response are to be returned, respectively.} \item{contrasts}{an optional list. See the \code{contrasts.arg} of \code{\link{model.matrix.default}}.} \item{offset}{this can be used to specify an \emph{a priori} known component to be included in the linear predictor during fitting. An \code{\link{offset}} term can be included in the formula instead or as well, and if both are specified their sum is used.} \item{x}{a matrix or data frame containing the explanatory variables.} \item{y}{the response: a vector of length the number of rows of \code{x}.}. \item{intercept}{if true, a model with constant term will be estimated; otherwise no constant term will be included. Default is \code{intercept = TRUE} } \item{alpha}{the percentage (roughly) of squared residuals whose sum will be minimized, by default 0.5. In general, \code{alpha} must between 0.5 and 1.} \item{nsamp}{number of subsets used for initial estimates or \code{"best"} or \code{"exact"}. Default is \code{nsamp = 500}. For \code{nsamp="best"} exhaustive enumeration is done, as long as the number of trials does not exceed 5000. For \code{"exact"}, exhaustive enumeration will be attempted however many samples are needed. In this case a warning message will be displayed saying that the computation can take a very long time. } \item{adjust}{whether to perform intercept adjustment at each step. Since this can be time consuming, the default is \code{adjust = FALSE}.} \item{mcd}{whether to compute robust distances using Fast-MCD.} \item{qr.out}{whether to return the QR decomposition (see \code{\link{qr}}); defaults to false.} \item{yname}{the name of the dependent variable. Default is \code{yname = NULL}} \item{seed}{initial seed for random generator, like \code{\link{.Random.seed}}, see \code{\link{rrcov.control}}.} \item{trace}{logical (or integer) indicating if intermediate results should be printed; defaults to \code{FALSE}; values \eqn{\ge 2}{>= 2} also produce print from the internal (Fortran) code.} \item{use.correction}{ whether to use finite sample correction factors. Default is \code{use.correction=TRUE}} \item{wgtFUN}{a character string or \code{\link{function}}, specifying how the weights for the reweighting step should be computed. Up to April 2013, the only option has been the original proposal in (1999), now specified by \code{wgtFUN = "01.original"} (or via \code{control}).} %% MM: want control also for formula !?!?!?!?!! \item{control}{a list with estimation options - same as these provided in the function specification. If the control object is supplied, the parameters from it will be used. 
If parameters are passed also in the invocation statement, they will override the corresponding elements of the control object.} \item{\dots}{arguments passed to or from other methods.} } \details{ The LTS regression method minimizes the sum of the \eqn{h} smallest squared residuals, where \eqn{h > n/2}, i.e. at least half the number of observations must be used. The default value of \eqn{h} (when \code{alpha=1/2}) is roughly \eqn{n / 2}, more precisely, \code{(n+p+1) \%/\% 2} where \eqn{n} is the total number of observations, but by setting \code{alpha}, the user may choose higher values up to n, where \eqn{h = h(\alpha,n,p) =} \code{\link{h.alpha.n}(alpha,n,p)}. The LTS estimate of the error scale is given by the minimum of the objective function multiplied by a consistency factor and a finite sample correction factor -- see Pison et al. (2002) for details. The rescaling factors for the raw and final estimates are returned also in the vectors \code{raw.cnp2} and \code{cnp2} of length 2 respectively. The finite sample corrections can be suppressed by setting \code{use.correction=FALSE}. The computations are performed using the Fast LTS algorithm proposed by Rousseeuw and Van Driessen (1999). As always, the formula interface has an implied intercept term which can be removed either by \code{y ~ x - 1} or \code{y ~ 0 + x}. See \code{\link{formula}} for more details. } \value{ The function \code{ltsReg} returns an object of class \code{"lts"}. The \code{\link{summary}} method function is used to obtain (and print) a summary table of the results, and \code{\link[=ltsPlot]{plot}()} can be used to plot them, see the the specific help pages. The generic accessor functions \code{\link{coefficients}}, \code{\link{fitted.values}} and \code{\link{residuals}} extract various useful features of the value returned by \code{ltsReg}. An object of class \code{lts} is a \code{\link{list}} containing at least the following components: \item{crit}{ the value of the objective function of the LTS regression method, i.e., the sum of the \eqn{h} smallest squared raw residuals. } \item{coefficients}{ vector of coefficient estimates (including the intercept by default when \code{intercept=TRUE}), obtained after reweighting. } \item{best}{ the best subset found and used for computing the raw estimates, with \code{\link{length}(best) == quan = \link{h.alpha.n}(alpha,n,p)}. } \item{fitted.values}{vector like \code{y} containing the fitted values of the response after reweighting.} \item{residuals}{vector like \code{y} containing the residuals from the weighted least squares regression.} \item{scale}{scale estimate of the reweighted residuals. 
} \item{alpha}{same as the input parameter \code{alpha}.} \item{quan}{the number \eqn{h} of observations which have determined the least trimmed squares estimator.} \item{intercept}{same as the input parameter \code{intercept}.} \item{cnp2}{a vector of length two containing the consistency correction factor and the finite sample correction factor of the final estimate of the error scale.} \item{raw.coefficients}{vector of raw coefficient estimates (including the intercept, when \code{intercept=TRUE}).} \item{raw.scale}{scale estimate of the raw residuals.} \item{raw.resid}{vector like \code{y} containing the raw residuals from the regression.} \item{raw.cnp2}{a vector of length two containing the consistency correction factor and the finite sample correction factor of the raw estimate of the error scale.} \item{lts.wt}{ vector like y containing weights that can be used in a weighted least squares. These weights are 1 for points with reasonably small residuals, and 0 for points with large residuals. } \item{raw.weights}{ vector containing the raw weights based on the raw residuals and raw scale. } \item{method}{character string naming the method (Least Trimmed Squares).} \item{X}{the input data as a matrix (including intercept column if applicable).} \item{Y}{the response variable as a vector.} } \author{Valentin Todorov \email{valentin.todorov@chello.at}, based on work written for S-plus by Peter Rousseeuw and Katrien van Driessen from University of Antwerp.% no E-mails for spam-protection } \references{ Peter J. Rousseeuw (1984), Least Median of Squares Regression. \emph{Journal of the American Statistical Association} \bold{79}, 871--881. P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection.} Wiley. P. J. Rousseeuw and K. van Driessen (1999) A fast algorithm for the minimum covariance determinant estimator. \emph{Technometrics} \bold{41}, 212--223. Pison, G., Van Aelst, S., and Willems, G. (2002) Small Sample Corrections for LTS and MCD. \emph{Metrika} \bold{55}, 111-123. } \seealso{ \code{\link{covMcd}}; \code{\link{summary.lts}} for summaries, \code{\link{lmrob}()} for alternative robust estimator with HBDP. The generic functions \code{\link{coef}}, \code{\link{residuals}}, \code{\link{fitted}}. } \examples{ data(heart) ## Default method works with 'x'-matrix and y-var: heart.x <- data.matrix(heart[, 1:2]) # the X-variables heart.y <- heart[,"clength"] ltsReg(heart.x, heart.y) data(stackloss) ltsReg(stack.loss ~ ., data = stackloss) } \keyword{robust} \keyword{regression} robustbase/man/functionX-class.Rd0000644000176200001440000000201312114153713016537 0ustar liggesusers\name{functionX-class} \docType{class} \alias{functionX-class} \title{Class "functionX" of Psi-like Vectorized Functions} \description{ The class \code{"functionX"} of vectorized functions of one argument \code{x} and typically further tuning parameters. } \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("functionX", ...)}. } \section{Slots}{ \describe{ \item{\code{.Data}:}{Directly extends class \code{"function"}.} } } \section{Extends}{ Class \code{"function"}, from data part. Class \code{"OptionalFunction"}, by class \code{"function"}. Class \code{"PossibleMethod"}, by class \code{"function"}. } \section{Methods}{ No methods defined with class "functionX" in the signature. 
} \author{Martin Maechler} \seealso{ \code{\link{psiFunc}()}, and class descriptions of \code{\linkS4class{functionXal}} for \emph{functionals} of \code{"functionX"}, and \code{\linkS4class{psi_func}} which has several \code{functionX} slots. } % \examples{ % } \keyword{classes} robustbase/man/kootenay.Rd0000644000176200001440000000453313312375575015337 0ustar liggesusers\name{kootenay} \encoding{utf8} \alias{kootenay} \docType{data} \title{Waterflow Measurements of Kootenay River in Libby and Newgate} \description{ The original data set is the waterflow in January of the Kootenay river, measured at two locations, namely, Libby (Montana) and Newgate (British Columbia), for 13 consecutive years, 1931--1943. The data set is mostly of interest because it has been used as an example in innumerable didactic treatments of robust regression. To this end, one number (in observation 4) has been modified from its original value of 44.9 to 15.7 (here). } \usage{data(kootenay, package="robustbase")} \format{ A data frame with 13 observations on the following 2 variables. \describe{ \item{\code{Libby}}{a numeric vector} \item{\code{Newgate}}{a numeric vector} } } \details{ The original (unmodified) version of the data is easily obtainable as \code{kootenay0} from the examples; other modified versions of the data sets are also used in different places, see the examples below. } \source{ Original Data, p.58f of Ezekiel and Fox (1959), \emph{Methods of Correlation and Regression Analysis}. Wiley, N.Y. } \references{ Hampel, F., Ronchetti, E., Rousseeuw, P. and Stahel, W. (1986) \emph{Robust Statistics: The Approach Based on Influence Functions}; Wiley, N.Y. Rousseeuw, P. J. and Leroy, A. M. (1987) \emph{Robust Regression & Outlier Detection}, Wiley, N. Y. } \examples{ data(kootenay) plot(kootenay, main = "'kootenay' data") points(kootenay[4,], col = 2, cex =2, pch = 3) abline(lm (Newgate ~ Libby, data = kootenay), col = "pink") abline(lmrob(Newgate ~ Libby, data = kootenay), col = "blue") ## The original version of Ezekiel & Fox: kootenay0 <- kootenay kootenay0[4, "Newgate"] <- 44.9 plot(kootenay0, main = "'kootenay0': the original data") abline(lm (Newgate ~ Libby, data = kootenay0), col = "pink") abline(lmrob(Newgate ~ Libby, data = kootenay0), col = "blue") ## The version with "milder" outlier -- Hampel et al., p.310 kootenay2 <- kootenay0 kootenay2[4, "Libby"] <- 20.0 # instead of 77.6 plot(kootenay2, main = "The 'kootenay2' data", xlim = range(kootenay[,"Libby"])) points(kootenay2[4,], col = 2, cex =2, pch = 3) abline(lm (Newgate ~ Libby, data = kootenay2), col = "pink") abline(lmrob(Newgate ~ Libby, data = kootenay2), col = "blue") } \keyword{datasets} robustbase/man/adjbox.Rd0000644000176200001440000001723313721410436014743 0ustar liggesusers\name{adjbox} \title{Plot an Adjusted Boxplot for Skew Distributions} \alias{adjbox} \alias{adjbox.default} \alias{adjbox.formula} \description{ Produces boxplots adjusted for skewed distributions as proposed in Hubert and Vandervieren (2008).
} \usage{ adjbox(x, \dots) \method{adjbox}{formula}(formula, data = NULL, \dots, subset, na.action = NULL) \method{adjbox}{default}(x, \dots, range = 1.5, doReflect = FALSE, width = NULL, varwidth = FALSE, notch = FALSE, outline = TRUE, names, plot = TRUE, border = par("fg"), col = NULL, log = "", pars = list(boxwex = 0.8, staplewex = 0.5, outwex = 0.5), horizontal = FALSE, add = FALSE, at = NULL) } \arguments{ \item{formula}{a formula, such as \code{y ~ grp}, where \code{y} is a numeric vector of data values to be split into groups according to the grouping variable \code{grp} (usually a factor).} \item{data}{a data.frame (or list) from which the variables in \code{formula} should be taken.} \item{subset}{an optional vector specifying a subset of observations to be used for plotting.} \item{na.action}{a function which indicates what should happen when the data contain \code{NA}s. The default is to ignore missing values in either the response or the group.} \item{x}{for specifying data from which the boxplots are to be produced. Either a numeric vector, or a single list containing such vectors. Additional unnamed arguments specify further data as separate vectors (each corresponding to a component boxplot). \code{\link{NA}}s are allowed in the data.} \item{\dots}{For the \code{formula} method, named arguments to be passed to the default method. For the default method, unnamed arguments are additional data vectors (unless \code{x} is a list when they are ignored), and named arguments are arguments and graphical parameters to be passed to \code{\link{bxp}} in addition to the ones given by argument \code{pars} (and override those in \code{pars}). } \item{range}{this determines how far the plot whiskers extend out from the box, and is simply passed as argument \code{coef} to \code{\link{adjboxStats}()}. If \code{range} is positive, the whiskers extend to the most extreme data point which is no more than \code{range} times the interquartile range from the box. A value of zero causes the whiskers to extend to the data extremes.} \item{doReflect}{logical indicating if the MC should also be computed on the \emph{reflected} sample \code{-x}, and be averaged, see \code{\link{mc}}.} \item{width}{a vector giving the relative widths of the boxes making up the plot.} \item{varwidth}{if \code{varwidth} is \code{TRUE}, the boxes are drawn with widths proportional to the square-roots of the number of observations in the groups.} \item{notch}{if \code{notch} is \code{TRUE}, a notch is drawn in each side of the boxes. If the notches of two plots do not overlap this is \sQuote{strong evidence} that the two medians differ (Chambers \emph{et al.}, 1983, p. 62). See \code{\link{boxplot.stats}} for the calculations used.} \item{outline}{if \code{outline} is not true, the outliers are not drawn (as points whereas S+ uses lines).}% the argument name is most ugly but S+ compatible \item{names}{group labels which will be printed under each boxplot.} \item{boxwex}{a scale factor to be applied to all boxes. When there are only a few groups, the appearance of the plot can be improved by making the boxes narrower.} \item{staplewex}{staple line width expansion, proportional to box width.} \item{outwex}{outlier line width expansion, proportional to box width.} \item{plot}{if \code{TRUE} (the default) then a boxplot is produced. If not, the summaries which the boxplots are based on are returned.} \item{border}{an optional vector of colors for the outlines of the boxplots. 
The values in \code{border} are recycled if the length of \code{border} is less than the number of plots.} \item{col}{if \code{col} is non-null it is assumed to contain colors to be used to colour the bodies of the box plots. By default they are in the background colour.} \item{log}{character indicating if x or y or both coordinates should be plotted in log scale.} \item{pars}{a list of (potentially many) more graphical parameters, e.g., \code{boxwex} or \code{outpch}; these are passed to \code{\link{bxp}} (if \code{plot} is true); for details, see there.} \item{horizontal}{logical indicating if the boxplots should be horizontal; default \code{FALSE} means vertical boxes.} \item{add}{logical, if true \emph{add} boxplot to current plot.} \item{at}{numeric vector giving the locations where the boxplots should be drawn, particularly when \code{add = TRUE}; defaults to \code{1:n} where \code{n} is the number of boxes.} } \details{ The generic function \code{adjbox} currently has a default method (\code{adjbox.default}) and a formula interface (\code{adjbox.formula}). If multiple groups are supplied either as multiple arguments or via a formula, parallel boxplots will be plotted, in the order of the arguments or the order of the levels of the factor (see \code{\link{factor}}). Missing values are ignored when forming boxplots. Extremes of the upper and whiskers of the adjusted boxplots are computed using the medcouple (\code{\link{mc}()}), a robust measure of skewness. For details, cf. TODO %% << FIXME } \value{ A \code{\link{list}} with the following components: \item{stats}{a matrix, each column contains the extreme of the lower whisker, the lower hinge, the median, the upper hinge and the extreme of the upper whisker for one group/plot. If all the inputs have the same class attribute, so will this component.} \item{n}{a vector with the number of observations in each group.} \item{coef}{a matrix where each column contains the lower and upper extremes of the notch.} \item{out}{the values of any data points which lie beyond the extremes of the whiskers.} \item{group}{a vector of the same length as out whose elements indicate to which group the outlier belongs.} \item{names}{a vector of names for the groups.} } \references{ %% Hubert, M. and Vandervieren, E. (2006) %% \emph{An Adjusted Boxplot for Skewed Distributions}, %% Technical Report TR-06-11, KU Leuven, Section of Statistics, Leuven. %% \url{http://wis.kuleuven.be/stat/robust/Papers/TR0611.pdf} Hubert, M. and Vandervieren, E. (2008). An adjusted boxplot for skewed distributions, \emph{Computational Statistics and Data Analysis} \bold{52}, 5186--5201. \doi{10.1016/j.csda.2007.11.008} } \author{ R Core Development Team, slightly adapted by Tobias Verbeke } \note{ The code and documentation only slightly modifies the code of \code{\link{boxplot.default}}, \code{boxplot.formula} and \code{\link{boxplot.stats}} } \seealso{The medcouple, \code{\link{mc}}; \code{\link{boxplot}}. } \examples{ if(require("boot")) { ### Hubert and Vandervieren (2008), Fig. 5.%(2006): p. 10, Fig. 4. data(coal, package = "boot") coaldiff <- diff(coal$date) op <- par(mfrow = c(1,2)) boxplot(coaldiff, main = "Original Boxplot") adjbox(coaldiff, main = "Adjusted Boxplot") par(op) } ### Hubert and Vandervieren (2008), p. 11, Fig. 
7a -- enhanced op <- par(mfrow = c(2,2), mar = c(1,3,3,1), oma = c(0,0,3,0)) with(condroz, { boxplot(Ca, main = "Original Boxplot") adjbox (Ca, main = "Adjusted Boxplot") boxplot(Ca, main = "Original Boxplot [log]", log = "y") adjbox (Ca, main = "Adjusted Boxplot [log]", log = "y") }) mtext("'Ca' from data(condroz)", outer=TRUE, font = par("font.main"), cex = 2) par(op) } \keyword{hplot} robustbase/man/sigma.Rd0000644000176200001440000000221713710054521014565 0ustar liggesusers\name{sigma} \title{Extract 'Sigma' - Standard Deviation of Errors for Robust Models} \alias{sigma} \alias{sigma.lmrob} \description{ Extract the estimated standard deviation of the errors, the \dQuote{residual standard deviation} (misnomed also \dQuote{residual standard error}) from a fitted model. } \usage{ % Declare S3 method as the generic is no longer in lme4 for R (>= 2015-06-01): \S3method{sigma}{lmrob}(object, \dots) } \arguments{ \item{object}{a fitted model.} \item{\dots}{additional, optional arguments. (None are used in our methods)} } \value{ the residual standard error as a scalar } \details{ For \R \code{<= 3.2.x}, we provide an (S3) generic function (as e.g., package \CRANpkg{lme4}) and methods for \code{\link{lmrob}}, \code{\link{nlrob}}, and \code{\link{nls}}. From \R \code{>= 3.3.0}, we provide methods for our \code{\link{lmrob}} and \code{\link{nlrob}} models. } \examples{ m.cl <- lm (Y ~ ., data=coleman) if(getRversion() >= "3.3.0") sigma(m.cl) else summary(m.cl)$sigma sigma( m1 <- lmrob(Y ~ ., data=coleman) ) sigma( m2 <- lmrob(Y ~ ., data=coleman, setting = "KS2014") ) } \keyword{models} robustbase/man/wagnerGrowth.Rd0000644000176200001440000000405613312375575016163 0ustar liggesusers\name{wagnerGrowth} \encoding{utf8} \Rdversion{1.1} \alias{wagnerGrowth} \docType{data} \title{ Wagner's Hannover Employment Growth Data } \description{ Wagner (1994) investigates the rate of employment growth (\code{y}) as function of percentage of people engaged in \bold{p}roducation \bold{a}ctivities (\code{PA}) and \bold{h}igher \bold{s}ervices (\code{HS}) and of the \bold{g}rowth of these percentages (\code{GPA}, \code{GHS}) during three time periods in 21 geographical regions of the greater Hannover area. } \usage{data(wagnerGrowth, package="robustbase")} \format{ A data frame with \eqn{21 \times 3 = 63}{21 * 3 = 63} observations (one per \code{Region x Period}) on the following 7 variables. \describe{ \item{\code{Region}}{a \code{\link{factor}} with 21 levels, denoting the corresponding region in Hannover (conceptually a \dQuote{block factor}).} \item{\code{PA}}{numeric: percent of people involved in production activities.} \item{\code{GPA}}{\bold{g}rowth of \code{PA}.} \item{\code{HS}}{a numeric vector} \item{\code{GHS}}{a numeric vector} \item{\code{y}}{a numeric vector} \item{\code{Period}}{a \code{\link{factor}} with levels \code{1:3}, denoting the time period, 1 = 1979-1982, 2 = 1983-1988, 3 = 1989-1992.} } } % \details{ % } \source{ Hubert, M. and Rousseeuw, P. J. (1997). Robust regression with both continuous and binary regressors, \emph{Journal of Statistical Planning and Inference} \bold{57}, 153--163. } \references{ Wagner J. (1994). Regionale Beschäftigungsdynamik und höherwertige Produktionsdienste: Ergebnisse für den Grossraum Hannover (1979-1992). \emph{Raumforschung und Raumordnung} \bold{52}, 146--150. 
} \examples{ data(wagnerGrowth) ## maybe str(wagnerGrowth) require(lattice) (xyplot(y ~ Period | Region, data = wagnerGrowth, main = "wagnerGrowth: 21 regions @ Hannover")) (dotplot(y ~ reorder(Region,y,median), data = wagnerGrowth, main = "wagnerGrowth", xlab = "Region [ordered by median(y | Region) ]")) } \keyword{datasets} robustbase/man/pilot.Rd0000644000176200001440000000155113312375575014631 0ustar liggesusers\name{pilot} \alias{pilot} \docType{data} \title{Pilot-Plant Data} \description{ Pilot-Plant data from Daniel and Wood (1971). The response variable corresponds to the acid content determined by titration and the explanatory variable is the organic acid content determined by extraction and weighing. This data set was analyzed also by Yale and Forsythe (1976). } \usage{data(pilot, package="robustbase")} \format{ A data frame with 20 observations on the following 2 variables. \describe{ \item{\code{X}}{Organic acid content - extraction} \item{\code{Y}}{Acid content - titration } } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, page 21, table 1. } \examples{ data(pilot) summary(lm.pilot <- lm(Y ~.,data=pilot)) %%summary(lts.pilot <- ltsReg(Y~.,data=pilot)) } \keyword{datasets} robustbase/man/predict.glmrob.Rd0000644000176200001440000000702611651467605016420 0ustar liggesusers\name{predict.glmrob} \alias{predict.glmrob} \title{Predict Method for Robust GLM ("glmrob") Fits} \description{ Obtains predictions and optionally estimates standard errors of those predictions from a fitted \emph{robust} generalized linear model (GLM) object. } \usage{ \method{predict}{glmrob}(object, newdata = NULL, type = c("link", "response", "terms"), se.fit = FALSE, dispersion = NULL, terms = NULL, na.action = na.pass, \dots) } \arguments{ %% the following is +- copy-pasted from predict.glm.Rd: \item{object}{a fitted object of class inheriting from \code{"glmrob"}.} \item{newdata}{optionally, a data frame in which to look for variables with which to predict. If omitted, the fitted linear predictors are used.} \item{type}{the type of prediction required. The default is on the scale of the linear predictors; the alternative \code{"response"} is on the scale of the response variable. Thus for a default binomial model the default predictions are of log-odds (probabilities on logit scale) and \code{type = "response"} gives the predicted probabilities. The \code{"terms"} option returns a matrix giving the fitted values of each term in the model formula on the linear predictor scale. The value of this argument can be abbreviated. } \item{se.fit}{logical switch indicating if standard errors are required.} \item{dispersion}{the dispersion of the GLM fit to be assumed in computing the standard errors. If omitted, that returned by \code{summary} applied to the object is used.} \item{terms}{with \code{type="terms"} by default all terms are returned. A character vector specifies which terms are to be returned} \item{na.action}{function determining what should be done with missing values in \code{newdata}. The default is to predict \code{NA}.} \item{\dots}{optional further arguments, currently simply passed to \code{\link{predict.lmrob}()}.} } % \details{ % If necessary, more details than the description above ~~ % } \value{ %% the following is +- copy-pasted from predict.glm.Rd: %% also correct,here ? If \code{se = FALSE}, a vector or matrix of predictions. 
If \code{se = TRUE}, a list with components \item{fit}{Predictions} \item{se.fit}{Estimated standard errors} \item{residual.scale}{A scalar giving the square root of the dispersion used in computing the standard errors.} } \author{Andreas Ruckstuhl} \seealso{ \code{\link{glmrob}()} to fit these robust GLM models, \code{\link{residuals.glmrob}()} and other methods; \code{\link{predict.lm}()}, the method used for a non-robust fit. } \examples{ data(carrots) ## simplistic testing & training: i.tr <- sample(24, 20) fm1 <- glmrob(cbind(success, total-success) ~ logdose + block, family = binomial, data = carrots, subset = i.tr) fm1 predict(fm1, carrots[-i.tr, ]) # --> numeric vector predict(fm1, carrots[-i.tr, ], type="response", se = TRUE)# -> a list % FIXME: gives a "bad" error -- should rather say "not yet implemented" % or implement it ! % predict(fm1, carrots[-i.tr, ], interval = "confidence") % predict(fm1, carrots[-i.tr, ], interval = "prediction") data(vaso) Vfit <- glmrob(Y ~ log(Volume) + log(Rate), family=binomial, data=vaso) newd <- expand.grid(Volume = (V. <- seq(.5, 4, by = 0.5)), Rate = (R. <- seq(.25,4, by = 0.25))) p <- predict(Vfit, newd) filled.contour(V., R., matrix(p, length(V.), length(R.)), main = "predict(glmrob(., data=vaso))", xlab="Volume", ylab="Rate") } \keyword{models} \keyword{regression} robustbase/man/covComed.Rd0000644000176200001440000000635613772570740015252 0ustar liggesusers\name{covComed} \title{Co-Median Location and Scatter "Covariance" Estimator} \alias{covComed} %%TODO: these two are not yet imported and hence not yet documented below: \alias{comedian} \alias{COM} \alias{.wgtFUN.covComed} % \concept{Co-median} \concept{Comedian} \description{ Compute (versions of) the (multivariate) \dQuote{Comedian} covariance, i.e., multivariate location and scatter estimator } \usage{ covComed(X, n.iter = 2, reweight = FALSE, tolSolve = control$tolSolve, trace = control$trace, wgtFUN = control$wgtFUN, control = rrcov.control()) % comedian(...) % COM(...) } \arguments{ \item{X}{data matrix of dimension, say \eqn{n \times p}{n x p}.} \item{n.iter}{number of comedian() iterations. Can be as low as zero.} \item{reweight}{logical indicating if the final distances and weights should be recomputed from the final \code{cov} and \code{center}. The default is currently \code{FALSE} because that was implicit in the first version of the \R code.} \item{tolSolve}{a numerical tolerance passed to \code{\link{solve}}.} \item{trace}{logical (or integer) indicating if intermediate results should be printed; defaults to \code{FALSE}; values \eqn{\ge 2}{>= 2} also produce print from the internal (Fortran) code.} \item{wgtFUN}{a character string or \code{\link{function}}, specifying how the weights for the reweighting step should be computed. The default, \code{wgtFUN = "01.original"} corresponds to 0-1 weights as proposed originally. Other predefined string options are available, though experimental, see the experimental \code{.wgtFUN.covComed} object.} \item{control}{a list with estimation options - this includes those above provided in the function specification, see \code{\link{rrcov.control}} for the defaults. If \code{control} is supplied, the parameters from it will be used. If parameters are passed also in the invocation statement, they will override the corresponding elements of the control object.} } \details{ .. not yet .. 
} \value{ an object of class \code{"covComed"} which is basically a list with components \item{comp1 }{Description of 'comp1'} \item{comp2 }{Description of 'comp2'} ... FIXME ... } \references{ Falk, M. (1997) On mad and comedians. \emph{Annals of the Institute of Statistical Mathematics} \bold{49}, 615--644. Falk, M. (1998). A note on the comedian for elliptical distributions. \emph{Journal of Multivariate Analysis} \bold{67}, 306--317. } \author{ Maria Anna di Palma (initial), Valentin Todorov and Martin Maechler } \seealso{ \code{\link{covMcd}}, etc } \examples{ data(hbk) hbk.x <- data.matrix(hbk[, 1:3]) (cc1 <- covComed(hbk.x)) (ccW <- covComed(hbk.x, reweight=TRUE)) cc0 <- covComed(hbk.x, n.iter=0) cc0W <- covComed(hbk.x, n.iter=0, reweight=TRUE) stopifnot(all.equal(unclass(cc0), # here, the 0-1 weights don't change: cc0W[names(cc0)], tol=1e-12, check.environment = FALSE), which(cc1$weights == 0) == 1:14, which(ccW$weights == 0) == 1:14, which(cc0$weights == 0) == 1:14) %% TODO : more ## Martin's smooth reweighting: ## List of experimental pre-specified wgtFUN() creators: ## Cutoffs may depend on (n, p, control$beta) : str(.wgtFUN.covComed) } robustbase/man/residuals.glmrob.Rd0000644000176200001440000000523611651611170016746 0ustar liggesusers% Origin: src/library/stats/man/glm.summaries.Rd (as of 2011-10-23) \name{residuals.glmrob} \alias{residuals.glmrob} \title{Residuals of Robust Generalized Linear Model Fits} \usage{ \method{residuals}{glmrob}(object, type = c("deviance", "pearson", "working", "response", "partial"), \dots) } \arguments{ \item{object}{an object of class \code{glmrob}, typically the result of a call to \code{\link{glmrob}}.} \item{type}{the type of residuals which should be returned. The alternatives are: \code{"deviance"} (default), \code{"pearson"}, \code{"working"}, \code{"response"}, and \code{"partial"}.} \item{\dots}{further arguments passed to or from other methods.} } \description{ Compute residuals of a fitted \code{\link{glmrob}} model, i.e., robust generalized linear model fit. } \details{ The references in \code{\link{glm}} define the types of residuals: Davison & Snell is a good reference for the usages of each. The partial residuals are a matrix of working residuals, with each column formed by omitting a term from the model. The \code{residuals} (S3) method (see \code{\link{methods}}) for \code{\link{glmrob}} models has been modeled to follow closely the method for classical (non-robust) \code{\link{glm}} fitted models. Possibly, see its documentation, i.e., \link{residuals.glm}, for further details. } \seealso{ \code{\link{glmrob}} for computing \code{object}, \code{\link{anova.glmrob}}; the corresponding \emph{generic} functions, \code{\link{summary.glmrob}}, \code{\link{coef}}, % \code{\link{deviance}}, \code{\link{effects}}, \code{\link{fitted}}, \code{\link{residuals}}. } \references{ See those for the classical GLM's, \code{\link{glm}}. 
} \examples{ ### -------- Gamma family -- data from example(glm) --- clotting <- data.frame( u = c(5,10,15,20,30,40,60,80,100), lot1 = c(118,58,42,35,27,25,21,19,18), lot2 = c(69,35,26,21,18,16,13,12,12)) summary(cl <- glm (lot1 ~ log(u), data=clotting, family=Gamma)) summary(ro <- glmrob(lot1 ~ log(u), data=clotting, family=Gamma)) clotM5.high <- within(clotting, { lot1[5] <- 60 }) cl5.high <- glm (lot1 ~ log(u), data=clotM5.high, family=Gamma) ro5.high <- glmrob(lot1 ~ log(u), data=clotM5.high, family=Gamma) rr <- range(residuals(ro), residuals(cl), residuals(ro5.high)) plot(residuals(ro5.high) ~ residuals(cl5.high), xlim = rr, ylim = rr, asp = 1) abline(0,1, col=2, lty=3) points(residuals(ro) ~ residuals(cl), col = "gray", pch=3) ## Show all kinds of residuals: r.types <- c("deviance", "pearson", "working", "response") sapply(r.types, residuals, object = ro5.high) } \keyword{models} \keyword{regression} robustbase/man/adjOutlyingness.Rd0000644000176200001440000002340514430460426016656 0ustar liggesusers\name{adjOutlyingness} \alias{adjOutlyingness} \title{Compute (Skewness-adjusted) Multivariate Outlyingness} \description{ For an \eqn{n \times p}{n * p} data matrix (or data frame) \code{x}, compute the \dQuote{\emph{outlyingness}} of all \eqn{n} observations. Outlyingness here is a generalization of the Donoho-Stahel outlyingness measure, where skewness is taken into account via the medcouple, \code{\link{mc}()}. } \usage{ adjOutlyingness(x, ndir = 250, p.samp = p, clower = 4, cupper = 3, IQRtype = 7, alpha.cutoff = 0.75, coef = 1.5, qr.tol = 1e-12, keep.tol = 1e-12, only.outlyingness = FALSE, maxit.mult = max(100, p), trace.lev = 0, mcReflect = n <= 100, mcScale = TRUE, mcMaxit = 2*maxit.mult, mcEps1 = 1e-12, mcEps2 = 1e-15, mcTrace = max(0, trace.lev-1)) } \arguments{ \item{x}{a numeric \eqn{n \times p}{n * p} \code{\link{matrix}} or \code{\link{data.frame}}, which must be of full rank \eqn{p}.} \item{ndir}{positive integer specifying the number of directions that should be searched.} \item{p.samp}{the sample size to use for finding good random directions, must be at least \code{p}. The default, \code{p} had been hard coded previously.} \item{clower, cupper}{the constant to be used for the lower and upper tails, in order to transform the data towards symmetry. You can set \code{clower = 0, cupper = 0} to get the \emph{non}-adjusted, i.e., classical (\dQuote{central} or \dQuote{symmetric}) outlyingness. In that case, \code{\link{mc}()} is not used.} \item{IQRtype}{a number from \code{1:9}, denoting type of empirical quantile computation for the \code{\link{IQR}()}. The default 7 corresponds to \code{\link{quantile}}'s and \code{\link{IQR}}'s default. MM has evidence that \code{type=6} would be a bit more stable for small sample size.} \item{alpha.cutoff}{number in (0,1) specifying the quantiles \eqn{(\alpha, 1-\alpha)} which determine the \dQuote{outlier} cutoff. The default, using quartiles, corresponds to the definition of the medcouple (\code{\link{mc}}), but there is no stringent reason for using the same alpha for the outlier cutoff.} \item{coef}{positive number specifying the factor with which the interquartile range (\code{\link{IQR}}) is multiplied to determine \sQuote{boxplot hinges}-like upper and lower bounds.} \item{qr.tol}{positive tolerance to be used for \code{\link{qr}} and \code{\link{solve.qr}} for determining the \code{ndir} directions, each determined by a random sample of \eqn{p} (out of \eqn{n}) observations. 
Note that the default \eqn{10^{-12}} is rather small, and \code{\link{qr}}'s default \code{= 1e-7} may be more appropriate.} \item{keep.tol}{positive tolerance to determine which of the sample direction should be kept, namely only those for which \eqn{\|x\| \cdot \|B\|}{||x|| * ||B||} is larger than \code{keep.tol}.} \item{only.outlyingness}{logical indicating if the final outlier determination should be skipped. In that case, a vector is returned, see \sQuote{Value:} below.} \item{maxit.mult}{integer factor; \code{maxit <- maxit.mult * ndir} will determine the maximal number of direction searching iterations. May need to be increased for higher dimensional data, though increasing \code{ndir} may be more important.} \item{trace.lev}{an integer, if positive allows to monitor the direction search.} %% new (Aug-Dec 2020), related to mc(): see >>> ./mc.Rd <<< \item{mcReflect}{passed as \code{doReflect} to \code{\link{mc}()}.} \item{mcScale}{passed as \code{doScale} to \code{\link{mc}()}.} \item{mcMaxit}{passed as \code{maxit} to \code{\link{mc}()}.} \item{mcEps1}{passed as \code{eps1} to \code{\link{mc}()}; the default is slightly looser (100 larger) than the default for \code{mc}().} \item{mcEps2}{passed as \code{eps2} to \code{\link{mc}()}.} \item{mcTrace}{passed as \code{trace.lev} to \code{\link{mc}()}.} } \note{ % During Lukas Graz' Master's thesis (Spring 2021), it finally became clear to MM: If there are too many degrees of freedom for the projections, i.e., when \eqn{n \le 4p}{n <= 4*p}, the current definition of adjusted outlyingness is ill-posed, as one of the projections may lead to a denominator (quartile difference) of zero, and hence formally an adjusted outlyingness of infinity. The current implementation avoids \code{Inf} results, but will return seemingly random \code{adjout} values of around \eqn{10^{14} -- 10^{15}} which may be completely misleading, see, e.g., the \code{longley} data example. The result is \emph{random} as it depends on the sample of \code{ndir} directions chosen; specifically, to get sub samples the algorithm uses \code{\link{sample.int}(n, p.samp)} which from \R version 3.6.0 depends on \code{\link{RNGkind}(*, sample.kind)}. Exact reproducibility of results from \R versions 3.5.3 and earlier, requires setting \code{\link{RNGversion}("3.5.0")}.% same text in ./glmrob.Rd ("MT") In any case, do use \code{\link{set.seed}()} yourself for reproducibility! Till Aug/Oct. 2014, the default values for \code{clower} and \code{cupper} were accidentally reversed, and the signs inside \code{exp(.)} where swapped in the (now corrected) two expressions \preformatted{ tup <- Q3 + coef * IQR * exp(.... + clower * tmc * (tmc < 0)) tlo <- Q1 - coef * IQR * exp(.... - cupper * tmc * (tmc < 0)) } already in the code from Antwerpen (\file{mcrsoft/adjoutlingness.R}), contrary to the published reference. Further, the original algorithm had not been scale-equivariant in the direction construction, which has been amended in 2014-10 as well. The results, including diagnosed outliers, therefore have changed, typically slightly, since \pkg{robustbase} version 0.92-0. } \details{ \bold{FIXME}: Details in the comment of the Matlab code; also in the reference(s). %% SEE /u/maechler/R/MM/STATISTICS/robust/MC/mcmatl/adjoutlyingness.m %% ---- which has notes about input/output etc of the corresponding %% Matlab code The method as described can be useful as preprocessing in FASTICA (\url{http://research.ics.aalto.fi/ica/fastica/} see also the \R package \CRANpkg{fastICA}. 
} \value{ If \code{only.outlyingness} is true, a vector \code{adjout}, otherwise, as by default, a list with components \item{adjout}{numeric of \code{length(n)} giving the adjusted outlyingness of each observation.} \item{cutoff}{cutoff for \dQuote{outlier} with respect to the adjusted outlyingnesses, and depending on \code{alpha.cutoff}.} \item{nonOut}{logical of \code{length(n)}, \code{TRUE} when the corresponding observation is \bold{non}-outlying with respect to the cutoff and the adjusted outlyingnesses.} } \references{ Brys, G., Hubert, M., and Rousseeuw, P.J. (2005) A Robustification of Independent Component Analysis; \emph{Journal of Chemometrics}, \bold{19}, 1--12. Hubert, M., Van der Veeken, S. (2008) Outlier detection for skewed data; \emph{Journal of Chemometrics} \bold{22}, 235--246; \doi{10.1002/cem.1123}. %% preprint \url{http://wis.kuleuven.be/stat/robust/papers/2008/outlierdetectionskeweddata-revision.pdf} %%MM: Journal-pdf ~/save/papers/robust-diverse/Hubert_VdV_skewed-Chemom_2008.pdf %%MM: Compstat 2010: Slides (of talk) and paper of Mia H: %% ~/save/papers/robust-diverse/Hubert_skewed-CS2010-slides.pdf and %% ~/save/papers/robust-diverse/Hubert_skewed-CS2010-paper.pdf (slides are better !!) For the up-to-date reference, please consult %\url{https://wis.kuleuven.be/stat/robust}% to sound modern: \url{https://wis.kuleuven.be/statdatascience/robust} } \author{Guy Brys; help page and improvements by Martin Maechler} \seealso{the adjusted boxplot, \code{\link{adjbox}} and the medcouple, \code{\link{mc}}. } \examples{ ## An Example with bad condition number and "border case" outliers dim(longley) # 16 x 7 // set seed, as result is random : set.seed(31) ao1 <- adjOutlyingness(longley, mcScale=FALSE) ## which are outlying ? which(!ao1$nonOut) ## for this seed, two: "1956", "1957"; (often: none) ## For seeds 1:100, we observe (Linux 64b) if(FALSE) { adjO <- sapply(1:100, function(iSeed) { set.seed(iSeed); adjOutlyingness(longley)$nonOut }) table(nrow(longley) - colSums(adjO)) } ## #{outl.}: 0 1 2 3 ## #{cases}: 74 17 6 3 ## An Example with outliers : dim(hbk) set.seed(1) ao.hbk <- adjOutlyingness(hbk) str(ao.hbk) hist(ao.hbk $adjout)## really two groups table(ao.hbk$nonOut)## 14 outliers, 61 non-outliers: ## outliers are : which(! ao.hbk$nonOut) # 1 .. 14 --- but not for all random seeds! ## here, they are(*) the same as found by (much faster) MCD: ## *) only "almost", since the 2023-05 change to covMcd() cc <- covMcd(hbk) table(cc = cc$mcd.wt, ao = ao.hbk$nonOut)# one differ..: stopifnot(sum(cc$mcd.wt != ao.hbk$nonOut) <= 1) ## This is revealing: About 1--2 cases, where outliers are *not* == 1:14 ## (2023: ~ 1/8 [sec] per call) if(interactive()) { for(i in 1:30) { print(system.time(ao.hbk <- adjOutlyingness(hbk))) if(!identical(iout <- which(!ao.hbk$nonOut), 1:14)) { cat("Outliers:\n"); print(iout) } } } ## "Central" outlyingness: *not* calling mc() anymore, since 2014-12-11: trace(mc) out <- capture.output( oo <- adjOutlyingness(hbk, clower=0, cupper=0) ) untrace(mc) stopifnot(length(out) == 0) ## A rank-deficient case T <- tcrossprod(data.matrix(toxicity)) try(adjOutlyingness(T, maxit. = 20, trace.lev = 2)) # fails and recommends: T. <- fullRank(T) aT <- adjOutlyingness(T.) plot(sort(aT$adjout, decreasing=TRUE), log="y") plot(T.[,9:10], col = (1:2)[1 + (aT$adjout > 10000)]) ## .. (not conclusive; directions are random, more 'ndir' makes a difference!) 
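## Additional small sketch (not from the original help page): with
## 'only.outlyingness = TRUE' the final outlier determination is skipped and
## just the numeric vector of adjusted outlyingness values is returned
## (one value per observation); reusing 'hbk' from above:
set.seed(1)
ao.vec <- adjOutlyingness(hbk, only.outlyingness = TRUE)
str(ao.vec)  # numeric vector, length nrow(hbk)
stopifnot(length(ao.vec) == nrow(hbk))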
} \keyword{robust} \keyword{multivariate} robustbase/man/biomassTill.Rd0000644000176200001440000000644213312375575015770 0ustar liggesusers\name{biomassTill} \alias{biomassTill} \docType{data} \title{Biomass Tillage Data} \description{ An agricultural experiment in which different tillage methods were implemented. The effects of tillage on plant (maize) biomass were subsequently determined by modeling biomass accumulation for each tillage treatment using a 3 parameter Weibull function. A datset where the total biomass is modeled conditional on a three value factor, and hence \emph{vector} parameters are used. } \usage{data("biomassTill", package="robustbase")} \format{ A data frame with 58 observations on the following 3 variables. \describe{ \item{\code{Tillage}}{Tillage treatments, a \code{\link{factor}} with levels \describe{ \item{\code{CA-}:}{a no-tillage system with plant residues removed} \item{\code{CA+}:}{a no-tillage system with plant residues retained} \item{\code{CT}:}{a conventionally tilled system with residues incorporated} } } \item{\code{DVS}}{the development stage of the maize crop. A DVS of \code{1} represents maize anthesis (flowering), and a DVS of \code{2} represents physiological maturity. For the data, numeric vector with 5 different values between 0.5 and 2.} \item{\code{Biomass}}{accumulated biomass of maize plants from each tillage treatment.} \item{\code{Biom.2}}{the same as \code{Biomass}, but with three values replaced by \dQuote{gross errors}.} } } \source{ From Strahinja Stepanovic and John Laborde, Department of Agronomy & Horticulture, University of Nebraska-Lincoln, USA } %% \references{ %% %% ~~ possibly secondary sources and usages ~~ %% } \examples{ data(biomassTill) str(biomassTill) require(lattice) ## With long tailed errors xyplot(Biomass ~ DVS | Tillage, data = biomassTill, type=c("p","smooth")) ## With additional 2 outliers: xyplot(Biom.2 ~ DVS | Tillage, data = biomassTill, type=c("p","smooth")) ### Fit nonlinear Regression models: ----------------------------------- ## simple starting values, needed: m00st <- list(Wm = rep(300, 3), a = rep( 1.5, 3), b = rep( 2.2, 3)) robm <- nlrob(Biomass ~ Wm[Tillage] * (-expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, maxit = 200) ## ----------- summary(robm) ## ... 103 IRWLS iterations plot(sort(robm$rweights), log = "y", main = "ordered robustness weights (log scale)") mtext(getCall(robm)) ## the classical (only works for the mild outliers): cl.m <- nls(Biomass ~ Wm[Tillage] * (-expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st) ## now for the extra-outlier data: -- fails with singular gradient !! try( rob2 <- nlrob(Biom.2 ~ Wm[Tillage] * (-expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st) ) ## use better starting values: m1st <- setNames(as.list(as.data.frame(matrix( coef(robm), 3))), c("Wm", "a","b")) try(# just breaks a bit later! rob2 <- nlrob(Biom.2 ~ Wm[Tillage] * (-expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m1st, maxit= 200, trace=TRUE) ) ## Comparison {more to come} % once we have "MM" working... 
rbind(start = unlist(m00st), class = coef(cl.m), rob = coef(robm))
}
\keyword{datasets}
robustbase/man/classPC.Rd0000644000176200001440000000705112661565243015032 0ustar liggesusers\name{classPC}
\title{Compute Classical Principal Components via SVD or Eigen}
\alias{classPC}
\concept{PCA}
\description{
 Compute classical principal components (PC) via SVD (\code{\link{svd}}) or eigenvalue decomposition (\code{\link{eigen}}) with non-trivial rank determination.
}
\usage{
classPC(x, scale = FALSE, center = TRUE, signflip = TRUE, via.svd = n > p, scores = FALSE)
}
\arguments{
 \item{x}{a numeric \code{\link{matrix}}.}
 \item{scale}{logical indicating if the matrix should be scaled; it is mean centered in any case (via \code{\link{scale}(*, scale=scale)}).}
 \item{center}{logical or numeric vector for \dQuote{centering} the matrix.}
 \item{signflip}{logical indicating if the sign(.) of the loadings should be flipped such that the absolutely largest value is always positive.}
 \item{via.svd}{logical indicating if the computation is via SVD or Eigen decomposition; the latter makes sense typically only for n <= p.}
 \item{scores}{logical indicating if the scores should be computed and returned as well.}
}
\author{ Valentin Todorov; efficiency tweaks by Martin Maechler }
\value{
 a \code{\link{list}} with components
 \item{rank}{the (numerical) matrix rank of \code{x}; an integer number, say \eqn{k}, from \code{0:min(dim(x))}. In the \eqn{n > p} case, it is \code{\link{rankMM}(x)}.}
 \item{eigenvalues}{the \eqn{k} eigenvalues, in the \eqn{n > p} case, proportional to the variances.}
 \item{loadings}{the loadings, a \eqn{p \times k}{p * k} matrix.}
 \item{scores}{if the \code{scores} argument was true, the \eqn{n \times k}{n * k} matrix of scores, where \eqn{k} is the \code{rank} above.}
 \item{center}{a numeric \eqn{p}-vector of means, unless the \code{center} argument was false.}
 \item{scale}{if the \code{scale} argument was not false, the \code{scale} used, a \eqn{p}-vector.}
}
%% \details{
%% }
%% \references{
%% }
\seealso{
 In spirit very similar to \R's standard \code{\link{prcomp}} and \code{\link{princomp}}, one of the main differences being how the \emph{rank} is determined via a non-trivial tolerance.
}
\examples{
set.seed(17)
x <- matrix(rnorm(120), 10, 12) # n < p {the unusual case}
pcx <- classPC(x)
(k <- pcx$rank) # = 9 [after centering!]
pc2 <- classPC(x, scores=TRUE)
pcS <- classPC(x, via.svd=TRUE)
all.equal(pcx, pcS, tol = 1e-8) ## TRUE: eigen() & svd() based PC are close here
pc0 <- classPC(x, center=FALSE, scale=TRUE)
pc0$rank # = 10 here *no* centering (as E[.] = 0)
## Loadings are orthonormal:
zapsmall( crossprod( pcx$loadings ) )
## PC Scores are roughly orthogonal:
S.S <- crossprod(pc2$scores)
print.table(signif(zapsmall(S.S), 3), zero.print=".")
stopifnot(all.equal(pcx$eigenvalues, diag(S.S)/k))
## the usual n > p case :
pc.x <- classPC(t(x))
pc.x$rank # = 10, full rank in the n > p case
cpc1 <- classPC(cbind(1:3)) # 1-D matrix
stopifnot(cpc1$rank == 1, all.equal(cpc1$eigenvalues, 1), all.equal(cpc1$loadings, 1))
\dontshow{
stopifnot(classPC(x, center=FALSE)$rank == min(dim(x)))
ii <- names(pcx); ii <- ii[ii != "scores"]
stopifnot(all.equal(pcx[ii], pc2[ii], tol=0), all.equal(pcx, pcS, tol=1e-8),
 length(pc.x$center) == 10, identical(pc0$center, FALSE),
 all.equal(crossprod(pcx $loadings), diag(9)),
 all.equal(crossprod(pc.x$loadings), diag(10)),
 all.equal(colSums(abs(pcx$loadings)),
 c(2.69035673, 2.78449399, 3.00148438, 2.9016688, 2.49400759,
 2.90477204, 3.01639807, 2.4217181, 2.64665957)),
 length(pc0$scale) == 12)
}% dont..
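## Relation to R's standard PCA (see 'See Also'): in the n > p case the
## loadings of classPC() and prcomp() should agree up to column signs --
## a sketch (not a strict test), reusing pc.x and t(x) from above:
p.x <- prcomp(t(x))
all.equal(unname(abs(pc.x$loadings)), unname(abs(p.x$rotation)), tolerance = 1e-7)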
} \keyword{multivariate} robustbase/man/anova.lmrob.Rd0000644000176200001440000001010612221620231015670 0ustar liggesusers\name{anova.lmrob} \alias{anova.lmrob} \title{Analysis of Robust Deviances ('anova') for "lmrob" Objects} \description{ Compute an analysis of robust Wald-type or deviance-type test tables for one or more linear regression models fitted by \code{\link{lmrob}}. } \usage{ \method{anova}{lmrob}(object, \dots, test = c("Wald", "Deviance"), verbose = getOption("verbose")) } \arguments{ \item{object, \dots}{objects of class \code{"lmrob"}, typically the result of a call to \code{\link{lmrob}}. \code{\dots} arguments may also be symbolic descriptions of the reduced models (cf. argument \code{formula} in \code{\link{lm}}). } \item{test}{a character string specifying the test statistic to be used. Can be one of \code{"Wald"} or \code{"Deviance"}, with partial matching allowed, for specifying a \code{"Wald"}-type test or \code{"Deviance"}-type test.} \item{verbose}{logical; if true some informative messages are printed.} } \details{ Specifying a single object gives a sequential analysis of a robust quasi-deviance table for that fit. That is, the reductions in the robust residual deviance as each term of the formula is added in turn are given in as the rows of a table. (Currently not yet implemented.) If more than one object is specified, the table has a row for the residual quasi-degrees of freedom (however, this information is never used in the asymptotic tests). For all but the first model, the change in degrees of freedom and robust deviance is also given. (This only makes statistical sense if the models are nested.) As opposed to the convention, the models are forced to be listed from largest to smallest due to computational reasons. In addition, the table will contain test statistics and P values comparing the reduction in robust deviances for the model on the row to that on top of it. There are two different robust tests available: The "Wald"-type test (\code{test = "Wald"}) and the Deviance-type test (\code{test = "Deviance"}). When using formula description of the nested models in the dot arguments and \code{test = "Deviance"}, you may be urged to supply a \code{\link{lmrob}} fit for these models by an error message. This happens when the coefficients of the largest model reduced to the nested models result in invalid initial estimates for the nested models (indicated by robustness weights which are all 0). The comparison between two or more models by \code{\link{anova.lmrob}} will only be valid if they are fitted to the same dataset. } \value{ Basically, an object of class \code{\link{anova}} inheriting from class \code{\link{data.frame}}. } %%\references{ ~put references to the literature/web site here ~ } \author{Andreas Ruckstuhl} \seealso{\code{\link{lmrob}}, \code{\link{anova}}. } \examples{ data(salinity) summary(m0.sali <- lmrob(Y ~ . , data = salinity)) anova(m0.sali, Y ~ X1 + X3) ## -> X2 is not needed (m1.sali <- lmrob(Y ~ X1 + X3, data = salinity)) anova(m0.sali, m1.sali) # the same as before anova(m0.sali, m1.sali, test = "Deviance") ## whereas 'X3' is highly significant: m2 <- update(m0.sali, ~ . -X3) anova(m0.sali, m2) anova(m0.sali, m2, test = "Deviance") ## Global test [often not interesting]: anova(m0.sali, update(m0.sali, . ~ 1), test = "Wald") anova(m0.sali, update(m0.sali, . 
~ 1), test = "Deviance") if(require("MPV")) { ## Montgomery, Peck & Vining datasets Jet <- table.b13 Jet.rflm1 <- lmrob(y ~ ., data=Jet, control = lmrob.control(max.it = 500)) summary(Jet.rflm1) anova(Jet.rflm1, y ~ x1 + x5 + x6, test="Wald") try( anova(Jet.rflm1, y ~ x1 + x5 + x6, test="Deviance") ) ## -> Error in anovaLm.... Please fit the nested models by lmrob ## {{ since all robustness weights become 0 in the nested model ! }} ## Ok: Do as the error message told us: ## test by comparing the two *fitted* models: Jet.rflm2 <- lmrob(y ~ x1 + x5 + x6, data=Jet, control=lmrob.control(max.it=100)) anova(Jet.rflm1, Jet.rflm2, test="Deviance") } # end{"MPV" data} } \keyword{robust} \keyword{models} \keyword{regression} robustbase/man/plot.lmrob.Rd0000644000176200001440000000565412513246457015600 0ustar liggesusers\name{plot.lmrob} \alias{plot.lmrob} \title{Plot Method for "lmrob" Objects} \description{ Diagnostic plots for elements of class lmrob } \usage{ \method{plot}{lmrob}(x, which = 1:5, caption = c("Standardized residuals vs. Robust Distances", "Normal Q-Q vs. Residuals", "Response vs. Fitted Values", "Residuals vs. Fitted Values" , "Sqrt of abs(Residuals) vs. Fitted Values"), panel = if(add.smooth) panel.smooth else points, sub.caption = deparse(x$call), main = "", compute.MD = TRUE, ask = prod(par("mfcol")) < length(which) && dev.interactive(), id.n = 3, labels.id = names(residuals(x)), cex.id = 0.75, label.pos = c(4,2), qqline = TRUE, add.smooth = getOption("add.smooth"), \dots, p=0.025) } \arguments{ \item{x}{ an object as created by \code{lmrob} } \item{which}{ integer number between 1 and 5 to specify which plot is desired } \item{caption}{Caption for the different plots} \item{panel}{panel function. The useful alternative to \code{\link{points}}, \code{\link{panel.smooth}} can be chosen by \code{add.smooth = TRUE}.} \item{main}{main title} \item{sub.caption}{sub titles} \item{compute.MD}{logical indicating if the robust Mahalanobis distances should be recomputed, using \code{\link{covMcd}()} when needed, i.e., if \code{which} contains \code{1}.} \item{ask}{waits for user input before displaying each plot } \item{id.n}{number of points to be labelled in each plot, starting with the most extreme.} \item{labels.id}{vector of labels, from which the labels for extreme points will be chosen. \code{NULL} uses observation numbers.} \item{cex.id}{magnification of point labels.} \item{label.pos}{positioning of labels, for the left half and right half of the graph respectively.}% for plots ... \item{qqline}{logical indicating if a \code{\link{qqline}()} should be added to the normal Q-Q plot.} \item{add.smooth}{logical indicating if a smoother should be added to most plots; see also \code{panel} above.} \item{\dots}{optional arguments for \code{\link{par}}, \code{\link{title}}, etc.} \item{p}{threshold for distance-distance plot} } \references{ Robust diagnostic plots as in Rousseeuw and van Zomeren (1990), see \sQuote{References} in \code{\link{ltsPlot}}. } \details{ if \code{compute.MD = TRUE} and the robust Mahalanobis distances need to be computed, they are stored (\dQuote{cached}) with the object \code{x} when this function has been called from top-level. } \seealso{ \code{\link{lmrob}}, also for examples, \code{\link{plot.lm}}. 
} \examples{ data(starsCYG) ## Plot simple data and fitted lines plot(starsCYG) lmST <- lm(log.light ~ log.Te, data = starsCYG) RlmST <- lmrob(log.light ~ log.Te, data = starsCYG) RlmST abline(lmST, col = "red") abline(RlmST, col = "blue") op <- par(mfrow = c(2,2), mgp = c(1.5, 0.6, 0), mar= .1+c(3,3,3,1)) plot(RlmST, which = c(1:2, 4:5)) par(op) } \keyword{robust} \keyword{regression} robustbase/man/pension.Rd0000644000176200001440000000223313312375575015153 0ustar liggesusers\name{pension} \alias{pension} \title{Pension Funds Data} \description{ The total 1981 premium income of pension funds of Dutch firms, for 18 Professional Branches, from de Wit (1982). } \usage{data(pension, package="robustbase")} \format{ A data frame with 18 observations on the following 2 variables. \describe{ \item{\code{Income}}{Premium Income (in millions of guilders)} \item{\code{Reserves}}{Premium Reserves (in millions of guilders)} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.76, table 13. } \examples{ data(pension) plot(pension) summary(lm.p <- lm(Reserves ~., data=pension)) summary(lmR.p <- lmrob(Reserves ~., data=pension)) summary(lts.p <- ltsReg(Reserves ~., data=pension)) abline( lm.p) abline(lmR.p, col=2) abline(lts.p, col=2, lty=2) ## MM: "the" solution is much simpler: plot(pension, log = "xy") lm.lp <- lm(log(Reserves) ~ log(Income), data=pension) lmR.lp <- lmrob(log(Reserves) ~ log(Income), data=pension) plot(log(Reserves) ~ log(Income), data=pension) ## no difference between LS and robust: abline( lm.lp) abline(lmR.lp, col=2) } \keyword{datasets} robustbase/man/possumDiv.Rd0000644000176200001440000001163113713564014015464 0ustar liggesusers\name{possumDiv} \alias{possumDiv} \alias{possum.mat} \docType{data} \title{Possum Diversity Data} \description{ Possum diversity data: As issued from a study of the diversity of possum (arboreal marsupials) in the Montane ash forest (Australia), this dataset was collected in view of the management of hardwood forest to take conservation and recreation values, as well as wood production, into account. The study is fully described in the two references. The number of different species of arboreal marsupials (possum) was observed on 151 different 3ha sites with uniform vegetation. For each site the nine variable measures (see below) were recorded. The problem is to model the relationship between \code{diversity} and these other variables. } \usage{data(possumDiv, package="robustbase")} \format{ Two different representations of the same data are available: \code{possumDiv} is a data frame of 151 observations of 9 variables, where the last two are factors, \code{eucalyptus} with 3 levels and \code{aspect} with 4 levels. \code{possum.mat} is a numeric (integer) matrix of 151 rows (observations) and 14 columns (variables) where the last seven ones are 0-1 dummy variables, three (\code{E.*}) are coding for the kind of \code{eucalyptus} and the last four are 0-1 coding for the \code{aspect} factor. 
The variables have the following meaning: \describe{ \item{Diversity}{main variable of interest is the number of different species of arboreal marsupial (possum) observed, with values in 0:5.} \item{Shrubs}{the number of shrubs.} \item{Stumps}{the number of cut stumps from past logging operations.} \item{Stags}{the number of stags (hollow-bearing trees).} \item{Bark}{bark index (integer) vector reflecting the quantity of decorticating bark.} \item{Habitat}{an integer score indicating the suitability of nesting and foraging habitat for Leadbeater's possum.} \item{BAcacia}{a numeric vector giving the basal area of acacia species.} \cr \item{eucalyptus}{a 3-level \code{\link{factor}} specifying the species of eucalypt with the greatest stand basal area. This has the same information as the following three variables} \item{E.regnans}{0-1 indicator for Eucalyptus regnans} \item{E.delegatensis}{0-1 indicator for Eucalyptus deleg.} \item{E.nitens}{0-1 indicator for Eucalyptus nitens} \cr \item{aspect}{a 4-level \code{\link{factor}} specifying the aspect of the site. It is the same information as the following four variables.} \item{NW-NE}{0-1 indicator} \item{NW-SE}{0-1 indicator} \item{SE-SW}{0-1 indicator} \item{SW-NW}{0-1 indicator} } } \source{ Eva Cantoni (2004) Analysis of Robust Quasi-deviances for Generalized Linear Models. \emph{Journal of Statistical Software} \bold{10}, 04, \url{https://www.jstatsoft.org/article/view/v010i04} } \references{ Lindenmayer, D. B., Cunningham, R. B., Tanton, M. T., Nix, H. A. and Smith, A. P. (1991) The conservation of arboreal marsupials in the montane ash forests of the central highlands of victoria, south-east australia: III. The habitat requirements of leadbeater's possum \emph{gymnobelideus leadbeateri} and models of the diversity and abundance of arboreal marsupials. \emph{Biological Conservation} \bold{56}, 295--315. Lindenmayer, D. B., Cunningham, R. B., Tanton, M. T., Smith, A. P. and Nix, H. A. (1990) The conservation of arboreal marsupials in the montane ash forests of the victoria, south-east australia, I. Factors influencing the occupancy of trees with hollows, \emph{Biological Conservation} \bold{54}, 111--131. See also the references in \code{\link{glmrob}}. } \examples{ data(possumDiv) head(possum.mat) str(possumDiv) ## summarize all variables as multilevel factors: summary(as.data.frame(lapply(possumDiv, function(v) if(is.integer(v)) factor(v) else v))) ## Following Cantoni & Ronchetti (2001), JASA, p.1026 f.:% cf. ../tests/poisson-ex.R pdFit <- glmrob(Diversity ~ . , data = possumDiv, family=poisson, tcc = 1.6, weights.on.x = "hat", acc = 1e-15) summary(pdFit) summary(pdF2 <- update(pdFit, ~ . -Shrubs)) summary(pdF3 <- update(pdF2, ~ . -eucalyptus)) summary(pdF4 <- update(pdF3, ~ . -Stumps)) summary(pdF5 <- update(pdF4, ~ . -BAcacia)) summary(pdF6 <- update(pdF5, ~ . -aspect))# too much .. anova(pdFit, pdF3, pdF4, pdF5, pdF6, test = "QD") # indeed, ## indeed, the last simplification is too much possumD.2 <- within(possumDiv, levels(aspect)[1:3] <- rep("other", 3)) ## and use this binary 'aspect' instead of the 4-level one: summary(pdF5.1 <- update(pdF5, data = possumD.2)) if(FALSE) # not ok, as formually not nested. 
anova(pdF5, pdF5.1) summarizeRobWeights(weights(pdF5.1, type="rob"), eps = 0.73) ##-> "outliers" (1, 59, 110) wrob <- setNames(weights(pdF5.1, type="rob"), rownames(possumDiv)) head(sort(wrob)) } \keyword{datasets} robustbase/man/psiFunc.Rd0000644000176200001440000000521213310673404015075 0ustar liggesusers\name{psiFunc} \alias{psiFunc} \alias{huberPsi} \alias{hampelPsi} \title{Constructor for Objects "Psi Function" Class} %% Rd Problem: 'psi_func' cannot be used in title! \description{ \code{psiFunc(..)} is a convenience interface to \code{new("psi_func",..)}, i.e. for constructing objects of class \code{"psi_func"}. } \usage{ psiFunc(rho, psi, wgt, Dpsi,Dwgt, Erho = NULL, Epsi2 = NULL, EDpsi = NULL, name, ...) huberPsi hampelPsi } \arguments{ \item{rho, psi, wgt, Dpsi, Dwgt}{each a \code{\link{function}} of \code{x} and tuning parameters typically. Specification of Dwgt is optional.} \item{Erho, Epsi2, EDpsi}{see \code{\linkS4class{psi_func}}, and note that these may change in the future.} \item{name}{Name of \eqn{\psi}{psi}-function used for printing.} \item{\dots}{potential further arguments for specifying tuning parameter names and defaults.} %% FIXME; give more details } % \details{ % ~~ If necessary, more details than the description above ~~ % } % \value{ % ~Describe the value returned % If it is a LIST, use % \item{comp1 }{Description of 'comp1'} % \item{comp2 }{Description of 'comp2'} % ... % } %%\references{ ~put references to the literature/web site here ~ } \author{Martin Maechler} \seealso{The description of class \code{\linkS4class{psi_func}}. } \examples{ plot(huberPsi) # => shows "all" {as an object with a smart plot() method} ## classical (Gaussian / "least-squares") psi {trivial}: F1 <- function(x, .) rep.int(1, length(x)) FF <- function(.) rep.int(1, length(.)) cPsi <- psiFunc(rho = function(x,.) x^2 / 2, psi = function(x, .) x, wgt = F1, Dpsi = F1, Erho = function(.) rep.int(1/2, length(.)), Epsi2 = FF, EDpsi = FF, name = "classic", . = Inf) show(cPsi) plot(cPsi) ## is the same as the limit of Huber's: plot(chgDefaults(huberPsi, k = Inf)) ## Hampel's psi and rho: H.38 <- chgDefaults(hampelPsi, k = c(1.5, 3.5, 8)) k. <- H.38@xtras$tuningP$k ; k.. <- as.vector(outer(c(-1,1), k.)) c.t <- adjustcolor("skyblue3", .8) .ax.k <- function(side) { abline(h=0, v=0, lty=2) axis(side, at = k.., labels=formatC(k..), pos=0, col=c.t, col.axis=c.t) } op <- par(mfrow=c(2,1), mgp = c(1.5, .6, 0), mar = .6+c(2,2,1,.5)) curve(H.38@psi(x), -10, 10, col=2, lwd=2, n=512) lines(k.., H.38@psi(k..), type = "h", lty=3, col=c.t); .ax.k(1) curve(H.38@rho(x), -10, 10, col=2, lwd=2, n=512); abline(h=0, v=0, lty=2) lines(k.., H.38@rho(k..), type = "h", lty=3, col=c.t); .ax.k(1) title(expression("Hampel's " ~~~ psi(x) ~~ "and" ~~ rho(x) ~~~ " functions")) par(op) ## Not the same, but similar, directly using the plot() method: plot(H.38) } \keyword{classes} \keyword{robust} robustbase/man/cushny.Rd0000644000176200001440000000432013312375575015010 0ustar liggesusers\name{cushny} \encoding{utf8} \alias{cushny} \docType{data} \title{Cushny and Peebles Prolongation of Sleep Data} \description{ The original data set was bivariate and recorded for ten subjects the prolongation of sleep caused by two different drugs. These data were used by Student as the first illustration of the paired t-test which only needs the \emph{differences} of the two measurements. These differences are the values of \code{cushny}. 
} \usage{data(cushny, package="robustbase")} \format{ numeric vector, sorted increasingly:\cr 0 0.8 1 1.2 1.3 1.3 1.4 1.8 2.4 4.6 } \source{ Cushny, A.R. and Peebles, A.R. (1905) The action of optical isomers. II. Hyoscines. \emph{J. Physiol.} \bold{32}, 501--510. These data were used by Student(1908) as the first illustration of the paired t-test, see also \code{\link[datasets]{sleep}}; then cited by Fisher (1925) and thereforth copied in numerous books as an example of a normally distributed sample, see, e.g., Anderson (1958). } \references{ Student (1908) The probable error of a mean. \emph{Biometrika} \bold{6}, 1--25. Fisher, R.A. (1925) \emph{Statistical Methods for Research Workers}; Oliver & Boyd, Edinburgh. Anderson, T.W. (1958) \emph{An Introduction to Multivariate Statistical Analysis}; Wiley, N.Y. Hampel, F., Ronchetti, E., Rousseeuw, P. and Stahel, W. (1986) \emph{Robust Statistics: The Approach Based on Influence Functions}; Wiley, N.Y. } \examples{ data(cushny) plot(cushny, rep(0, 10), pch = 3, cex = 3, ylab = "", yaxt = "n") plot(jitter(cushny), rep(0, 10), pch = 3, cex = 2, main = "'cushny' data (n= 10)", ylab = "", yaxt = "n") abline(h=0, col="gray", lty=3) myPt <- function(m, lwd = 2, ..., e = 1.5*par("cxy")[2]) segments(m, +e, m, -e, lwd = lwd, ...) myPt( mean(cushny), col = "pink3") myPt(median(cushny), col = "light blue") legend("topright", c("mean", "median"), lwd = 2, col = c("pink3", "light blue"), inset = .01) ## The 'sleep' data from the standard 'datasets' package: d.sleep <- local({ gr <- with(datasets::sleep, split(extra, group)) gr[[2]] - gr[[1]] }) stopifnot(all.equal(cushny, sort(d.sleep), tolerance=1e-15)) } \keyword{datasets} robustbase/man/anova.glmrob.Rd0000644000176200001440000001032113713564014016053 0ustar liggesusers\name{anova.glmrob} \alias{anova.glmrob} \title{Analysis of Robust Quasi-Deviance for "glmrob" Objects} \description{ Compute an analysis of robust quasi-deviance table for one or more generalized linear models fitted by \code{\link{glmrob}}. } \usage{ \method{anova}{glmrob}(object, ..., test = c("Wald", "QD", "QDapprox")) } \arguments{ \item{object, \dots}{objects of class \code{glmrob}, typically the result of a call to \code{\link{glmrob}}.} \item{test}{a character string specifying the test statistic to be used. (Partially) matching one of \code{"Wald"}, \code{"QD"} or \code{"QDapprox"}. See Details.} } \details{ Specifying a single object gives a sequential analysis of robust quasi-deviance table for that fit. That is, the reductions in the robust residual quasi-deviance as each term of the formula is added in turn are given in as the rows of a table. \emph{(Currently not yet implemented.)} If more than one object is specified, the table has a row for the residual quasi-degrees of freedom (However, this information is never used in the asymptotic tests). For all but the first model, the change in degrees of freedom and robust quasi-deviance is also given. (This only makes statistical sense if the models are nested.) It is conventional to list the models from smallest to largest, but this is up to the user. In addition, the table will contain test statistics and P values comparing the reduction in robust quasi-deviance for the model on the row to that on top of it. For all robust fitting methods, the \dQuote{Wald}-type test between two models can be applied (\code{test = "Wald"}). When using Mallows or Huber type robust estimators (\code{method="Mqle"} in \code{\link{glmrob}}), then there are additional test methods. 
One is the robust quasi-deviance test (\code{test = "QD"}), as described by Cantoni and Ronchetti (2001). The asymptotic distribution is approximated by a chi-square distibution. Another test (\code{test = "QDapprox"}) is based on a quadratic approximation of the robust quasi-deviance test statistic. Its asymptotic distribution is chi-square (see the reference). The comparison between two or more models by \code{anova.glmrob} will only be valid if they are fitted to the same dataset and by the same robust fitting method using the same tuning constant \eqn{c} (\code{tcc} in \code{\link{glmrob}}). } \value{ Basically, an object of class \code{\link{anova}} inheriting from class \code{\link{data.frame}}. } \references{ E. Cantoni and E. Ronchetti (2001) Robust Inference for Generalized Linear Models. \emph{JASA} \bold{96} (455), 1022--1030. E.Cantoni (2004) Analysis of Robust Quasi-deviances for Generalized Linear Models. \emph{Journal of Statistical Software} \bold{10}, \url{https://www.jstatsoft.org/article/view/v010i04} } \author{ Andreas Ruckstuhl } \seealso{ \code{\link{glmrob}}, \code{\link{anova}}. %% %% \code{\link{drop1}} for %% so-called \sQuote{type II} anova where each term is dropped one at a %% time respecting their hierarchy. } \examples{ ## Binomial response ----------- data(carrots) Cfit2 <- glmrob(cbind(success, total-success) ~ logdose + block, family=binomial, data=carrots, method="Mqle", control=glmrobMqle.control(tcc=1.2)) summary(Cfit2) Cfit4 <- glmrob(cbind(success, total-success) ~ logdose * block, family=binomial, data=carrots, method="Mqle", control=glmrobMqle.control(tcc=1.2)) anova(Cfit2, Cfit4, test="Wald") anova(Cfit2, Cfit4, test="QD") anova(Cfit2, Cfit4, test="QDapprox") ## Poisson response ------------ data(epilepsy) Efit2 <- glmrob(Ysum ~ Age10 + Base4*Trt, family=poisson, data=epilepsy, method="Mqle", control=glmrobMqle.control(tcc=1.2,maxit=100)) summary(Efit2) Efit3 <- glmrob(Ysum ~ Age10 + Base4 + Trt, family=poisson, data=epilepsy, method="Mqle", control=glmrobMqle.control(tcc=1.2,maxit=100)) anova(Efit3, Efit2, test = "Wald") anova(Efit3, Efit2, test = "QD") ## trivial intercept-only-model: E0 <- update(Efit3, . ~ 1) anova(E0, Efit3, Efit2, test = "QDapprox") %% failed in robustbase <= 2013-11-27 } \keyword{robust} \keyword{models} \keyword{regression} robustbase/man/glmrob.control.Rd0000644000176200001440000000312612150222661016425 0ustar liggesusers\name{glmrob..control} \title{Controlling Robust GLM Fitting by Different Methods} \alias{glmrobMqle.control} \alias{glmrobMT.control} \alias{glmrobBY.control} \description{ These are auxiliary functions as user interface for \code{\link{glmrob}} fitting when the different methods, \code{"Mqle"}, \code{"BY"}, or \code{"MT"} are used. Typically only used when calling \code{\link{glmrob}}. } \usage{ glmrobMqle.control(acc = 1e-04, test.acc = "coef", maxit = 50, tcc = 1.345) glmrobBY.control (maxit = 1000, const = 0.5, maxhalf = 10) glmrobMT.control (cw = 2.1, nsubm = 500, acc = 1e-06, maxit = 200) } \arguments{ \item{acc}{positive convergence tolerance; the iterations converge when ???} \item{test.acc}{Only "coef" is currently implemented} \item{maxit}{integer giving the maximum number of iterations. } \item{tcc}{tuning constant c for Huber's psi-function} \item{const}{for "BY", the normalizing constant ..}% FIXME \item{maxhalf}{for "BY"; the number of halving steps when the gradient itself no longer improves. 
We have seen examples when increasing \code{maxhalf} was of relevance.} \item{cw}{tuning constant c for Tukey's biweight psi-function} \item{nsubm}{the number of subsamples to take for finding an initial estimate for \code{method = "MT"}.} } %% \details{ %% } \value{ A \code{\link{list}} with the arguments as components. } \author{Andreas Ruckstuhl and Martin Maechler} \seealso{\code{\link{glmrob}}} \examples{ str(glmrobMqle.control()) str(glmrobBY.control()) str(glmrobMT.control()) } \keyword{robust} \keyword{regression} \keyword{nonlinear} robustbase/man/foodstamp.Rd0000644000176200001440000000455113710054521015464 0ustar liggesusers\name{foodstamp} \title{Food Stamp Program Participation} \alias{foodstamp} \docType{data} \encoding{utf8} \description{ This data consists of 150 randomly selected persons from a survey with information on over 2000 elderly US citizens, where the response, indicates participation in the U.S. Food Stamp Program. } \usage{data(foodstamp, package="robustbase")} \format{ A data frame with 150 observations on the following 4 variables. \describe{ \item{\code{participation}}{participation in U.S. Food Stamp Program; yes = 1, no = 0} \item{\code{tenancy}}{tenancy, indicating home ownership; yes = 1, no = 0} \item{\code{suppl.income}}{supplemental income, indicating whether some form of supplemental security income is received; yes = 1, no = 0} \item{\code{income}}{monthly income (in US dollars)} } } \source{ Data description and first analysis: Stefanski et al.(1986) who indicate Rizek(1978) as original source of the larger study. Electronic version from CRAN package \CRANpkg{catdata}. % which wrongly labeled 'income' (='INC') as "log(1 + income)" } \references{ Rizek, R. L. (1978) The 1977-78 Nationwide Food Consumption Survey. \emph{Family Econ. Rev.}, Fall, 3--7. %% MM ~/save/papers/robust-GLM/Stefanski_etal-Biometrika-1986.pdf : Stefanski, L. A., Carroll, R. J. and Ruppert, D. (1986) Optimally bounded score functions for generalized linear models with applications to logistic regression. \emph{Biometrika} \bold{73}, 413--424. Künsch, H. R., Stefanski, L. A., Carroll, R. J. (1989) Conditionally unbiased bounded-influence estimation in general regression models, with applications to generalized linear models. \emph{J. American Statistical Association} \bold{84}, 460--466. } \examples{ data(foodstamp) (T123 <- xtabs(~ participation+ tenancy+ suppl.income, data=foodstamp)) summary(T123) ## ==> the binary var's are clearly not independent foodSt <- within(foodstamp, { logInc <- log(1 + income) rm(income) }) m1 <- glm(participation ~ ., family=binomial, data=foodSt) summary(m1) rm1 <- glmrob(participation ~ ., family=binomial, data=foodSt) summary(rm1) ## Now use robust weights.on.x : rm2 <- glmrob(participation ~ ., family=binomial, data=foodSt, weights.on.x = "robCov") summary(rm2)## aha, now the weights are different: which( weights(rm2, type="robust") < 0.5) } \keyword{datasets} robustbase/man/print.lmrob.Rd0000644000176200001440000000135411721663343015744 0ustar liggesusers\name{print.lmrob} \alias{print.lmrob} \title{Print Method for Objects of Class "lmrob"} \description{ Print method for elements of class \code{"lmrob"}. 
}
\usage{ \method{print}{lmrob}(x, digits = max(3, getOption("digits") - 3), \dots) }
\arguments{
 \item{x}{an \R object of class \code{lmrob}, typically created by \code{\link{lmrob}}.}
 \item{digits}{number of digits for printing, see \code{digits} in \code{\link{options}}.}
 \item{\dots}{potentially more arguments passed to methods.}
}
\seealso{\code{\link{lmrob}}, \code{\link{summary.lmrob}}, \code{\link{print}} and \code{\link{summary}}. }
\examples{
data(coleman)
( m1 <- lmrob(Y ~ ., data=coleman) ) # -> print.lmrob() method
}
\keyword{robust}
\keyword{regression}
robustbase/man/lmrob..D..fit.Rd0000644000176200001440000000361114430460510015675 0ustar liggesusers\name{lmrob..D..fit}
\alias{lmrob..D..fit}
\title{Compute Design Adaptive Scale estimate}
\description{This function calculates a Design Adaptive Scale estimate for a given MM-estimate. This is supposed to be a part of a chain of estimates like \code{SMD} or \code{SMDM}.
}
\usage{
lmrob..D..fit(obj, x=obj$x, control = obj$control, mf, method = obj$control$method)
}
\arguments{
 \item{obj}{\code{lmrob}-object based on which the estimate is to be calculated.}
 \item{x}{the design matrix; if \code{\link{missing}}, the method tries to get it from \code{obj$x} and if this fails from \code{obj$model}.}
 \item{control}{list of control parameters, as returned by \code{\link{lmrob.control}}.}
 \item{mf}{defunct.}
 \item{method}{optional; the \code{method} used for \emph{obj} computation.}
}
\details{
 This function is used by \code{\link{lmrob.fit}} and typically not to be used on its own. Note that \code{lmrob.fit()} specifies \code{control} potentially differently than the default, but does use the default for \code{method}.
}
\value{The given \code{lmrob}-object with the following elements updated:
 \item{scale}{The Design Adaptive Scale estimate}
 \item{converged}{ \code{TRUE} if the scale calculation converged, \code{FALSE} otherwise.}
}
\references{
 Koller, M. and Stahel, W.A. (2011), Sharpening Wald-type inference in robust regression for small samples, \emph{Computational Statistics & Data Analysis} \bold{55}(8), 2504--2515.
}
\seealso{ \code{\link{lmrob.fit}}, \code{\link{lmrob}} }
\examples{
data(stackloss)
## Compute manual SMD-estimate:
## 1) MM-estimate
m1 <- lmrob(stack.loss ~ ., data = stackloss)
## 2) Add Design Adaptive Scale estimate
m2 <- lmrob..D..fit(m1)
print(c(m1$scale, m2$scale))
summary(m1)
summary(m2) ## the covariance matrix estimate is also updated
}
\author{Manuel Koller}
\keyword{robust}
\keyword{regression}
robustbase/man/education.Rd0000644000176200001440000000255413312375575015455 0ustar liggesusers\name{education}
\alias{education}
\docType{data}
\title{Education Expenditure Data}
\description{
 Education Expenditure Data, from Chatterjee and Price (1977, p.108). This data set represents the education expenditure variables in the 50 US states and provides an interesting example of heteroscedasticity.
}
\usage{data(education, package="robustbase")}
\format{
 A data frame with 50 observations on the following 6 variables.
 \describe{
 \item{\code{State}}{State}
 \item{\code{Region}}{Region (1=Northeastern, 2=North central, 3=Southern, 4=Western)}
 \item{\code{X1}}{Number of residents per thousand residing in urban areas in 1970}
 \item{\code{X2}}{Per capita personal income in 1973}
 \item{\code{X3}}{Number of residents per thousand under 18 years of age in 1974}
 \item{\code{Y}}{Per capita expenditure on public education in a state, projected for 1975}
 }
}
\source{ P. J. Rousseeuw and A. M.
Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.110, table 16. } \examples{ data(education) education.x <- data.matrix(education[, 3:5]) %%covMcd(education.x) %%______ FIXME ___ X1:X3 is not what you mean !! ________ summary(lm.education <- lm(Y ~ Region + X1+X2+X3, data=education)) %%summary(lts.education <- ltsReg(Y .....) ## See example(lmrob.M.S) # for how robust regression is used } \keyword{datasets} robustbase/man/BYlogreg.Rd0000644000176200001440000001121614430460426015203 0ustar liggesusers\name{BYlogreg} \alias{BYlogreg} \title{Bianco-Yohai Estimator for Robust Logistic Regression} \encoding{utf8} \description{ Computation of the estimator of Bianco and Yohai (1996) in logistic regression. Now provides both the \emph{weighted} and regular (unweighted) BY-estimator. By default, an intercept term is included and p parameters are estimated. For more details, see the reference. Note: This function is for \dQuote{back-compatibility} with the \code{BYlogreg()} code web-published at KU Leuven, Belgium, % moved to ? -- but NA (404) on 2017-09-26: % --> ../R/BYlogreg.R % \url{http://feb.kuleuven.be/public/NDBAE06/programs/roblog/}; % now, findable at % http://feb.kuleuven.be/public/u0017833/software_donotuse/logreg/BYlogreg.txt % but rather use Wiley's book resources and also available as file \file{FunctionsRob/BYlogreg.ssc} from \url{https://www.wiley.com/legacy/wileychi/robust_statistics/robust.html}. However instead of using this function, the recommended interface is \code{\link{glmrob}(*, method = "BY")} or \code{... method = "WBY" ..}, see \code{\link{glmrob}}. } \usage{ BYlogreg(x0, y, initwml = TRUE, addIntercept = TRUE, const = 0.5, kmax = 1000, maxhalf = 10, sigma.min = 1e-4, trace.lev = 0) } \arguments{ \item{x0}{a numeric \eqn{n \times (p-1)}{n * (p-1)} matrix containing the explanatory variables.} \item{y}{numeric \eqn{n}-vector of binomial (0 - 1) responses.} \item{initwml}{logical for selecting one of the two possible methods for computing the initial value of the optimization process. If \code{initwml} is true (default), a weighted ML estimator is computed with weights derived from the MCD estimator computed on the explanatory variables. If \code{initwml} is false, a classical ML fit is perfomed. When the explanatory variables contain binary observations, it is recommended to set initwml to FALSE or to modify the code of the algorithm to compute the weights only on the continuous variables. 
} \item{addIntercept}{logical indicating that a column of \code{1} must be added the \eqn{x} matrix.} \item{const}{tuning constant used in the computation of the estimator (default=0.5).} \item{kmax}{maximum number of iterations before convergence (default=1000).} \item{maxhalf}{max number of step-halving (default=10).} \item{sigma.min}{smallest value of the scale parameter before implosion (and hence non-convergence) is assumed.} \item{trace.lev}{logical (or integer) indicating if intermediate results should be printed; defaults to \code{0} (the same as \code{FALSE}).} } %% \details{ %% If necessary, more details than the description above %% } \value{ a list with components \item{convergence}{logical indicating if convergence was achieved} \item{objective}{the value of the objective function at the minimum} \item{coefficients}{vector of parameter estimates} \item{vcov}{variance-covariance matrix of the coefficients (if convergence is TRUE).} \item{sterror}{standard errors, i.e., simply \code{sqrt(diag(.$vcov))}, if convergence.} } \references{ Croux, C., and Haesbroeck, G. (2003) Implementing the Bianco and Yohai estimator for Logistic Regression, \emph{Computational Statistics and Data Analysis} \bold{44}, 273--295. Ana M. Bianco and Víctor J. Yohai (1996) Robust estimation in the logistic regression model. In Helmut Rieder, \emph{Robust Statistics, Data Analysis, and Computer Intensive Methods}, Lecture Notes in Statistics \bold{109}, pages 17--34. } \author{ Originally, Christophe Croux and Gentiane Haesbroeck, with thanks to Kristel Joossens and Valentin Todorov for improvements. Speedup, tweaks, more \dQuote{control} arguments: Martin Maechler. } \seealso{ The more typical way to compute BY-estimates (via \code{\link{formula}} and methods): \code{\link{glmrob}(*, method = "WBY")} and \code{.. method = "BY"}. } \examples{ set.seed(17) x0 <- matrix(rnorm(100,1)) y <- rbinom(100, size=1, prob= 0.5) # ~= as.numeric(runif(100) > 0.5) BY <- BYlogreg(x0,y) BY <- BYlogreg(x0,y, trace.lev=TRUE) ## The "Vaso Constriction" aka "skin" data: data(vaso) vX <- model.matrix( ~ log(Volume) + log(Rate), data=vaso) vY <- vaso[,"Y"] head(cbind(vX, vY))# 'X' does include the intercept vWBY <- BYlogreg(x0 = vX, y = vY, addIntercept=FALSE) # as 'vX' has it already v.BY <- BYlogreg(x0 = vX, y = vY, addIntercept=FALSE, initwml=FALSE) ## they are relatively close, well used to be closer than now, ## with the (2023-05, VT) change of covMcd() scale-correction stopifnot( all.equal(vWBY, v.BY, tolerance = 0.008) ) # was ~ 1e-4 till 2023-05 } \keyword{robust} \keyword{regression} \keyword{nonlinear} robustbase/man/plot-methods.Rd0000644000176200001440000000360012140441054016076 0ustar liggesusers\name{plot-methods} \docType{methods} \alias{plot-methods} \alias{plot,psi_func-method} \title{Plot an Object of the "Psi Function" Class} \description{ The \code{\link{plot}} method objects of class \code{\linkS4class{psi_func}} simply visualizes the \eqn{\rho()}, \eqn{\psi()}, and weight functions and their derivatives. } \usage{ \S4method{plot}{psi_func}(x, y, which = c("rho", "psi", "Dpsi", "wgt", "Dwgt"), main = "full", % shortMain = FALSE, col = c("black", "red3", "blue3", "dark green", "light green"), leg.loc = "right", ...) 
}
\arguments{
 \item{x}{object of class \code{\linkS4class{psi_func}} to be plotted}
 \item{y}{(optional) vector of abscissa values (to plot object at).}
 \item{which}{\code{\link{character}} vector of slots to be included in plot; by default, all of the slots are included}
 \item{main}{string or logical indicating the kind of plot title; either \code{"full"}, \code{"short"} or \code{FALSE} which chooses a full, a short or no main title at all.}
 %% \item{shortMain}{use short or long plot title}%-------- deprecated!
 \item{col}{colors to be used for the different slots}
 \item{leg.loc}{legend placement, see also \code{x} argument of \code{\link{legend}}}
 \item{...}{passed to \code{\link{matplot}}}
}
\note{
 An earlier version had argument \code{shortMain} which is deprecated now. Use \code{main = "short"} instead of \code{shortMain = TRUE}. If you want to specify your own title, use \code{main=FALSE}, and a subsequent \code{\link{title}(...)} call.
}
\seealso{ \code{\link{psiFunc}()} and the \code{\link{class}} \code{\linkS4class{psi_func}}. }
\examples{
plot(huberPsi)
plot(huberPsi, which=c("psi", "Dpsi", "wgt"), main="short", leg = "topleft")
plot(hampelPsi)
## Plotting aspect ratio = 1:1 :
plot(hampelPsi, asp=1, main="short", which = c("psi", "Dpsi", "wgt", "Dwgt"))
}
\keyword{methods}
robustbase/man/h.alpha.n.Rd0000644000176200001440000000251510614156473015246 0ustar liggesusers\name{h.alpha.n}
\alias{h.alpha.n}
\title{Compute h, the subsample size for MCD and LTS}
\description{
 Compute h(alpha) which is the size of the subsamples to be used for MCD and LTS. Given \eqn{\alpha = alpha}{alpha}, \eqn{n} and \eqn{p}, \eqn{h} is an \emph{integer}, \eqn{h \approx \alpha n}{h ~= alpha*n}, where the exact formula also depends on \eqn{p}. For \eqn{\alpha = 1/2}, \code{h == floor((n+p+1)/2)}; for the general case, it's simply \code{n2 <- (n+p+1) \%/\% 2; floor(2*n2 - n + 2*(n-n2)*alpha)}.
}
\usage{
h.alpha.n(alpha, n, p)
}
\arguments{
 \item{alpha}{fraction, numeric (vector) in [0.5, 1], see, e.g., \code{\link{covMcd}}.}
 \item{n}{integer (valued vector), the sample size.}
 \item{p}{integer (valued vector), the dimension.}
}
\value{
 numeric vector of \eqn{h(\alpha, n,p)}; when any of the arguments is of length greater than one, the usual \R arithmetic (recycling) rules are used.
}
\seealso{\code{\link{covMcd}} and \code{\link{ltsReg}} which are \emph{defined} by \eqn{h = h(\alpha,n,p)} and hence both use \code{h.alpha.n}. }
\examples{
n <- c(10:20,50,100)
p <- 5
## show the simple "alpha = 1/2" case:
cbind(n=n, h= h.alpha.n(1/2, n, p), n2p = floor((n+p+1)/2))
## alpha = 3/4 is recommended by some authors :
n <- c(15, 20, 25, 30, 50, 100)
cbind(n=n, h= h.alpha.n(3/4, n, p = 6))
}
\keyword{arith}
robustbase/man/lmrob.S.Rd0000644000176200001440000000760714432641147015015 0ustar liggesusers\name{lmrob.S}
\alias{lmrob.S}
\title{ S-regression estimators }
\description{
 Computes an S-estimator for linear regression, using the \dQuote{fast S} algorithm.% of Matias Salibian & Victor Yohai ...
} \usage{ lmrob.S(x, y, control, trace.lev = control$trace.lev, only.scale = FALSE, mf) } \arguments{ \item{x}{design matrix (\eqn{n \times p}{n * p})} \item{y}{numeric vector of responses (or residuals for \code{only.scale=TRUE}).} \item{control}{ list as returned by \code{\link{lmrob.control}}; the following components are used for \code{lmrob.S()}: \code{"trace.lev"}, % as default for direct arg {trace.lev}, see below \code{"nResample"}, \code{"groups"}, \code{"n.group"}, \code{"fast.s.large.n"}, \code{"seed"}, \code{"bb"}, \code{"psi"}, \code{"tuning.chi"},% c.chi \code{"best.r.s"}, \code{"k.fast.s"}, \code{"k.max"}, \code{"maxit.scale"}, \code{"refine.tol"}, \code{"solve.tol"}, \code{"scale.tol"}, \code{"mts"}, \code{"subsampling"}. } \item{trace.lev}{integer indicating if the progress of the algorithm should be traced (increasingly); default \code{trace.lev = 0} does no tracing.} \item{only.scale}{\code{\link{logical}} indicating if only the scale of \code{y} should be computed. In this case, \code{y} will typically contain \emph{residuals}.}%% FIXME: explain %% namely, s, fulfilling the \sum_i chi(i) = 1/2 equation. \item{mf}{defunct.} } \details{ This function is used by \code{\link{lmrob.fit}} and typically not to be used on its own (because an S-estimator has too low efficiency \sQuote{on its own}). By default, the subsampling algorithm uses a customized LU decomposition which ensures a non singular subsample (if this is at all possible). This makes the Fast-S algorithm also feasible for categorical and mixed continuous-categorical data. One can revert to the old subsampling scheme by setting the parameter \code{subsampling} in \code{control} to \code{"simple"}. } \value{ By default (when \code{only.scale} is false), a list with components \item{coefficients}{numeric vector (length \eqn{p}) of S-regression coefficient estimates.} \item{scale}{the S-scale residual estimate}% 'residual estimate' ?? % resid. VAR !? % \item{cov}{covariance matrix (\eqn{p \times p}{p x p}) of the % coefficient estimates.} \item{fitted.values}{numeric vector (length \eqn{n}) of the fitted values.} \item{residuals}{numeric vector (length \eqn{n}) of the residuals.} \item{rweights}{numeric vector (length \eqn{n}) of the robustness weights.} \item{k.iter}{(maximal) number of refinement iterations used.} \item{converged}{logical indicating if \bold{all} refinement iterations had converged.} \item{control}{the same list as the \code{control} argument.} If \code{only.scale} is true, the computed scale (a number) is returned. } \seealso{\code{\link{lmrob}}, also for references. } \author{ Matias Salibian-Barrera and Manuel Koller; Martin Maechler for minor new options and more documentation. } \examples{ set.seed(33) x1 <- sort(rnorm(30)); x2 <- sort(rnorm(30)); x3 <- sort(rnorm(30)) X. <- cbind(x1, x2, x3) y <- 10 + X. \%*\% (10*(2:4)) + rnorm(30)/10 y[1] <- 500 # a moderate outlier X.[2,1] <- 20 # an X outlier X1 <- cbind(1, X.) 
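## A quick look at the lmrob.control() entries that lmrob.S() actually uses
## (see the 'control' argument above) -- a sketch only, not part of the fit:
str(lmrob.control()[c("nResample", "bb", "psi", "tuning.chi",
                      "refine.tol", "k.max", "maxit.scale")])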
(m.lm <- lm(y ~ X.)) set.seed(12) m.lmS <- lmrob.S(x=X1, y=y, control = lmrob.control(nRes = 20), trace.lev=1) m.lmS[c("coefficients","scale")] all.equal(unname(m.lmS$coef), 10 * (1:4), tolerance = 0.005) stopifnot(all.equal(unname(m.lmS$coef), 10 * (1:4), tolerance = 0.005), all.equal(m.lmS$scale, 1/10, tolerance = 0.09)) ## only.scale = TRUE: Compute the S scale, given residuals; s.lmS <- lmrob.S(X1, y=residuals(m.lmS), only.scale = TRUE, control = lmrob.control(trace.lev = 3)) all.equal(s.lmS, m.lmS$scale) # close: 1.89e-6 [64b Lnx] } \keyword{robust} \keyword{regression} robustbase/man/los.Rd0000644000176200001440000000302513312375575014275 0ustar liggesusers\name{los} \alias{los} \docType{data} \title{ Length of Stay Data } \description{ Length of stay for 201 patients that stayed at the University Hospital of Lausanne during the year 2000. } \usage{data(los, package="robustbase")} \format{ Vector of integer values giving the length of stay (days): int [1:201] 16 13 17 4 15 24 59 18 33 8 ... } \details{ These data may be used to estimate and predict the total resource consumption of this group of patients. Cf. Ruffieux, Paccaud and Marazzi (2000). } \source{ The data were kindly provided by A. Marazzi. Cf. Hubert, M. and Vandervieren, E. (2006), p. 13--15. } \references{ Ruffieux, C., Paccaud, F. and A. Marazzi (2000) Comparing rules for truncating hospital length of stay; \emph{Casemix Quarterly} \bold{2}, n. 1. See also those for \code{\link{adjbox}}. } \examples{ summary(los) # quite skewed, with median(.) = 8 plot(table(los)) boxplot(los, horizontal=TRUE, add=TRUE, col = "red", axes=FALSE) ##-> "outliers" instead of "just skewed" hist(log(los)) boxplot(log(los), add=TRUE, col=2, border=2, horizontal = TRUE, at = -1) ## Hubert and Vandervieren (2006), p. 15, Fig. 11. adjbox(los, col = "gray", staplecol="red", outcol = "red", main = "(Skewness-)Adjusted and original boxplot for 'los' data") boxplot(los, add = TRUE, staplewex= 0.2, outcex= 0.5, outpch= 4, staplecol = "blue", outcol = "blue", staplelwd=2) legend("topright", c("adjbox(los)", "boxplot(los)"), col=c("red","blue"), lwd = 1:2, bty="n") } \keyword{datasets} robustbase/man/telef.Rd0000644000176200001440000000135513312375575014603 0ustar liggesusers\name{telef} \alias{telef} \docType{data} \title{Number of International Calls from Belgium} \description{ Number of international calls from Belgium, taken from the Belgian Statistical Survey, published by the Ministry of Economy. } \usage{data(telef, package="robustbase")} \format{ A data frame with 24 observations on the following 2 variables. \describe{ \item{\code{Calls}}{Number of Calls (in tens of millions) } \item{\code{Year}}{Year (1950 - 1973)} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, page 26, table 2. } \examples{ data(telef) summary(lm.telef <- lm(Year~., data=telef)) %%summary(lts.telef <- ltsReg(Year~., data=telef)) } \keyword{datasets} robustbase/man/xtrData.Rd0000644000176200001440000000647514221264626015115 0ustar liggesusers\name{xtrData} \alias{x30o50} \docType{data} \title{Extreme Data examples} \description{ \code{x30o50}, called \sQuote{'XX'} in the thesis, has been a running case for which \code{mc()} had failed to converge. A numeric vector of 50 values, 30 of which are very close to zero, specifically, their absolute values are less than \code{1.5e-15}. The remaining 20 values (11 negative, 9 positive) have absolute values between 0.0022 and 1.66. 
} \usage{ data(x30o50, package="robustbase") } \format{ A summary is \preformatted{ Min. 1st Qu. Median Mean 3rd Qu. Max. -1.66006 0.00000 0.00000 -0.04155 0.00000 1.29768 } notably the 1st to 3rd quartiles are all very close to zero. } \details{ a good robust method will treat the 60\% \dQuote{almost zero} values as \dQuote{good} data and all other as outliers. This is somewhat counter intuitive to typical human perception where the 30 almost-zero numbers would be considered as inliers and the remaining 20 as \dQuote{good} data. The original \code{mc()} algorithm and also the amendments up to 2022 (\pkg{robustbase} versions before 0.95) would fail to converge unless (in newer versions) \code{eps1} was increased, e.g., only by a factor of 10, to \code{eps1 = 1e-13}. } \references{ Lukas Graz (2021); unpublished BSc thesis, see \code{\link{mc}}. } \examples{ data(x30o50) ## have 4 duplicated values : table(dX <- duplicated(x30o50)) x30o50[dX] # 0 2.77e-17 4.16e-17 2.08e-16 sort(x30o50[dX]) * 2^56 # 0 2 3 15 ## and they are c(0,2,3,15)*2^-56 table(sml <- abs(x30o50) < 1e-11)# 20 30 summary(x30o50[ sml]) # -1.082e-15 ... 1.499e-15 ; mean = 9.2e-19 ~~ 0 summary(x30o50[!sml]) ## Min. 1st Qu. Median Mean 3rd Qu. Max. ## -1.6601 -0.4689 -0.0550 -0.1039 0.3986 1.2977 op <- par(mfrow=c(3,1), mgp=c(1.5, .6, 0), mar = .3+c(2,3:1)) (Fn. <- ecdf(x30o50)) # <- only 46 knots (as have 4 duplications) plot(Fn.) ## and zoom in (*drastically*) to around x=0 : for(f in c(1e-13, 1.5e-15)) { plot(Fn., xval=f*seq(-1,1, length.out = 1001), ylim=c(0,1), main="[zoomed in]") if(f == 1e-13) rect(-1e-15,0, +1e-15, 1, col="thistle", border=1) plot(Fn., add=TRUE) } par(op) mcOld <- function(x, ..., doScale=TRUE) mc(x, doScale=doScale, c.huberize=Inf, ...) try( mcOld(x30o50) ) # Error: .. not 'converged' in 100 iteration mcOld(x30o50, eps1 = 1e-12) # -0.152 (mcX <- mc(x30o50)) # -7.10849e-13 stopifnot(exprs = { all.equal(-7.10848988e-13, mcX, tol = 1e-9) all.equal(mcX, mc(1e30*x30o50), tol = 4e-4) # not so close }) table(sml <- abs(x30o50) < 1e-8)# 20 30 range(x30o50[sml]) x0o50 <- x30o50; x0o50[sml] <- 0 (mcX0 <- mc(x0o50)) stopifnot(exprs = { all.equal(-0.378445401788, mcX0, tol=1e-12) all.equal(-0.099275805349, mc(x30o50[!sml]) -> mcL, tol=2e-11) all.equal(mcL, mcOld(x30o50[!sml])) }) ## -- some instability also wrt c.huberize: mcHubc <- function(dat, ...) function(cc) vapply(cc, function(c) mc(dat, c.huberize = c, ...), -1.) mcH50 <- mcHubc(x30o50) head(cHs <- c(sort(outer(c(1, 2, 5), 10^(2:15))), Inf), 9) mcXc <- mcH50(cHs) plot( mcXc ~ cHs, type="b", log="x" , xlab=quote(c[huberize])) plot((-mcXc) ~ cHs, type="b", log="xy", xlab=quote(c[huberize])) ## but for "regular" outlier skew data, there's no such dependency: mcXcu <- mcHubc(cushny)(cHs) stopifnot( abs(mcXcu - mcXcu[1]) < 1e-15) } \keyword{datasets} robustbase/man/cloud.Rd0000644000176200001440000000142513312375575014610 0ustar liggesusers\name{cloud} \alias{cloud} \docType{data} \title{Cloud point of a Liquid} \description{ This data set contains the measurements concerning the cloud point of a Liquid, from Draper and Smith (1969). The cloud point is a measure of the degree of crystallization in a stock. } \usage{data(cloud, package="robustbase")} \format{ A data frame with 19 observations on the following 2 variables. \describe{ \item{\code{Percentage}}{Percentage of I-8} \item{\code{CloudPoint}}{Cloud point} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.96, table 10. 
} \examples{ data(cloud) summary(lm.cloud <- lm(CloudPoint ~., data=cloud)) %% summary(lts.cloud <- ltsReg(CloudPoint ~., data=cloud)) } \keyword{datasets} robustbase/man/aircraft.Rd0000644000176200001440000000202614410263765015270 0ustar liggesusers\name{aircraft} \alias{aircraft} \docType{data} \title{Aircraft Data} \description{ Aircraft Data, deals with 23 single-engine aircraft built over the years 1947-1979, from Office of Naval Research. The dependent variable is cost (in units of $100,000) and the explanatory variables are aspect ratio, lift-to-drag ratio, weight of plane (in pounds) and maximal thrust. } \usage{data(aircraft, package="robustbase")} \format{ A data frame with 23 observations on the following 5 variables. \describe{ \item{\code{X1}}{Aspect Ratio} \item{\code{X2}}{Lift-to-Drag Ratio} \item{\code{X3}}{Weight} \item{\code{X4}}{Thrust} \item{\code{Y}}{Cost} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, page 154, table 22. } \examples{ data(aircraft) summary( lm.airc <- lm(Y ~ ., data = aircraft)) summary(rlm.airc <- MASS::rlm(Y ~ ., data = aircraft)) aircraft.x <- data.matrix(aircraft[,1:4]) c_air <- covMcd(aircraft.x) c_air } \keyword{datasets} robustbase/man/mc.Rd0000644000176200001440000001411514221264626014073 0ustar liggesusers\name{mc} \alias{mc} \title{Medcouple, a Robust Measure of Skewness} \description{ Compute the \sQuote{medcouple}, a \emph{robust} concept and estimator of skewness. The medcouple is defined as a scaled median difference of the left and right half of distribution, and hence \emph{not} based on the third moment as the classical skewness. } \usage{ mc(x, na.rm = FALSE, doReflect = (length(x) <= 100), doScale = FALSE, # was hardwired=TRUE, then default=TRUE c.huberize = 1e11, # was implicitly = Inf originally eps1 = 1e-14, eps2 = 1e-15, # << new in 0.93-2 (2018-07..) maxit = 100, trace.lev = 0, full.result = FALSE) } \arguments{ \item{x}{a numeric vector} \item{na.rm}{logical indicating how missing values (\code{\link{NA}}s) should be dealt with.} \item{doReflect}{logical indicating if the internal MC should also be computed on the \emph{reflected} sample \code{-x}, with final result \code{(mc.(x) - mc.(-x))/2}. This makes sense since the internal MC, \code{mc.()} computes the himedian() which can differ slightly from the median.}%% only whenever sum(x <= med) * sum(x >= med) is even \item{doScale}{logical indicating if the internal algorithm should also \emph{scale} the data (using the most distant value from the median which is unrobust and numerically dangerous); scaling has been hardwired in the original algorithm and \R's \code{mc()} till summer 2018, where it became the default. Since \pkg{robustbase} version 0.95-0, March 2022, the default is \code{FALSE}. As this may change the result, a message is printed about the new default, once per \R session. You can suppress the message by specifying \code{doScale = *} explicitly, or, by setting \code{\link{options}(mc_doScale_quiet=TRUE)}.} \item{c.huberize}{a positive number (default: \code{1e11}) used to stabilize the sample via \code{x <- \link{huberize}(x, c = c.huberize)} for the \code{mc()} computations in the case of a nearly degenerate sample (many observations practically equal to the median) or very extreme outliers. 
In previous versions of \pkg{robustbase} no such huberization was applied which is equivalent to \code{c.huberize = Inf}.} \item{eps1, eps2}{tolerance in the algorithm; \code{eps1} is used as a for convergence tolerance, where \code{eps2} is only used in the internal \code{h_kern()} function to prevent underflow to zero, so could be considerably smaller. The original code implicitly \emph{hard coded} in C \code{eps1 := eps2 := 1e-13}; only change with care!} \item{maxit}{maximal number of iterations; typically a few should be sufficient.} \item{trace.lev}{integer specifying how much diagnostic output the algorithm (in C) should produce. No output by default, most output for \code{trace.lev = 5}.} \item{full.result}{logical indicating if the full return values (from C) should be returned as a list via \code{attr(*, "mcComp")}.} } % \details{ % ~~ If necessary, more details than the description above ~~ % } \value{ a number between -1 and 1, which is the medcouple, \eqn{MC(x)}. For \code{r <- mc(x, full.result = TRUE, ....)}, then \code{attr(r, "mcComp")} is a list with components \item{medc}{the medcouple \eqn{mc.(x)}.} \item{medc2}{the medcouple \eqn{mc.(-x)} if \code{doReflect=TRUE}.} \item{eps}{tolerances used.} \item{iter,iter2}{number of iterations used.} \item{converged,converged2}{logical specifying \dQuote{convergence}.} } \section{Convergence Problems}{ For extreme cases there were convergence problems which should not happen anymore as we now use \code{doScale=FALSE} and huberization (when \code{c.huberize < Inf}). %% Some of them can be alleviated by \dQuote{loosening} the tolerances %% \code{eps1} and \code{eps2}. %% \cr The original algorithm and \code{mc(*, doScale=TRUE)} not only centers the data around the median but also scales them by the extremes which may have a negative effect e.g., when changing an extreme outlier to even more extreme, the result changes wrongly; see the 'mc10x' example. } \references{ Guy Brys, Mia Hubert and Anja Struyf (2004) A Robust Measure of Skewness; \emph{JCGS} \bold{13} (4), 996--1017. Hubert, M. and Vandervieren, E. (2008). An adjusted boxplot for skewed distributions, \emph{Computational Statistics and Data Analysis} \bold{52}, 5186--5201. Lukas Graz (2021). Improvement of the Algorithms for the Medcoule and the Adjusted Outlyingness; unpublished BSc thesis, supervised by M.Maechler, ETH Zurich. } \author{Guy Brys; modifications by Tobias Verbeke and bug fixes and extensions by Manuel Koller and Martin Maechler. The new default \code{doScale=FALSE}, and the new \code{c.huberize} were introduced as consequence of Lukas Graz' BSc thesis. } \seealso{\code{\link{Qn}} for a robust measure of scale (aka \dQuote{dispersion}), .... } \examples{ mc(1:5) # 0 for a symmetric sample x1 <- c(1, 2, 7, 9, 10) mc(x1) # = -1/3 data(cushny) mc(cushny) # 0.125 stopifnot(mc(c(-20, -5, -2:2, 5, 20)) == 0, mc(x1, doReflect=FALSE) == -mc(-x1, doReflect=FALSE), all.equal(mc(x1, doReflect=FALSE), -1/3, tolerance = 1e-12)) ## Susceptibility of the current algorithm to large outliers : dX10 <- function(X) c(1:5,7,10,15,25, X) # generate skewed size-10 with 'X' x <- c(10,20,30, 100^(1:20)) ## (doScale=TRUE, c.huberize=Inf) were (implicit) defaults in earlier {robustbase}: (mc10x <- vapply(x, function(X) mc(dX10(X), doScale=TRUE, c.huberize=Inf), 1)) ## limit X -> Inf should be 7/12 = 0.58333... 
but that "breaks down a bit" : plot(x, mc10x, type="b", main = "mc( c(1:5,7,10,15,25, X) )", xlab="X", log="x") ## The new behavior is much preferable {shows message about new 'doScale=FALSE'}: (mc10N <- vapply(x, function(X) mc(dX10(X)), 1)) lines(x, mc10N, col=adjustcolor(2, 3/4), lwd=3) mtext("mc(*, c.huberize=1e11)", col=2) stopifnot(all.equal(c(4, 6, rep(7, length(x)-2))/12, mc10N)) ## Here, huberization already solves the issue: mc10NS <- vapply(x, function(X) mc(dX10(X), doScale=TRUE), 1) stopifnot(all.equal(mc10N, mc10NS)) } \keyword{robust} \keyword{univar} robustbase/man/vaso.Rd0000644000176200001440000000420213312375575014446 0ustar liggesusers\name{vaso} \alias{vaso} \docType{data} \title{Vaso Constriction Skin Data Set} \description{ Finney's data on vaso constriction in the skin of the digits. } \usage{data(vaso, package="robustbase")} \format{ A data frame with 39 observations on the following 3 variables. \describe{ \item{\code{Volume}}{Inhaled volume of air} \item{\code{Rate}}{Rate of inhalation} \item{\code{Y}}{vector of 0 or 1 values.} } } \details{The data taken from Finney (1947) were obtained in a carefully controlled study in human physiology where a reflex \dQuote{vaso constriction} may occur in the skin of the digits after taking a single deep breath. The response y is the occurence (y = 1) or non-occurence (y = 0) of vaso constriction in the skin of the digits of a subject after he or she inhaled a certain volume of air at a certain rate. The responses of three subjects are available. The first contributed 9 responses, the second contributed 8 responses, and the third contributed 22 responses. Although the data represent repeated measurements, an analysis that assumes independent observations may be applied, as claimed by Pregibon (1981). } \source{ Finney, D.J. (1947) The estimation from individual records of the relationship between dose and quantal response. \emph{Biometrika} \bold{34}, 320--334 } \references{ Atkinson, A.C. and Riani, M. (2000) \emph{Robust Diagnostic Regression Analysis}, First Edition. New York: Springer, Table A.23. Fahrmeir, L. and Tutz, G. (2001) \emph{Multivariate Statistical Modelling Based on Generalized Linear Models}, Springer, Table 4.2. Kuensch, H.R., Stefanski, A. and Carrol, R.J. (1989) Conditionally unbiased bounded influence estimation in general regression models, with applications to generalized linear models, \emph{JASA} \bold{84}, 460--466. Pregibon, D. (1981) Logistic regression diagnostics, \emph{Annals of Statistics} \bold{9}, 705--724. } \examples{ data(vaso) str(vaso) pairs(vaso) glmV <- glm(Y ~ log(Volume) + log(Rate), family=binomial, data=vaso) summary(glmV) ## --> example(glmrob) showing classical & robust GLM } \keyword{datasets} robustbase/man/glmrob.Rd0000644000176200001440000003301614511464163014757 0ustar liggesusers\name{glmrob} \alias{glmrob} \title{Robust Fitting of Generalized Linear Models} \encoding{utf8} \description{ \code{glmrob} is used to fit generalized linear models by robust methods. The models are specified by giving a symbolic description of the linear predictor and a description of the error distribution. Currently, robust methods are implemented for \code{\link{family} = binomial}, \code{ = poisson}, \code{ = Gamma} and \code{ = gaussian}. 
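For example, a call such as \code{glmrob(y ~ x1 + x2, family = poisson, data = myData)} (a sketch using hypothetical variable and data names only) fits a robust Poisson regression; see the examples below for fits to real data sets.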
} \usage{ glmrob(formula, family, data, weights, subset, na.action, start = NULL, offset, method = c("Mqle", "BY", "WBY", "MT"), weights.on.x = c("none", "hat", "robCov", "covMcd"), control = NULL, model = TRUE, x = FALSE, y = TRUE, contrasts = NULL, trace.lev = 0, ...) } \arguments{ \item{formula}{a \code{\link{formula}}, i.e., a symbolic description of the model to be fit (cf. \code{\link{glm}} or \code{\link{lm}}).} \item{family}{a description of the error distribution and link function to be used in the model. This can be a character string naming a family function, a family \code{\link{function}} or the result of a call to a family function. (See \code{\link{family}} for details of family functions.)} \item{data}{an optional data frame containing the variables in the model. If not found in \code{data}, the variables are taken from \code{environment(formula)}, typically the environment from which \code{glmrob} is called.} \item{weights}{an optional vector of weights to be used in the fitting process.} \item{subset}{an optional vector specifying a subset of observations to be used in the fitting process.} \item{na.action}{a function which indicates what should happen when the data contain \code{NA}s. The default is set by the \code{na.action} setting in \code{\link{options}}. The \dQuote{factory-fresh} default is \code{\link{na.omit}}.} \item{start}{starting values for the parameters in the linear predictor. Note that specifying \code{start} has somewhat different meaning for the different \code{method}s. Notably, for \code{"MT"}, this skips the expensive computation of initial estimates via sub samples, but needs to be \emph{robust} itself.} \item{offset}{this can be used to specify an \emph{a priori} known component to be included in the linear predictor during fitting.} \item{method}{a character string specifying the robust fitting method. The details of method specification are given below.} \item{weights.on.x}{ a character string (can be abbreviated), a \code{\link{function}} or \code{\link{list}} (see below), or a numeric vector of length \code{n}, specifying how points (potential outliers) in x-space are downweighted. If \code{"hat"}, weights on the design of the form \eqn{\sqrt{1-h_{ii}}} are used, where \eqn{h_{ii}} are the diagonal elements of the hat matrix. If \code{"robCov"}, weights based on the robust Mahalanobis distance of the design matrix (intercept excluded) are used where the covariance matrix and the centre is estimated by \code{\link[MASS]{cov.rob}} from the package \CRANpkg{MASS}.\cr Similarly, if \code{"covMcd"}, robust weights are computed using \code{\link{covMcd}}. The default is \code{"none"}. If \code{weights.on.x} is a \code{\link{function}}, it is called with arguments \code{(X, intercept)} and must return an n-vector of non-negative weights. If it is a \code{\link{list}}, it must be of length one, and as element contain a function much like \code{\link{covMcd}()} or \code{\link[MASS]{cov.rob}()} (package \CRANpkg{MASS}), which computes multivariate location and \dQuote{scatter} of a data matrix \code{X}. } \item{control}{a list of parameters for controlling the fitting process. 
See the documentation for \code{\link{glmrobMqle.control}} for details.} \item{model}{a logical value indicating whether \emph{model frame} should be included as a component of the returned value.} \item{x, y}{logical values indicating whether the response vector and model matrix used in the fitting process should be returned as components of the returned value.} \item{contrasts}{an optional list. See the \code{contrasts.arg} of \code{model.matrix.default}.} \item{trace.lev}{logical (or integer) indicating if intermediate results should be printed; defaults to \code{0} (the same as \code{FALSE}).} \item{\dots}{arguments passed to \code{\link{glmrobMqle.control}} when \code{control} is \code{NULL} (as per default).} } \details{ \code{method="model.frame"} returns the \code{\link{model.frame}()}, the same as \code{\link{glm}()}. \cr \code{method="Mqle"} fits a generalized linear model using Mallows or Huber type robust estimators, as described in Cantoni and Ronchetti (2001) and Cantoni and Ronchetti (2006). In contrast to the implementation described in Cantoni (2004), the pure influence algorithm is implemented. \cr \code{method="WBY"} and \code{method="BY"}, available for logistic regression (\code{family = binomial}) only, call \code{\link{BYlogreg}(*, initwml= . )} for the (weighted) Bianco-Yohai estimator, where \code{initwml} is true for \code{"WBY"}, and false for \code{"BY"}. \cr \code{method="MT"}, currently only implemented for \code{family = poisson}, computes an \dQuote{[M]-Estimator based on [T]ransformation}, % -> ../R/MTestimador2.R by Valdora and Yohai (2013), via (hidden internal) \code{glmrobMT()}; as that uses \code{\link{sample}()}, from \R version 3.6.0 it depends on \code{\link{RNGkind}(*, sample.kind)}. Exact reproducibility of results from \R versions 3.5.3 and earlier, requires setting \code{\link{RNGversion}("3.5.0")}.% same text in ./adjOutlyingness.Rd \code{weights.on.x= "robCov"} makes sense if all explanatory variables are continuous. In the cases,where \code{weights.on.x} is \code{"covMcd"} or \code{"robCov"}, or list with a \dQuote{robCov} function, the mahalanobis distances \code{D^2} are computed with respect to the covariance (location and scatter) estimate, and the weights are \code{1/sqrt(1+ pmax.int(0, 8*(D2 - p)/sqrt(2*p)))}, where \code{D2 = D^2} and \code{p = ncol(X)}. } \value{ \code{glmrob} returns an object of class \code{"glmrob"} and is also inheriting from \code{\link{glm}}. \cr The \code{\link{summary}} method, see \code{\link{summary.glmrob}}, can be used to obtain or print a summary of the results. \cr The generic accessor functions \code{\link{coefficients}}, \code{effects}, \code{fitted.values} and \code{residuals} (see \code{\link{residuals.glmrob}}) can be used to extract various useful features of the value returned by \code{glmrob()}. 
An object of class \code{"glmrob"} is a list with at least the following components: \item{coefficients}{a named vector of coefficients} \item{residuals}{the \emph{working} residuals, that is the (robustly \dQuote{huberized}) residuals in the final iteration of the IWLS fit.} \item{fitted.values}{the fitted mean values, obtained by transforming the linear predictors by the inverse of the link function.} \item{w.r}{robustness weights for each observations; i.e., \code{residuals} \eqn{\times}{*} \code{w.r} equals the psi-function of the Preason's residuals.} \item{w.x}{weights used to down-weight observations based on the position of the observation in the design space.} \item{dispersion}{robust estimation of dispersion paramter if appropriate} \item{cov}{the estimated asymptotic covariance matrix of the estimated coefficients.} \item{tcc}{the tuning constant c in Huber's psi-function.} \item{family}{the \code{\link{family}} object used.} \item{linear.predictors}{the linear fit on link scale.} \item{deviance}{NULL; Exists because of compatipility reasons.} \item{iter}{the number of iterations used by the influence algorithm.} \item{converged}{logical. Was the IWLS algorithm judged to have converged?} \item{call}{the matched call.} \item{formula}{the formula supplied.} \item{terms}{the \code{\link{terms}} object used.} \item{data}{the \code{data argument}.} \item{offset}{the offset vector used.} \item{control}{the value of the \code{control} argument used.} \item{method}{the name of the robust fitter function used.} \item{contrasts}{(where relevant) the contrasts used.} \item{xlevels}{(where relevant) a record of the levels of the factors used in fitting.} %% FIXME: This is for glm() -- but *not* (yet ??) for glmrob() %% ----- should we change? % If a \code{\link{binomial}} \code{glm} model was specified by giving a % two-column response, the weights returned by \code{prior.weights} are % the total numbers of cases (multipied by the supplied case weights) and % the component \code{y} of the result is the proportion of successes. } \references{ Eva Cantoni and Elvezio Ronchetti (2001) Robust Inference for Generalized Linear Models. \emph{JASA} \bold{96} (455), 1022--1030. Eva Cantoni (2004) Analysis of Robust Quasi-deviances for Generalized Linear Models. \emph{Journal of Statistical Software}, \bold{10}, \url{https://www.jstatsoft.org/article/view/v010i04} Eva Cantoni and Elvezio Ronchetti (2006) A robust approach for skewed and heavy-tailed outcomes in the analysis of health care expenditures. \emph{Journal of Health Economics} \bold{25}, 198--213. S. Heritier, E. Cantoni, S. Copt, M.-P. Victoria-Feser (2009) \emph{Robust Methods in Biostatistics}. Wiley Series in Probability and Statistics. Marina Valdora and Víctor J. Yohai (2013) Robust estimators for Generalized Linear Models. In progress. 
} \author{Andreas Ruckstuhl ("Mqle") and Martin Maechler} %%\note{ } \seealso{ \code{\link{predict.glmrob}} for prediction; \code{\link{glmrobMqle.control}} } \examples{ ## Binomial response -------------- data(carrots) Cfit1 <- glm(cbind(success, total-success) ~ logdose + block, data = carrots, family = binomial) summary(Cfit1) Rfit1 <- glmrob(cbind(success, total-success) ~ logdose + block, family = binomial, data = carrots, method= "Mqle", control= glmrobMqle.control(tcc=1.2)) summary(Rfit1) Rfit2 <- glmrob(success/total ~ logdose + block, weights = total, family = binomial, data = carrots, method= "Mqle", control= glmrobMqle.control(tcc=1.2)) coef(Rfit2) ## The same as Rfit1 ## Binary response -------------- data(vaso) Vfit1 <- glm(Y ~ log(Volume) + log(Rate), family=binomial, data=vaso) coef(Vfit1) Vfit2 <- glmrob(Y ~ log(Volume) + log(Rate), family=binomial, data=vaso, method="Mqle", control = glmrobMqle.control(tcc=3.5)) coef(Vfit2) # c = 3.5 ==> not much different from classical ## Note the problems with tcc <= 3 %% FIXME algorithm ??? Vfit3 <- glmrob(Y ~ log(Volume) + log(Rate), family=binomial, data=vaso, method= "BY") coef(Vfit3)## note that results differ much. ## That's not unreasonable however, see Kuensch et al.(1989), p.465 ## Poisson response -------------- data(epilepsy) Efit1 <- glm(Ysum ~ Age10 + Base4*Trt, family=poisson, data=epilepsy) summary(Efit1) Efit2 <- glmrob(Ysum ~ Age10 + Base4*Trt, family = poisson, data = epilepsy, method= "Mqle", control = glmrobMqle.control(tcc= 1.2)) summary(Efit2) ## 'x' weighting: (Efit3 <- glmrob(Ysum ~ Age10 + Base4*Trt, family = poisson, data = epilepsy, method= "Mqle", weights.on.x = "hat", control = glmrobMqle.control(tcc= 1.2))) try( # gives singular cov matrix: 'Trt' is binary factor --> # affine equivariance and subsampling are problematic Efit4 <- glmrob(Ysum ~ Age10 + Base4*Trt, family = poisson, data = epilepsy, method= "Mqle", weights.on.x = "covMcd", control = glmrobMqle.control(tcc=1.2, maxit=100)) ) ##--> See example(possumDiv) for another Poisson-regression ### -------- Gamma family -- data from example(glm) --- clotting <- data.frame( u = c(5,10,15,20,30,40,60,80,100), lot1 = c(118,58,42,35,27,25,21,19,18), lot2 = c(69,35,26,21,18,16,13,12,12)) summary(cl <- glm (lot1 ~ log(u), data=clotting, family=Gamma)) summary(ro <- glmrob(lot1 ~ log(u), data=clotting, family=Gamma)) clotM5.high <- within(clotting, { lot1[5] <- 60 }) op <- par(mfrow=2:1, mgp = c(1.6, 0.8, 0), mar = c(3,3:1)) plot( lot1 ~ log(u), data=clotM5.high) plot(1/lot1 ~ log(u), data=clotM5.high) par(op) ## Obviously, there the first observation is an outlier with respect to both ## representations! cl5.high <- glm (lot1 ~ log(u), data=clotM5.high, family=Gamma) ro5.high <- glmrob(lot1 ~ log(u), data=clotM5.high, family=Gamma) with(ro5.high, cbind(w.x, w.r))## the 5th obs. is downweighted heavily! plot(1/lot1 ~ log(u), data=clotM5.high) abline(cl5.high, lty=2, col="red") abline(ro5.high, lwd=2, col="blue") ## result is ok (but not "perfect") %% FIXME: Need work -- option of *starting* from %% ----- see Andreas' ~/R/MM/Pkg-ex/robustbase/glmrob-gamma-ARu.R % ## a "regular outlier" in the middle : % clotM4.3 <- within(clotting, { lot1[4] <- 1000 }) % ## .. not even this one works : ... need *robust* start ?! 
% try(cl4.3 <- glm (lot1 ~ log(u), data=clotM4.3, family=Gamma)) % try(ro4.3 <- glmrob(lot1 ~ log(u), data=clotM4.3, family=Gamma)) % ## The new option to start from "lmrobMM" --- not yet ok either % try( % ro4.3 <- glmrob(lot1 ~ log(u), data=clotM4.3, family=Gamma, % start = "lmrobMM") % ) % ## summary(ro4.3) %% TODO the "same" with lot2 : %% summary(glm(lot2 ~ log(u), data=clotting, family=Gamma)) } \keyword{robust} \keyword{regression} \keyword{nonlinear} robustbase/man/exAM.Rd0000644000176200001440000000202213312375575014326 0ustar liggesusers\name{exAM} \alias{exAM} \docType{data} \title{Example Data of Antille and May - for Simple Regression} \description{ This is an artificial data set, cleverly construced and used by Antille and May to demonstrate \sQuote{problems} with LMS and LTS. } \usage{data(exAM, package="robustbase")} \format{ A data frame with 12 observations on 2 variables, \code{x} and \code{y}. } \details{ Because the points are not in general position, both LMS and LTS typically \emph{fail}; however, e.g., \code{\link[MASS]{rlm}(*, method="MM")} \dQuote{works}. } \source{ Antille, G. and El May, H. (1992) The use of slices in the LMS and the method of density slices: Foundation and comparison.\cr In Yadolah Dodge and Joe Whittaker, editors, \emph{COMPSTAT: Proc. 10th Symp. Computat. Statist., Neuchatel}, \bold{1}, 441--445; Physica-Verlag. } % \references{ % ~~ possibly secondary sources and usages ~~ % } \examples{ data(exAM) plot(exAM) summary(ls <- lm(y ~ x, data=exAM)) abline(ls) } \keyword{robust} \keyword{datasets} robustbase/TODO0000644000176200001440000003454314511464163013131 0ustar liggesusers -*- org -*- C-c C-o follows link[MM: grep-r -e '\(FIXME\|TODO\)'] * Before next release ** TODO glmrob() -- bug = R-forge bug : *warn* when family = "gaussian": they should rather use lmrob() !! ==> ~/R/MM/Pkg-ex/robustbase/glmrob_ChrSchoetz-ex.R ** TODO when lmrob.S() detects exact fit (in C code), it *should* return it, incl scale = 0 *and* it should have correct rweights[] in {0,1} and residuals[]; (fitted[] computed in R code after .C() call); ==> ~/R/MM/Pkg-ex/robustbase/ThMang_lmrob.R ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ and >>> tests/subsample.R (bottom) <<<------- *** in src/lmrob.c: Currently there *two* such situations the first explicit when #{zero resid} >= ~ (n + p)/2 {--> theory of *modified* MAD ==> ~/R/MM/STATISTICS/robust/tmad.R *** lmrob() calling lmrob..M.. after that should "work", optionally/always ?? using a non-zero scale, ------- it might use Martin's tmad() {trimmed mean of absolute deviations from the median} ** DONE print() and print(summary()): 'method' almost twice; show iBest for "deterministic" ** DONE covMcd(): allow 'hsets.ini' argument to covMcd(), and *return* them (optionally) as 'Hsubsets' ** DONE export rankMM, classSVD, .signflip [MM: repmat should not be needed] ** DONE 'scalefn' argument for covMcd() to be used for "detMCD" ** DONE adjOutlyingness(): *** DONE change defaults for clower & cupper (E-mail from P.Segaert). *** DONE But there is more: +/- swap ==> results not back compatible ** DONE colMedians() -> ask Henrik/ about "License: Artistic-2.0" ** DONE splitFrame() [important for lmrob(.. 
method = "M-S") --> lmrob.M.S()]: *character* should be treated *as* factors ** TODO nlrob() *** TODO summary(nlrob(*)) fails for new methods; better error message or *work* **** TODO for "MM" we are close to done; ideally want '$ rweights' (robustness weights) for all meth *** TODO residuals( nlrob(), type = "...") should provide types "as in the literature" *** DONE nlrob(*, method = "...") should call methods "tau", "CM", "MTL", "MM" by Eduardo Conceicao **** DONE shouldn't we rename jde() to jdeopt() or even jdeoptim(), jDEoptim(), or JDEoptim() R users already know optim() etc.. so the name seems more logical for them. * Short Term ** TODO lmrob.tau.fast.coefs() calls `coef(lmrob( ~ -1))` ... should use `lmrob.fit()` ! ** TODO lmrob.S() -> lmrob.control() should get nResample = "exact" -- as covMcd() / ltsReg() : *** use Fortran routines rfncomb() and rfgenp() [in src/rf-common.f -> need F77_name(.) / F77_CALL(.)] from C **** or even just right a version for C indices in {0, 1, ..., p-1} instead of Fortran {1, 2, ...., p} ** DONE sc.1) Qn(), Sn() should work with 'NA' (via 'na.rm=FALSE' argument, as mad()) ** DONE sc.2) Qn(), Sn() should work with Inf and "large x", see example(mc) ** DONE sc.2) mc() now works better with "large x", see example(mc) --- BSc thesis by Lukas Graz ** DONE fixed: mc(x) can *fail* to converge: thesis Lukas Graz {and ~/R/MM/Pkg-ex/robust/Robnik-mc.R } ** DONE Now that mc() works, also define lmc() and rmc() (left and right tail measures). ** TODO Try scaleTau2(*, iterate=TRUE) or 'iter=TRUE' (which can have 'iter = 10' etc) ** TODO [Peter Filzmoser, Geneva 2016-07-07 talk]: covMcd() warns when n < 2*p .. should not *warn* but give message() ** TODO [Peter Filzmoser, Geneva 2016-07-07 talk]: solve.default(getCov(mcd)) error with CovControlOgk() init *** INSTEAD it should report the (theoretical) breakdown point (p ...) / (n - h ... ) [from the MCD theory] ** TODO estimethod(): also for lmrob() and glmrob() models ** TODO VT implement .detMcd() in C ** TODO r6pack(milk, ..): *return* singularity message but do not signal an error ** TODO nlrob(*): *** TODO for the "vector parameter" biomass example in tests/nlrob-tst.R: method = "MM" As we do want the formula to work ==> we *must* allow 'lower' & 'upper' as list()s in R/nlregrob.R, have 14 matches for "eval *( *formula\[\[3L?" ((and *org* shows the `[[3L].]` (no ".") as underscored 3L)) : 123: y.hat <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) 127: y.hat <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) 141: y.hat <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) 175: res <- y - eval( formula[[3L]], c(data, initial$par) ) 193: fit <- eval( formula[[3L]], c(data, coef) ) 254: fit <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) 300: fit <- eval( formula[[3L]], c(data, coef) ) 355: fit <- eval( formula[[3L]], c(data, par) ) 361: fit <- eval( formula[[3L]], c(data, par) ) 366: fit <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) 390: fit <- eval( formula[[3L]], c(data, coef) ) 434: fit <- eval( formula[[3L]], c(data, par) ) 442: fit <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) 468: fit <- eval( formula[[3L]], c(data, coef) ) the same as in R/nlrob.R where we had eval(.., c(data, coef)) but now eval(.., c(data, start)) *** TODO nlrob(*, method=.) -- try at least *one* other optimizer than JDEoptim(), since we provide already most of the needed "hooks". 
*** DONE nlrob(*, method="M"): allow a "fixed initial sigma" (--> inference for "MM") *** TODO confint.nlrob():, "Wald" works; naming follows confint.lmer() from lme4 **** TODO method = "profile" based on (TODO) profile.nlrob() **** DONE method = "Wald" works ** TODO simulate() : implement for "nlrob" {and lmrob() ? and ..} ** glmrob *** BYlogreg() [ R/BYlogreg.R ] --> more tests in ./tests/glmrob-1.R --> glm.fit() instead of glm() --> vcov() instead of just std.err. {is already there} *** glmrob(*, weights.on.x = "robCov") uses MASS::cov.rob(), i.e. "MVE" and Andreas had a comment that "mcd" is worse. "covMcd" has been available for a while; now via robXweights() in ./R/glmrobMqle.R HOWEVER: Need something better when 'X' has (binary!) factors! "hat" +- works, but needs more work *** We now allow weights.on.x to be an arbitrary general wts(X, intercept) function _or_ a list containing a robMcd()-like function. Definitely need *testing* this situation! *** glmrob(): anova() has three variants: "V1", "Eva1", "Andreas1" --> ./R/glmrobMqle-DQD.R - gives warning every time {-> easy to fix} - Default is "V1" is that a good idea? *** glmrob() needs a bit more tests in ./tests/ [also consider those from man/glmrob.Rd] take those from Martin's old 'robGLM1' package (need more!) *** --> first test already shows that Martin's tests for "huberC == Inf" were *not* yet moved from robGLM1 to glmrob()... (in other words: glmrob() should work *** also, ni = 0 does not work quite as it should ( ./tests/binom-ni-small.R ) *** obj $ df ... maybe should be defined -- for "glm" methods to be applicable --> e.g. for predict(, interval="..") ! *** summary.glmrob() should be better documented; we should decide if the current return value is fine. *** Eva's code (and MM's) also computed & returned the "asymptotic efficiency"! *** anova.glmrob(): More modularization, allowing to provide own 'test' function. Test if Huber's C are different. Need theory to compare different C's and same model (which includes classical vs robust). *** add1() and/or drop1() would be nice ** TODO scaleTau2(): Also do a cheap finite-sample correction [MM] ! [DONE partly; but undocumented, since bound to change --> file:~/R/MM/STATISTICS/robust/1d-scale.R , 1d-scale-sim.R, etc --- unfinished!! ** TODO Psi/Rho/Chi/Wgt Functions We have quite a few "partial" collections of rho/psi functions; some are "sync"ed now, some not yet:: *** TODO 1) have the nice S4 class psi_func + psiFunc() and .defDwgt() functions in file:R/psi-rho-funs.R with further explorations, ideas in file:misc/experi-psi-rho-funs.R **** TODO print/show of such psi_func should show more; at least the psi function **** TODO str.psi_func() should be a bit nicer than the current default str() **** TODO nlrob(): also allow psi to be a 'psiFunc': --> ./R/nlrob.R ; consider even more *real* checks; now in tests/nlrob-tst.R *** DONE 2) deprecated: "old" tukeyChi() & tukeyPsi1() originally called from lmrob() , in ./R/biweight-funs.R *** DONE 3) psi.*(...., rho = FALSE/TRUE) functions from Andreas ([[file:.R/psi-funs-AR.R]]) replaced by using the new psi_func objects **** DONE nlrob() changed: uses psi = .Mwgt.psi1("huber", cc=1.345) as default *** TODO 4) have (C-based) functions Mpsi(), Mchi(), Mwgt(), etc, used from lmrob(), in ./R/lmrob.MM.R **** TODO provide Mpsi(psi = "t") etc; tuning parameter: 'nu' => MLE-t_\nu psi and psi' **** TODO provide '1)'-i.e. psi_func versions of the Mpsi() etc **** TODO Mpsi(*, "GGW") etc : have no (??) 
easy way to directly specify (a,b,c) tuning pars **** TODO *New* Mwgt(*, deriv=1) would correspond to Dwgt in psiFunc() which Manuel needs **** DONE now exported and documented in man/M.psi.Rd Further files, illustrating features, differences, etc: ./vignettes/psi_functions.Rnw -- with quite a few FIXME ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ./inst/xtraR/plot-psiFun.R chkPsiDeriv() {and plot utils} ./tests/psi-rho-etc.R compute asymp.efficiency and breakdown point ! ./tests/lmrob-psifns.R plot and lmrob()-test them **** DONE Deprecate* the "2)", tukeyChi() etc, making them call the M.*fun(): * Mid Term ** TODO R/lmrob.MM.R: Using lmrob.E( * ) *** TODO: numerically clearly better than these -- e.g. in summary.lmrob(): use integrate(), or *** ---- allow to specify 'use.integrate=TRUE, tol = 1e-10' etc for *accurate* cor.factors *** "hampel", "bisquare", "lqq" (polyn!): derive *exact* formula; others maybe, too <==> psi_func objects above? ** TODO New lmrob() features ("M-S" as option?): *** Function names with "." (which are exported) are frowned upon e.g. lmrob.split() *** checking .vcov.avar1() and its "posdefify" options [but "KS201x" uses .vcov.w() anyway] *** TODO lmrob.mar() [file:inst/doc/estimating.functions.R]: Maronna & Yohai (2010) should ~~~~~~~~~~ become part of robustbase, maybe under a better name, e.g. via lmrob( ... control ..) or directly. It is much used in the simulations of Koller & Stahel (2011) *** TODO Provide "simple M" estimator [so MASS :: rlm() is entirely superseeded] Consider lmrob(*, method = "M") --> default init = "ls" (Least Sq; as MASS:::rlm.default) which calls lmrob..M..fit() which is already documented as "simple" M-estimator (though the *scale* is kept fixed; i.e., no 'proposal 2'). ** TODO glmrob(), glmrobMqle(), etc : expression()s and eval() no longer "satisfactory", e.g., see FIXME in ./R/glmrobMqle.R ** TODO covMcd(): pass k3 as argument; default=current ==> allow "formula" k3 = k3(n,p) !! ** covOGK(): The argument name 'weight.fn' is pretty ugly and the default function name 'hard.rejection()' is just awful (we need a globally available function as 'role model'. - Could allow 'n.iter = 0' to simply compute Cov()_{ij} = rcov(X_i, X_j) ** rrcov etc *** rrcov.control() __ NEEDS name change ! ______ probably use mcd.control() and lts.control() or forget about *control() completely? since there are only a few in each ??????/ *** TODO tolellipse() --> renamed to tolEllipsePlot() **** maybe use cluster::ellipsoidPoints() **** allow other percentiles than just 97.5% **** maybe *return* something *** plot(mcd. ) [ R/covPlot.R ] : should show the call Default for 'ask' should be smarter: depend on prod(par("mfrow")) < #{plots} (which depends on 'classic' and p=2) *** ltsReg(): has undocumented '$resid' in addition to '$residuals' and '$raw.residuals'; drop it or document it ! ** More lmrob() considerations *** DONE more tests in tests/ *** fully implement and test the multivariate case (y = matrix with > 1 col.) *** src/lmrob.c : does median() , MAD() instead of using R's sort() routines * Long Term / Maybe ** inst/doc/lmrob_simulation.Rnw : *** use hyperlinks {e.g. using jss docu.class!} *** consider making parts available in (new) ./demo/lmrob...R *** tau_i (p.5) is not clear for Joe Average. .......................................... ** Generalizing 'wgt.himedian': We'd want a C API on which R builds. 
There are pure R implementations: - 'weighted.median()' in limma and I have generalized it ---> file:inst/xtraR/ex-funs.R - more general code (different 'tie' strategies; weighted *quantile*s) in file:/u/maechler/R/MM/STATISTICS/robust/weighted-median.R - The 'Hmisc' package has wtd.quantile() ** Miscellaneous *** Alternative version of covOGK() for correlation-only using's Huber's correlation formula which ensures [-1,1] range --> ~/R/MM/Pkg-ex/robustbase/robcorgroesser1.R and ~/R/MM/STATISTICS/robust/pairwise-new.R *** package 'riv' (author @ epfl.ch!) has 'slc()' ~= cov.S(.) -- in pure R code doesn't Valentin have a version too? otherwise: test this, ask author for "donation" to robustbase *** adjOutlyingness() : **** typo-bug is corrected; and I have made it more pretty. Still a bit problematic when denominator = 0 Currently leave away all the c/0 = Inf and 0/0 = NaN values. MM: Maybe, it's the fact that the coef = 1.5 should really depend on the sample size n and will be too large for small n (??) --> should ask Mia and maybe Guy Brys **** For really small (n,p): Taking 250 random samples of size p; is non-sense when choose(n,p) <= 250 Rather then, take *all* sub-samples of size p ==> getting a non-random result. *** Add data sets from the MMY-book -- mostly done {do we have *all* ?} *** Data Sets --- Valentin Todorov has several of Rousseeuw's in the 'rrov' package (and promised me "the rest" when needed) Don't like the *.x, *.y sub datasets: They shouldn't be needed when use a *formula* In his lts tests, he uses these "data sets from the literature": (Note that 'stackloss' is already in "datasets") : heart.x,heart.y, data(heart) stars.x,stars.y, data(stars) phosphor.x,phosphor.y, data(phosphor) stack.x,stack.loss, data(stackloss) coleman.x,coleman.y, data(coleman) salinity.x,salinity.y, data(salinity) aircraft.x,aircraft.y, data(aircraft) delivery.x,delivery.y, data(delivery) wood.x,wood.y, data(wood) hbk.x,hbk.y, data(hbk) robustbase/DESCRIPTION0000644000176200001440000000620214555227612014142 0ustar liggesusersPackage: robustbase Version: 0.99-2 VersionNote: Released 0.99-1 on 2023-11-28 to CRAN Date: 2024-01-27 Title: Basic Robust Statistics Authors@R: c(person("Martin","Maechler", role=c("aut","cre"), email="maechler@stat.math.ethz.ch", comment = c(ORCID = "0000-0002-8685-9910")) , person("Peter", "Rousseeuw", role="ctb", comment = "Qn and Sn") , person("Christophe", "Croux", role="ctb", comment = "Qn and Sn") , person("Valentin", "Todorov", role = "aut", email = "valentin.todorov@chello.at", comment = "most robust Cov") , person("Andreas", "Ruckstuhl", role = "aut", email = "andreas.ruckstuhl@zhaw.ch", comment = "nlrob, anova, glmrob") , person("Matias", "Salibian-Barrera", role = "aut", email = "matias@stat.ubc.ca", comment = "lmrob orig.") , person("Tobias", "Verbeke", role = c("ctb","fnd"), email = "tobias.verbeke@openanalytics.eu", comment = "mc, adjbox") , person("Manuel", "Koller", role = "aut", email = "koller.manuel@gmail.com", comment = "mc, lmrob, psi-func.") , person(c("Eduardo", "L. 
T."), "Conceicao", role = "aut", email = "mail@eduardoconceicao.org", comment = "MM-, tau-, CM-, and MTL- nlrob") , person("Maria", "Anna di Palma", role = "ctb", comment = "initial version of Comedian") ) URL: https://robustbase.R-forge.R-project.org/, https://R-forge.R-project.org/R/?group_id=59, https://R-forge.R-project.org/scm/viewvc.php/pkg/robustbase/?root=robustbase, svn://svn.r-forge.r-project.org/svnroot/robustbase/pkg/robustbase BugReports: https://R-forge.R-project.org/tracker/?atid=302&group_id=59 Description: "Essential" Robust Statistics. Tools allowing to analyze data with robust methods. This includes regression methodology including model selections and multivariate statistics where we strive to cover the book "Robust Statistics, Theory and Methods" by 'Maronna, Martin and Yohai'; Wiley 2006. Depends: R (>= 3.5.0) Imports: stats, graphics, utils, methods, DEoptimR Suggests: grid, MASS, lattice, boot, cluster, Matrix, robust, fit.models, MPV, xtable, ggplot2, GGally, RColorBrewer, reshape2, sfsmisc, catdata, doParallel, foreach, skewt SuggestsNote: mostly only because of vignette graphics and simulation Enhances: robustX, rrcov, matrixStats, quantreg, Hmisc EnhancesNote: linked to in man/*.Rd LazyData: yes NeedsCompilation: yes License: GPL (>= 2) Packaged: 2024-01-27 14:39:19 UTC; maechler Author: Martin Maechler [aut, cre] (), Peter Rousseeuw [ctb] (Qn and Sn), Christophe Croux [ctb] (Qn and Sn), Valentin Todorov [aut] (most robust Cov), Andreas Ruckstuhl [aut] (nlrob, anova, glmrob), Matias Salibian-Barrera [aut] (lmrob orig.), Tobias Verbeke [ctb, fnd] (mc, adjbox), Manuel Koller [aut] (mc, lmrob, psi-func.), Eduardo L. T. Conceicao [aut] (MM-, tau-, CM-, and MTL- nlrob), Maria Anna di Palma [ctb] (initial version of Comedian) Maintainer: Martin Maechler Repository: CRAN Date/Publication: 2024-01-27 16:30:02 UTC robustbase/build/0000755000176200001440000000000014555212626013532 5ustar liggesusersrobustbase/build/vignette.rds0000644000176200001440000000062214555212626016071 0ustar liggesusers}SO0lh@_jқ[0O"怆ȁkOv#$ @-BDeB|@I)E's! library(robustbase) > > source(system.file("xtraR/styleData.R", package = "robustbase")) # -> smallD list of small datasets > str(smallD,, 20) List of 23 $ x0 : num(0) $ x1I : num Inf $ x1 : num 3 $ xII : num [1:2] -Inf Inf $ x2I : num [1:2] -Inf 9 $ x2 : int [1:2] 1 2 $ x3.2I: num [1:3] -Inf 9 Inf $ x3I : num [1:3] -Inf 9 11 $ x3 : num [1:3] 1 2 10 $ y : int [1:10] 1 2 3 4 5 6 7 8 9 10 $ y1 : num [1:11] 1 2 3 4 5 6 7 8 9 10 100 $ y. : int [1:11] 1 2 3 4 5 6 7 8 9 10 11 $ xC. : num [1:11] 1 1 1 1 1 1 1 1 1 1 1 $ yI : num [1:12] 1 2 3 4 5 6 7 8 9 10 100 Inf $ y2 : num [1:12] 1 2 3 4 5 6 7 8 9 10 100 1000 $ y1. : num [1:12] 1 2 3 4 5 6 7 8 9 10 11 100 $ xC1. : num [1:12] 1 1 1 1 1 1 1 1 1 1 1 10 $ xC : num [1:12] 1 1 1 1 1 1 1 1 1 1 1 1 $ yI. : num [1:13] 1 2 3 4 5 6 7 8 9 10 11 100 Inf $ y2. : num [1:13] 1 2 3 4 5 6 7 8 9 10 11 100 1000 $ xC2. : num [1:13] 1 1 1 1 1 1 1 1 1 1 1 10 100 $ xC1 : num [1:13] 1 1 1 1 1 1 1 1 1 1 1 1 10 $ xC2 : num [1:14] 1 1 1 1 1 1 1 1 1 1 1 1 10 100 > > lx <- lapply(smallD, + function(x) { + m <- mad(x) + hx <- + if(!is.na(m) && m > 0 && m != Inf) # in all these cases, MASS::huber() fails + MASS::huber(x) + else list(m=NA, s=NA) + hMx <- huberM(x) + list(loc = + c(median = median(x), + huber = hx$m, + huberM = hMx$m), + scale= + c(mad = m, + huber = hx$s, + huberM = hMx$s)) + }) > > > r <- list(mu = sapply(lx, function(x) x$loc), + s = sapply(lx, function(x) x$scale)) > r $mu x0 x1I x1 xII x2I x2 x3.2I x3I x3 y y1 y. 
xC. median NA Inf 3 NaN -Inf 1.5 9 9.000000 2.000000 5.5 6.000000 6 1 huber NA NA NA NA NA 1.5 NA 7.776102 2.611949 5.5 6.167169 6 NA huberM NA Inf 3 NaN -Inf 1.5 9 7.776102 2.611949 5.5 6.167169 6 1 yI y2 y1. xC1. xC yI. y2. xC2. xC1 xC2 median 6.500000 6.500000 6.500000 1 1 7.000000 7.000000 1 1 1 huber 6.834339 6.834339 6.606518 NA NA 7.213034 7.213034 NA NA NA huberM 6.834339 6.834339 6.606518 1 1 7.213034 7.213034 1 1 1 $s x0 x1I x1 xII x2I x2 x3.2I x3I x3 y y1 y. xC. mad NA NA 0 NA NA 0.7413 Inf 2.9652 1.4826 3.7065 4.4478 4.4478 0 huber NA NA NA NA NA 0.7413 NA 2.9652 1.4826 3.7065 4.4478 4.4478 NA huberM NA 0 0 0 0 0.7413 Inf 2.9652 1.4826 3.7065 4.4478 4.4478 0 yI y2 y1. xC1. xC yI. y2. xC2. xC1 xC2 mad 4.4478 4.4478 4.4478 0 0 4.4478 4.4478 0 0 0 huber 4.4478 4.4478 4.4478 NA NA 4.4478 4.4478 NA NA NA huberM 4.4478 4.4478 4.4478 0 0 4.4478 4.4478 0 0 0 > > cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' Time elapsed: 0.192 0.05 0.242 0.004 0.004 > > proc.time() user system elapsed 0.196 0.054 0.243 robustbase/tests/tmcd.R0000644000176200001440000001567514236516416014665 0ustar liggesuserslibrary(robustbase) source(system.file("xtraR/test_MCD.R", package = "robustbase"))#-> doMCDdata ## ../inst/xtraR/test_MCD.R ## instead of relying on system.file("test-tools-1.R", package="Matrix"): source(system.file("xtraR/test-tools.R", package = "robustbase")) # showProc.time(), relErr() showProc.time() ## -- now do it: options(digits = 5) set.seed(101) # <<-- sub-sampling algorithm now based on R's RNG and seed doMCDdata() doMCDdata(method="DetMCD"); warnings() ## vvvv no timing for 'R CMD Rdiff' outputs doMCDdata(nrep = 12, time=FALSE) doMCDdata(nrep = 12, time=FALSE, method="DetMCD"); warnings() doMCDdata(nrep = 12, time=FALSE, method = "MASS") ###--- now the "close to singular" mahalanobis case: set.seed(6) (c3 <- covMcd(mort3)) (c3. <- covMcd(mort3, nsamp="deterministic")) stopifnot(log(c3$crit) <= log(c3.$crit), print(log(c3.$crit / c3$crit)) <= 0.8) ## see 0.516 / 0.291 {with seed 7} ## ## rescale variables: scaleV <- c(0.1, 0.1, 1, 1, .001, 0.1, 0.1, 100) mm <- data.matrix(mort3) * rep(scaleV, each = nrow(mort3)) C3 <- covMcd(mm) C3. <- covMcd(mm, nsamp="deterministic") stopifnot(C3$mcd.wt == c3$mcd.wt)# here, not for all seeds! ## error ("computationally singular") with old (too high) default tolerance: try( covMcd(mm, control= rrcov.control(tol = 1e-10)) ) try( covMcd(mm, control= rrcov.control(tol = 1e-10), nsamp="deterministic") ) showProc.time() ## "large" examples using different algo branches {seg.fault in version 0.4-4}: n <- 600 ## - partitioning will be triggered set.seed(1) X <- matrix(round(100*rnorm(n * 3)), n, 3) (cX <- covMcd(X)) cX. <- covMcd(X, nsamp="deterministic", scalefn = scaleTau2) i <- names(cX); i <- i[!(i %in% c("call", "nsamp", "method", "raw.weights"))] stopifnot(sum(cX.$raw.weights != cX$raw.weights) <= 2, all.equal(cX[i], cX.[i], tol= 1/9)) n <- 2000 ## - nesting will be triggered set.seed(4) X <- matrix(round(100*rnorm(n * 3)), n, 3) set.seed(1) summary(cX <- covMcd(X)) # <- show newly activated print.summary.mcd(.) cX. 
<- covMcd(X, nsamp="deterministic", scalefn = scaleTau2) i2 <- i[i != "mcd.wt"] stopifnot(print(sum(cX.$raw.weights != cX$raw.weights)) <= 3, # 2 all.equal(cX[i2], cX.[i2], tol= 1/10))# 1/16 set.seed(1) ## testing of 'raw.only' : cXo <- covMcd(X, raw.only=TRUE) i <- paste0("raw.", c("cov", "center", "cnp2")) stopifnot(cXo$raw.only, all.equal(cX[i], cXo[i], tol = 1e-15), c("best", "mah") %in% setdiff(names(cX), names(cXo))) showProc.time() ## Now, some small sample cases: ## maximal values: n. <- 10 p. <- 8 set.seed(44) (X. <- cbind(1:n., round(10*rt(n.,3)), round(10*rt(n.,2)), matrix(round(10*rnorm(n. * (p.-3)), 1), nrow = n., ncol = p.-3))) ## 2 x 1 ---> Error r <- tryCatch(covMcd(X.[1:2, 2, drop=FALSE]), error=function(e)e) stopifnot(inherits(r, "error"), grepl("too small sample size", r$message)) ## 3 x 2 --- ditto r <- tryCatch(covMcd(X.[1:3, 2:3]), error=function(e)e) stopifnot(inherits(r, "error"), grepl("too small sample size", r$message)) ## 5 x 3 [ n < 2 p ! ] --- also works for MASS X <- X.[1:5, 1:3] set.seed(101) ## the finite-sample correction is definitely doubtful: summary(cc <- covMcd(X, use.correction = FALSE)) str(cc) ## best = 2 3 4 5 if(hasMASS <- requireNamespace("MASS", quietly=TRUE)) { mcc <- MASS::cov.mcd(X) stopifnot(cc$best == mcc$best, all.equal(cc$center, mcc$center, tolerance = 1e-10), all.equal(c(mcc$cov / cc$raw.cov), rep(0.673549282206, 3*3))) } ## p = 4 -- 6 x 4 & 7 x 4 [ n < 2 p ! ] p <- 4 n <- 7 X <- X.[1:n, 1+(1:p)] stopifnot(dim(X) == c(n,p)) (cc <- covMcd(X, use.correction = FALSE)) str(cc) ## best = 1 2 4 5 6 7 if(hasMASS) { mcc <- MASS::cov.mcd(X) stopifnot(cc$best == mcc$best, all.equal(cc$center, mcc$center, tolerance = 1e-10), all.equal(c(mcc$cov / cc$raw.cov), rep(0.7782486992881, p*p))) } n <- 6 X <- X[1:n,] (cc <- covMcd(X, use.correction = FALSE)) if(hasMASS) { mcc <- MASS::cov.mcd(X) stopifnot(cc$best == mcc$best, all.equal(cc$center, mcc$center, tolerance = 1e-10), all.equal(c(mcc$cov / cc$raw.cov), rep(0.7528695976179, p*p))) } showProc.time() ## nsamp = "exact" -- here for p=7 coleman.x <- data.matrix(coleman[, 1:6]) showSys.time(CcX <- covMcd(coleman.x, nsamp= "exact")) showSys.time(Ccd <- covMcd(coleman.x, nsamp= "deterministic")) stopifnot(all.equal(CcX$best, c(2, 5:9, 11,13, 14:16, 19:20), tolerance=0), intersect(CcX$best, Ccd$best) == c(2,5,7,8,13,14,16,19,20), relErr(CcX$crit, Ccd$crit) < 0.35 # see ~ 0.34 ) summary(Ccd) demo(determinMCD)## ../demo/determinMCD.R ## ----------- including simple "exactfit" (code = 3) warnings() showProc.time() if(!robustbase:::doExtras()) quit() ## if ( doExtras ) ----------------------------------------------------------------- ## ============== ## (nmini, kmini) examples: set.seed(7) ; X1 <- gendata(10000, p=13, eps = 0.30) showSys.time(c1 <- covMcd(X1$X)) # 0.87 sec chk.covMcd <- function(ans, ind) { stopifnot(inherits(ans, "mcd")) ## check that all outliers were detected: mod.outl <- which(ans$mcd.wt == 0) outl.detected <- (ind %in% mod.outl) if(!all(outl.detected)) { cat("The following outliers are *not* detected:\n") print(which(!outl.detected)) } fp <- !(mod.outl %in% ind) if(any(fp)) { cat(sprintf("False positive \"outliers\" (a few expected) %d of %d (= %.2f%%):\n", sum(fp), nobs(ans), 100*sum(fp)/nobs(ans))) print(which(fp)) } else cat("** No ** false positive outliers -- exceptional!\n") } ## chk.covMcd(c1, X1$xind) cat("\ncovMcd(*, kmini=12, trace=2) ...\n------\n") showSys.time(c2 <- covMcd(X1$X, kmini=12, trace=2))# slower.. 
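## Hedged side-check (an illustration only, not part of the original test
## assertions; like chk.covMcd() above, it assumes that gendata()'s 'xind'
## indexes the true contamination): cross-tabulate true vs. flagged outliers
## for the default fit 'c1' computed above.
truth   <- seq_len(nrow(X1$X)) %in% X1$xind
flagged <- c1$mcd.wt == 0
table(truth = truth, flagged = flagged)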
chk.covMcd(c2, X1$xind) ## Comparing: ii <- !(names(c1) %in% c("call", "method")) cat("\ncovMcd(*, nsamp=\"deterministic\")\n") showSys.time(cD <- covMcd(X1$X, nsamp="deterministic"))# quite slower than FASTMCD chk.covMcd(cD, X1$xind) cat("<.>$crit = log(det(.)) [minimal = best] :\n") print(cbind(sort(c(default = c1$crit, kmini.12 = c2$crit, determin = cD$crit)))) i2 <- names(c1)[ii]; i2 <- i2[i2 != "nsamp"] ## closer coincidence if "raw.*" are dropped: i3 <- i2; i3 <- i3[ - grep("^raw", i3) ] stopifnot(all.equal(c1[ii], c2[ii], tol= 0.02), all.equal(cD[i2], c1[i2], tol= 0.02), all.equal(cD[i3], c1[i3], tol= 6e-4), # 4.60e-4 ## the 0/1 weights coincide : cD$mcd.wt == c1$mcd.wt, c2$mcd.wt == c1$mcd.wt) showProc.time() ## Radarexample --- already some in ../man/radarImage.Rd <<<------------- data(radarImage) print(d <- dim(radarImage)); n.rI <- d[1] ## The 8 "clear" outliers (see also below) ii8 <- c(1548:1549, 1553:1554, 1565:1566, 1570:1571) set.seed(7) showSys.time( L1 <- lapply(0:200, function(n) n+ which(0 == covMcd(unname(radarImage[(n+1L):n.rI,]), trace=2)$mcd.wt))) ## check for covMcd() consistency: print(tablen <- table(vapply(L1, length, 1))) plot(tablen) print(iCommon <- Reduce(intersect, L1)) stopifnot(ii8 %in% iCommon) ## robustbase/tests/poisson-ex.R0000644000176200001440000002461714236516416016036 0ustar liggesusers library(robustbase) ## instead of relying on system.file("test-tools-1.R", package="Matrix"): source(system.file("xtraR/test-tools.R", package = "robustbase")) # assert.EQ() etc #### Poisson examples from Eva Cantoni's paper ### Using Possum Data ### ================ data(possumDiv) ## Try to follow closely Cantoni & Ronchetti(2001), JASA dim(X <- possum.mat[, -1]) # 151 13 str(y <- possum.mat[, "Diversity"]) ##--- reduce the matrix from singularity ourselves: X. <- possum.mat[, -c(1, match(c("E.nitens", "NW-NE"), colnames(possum.mat)))] dim(X.)# 151 11 ## "classical via robust: c = Inf : Inf. <- 1e5 ## --- FIXME ## The following used to fail because glm.fit() returns NA coefficients ## now fine .. keep this as test! glm.cr <- glmrob(y ~ X, family = "poisson", tcc = Inf.) (scr <- summary(glm.cr)) scl <- summary(glm.cl <- glm (Diversity ~ . , data=possumDiv, family=poisson)) sc2 <- summary(glm.c2 <- glmrob(Diversity ~ . , data=possumDiv, family=poisson, tcc = Inf.)) MMg <- model.matrix(glm.cl) assert.EQ(coef(scl), coef(sc2), tol = 6e-6, giveRE=TRUE) # 1.37e-6 dnms <- list(colnames(MMg), c("Estimate", "Std. Error", "z value", "Pr(>|z|)")) cf.sc <- array(c(-0.9469439, 0.01192096, -0.2724059, 0.04022862, 0.03988606, 0.07173483, 0.01763833, -0.01534376, 0.1149216, 0.06675529, 0.1169463, -0.4889071, ## SE 0.2655031, 0.02194661, 0.2859216, 0.01120463, 0.01438884, 0.03814053, 0.01059779, 0.1916126, 0.2724202, 0.1901612, 0.1902903, 0.2474653, ## z val -3.566603, 0.5431798, -0.9527294, 3.590356, 2.772014, 1.880803, 1.664341, -0.08007701, 0.421854, 0.3510457, 0.6145675, -1.975659, ## P val 0.0003616393, 0.587006, 0.3407272, 0.0003302263, 0.00557107, 0.05999869, 0.09604432, 0.936176, 0.6731316, 0.7255541, 0.5388404, 0.04819339), dim = c(12L, 4L), dimnames = dnms) assert.EQ(cf.sc, coef(sc2), tol = 4e-7, giveRE=TRUE) # 8.48e-8 ## c = 2.0 summary(g2 <- glmrob(Diversity ~ . , data=possumDiv, family=poisson, tcc = 2.0, trace=TRUE)) ## c = 1.6 glm.r <- glmrob(Diversity ~ . , data=possumDiv, family=poisson, tcc = 1.6, trace=TRUE) (s.16 <- summary(glm.r)) str(glm.r) ## Now with *smaller* X (two variables less): glm.c2 <- glmrob(y ~ X., family = "poisson", tcc = Inf.) 
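## Hedged cross-check (an extra illustration, not one of the original test
## assertions): with the very large tuning constant 'Inf.' the Mqle estimate
## should essentially reproduce the classical Poisson ML fit on the same
## reduced design matrix; the tolerance below is only a guess.
glm.cl2 <- glm(y ~ X., family = poisson)
print(all.equal(unname(coef(glm.cl2)), unname(coef(glm.c2)), tolerance = 1e-4))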
summary(glm.c2) ## c = 1.6, x-weights, as in Cantoni-Ronchetti glm.r2 <- glmrob(y ~ X., family = "poisson", tcc = 1.6, weights.on.x = "hat") ## Now the same, for the direct possum data (no matrix), ## This indeed gives the same coefficients as in ## Table 3 of Cantoni+Ronchetti(2001): .. (tech.rep.): glm.r2. <- glmrob(Diversity ~ ., family = "poisson", data=possumDiv, tcc = 1.6, weights.on.x = "hat", acc = 1e-15) ## here iterate till convergence (acc = 10^(-15)) (sglm.r2 <- summary(glm.r2.)) ## This is too accurate for S.E. (but we have converged to end) cf2 <- matrix(c(-0.898213938628341, 0.269306882951903, 0.00717220104127189, 0.0224349606070713, -0.25335520175528, 0.288588183720387, 0.0403970350911325, 0.0113429514237665, 0.0411096703375411, 0.0145996036305452, 0.0730250489306713, 0.0386771060643486, 0.0176994176433365, 0.0107414247342375, -0.0289935051669504,0.194215229266707, 0.149521144883774, 0.271648514202971, 0.0503262879663932, 0.191675979065398, 0.0909870068741749, 0.192192515800464, -0.512247626309172, 0.250763990619973), 12,2, byrow=TRUE) assert.EQ(cf2, unname(coef(sglm.r2)[, 1:2]), tol = 1e-9, giveRE=TRUE)#-> show : ~ 1.46e-11 stopifnot(abs(glm.r2.$iter - 18) <= 1) # 18 iterations on 32-bit (2008) ## MT estimator -- "quick" examples if(!robustbase:::doExtras()) { cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' quit() } ## if ( doExtras ) ----------------------------------------------------- X1 <- cbind(1, X.) if(FALSE) ## for debugging ... options(warn = 1, error=recover) options(nwarnings = 1000) # def. 50 RNGversion("3.5.0") ## [TODO: adapt to "current" RNG settings] set.seed(57) showSys.time( ## m1 <- glmrobMT(x=X1, y=y) m1 <- glmrob(Diversity ~ ., data=possumDiv, family=poisson, method="MT") ) summary(warnings()) stopifnot(m1$converged) assert.EQ(m1$initial, c(-0.851594294907422, -0.0107066895370536, -0.226958540075445, 0.0355906625338308, 0.048010654640958, 0.0847493155436896, 0.0133604488401352, -0.024115201062159, 0.0270535337324518, 0.146022135657894, -0.00751380783260833, -0.417638086169033) , tol = 1e-13, check.attributes=FALSE, giveRE=TRUE) ## MM: I'm shocked how much this changes after every tweak ... (arch <- Sys.info()[["machine"]]) .M <- .Machine; str(.M[grep("^sizeof", names(.M))]) ## differentiate long-double.. if(arch == "x86_64") { if(.M$sizeof.longdouble != 16) arch <- paste0(arch, "--no-long-double") else if(osVersion == "Fedora 30 (Thirty)") arch <- paste0(arch, "_F30") } dput(signif(unname(coef(m1)), 11)) ## --> ## Something strange going on: R CMD check is different from interactive R, here. ## ???? [I see that the byte compiler is not listed in sessionInfo] ## In any case, take the dput(.) output from the *.Rout[.fail] file ## 2015-07-21: on 32-bit, the results *change* when re-run ??? 
beta1 <- list(i686 = ## old florence: ## c(-0.83715700394, 0.0085488694315, -0.16734609346, 0.040965601691, ## 0.042387113444, 0.063146240793, 0.018632137866, -0.0062886781262, ## 0.11466679192, 0.091457894347, -0.025009954018, -0.66867971209) ## for a "moment": f32sfs-2; 2015-07-20 ## c(-0.83818366695, 0.0085885492587, -0.1680548609, 0.040969491636, ## 0.042401438906, 0.063170238296, 0.018647880253, -0.0058039548495, ## 0.11500468542, 0.091940159895, -0.024804291737, -0.66861710581) ## f32sfs-2; 2015-07-21; in "R CMD check"/BATCH, *not* interactive c(-0.83701057367, 0.0085408263511, -0.16692955779, 0.040980220489, 0.042389760873, 0.063145608346, 0.018632314682, -0.0062819674369, 0.11513144785, 0.091268054568, -0.025531439815, -0.66981350787) ## f32sfs-2; 2015-07-21, in R-devel, several times in a row: ## c(-0.83734949811, 0.008554484224, -0.16727333284, 0.040980350692, ## 0.042391751765, 0.06315585848, 0.018633222478, -0.0062978140762, ## 0.11509071086, 0.091463771235, -0.025113314023, -0.66955433495) , "x86_64" = c(-0.83723213945, 0.0085385261915, -0.16697112315, 0.040985126003, 0.042400738973, 0.063168847366, 0.01863253681, -0.0064477807228, 0.11488937188, 0.091283185006, -0.025627390293, -0.66995658693) , "x86_64--no-long-double" = c(-0.83710423989, 0.0085428949874, -0.16713845989, 0.040973904414, 0.042391910971, 0.063159426394, 0.018629240073, -0.006362108938, 0.1145563969, 0.091490891317, -0.025378427464, -0.66943593439) , "x86_64_F30" = c(-0.83703991366, 0.008536691385, -0.16707196217, 0.040980171987, 0.042388781206, 0.063132162167, 0.018634264818, -0.0064298708197, 0.11486525895, 0.091433901799, -0.025384338265, -0.66920847831) ) ## just FYI: difference 32-bit vs 64-bit: assert.EQ(beta1[[1]], beta1[[2]], tol = 0.004, check.attributes=FALSE, giveRE=TRUE) ## Mean relative difference: 0.00142 [~ 2013-12]; 0.00273 [f32sfs-2; 2015-08]; then (R-devel 2015-07-21): 0.000916 assert.EQ(beta1[[2]], beta1[[3]], tol = 0.002, check.attributes=FALSE, giveRE=TRUE) ## Mean relative difference: 0.00082849 [2014-11] ## when bypassing BLAS in matprod() vvvvv seen 0.001385 [Lx 64b]: assert.EQ(coef(m1), beta1[[arch]], tol = 0.002, # typically 1e-10 is ok !! check.attributes=FALSE, giveRE=TRUE) ## The same, with another seed: set.seed(64) showSys.time( ## m2 <- glmrobMT(x=X1, y=y) m2 <- glmrob(Diversity ~ ., data=possumDiv, family=poisson, method="MT") ) summary(warnings()) stopifnot(m2$converged) if(FALSE) dput(signif(unname(m2$initial), 13)) ## --> assert.EQ(m2$initial, ## so this is *not* platform (32bit/64bit) dependent: c(-1.204304813829, 0.02776038445201, -0.3680174045842, 0.04325746912892, 0.03895315289169, 0.04537145479989, 0.02847987541025, 0.07073207523212, 0.355491639539, 0.1822955449528, 0.1323720331562, -0.3419939094877) , tol = 1e-12, check.attributes=FALSE, giveRE=TRUE) dput(signif(unname(coef(m2)), 11)) ## --> beta2 <- list(i686 = ## florence?, Nov. 2014 (or even Dec 2013) ## c(-0.83698669149, 0.0085587296184, -0.16778044558, 0.040960021262, ## 0.042402954975, 0.063188868629, 0.018630275088, -0.0061015509403, ## 0.11385896307, 0.090966386294, -0.02572887737, -0.66945784056) ## f32sfs-2, July 2015, "R CMD .." 
(non-interactive!): c(-0.83644647378, 0.0085365454367, -0.16770422458, 0.040958113098, 0.04238796628, 0.063174324485, 0.018618360015, -0.0062357940483, 0.11380146782, 0.090988141307, -0.025500338638, -0.66949122367) ## f32sfs-2, July 2015, interactive ## c(-0.83675287265, 0.0085383816807, -0.16763418359, 0.040968861778, ## 0.042399340988, 0.063148815999, 0.018624181637, -0.0061320761338, ## 0.11423331389, 0.0912474233, -0.025508101291, -0.66971416165) , "x86_64" = c(-0.83687097624, 0.0085341676033, -0.1674299545, 0.040968820903, 0.042397459287, 0.063159075944, 0.018625582804, -0.0063140636571, 0.11426134017, 0.091317308575, -0.025373078819, -0.66957444238) , "x86_64--no-long-double" = c(-0.8370370234, 0.008538975248, -0.1671607287, 0.040976013861, 0.042393702043, 0.06314300867, 0.018631172062, -0.0063382132536, 0.11445827857, 0.091409918881, -0.025308999173, -0.66935766843) , "x86_64_F30" = ## Fedora 30, R-devel (2019-06-13): c(-0.83651130836, 0.0085272636623, -0.16777225909, 0.040958046751, 0.042398611622, 0.063169934556, 0.018622060538, -0.0067041556052, 0.11358762483, 0.090950270043, -0.025393966426, -0.66916946118) ) ## just FYI: difference 32-bit vs 64-bit: assert.EQ(beta2[[1]], beta2[[2]], tol = 0.001, check.attributes=FALSE, giveRE=TRUE) ## Mean relative difference: 0.0009487 [2013-12 approx.] assert.EQ(beta2[[2]], beta2[[3]], tol = 0.001, check.attributes=FALSE, giveRE=TRUE) ## Mean relative difference: 0.0005119 [2014-11] ## when bypassing BLAS in matprod() vvvvv seen 0.0002766 [Lx 64b]: assert.EQ(coef(m2), beta2[[arch]], tol = 0.001, # typically 1e-10 is ok !! check.attributes=FALSE, giveRE=TRUE) ## slight changes of algorithm often change the above by ~ 4e-4 !!! summary(warnings()) ###---- Model Selection ----- ## (not yet) [ MM had this in ../../robGLM1/tests/quasi-possum.R ] cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/comedian-tst.R0000644000176200001440000000452714236516416016317 0ustar liggesusersrequire(robustbase) ## instead of relying on system.file("test-tools-1.R", package="Matrix"): source(system.file("xtraR/test-tools.R", package = "robustbase")) showProc.time() data(hbk); hbk.x <- data.matrix(hbk[, 1:3]) covComed(hbk.x) covComed(hbk.x, n.iter=4) showProc.time() data(radarImage) covComed(radarImage) covComed(radarImage[,3:5], n.iter = 5) showProc.time() data(bushfire) ; covComed(bushfire) data(heart); covComed(heart[, 1:2]) data(starsCYG); covComed(starsCYG) data(stackloss); covComed(stack.x) showProc.time() if(!robustbase:::doExtras()) quit() ## if ( doExtras ) ----------------------------------------------------------------- ## ============== i.rr <- c("raw.cov", "raw.center", "cov", "center") n <- 1024 ; p <- 7 set.seed(47) showSys.time( rX <- replicate(100, covComed(matrix(rnorm(n*p), n,p))[i.rr], simplify=FALSE)) ## Computing simulation-average (cov / center) <==> looking at Bias ## _FIXME_ Really look at "MSE = Var + Bias^2" -- or something like ## "simulation-average Squared Error or other Loss" C0 <- Reduce("+", lapply(rX, `[[`, "raw.cov")) / length(rX) C. <- Reduce("+", lapply(rX, `[[`, "cov")) / length(rX) round(1000 * C0) round(1000 * C.) assert.EQ(C0, diag(p), tol= 0.04, giveRE=TRUE) #-> 0.02805 assert.EQ(C., diag(p), tol= 0.09, giveRE=TRUE) #-> 0.06475 ## Hmm.. raw.cov is better than cov ?? 
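## Hedged sketch of the "MSE = Var + Bias^2" idea from the _FIXME_ comment
## above (an illustration only, not an additional assertion): per-replicate
## mean squared error of the two covariance estimates around the true
## covariance, which is the identity matrix in this simulation.
SE.raw <- vapply(rX, function(r) mean((r[["raw.cov"]] - diag(p))^2), numeric(1))
SE.cov <- vapply(rX, function(r) mean((r[["cov"]]     - diag(p))^2), numeric(1))
rbind(raw.cov = summary(SE.raw), cov = summary(SE.cov))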
c00 <- Reduce("+", lapply(rX, `[[`, "raw.center")) / length(rX) c0 <- Reduce("+", lapply(rX, `[[`, "center")) / length(rX) stopifnot(print(sqrt(mean( (c00 - rep(0, p))^2 ))) < 0.005)# 0.004188 stopifnot(print(sqrt(mean( (c0 - rep(0, p))^2 ))) < 0.005)# 0.003434 n <- 4096 ; p <- 11 set.seed(17) showSys.time( r4 <- replicate(64, covComed(matrix(10+rnorm(n*p), n,p))[i.rr], simplify=FALSE)) C0 <- Reduce("+", lapply(r4, `[[`, "raw.cov")) / length(r4) C. <- Reduce("+", lapply(r4, `[[`, "cov")) / length(r4) round(1000 * C0) round(1000 * C.) assert.EQ(C0, diag(p), tol = 0.025, giveRE=TRUE) # 0.0162 assert.EQ(C., diag(p), tol = 0.06 , giveRE=TRUE) # 0.0486 ## Again... raw.cov better than cov ?? c00 <- Reduce("+", lapply(r4, `[[`, "raw.center")) / length(r4) c0 <- Reduce("+", lapply(r4, `[[`, "center")) / length(r4) assert.EQ(c00, rep(10, p), tol = 2e-4, giveRE=TRUE)# 7.97267e-05 = "raw" is better ? assert.EQ(c0 , rep(10, p), tol = 2e-4, giveRE=TRUE)# 0.0001036 robustbase/tests/binom-no-x.R0000644000176200001440000000102012271657124015673 0ustar liggesusers library(robustbase) ### "intercept only" : "no x" set.seed(101) k <- rbinom(100, size=3, pr = 0.2) y <- cbind(k, n.k = 3 - k) gg <- glm(y ~ 1, family = "binomial") (cfK <- coef(summary(gg))) Inf. <- 1e5 # FIXME (note that much larger values *deteriorate* slightly!) rg.Inf <- glmrob(y ~ 1, family = "binomial", tcc= Inf.) stopifnot(all.equal(unname(cfK[1:2]), unname(unlist(coef(summary(rg.Inf))[1:2])), tolerance = 1e-7))# 4.09e-8 rg.0 <- glmrob(y ~ 1, family = "binomial") summary(rg.0) str(rg.0, digits= 6) robustbase/tests/lmrob-psifns.Rout.save0000644000176200001440000007456214437610457020041 0ustar liggesusers R version 4.3.0 Patched (2023-06-03 r84496) -- "Already Tomorrow" Copyright (C) 2023 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > #### Tests psi(), chi(),... etc and tuning.psi, tuning.chi : > > library(robustbase) > source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) > source(system.file("xtraR/test-tools.R", package = "robustbase")) # assert.EQ > > ### (1) Test the functions themselves -------------------------------- > if(!dev.interactive(orNone=TRUE)) pdf("rob-psifns.pdf") > > ## Simple version, no error checking, no derivative, nothing: > psiGGW <- function(x, a,b,c) { + ifelse((ax <- abs(x)) < c, + x, + ifelse((ea <- -((ax-c)^b)/(2*a)) < -708.4, 0, x * exp(ea))) + } > assert.EQ(Mpsi (5:9, cc=c(0, a=1/8,b=2,c=1/8, NA), "GGW"), + psiGGW(5:9, a=1/8,b=2,c=1/8), tol = 1e-13) > > > ## Check that psi() |-> works; ditto for +-Inf, NA,.. 
> cG <- c(-.5, 1, .95, NA) # one of the 6 "builtin"s > d0 <- numeric() > IoI <- c(-Inf, 0, Inf) > NN <- c(NaN, NA) > > cGs <- list( c(-.4, 1.5, 0.85, NA) + , c(-.4, 1.5 , 0.90, NA) + , c(-.4, 1.5 , 0.95, NA) + , c(-.4, 1.5, 0.975, NA) + , c(-.4, 1.5, 0.99 , NA) + , c(-.4, 1.5, 0.995, NA) + ## + , c(-.4, 1.25, 0.975, NA) + , c(-.4, 1.1, 0.975, NA) + , c(-.4, 1.025, 0.975, NA) + , c(-.4, 1.0125, 0.975, NA) + ## + ## FIXME , c(-.1, 1.25, 0.95, NA) + ## FIXME , c(-.1, 1.25, 0.99, NA) + ) > st <- system.time( + cG.cnst <- lapply(cGs, function(cc) + lmrob.control(psi = "ggw", tuning.psi = cc)$tuning.psi) + ) > cat('Time for constants computation of tuning.psi: ', st,'\n') Time for constants computation of tuning.psi: 0.455 0.05 0.508 0 0 > cGct <- t(sapply(cG.cnst, attr, "constants"))[,-1] > colnames(cGct) <- c("a","b","c", "rhoInf") > signif(cGct, 4) a b c rhoInf [1,] 1.0170 1.500 0.4996 2.384 [2,] 1.2810 1.500 0.5826 3.242 [3,] 1.8100 1.500 0.7335 5.139 [4,] 2.4430 1.500 0.8959 7.666 [5,] 3.4380 1.500 1.1250 12.090 [6,] 4.2970 1.500 1.3050 16.280 [7,] 1.3780 1.250 1.4350 7.654 [8,] 1.0140 1.100 1.7000 7.643 [9,] 0.8873 1.025 1.8130 7.712 [10,] 0.8693 1.012 1.8300 7.733 > assert.EQ(sapply(cG.cnst, function(cc) MrhoInf(cc, "ggw")), + cGct[,"rhoInf"], tol = 1e-8) > > > ## Do these checks for a *list* of (c.par, psi) combinations: > c.psi.list <- list( + list(1.345, "Huber"), + list(1.8, "Huber"), + list(cG, "GGW"), + list(c(2,4,8), "Hampel"), + list(c(1.5,3.5,8)*0.90, "Hampel"), + list(par=c(-.5,1.5,.95,NA), "lqq"), + list(bcs=c(1, 1, 1.25), "lqq"), + list(1.1, "optimal"), + list(0.1, "optimal"), + list(2.3, "Welsh") + ) > > for(c.psi in c.psi.list) { + tPar <- c.psi[[1]]; psi <- c.psi[[2]] + stopifnot(is.numeric(tPar), is.character(psi)) + cat("Psi function ", psi,"; tuning par. c[]= (", + paste(formatC(tPar, width=1), collapse=", "),")\n") + for(FUN in list(Mpsi, Mchi, Mwgt)) + stopifnot(identical(d0, FUN(d0, tPar, psi=psi)), + identical(NN, FUN(NN, tPar, psi=psi))) + stopifnot(identical(c(0,1,0), Mwgt(IoI, tPar,psi=psi))) + if(isPsi.redesc(psi)) + stopifnot(identical(c(0,0,0), Mpsi(IoI, tPar,psi=psi)), + identical(c(1,0,1), Mchi(IoI, tPar,psi=psi))) + else if(psi == "Huber") { + stopifnot(identical(c(-tPar,0,tPar), Mpsi(IoI, tPar,psi=psi)), + identical(c( Inf,0, Inf), Mchi(IoI, tPar,psi=psi))) + } + cat("chkPsi..(): ") + isHH <- psi %in% c("Huber", "Hampel") # not differentiable + tol <- switch(tolower(psi), + "huber"=, "hampel"= c(.001, 1.0), + "optimal" = .008, + "ggw" = c(5e-5, 5e-3, 1e-12), + "lqq" = c(1e-5, 5e-5, 1e-5, .08)) # .08 needed for bcs=c(1, 1, 1.25) + if(is.null(tol)) tol <- 1e-4 # default otherwise + cc <- chkPsi..(c(-5, 10), psi=psi, par=tPar, doD2 = !isHH, tol=tol) + ## -------- + cc. <- cc[!is.na(cc)] + if(is.logical(cc) && all(cc.)) + cat(" [Ok]\n") + else { + cat(" not all Ok:\n") + print(cc.[cc. != "TRUE"]) + } + cat("------------------------\n\n") + } Psi function Huber ; tuning par. c[]= ( 1.345 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function Huber ; tuning par. c[]= ( 1.8 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function GGW ; tuning par. c[]= ( -0.5, 1, 0.95, NA ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function Hampel ; tuning par. c[]= ( 2, 4, 8 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function Hampel ; tuning par. 
c[]= ( 1.35, 3.15, 7.2 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function lqq ; tuning par. c[]= ( -0.5, 1.5, 0.95, NA ) chkPsi..(): [Ok] ------------------------ Psi function lqq ; tuning par. c[]= ( 1, 1, 1.25 ) chkPsi..(): [Ok] ------------------------ Psi function optimal ; tuning par. c[]= ( 1.1 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function optimal ; tuning par. c[]= ( 0.1 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function Welsh ; tuning par. c[]= ( 2.3 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ > > > ## Nice plots -- and check derivatives ---- > > head(x. <- seq(-5, 10, length=1501)) [1] -5.00 -4.99 -4.98 -4.97 -4.96 -4.95 > ## [separate lines, for interactive "play": ] > stopifnot(chkPsiDeriv(p.psiFun(x., "LQQ", par=c(-.5,1.5,.95,NA)))) > stopifnot(chkPsiDeriv(p.psiFun(x., "GGW", par= cG))) > stopifnot(chkPsiDeriv(p.psiFun(x., "optimal", par=2))) > stopifnot(chkPsiDeriv(p.psiFun(x., "Hampel", + par = ## Default, but rounded: + round(c(1.5, 3.5, 8) * 0.9016085, 1)), + tol = 1e-3)) > > stopifnot(chkPsiDeriv(p.psiFun(x., "biweight", par = 4))) > stopifnot(chkPsiDeriv(p.psiFun(x., "Welsh", par = 1.5))) > stopifnot(chkPsiDeriv(p.psiFun(x., "huber", par = 1.5), + tol = c(1e-10, 5e-3))) > ## "huber"-rho via Mpsi(*, deriv=-1) was badly wrong till 2018-06 > > ## The same 6, all in one plot: > op <- par(mfrow=c(3,2), mgp = c(1.5, .6, 0), mar = .1+c(3,3,2,.5)) > p.psiFun2(x., "LQQ", par=c(-.5,1.5,.95,NA)) > p.psiFun2(x., "GGW", par= cG) > p.psiFun2(x., "optimal", par=1.3) > p.psiFun2(x., "Hampel", par = round(c(1.5, 3.5, 8) * 0.9016085, 1)) > p.psiFun2(x., "biweight", par = 4) > p.psiFun2(x., "Welsh", par = 1.5) > par(op) > > > ### (2) Test them as arguments of lmrob() or lmrob.control(): ----- > > data(aircraft) > > set.seed(1) > summary(mp0 <- lmrob(Y ~ ., data = aircraft, psi = 'bisquare', method = 'SMDM')) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "bisquare") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -8.5552 -1.8395 -0.2113 2.8205 46.6311 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 6.8785690 6.5321336 1.053 0.306256 X1 -3.2192206 1.0907887 -2.951 0.008543 ** X2 1.5876658 0.7442079 2.133 0.046912 * X3 0.0018266 0.0004293 4.255 0.000477 *** X4 -0.0008677 0.0003685 -2.355 0.030083 * --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.335 Multiple R-squared: 0.7958, Adjusted R-squared: 0.7504 Convergence in 22 IRWLS iterations Robustness weights: observation 22 is an outlier with |weight| = 0 ( < 0.0043); 3 weights are ~= 1. The remaining 19 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 
0.3958 0.8772 0.9738 0.9139 0.9892 0.9972 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 4.348e-03 8.399e-08 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(2) > summary(mp1 <- update(mp0, psi = 'optimal')) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "optimal") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -6.6691 -2.4291 0.2249 3.8876 54.2841 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 9.5007403 5.5576768 1.709 0.10455 X1 -3.0487969 0.9158751 -3.329 0.00374 ** X2 1.2100330 0.6469186 1.870 0.07777 . X3 0.0013810 0.0003910 3.532 0.00238 ** X4 -0.0005549 0.0003269 -1.697 0.10687 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 4.545 Multiple R-squared: 0.8159, Adjusted R-squared: 0.775 Convergence in 1 IRWLS iterations Robustness weights: 2 observations c(16,22) are outliers with |weight| = 0 ( < 0.0043); 21 weights are ~= 1. Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 4.047e-01 5.000e-01 1.060e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 4.348e-03 8.399e-08 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "optimal" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(3) > summary(mp2 <- update(mp0, psi = 'ggw')) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "ggw") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -8.4418 -1.7993 -0.1711 2.8466 47.0906 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 6.8192703 6.5041383 1.048 0.30831 X1 -3.1718079 1.0869559 -2.918 0.00918 ** X2 1.5705706 0.7510236 2.091 0.05096 . X3 0.0017983 0.0004300 4.182 0.00056 *** X4 -0.0008434 0.0003691 -2.285 0.03466 * --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.367 Multiple R-squared: 0.7942, Adjusted R-squared: 0.7484 Convergence in 20 IRWLS iterations Robustness weights: observation 22 is an outlier with |weight| <= 0.00044 ( < 0.0043); 16 weights are ~= 1. 
The remaining 6 ones are 3 4 12 16 17 19 0.9892 0.9891 0.8770 0.4139 0.9796 0.9839 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 tuning.chi4 -5.000e-01 1.500e+00 NA 5.000e-01 bb tuning.psi1 tuning.psi2 tuning.psi3 5.000e-01 -5.000e-01 1.500e+00 9.500e-01 tuning.psi4 refine.tol rel.tol scale.tol NA 1.000e-07 1.000e-07 1.000e-10 solve.tol zero.tol eps.outlier eps.x 1.000e-07 1.000e-10 4.348e-03 8.399e-08 warn.limit.reject warn.limit.meanrw 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "ggw" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(4) > summary(mp3 <- update(mp0, psi = 'welsh')) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "welsh") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -8.7243 -1.9199 -0.2471 2.8060 45.9435 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 6.6404575 6.5552085 1.013 0.324482 X1 -3.2329194 1.0954988 -2.951 0.008546 ** X2 1.6174887 0.7443222 2.173 0.043367 * X3 0.0018656 0.0004279 4.360 0.000378 *** X4 -0.0008941 0.0003680 -2.430 0.025803 * --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.408 Multiple R-squared: 0.7958, Adjusted R-squared: 0.7504 Convergence in 18 IRWLS iterations Robustness weights: observation 22 is an outlier with |weight| <= 0.0003 ( < 0.0043); 2 weights are ~= 1. The remaining 20 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.4284 0.8583 0.9701 0.9112 0.9874 0.9985 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 5.774e-01 5.000e-01 2.110e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 4.348e-03 8.399e-08 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "welsh" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(5) > summary(mp4 <- update(mp0, psi = 'ggw', tuning.psi = c(-.5, 1.5, 0.85, NA), + tuning.chi = c(-0.5, 1.5, NA, 0.5))) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "ggw", tuning.psi = c(-0.5, 1.5, 0.85, NA), tuning.chi = c(-0.5, 1.5, NA, 0.5)) \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -7.2207 -2.2226 0.3446 3.5745 52.2885 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 9.6540916 6.6414237 1.454 0.16327 X1 -3.2353135 1.0947329 -2.955 0.00847 ** X2 1.3343505 0.7636515 1.747 0.09762 . X3 0.0015256 0.0004619 3.303 0.00395 ** X4 -0.0006913 0.0003903 -1.771 0.09343 . --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.117 Multiple R-squared: 0.7832, Adjusted R-squared: 0.7351 Convergence in 15 IRWLS iterations Robustness weights: observation 22 is an outlier with |weight| <= 2.8e-08 ( < 0.0043); 15 weights are ~= 1. 
The remaining 7 ones are 3 4 12 16 17 19 23 0.87262 0.79602 0.73029 0.06024 0.96761 0.73117 0.97769 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 tuning.chi4 -5.000e-01 1.500e+00 NA 5.000e-01 bb tuning.psi1 tuning.psi2 tuning.psi3 5.000e-01 -5.000e-01 1.500e+00 8.500e-01 tuning.psi4 refine.tol rel.tol scale.tol NA 1.000e-07 1.000e-07 1.000e-10 solve.tol zero.tol eps.outlier eps.x 1.000e-07 1.000e-10 4.348e-03 8.399e-08 warn.limit.reject warn.limit.meanrw 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "ggw" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(6) > summary(mp5 <- update(mp0, psi = 'ggw', + tuning.psi = c(-0.5, 1.0, 0.95, NA), + tuning.chi = c(-0.5, 1.0, NA, 0.5))) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "ggw", tuning.psi = c(-0.5, 1, 0.95, NA), tuning.chi = c(-0.5, 1, NA, 0.5)) \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -8.4182 -1.7447 -0.1322 2.8735 47.0376 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 6.7557666 6.0919101 1.109 0.282039 X1 -3.1767976 1.0196958 -3.115 0.005974 ** X2 1.5756461 0.7050185 2.235 0.038339 * X3 0.0018004 0.0004003 4.497 0.000279 *** X4 -0.0008432 0.0003446 -2.447 0.024897 * --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.502 Multiple R-squared: 0.7941, Adjusted R-squared: 0.7484 Convergence in 19 IRWLS iterations Robustness weights: 21 weights are ~= 1. The remaining 2 ones are 16 22 0.423706 0.005042 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 tuning.chi4 -5.000e-01 1.000e+00 NA 5.000e-01 bb tuning.psi1 tuning.psi2 tuning.psi3 5.000e-01 -5.000e-01 1.000e+00 9.500e-01 tuning.psi4 refine.tol rel.tol scale.tol NA 1.000e-07 1.000e-07 1.000e-10 solve.tol zero.tol eps.outlier eps.x 1.000e-07 1.000e-10 4.348e-03 8.399e-08 warn.limit.reject warn.limit.meanrw 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "ggw" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(7) > summary(mp6 <- update(mp0, psi = 'hampel')) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "hampel") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -8.706 -1.937 -0.234 2.825 46.037 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 6.4297956 6.7818616 0.948 0.35564 X1 -3.1885813 1.1366401 -2.805 0.01170 * X2 1.6224243 0.7839018 2.070 0.05315 . X3 0.0018590 0.0004445 4.182 0.00056 *** X4 -0.0008851 0.0003832 -2.310 0.03295 * --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.475 Multiple R-squared: 0.7946, Adjusted R-squared: 0.7489 Convergence in 11 IRWLS iterations Robustness weights: observation 22 is an outlier with |weight| = 0 ( < 0.0043); 20 weights are ~= 1. 
The remaining 2 ones are 12 16 0.8504 0.4975 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 bb 3.179e-01 7.417e-01 1.695e+00 5.000e-01 tuning.psi1 tuning.psi2 tuning.psi3 refine.tol 1.352e+00 3.156e+00 7.213e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 4.348e-03 8.399e-08 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "hampel" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(8) > ctr7 <- lmrob.control(psi = 'ggw', + tuning.psi = c(-0.3, 1.4, 0.95, NA), + tuning.chi = c(-0.3, 1.4, NA, 0.5)) > ctr7$tuning.psi ## -> "constants" [1] -0.30 1.40 0.95 NA attr(,"constants") [1] 0.0000000 2.0011562 1.4000000 0.4125717 5.6874488 > ctr7$tuning.chi [1] -0.3 1.4 NA 0.5 attr(,"constants") [1] 0.00000000 0.24044569 1.40000000 0.09081713 0.27558437 > summary(mp7 <-lmrob(Y ~ ., data = aircraft, control = ctr7)) # *not* converging in k.max=200 Call: lmrob(formula = Y ~ ., data = aircraft, control = ctr7) \--> method = "S" Residuals: Min 1Q Median 3Q Max -7.6919 -1.9269 0.1767 3.7081 48.5801 Algorithm did not converge Coefficients of the *initial* S-estimator: Estimate Std. Error t value Pr(>|t|) (Intercept) 13.155499 NA NA NA X1 -4.349383 NA NA NA X2 1.647243 NA NA NA X3 0.001817 NA NA NA X4 -0.001035 NA NA NA Robustness weights: 2 observations c(16,22) are outliers with |weight| <= 0.0003 ( < 0.0043); 4 weights are ~= 1. The remaining 17 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.03668 0.20120 0.58420 0.52290 0.71930 0.99110 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 tuning.chi4 -3.000e-01 1.400e+00 NA 5.000e-01 bb tuning.psi1 tuning.psi2 tuning.psi3 5.000e-01 -3.000e-01 1.400e+00 9.500e-01 tuning.psi4 refine.tol rel.tol scale.tol NA 1.000e-07 1.000e-07 1.000e-10 solve.tol zero.tol eps.outlier eps.x 1.000e-07 1.000e-10 4.348e-03 8.399e-08 warn.limit.reject warn.limit.meanrw 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "ggw" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) Warning messages: 1: In lmrob.S(x, y, control = control) : S refinements did not converge (to refine.tol=1e-07) in 200 (= k.max) steps 2: In lmrob.fit(x, y, control, init = init) : initial estim. 'init' not converged -- will be return()ed basically unchanged > > set.seed(9) > summary(mp8 <- update(mp0, psi = 'lqq')) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "lqq") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -8.280 -1.717 -0.138 2.857 47.743 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 7.0858566 6.3506262 1.116 0.279194 X1 -3.1657682 1.0600204 -2.987 0.007914 ** X2 1.5402736 0.7336570 2.099 0.050145 . X3 0.0017612 0.0004222 4.171 0.000574 *** X4 -0.0008188 0.0003616 -2.265 0.036118 * --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.319 Multiple R-squared: 0.7944, Adjusted R-squared: 0.7487 Convergence in 19 IRWLS iterations Robustness weights: observation 22 is an outlier with |weight| = 0 ( < 0.0043); 16 weights are ~= 1. 
The remaining 6 ones are 3 4 12 16 17 19 0.9861 0.9842 0.8921 0.3720 0.9820 0.9782 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 tuning.chi4 -5.000e-01 1.500e+00 NA 5.000e-01 bb tuning.psi1 tuning.psi2 tuning.psi3 5.000e-01 -5.000e-01 1.500e+00 9.500e-01 tuning.psi4 refine.tol rel.tol scale.tol NA 1.000e-07 1.000e-07 1.000e-10 solve.tol zero.tol eps.outlier eps.x 1.000e-07 1.000e-10 4.348e-03 8.399e-08 warn.limit.reject warn.limit.meanrw 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "lqq" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(10) ## c(.) drops attributes : > ctr9 <- lmrob.control(psi = 'lqq', tuning.psi = c(ctr7$tuning.psi), tuning.chi = c(ctr7$tuning.chi)) > ctr9$tuning.psi [1] -0.30 1.40 0.95 NA attr(,"constants") [1] 1.3007171 0.9290836 1.3000000 > ctr9$tuning.chi [1] -0.3 1.4 NA 0.5 attr(,"constants") [1] 0.2763568 0.1973977 1.3000000 > ## Confirm these constants above (against the ones we got earlier) > ## by recomputing them using higher accuracy : > (tpsi. <- do.call(.psi.lqq.findc, c(ctr9$tuning.psi, list(rel.tol=1e-11, tol=1e-8)))) [1] 1.3007495 0.9291068 1.3000000 > (tchi. <- do.call(.psi.lqq.findc, c(ctr9$tuning.chi, list(rel.tol=1e-11, tol=1e-8)))) [1] 0.2763425 0.1973875 1.3000000 > (tol4 <- .Machine$double.eps^.25) [1] 0.0001220703 > > Rver <- getRversion() > integr.bug <- "2.12.0" <= Rver && Rver <= "3.0.1" > integr.bug [1] FALSE > if(integr.bug) tol4 <- 8*tol4 > > assert.EQ(attr(ctr9$tuning.psi, "constants"), tpsi., tol=tol4, giveRE=TRUE) Mean relative difference: 2.495013e-05 > assert.EQ(attr(ctr9$tuning.chi, "constants"), tchi., tol=tol4, giveRE=TRUE) Mean relative difference: 5.155651e-05 > > summary(mp9 <- lmrob(Y ~ ., data = aircraft, control = ctr9)) Call: lmrob(formula = Y ~ ., data = aircraft, control = ctr9) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -10.4061 -2.6517 -0.4156 3.7945 38.6444 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 3.5003005 12.9625202 0.270 0.79021 X1 -3.2953770 0.9467913 -3.481 0.00267 ** X2 1.8957842 0.9928099 1.910 0.07227 . X3 0.0022793 0.0014340 1.589 0.12936 X4 -0.0011563 0.0008966 -1.290 0.21347 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 6.127 Multiple R-squared: 0.7973, Adjusted R-squared: 0.7523 Convergence in 33 IRWLS iterations Robustness weights: 17 weights are ~= 1. 
The remaining 6 ones are 3 4 12 16 17 22 0.97698 0.99840 0.82584 0.78662 0.91318 0.06838 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 tuning.chi4 -3.000e-01 1.400e+00 NA 5.000e-01 bb tuning.psi1 tuning.psi2 tuning.psi3 5.000e-01 -3.000e-01 1.400e+00 9.500e-01 tuning.psi4 refine.tol rel.tol scale.tol NA 1.000e-07 1.000e-07 1.000e-10 solve.tol zero.tol eps.outlier eps.x 1.000e-07 1.000e-10 4.348e-03 8.399e-08 warn.limit.reject warn.limit.meanrw 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "lqq" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) > > > cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' Time elapsed: 2.712 0.339 3.104 0.009 0.01 > > proc.time() user system elapsed 2.722 0.349 3.104 robustbase/tests/mc-etc.R0000644000176200001440000000670613325654361015101 0ustar liggesusers #### Testing medcouple and related functions ### Strict (and timing) tests are in ./mc-strict.R ### ~~~~~~~~~~~~ ### Here, we produce output which is *compared* with ./mc-etc.Rout.save library(robustbase) source(system.file("xtraR/mcnaive.R", package = "robustbase"))# mcNaive() ## This is somewhat interesting {diff the output !} ## particularly since *most* give the 'not found' diagnostic set.seed(17) for(n in 1:100) { cat(sprintf("n =%3d:\n------\n", n)) mcval <- mc(rlnorm(n), trace=TRUE, doRefl=FALSE) cat(sprintf(" --> mc(rlnorm(%d)) = %.6f\n", n, mcval)) } allEQ <- function(x,y) all.equal(x,y, tolerance = 1e-12) x3 <- c(-2, rep(-1,4), rep(0,6), 2, 2, 2:4) mcNaive(x3,"h.use") # 1/3 mcNaive(x3,"simple")# 0 mcComp <- robustbase:::mcComp mcComp. <- function (x, doScale, doReflect = FALSE, maxit = 15, eps1 = 1e-13, eps2 = eps1, trace.lev = 1, ...) { mcComp(x, doReflect=doReflect, doScale=doScale, maxit=maxit, eps1=eps1, eps2=eps2, trace.lev=trace.lev, ...) } try( mc(x3, doRefl = FALSE, maxit = 15, trace = 3)) ## "non-convergence" (32-bit) str(mcComp.(-x3, doScale=TRUE, trace = 4)) ### And here is the "real" problem of the whole 'eps' idea: x4 <- c(1:5,7,10,15,25, 1e15) ## this is also in mc-strict.R (but differently) mcNaive(x4,"h.use") # 0.5833333 mcNaive(x4,"simple")# == " == 7/12 mc(x4) # now ok, == 7/12 str(mcComp.( x4, doScale=TRUE, trace= 3))## = 0: conv.quickly str(mcComp.(-x4, doScale=TRUE, trace= 3)) # *not* conv! 
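## (Editorial sketch, not from the original file:) for an even sample size with no ties,
## the medcouple is simply the median of the kernel
##     h(x_i, x_j) = ((x_j - m) + (x_i - m)) / (x_j - x_i),   m = median(x),
## taken over all pairs with x_i < m < x_j.  A naive illustration ('mcKernel' is an
## ad-hoc name; it ignores the special tie handling that mc() and mcNaive() perform):
mcKernel <- function(x) {
    m <- median(x)
    xl <- x[x < m]; xr <- x[x > m]
    median(outer(xr, xl, function(r, l) ((r - m) + (l - m)) / (r - l)))
}
mcKernel(x4) ## should essentially reproduce mc(x4) == mcNaive(x4, "h.use") == 7/12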
if(FALSE) { ## a much more extreme eps seems the cure: str(mcComp.( x4, doScale=TRUE, eps1=.Machine$double.xmin)) str(mcComp.(-x4, doScale=TRUE, eps1=.Machine$double.xmin)) } ### Examples "like x3" (non-convergence on 32-bit) xClist <- list(## length 5 : c(0,0, 1, 3,3), c(0,0, 1, 3:4), ## ## length 6 : c(0,0, 2, 4:6), c(0,0, 2, 3, 4, 6), c(0,0, 4, 5, 7, 8), c(0, 1,1, 2, 6,6), c(0, 3,3, 4, 6,6), c(0,0, 1, 3, 5,5), c(0,0, 1, 4,4, 6), c(0,0, 1, 4,4, 7), c(0,0, 1, 5,5, 6), ## n = 9 : c(-2,-2,-2, -1,-1, 1,1,1, 3), c(-3,-1,-1, 0, 1, 2,2,2,2) ) rlis <- lapply(xClist, function(x) try(mc(x, maxit=9), silent=TRUE)) table(sapply(rlis, class)) ## if(R.version$arch == "x86_64") { print(unlist(rlis)) rl2 <- lapply(xClist, mc, maxit=9) ##, eps1= 1e-10) stopifnot(allEQ(rlis, rl2), allEQ(unlist(rlis), sapply(xClist, mcNaive))) ##} set.seed(47) for(n in 3:60) { cat(" ") x <- round(2 * rnorm(n)) # many ties, often at median -- not converging ## if(R.version$arch == "x86_64") { ## non-convergence BUG rarely and only on 32-bit (solved, MK) mc1 <- mc(x) mc2 <- mcNaive(x, method = "simple") mc3 <- mcNaive(x, method = "h.use") stopifnot(allEQ(mc1, mc3)) if(mc2 != mc3) { cat("d"); if(!isTRUE(allEQ(mc2, mc3))) cat("!!") } ## } cat(".") }; cat("\n") cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' quit('no') ## ------ ## Find short example of non-convergence (32-bit) --> for above xClist n <- 9 for(ii in 1:100) { x <- round(2 * rnorm(n)) # many ties, often at median -- not converging mc1 <- mc(x) } ## x5 <- c(-3, -3, -2, -1, -1, 0, 0, 1, 2, 2, 3, 4) x6 <- c(-5, -2, -1, -1, -1, 0, 0, 0, 2, 2, 2, 4) robustbase/tests/m-s-estimator.R0000644000176200001440000001433414440373076016426 0ustar liggesusers## Test implementation of M-S estimator require(robustbase) source(system.file("xtraR/m-s_fns.R", package = "robustbase", mustWork=TRUE)) source(system.file("xtraR/ex-funs.R", package = "robustbase", mustWork=TRUE)) source(system.file("xtraR/test-tools.R", package = "robustbase")) # assert.EQ ## dataset with factors and continuous variables: data(education) education <- within(education, Region <- factor(Region)) ## for testing purposes: education2 <- within(education, Group <- factor(rep(1:3, length.out=length(Region)))) ## Test splitFrame (type fii is the only problematic type) testFun <- function(formula, x1.idx) { obj <- lm(formula, education2) mf <- obj$model ret <- splitFrame(mf, type="fii") if (missing(x1.idx)) { print(ret$x1.idx) return(which(unname(ret$x1.idx))) } stopifnot(identical(x1.idx, which(unname(ret$x1.idx)))) } testFun(Y ~ 1, integer(0)) testFun(Y ~ X1*X2*X3, integer(0)) testFun(Y ~ Region + X1 + X2 + X3, 1:4) testFun(Y ~ 0 + Region + X1 + X2 + X3, 1:4) testFun(Y ~ Region*X1 + X2 + X3, c(1:5, 8:10)) testFun(Y ~ Region*X1 + X2 + X3 + Region*Group, c(1:5, 8:18)) testFun(Y ~ Region*X1 + X2 + X3 + Region*Group*X2, c(1:6, 8:29)) testFun(Y ~ Region*X1 + X2 + Region*Group*X2, 1:28) testFun(Y ~ Region*X1 + X2 + Region:Group:X2, 1:21) testFun(Y ~ Region*X1 + X2*X3 + Region:Group:X2, c(1:6, 8:10, 12:23)) testFun(Y ~ (X1+X2+X3+Region)^2, c(1:7,10:12,14:19)) testFun(Y ~ (X1+X2+X3+Region)^3, c(1:19, 21:29)) testFun(Y ~ (X1+X2+X3+Region)^4, 1:32) testFun(Y ~ Region:X1:X2 + X1*X2, c(1:1, 4:7)) control <- lmrob.control() cntrlT1 <- lmrob.control(trace.lev=1) f.lm <- lm(Y ~ Region + X1 + X2 + X3, education) splt <- splitFrame(f.lm$model) stopifnot(identical(names(splt$x1.idx), names(coef(f.lm))), unname(splt$x1.idx) == c(rep(TRUE, 4), rep(FALSE, 3)) ) y <- education$Y ## test orthogonalizing x1 <- 
splt$x1 x2 <- splt$x2 tmp <- lmrob.lar(x1, y, control) y.tilde <- tmp$resid t1 <- tmp$coef x2.tilde <- x2 T2 <- matrix(0, nrow=ncol(x1), ncol=ncol(x2)) for (i in 1:ncol(x2)) { tmp <- lmrob.lar(x1, x2[,i], control) x2.tilde[,i] <- tmp$resid T2[,i] <- tmp$coef } T2 set.seed(10) mss1 <- m_s_subsample(x1, x2.tilde, y.tilde, cntrlT1, orth = FALSE) mss1 <- within(mss1, b1 <- drop(t1 + b1 - T2 %*% b2)) stopifnot(all.equal(30.81835, mss1$scale, tol=1e-7)) set.seed(10) mss2 <- m_s_subsample(x1, x2, y, cntrlT1, orth = TRUE) stopifnot(all.equal(mss1, mss2)) res <- vector("list", 100) set.seed(0) time <- system.time(for (i in seq_along(res)) { tmp <- m_s_subsample(x1, x2.tilde, y.tilde, control, FALSE) res[[i]] <- unlist(within(tmp, b1 <- drop(t1 + b1 - T2 %*% b2))) }) cat('Time elapsed in subsampling: ', time,'\n') ## show a summary of the results {"FIXME": output is platform dependent} summary(res1 <- do.call(rbind, res)) ## compare with fast S solution fmS <- lmrob(Y ~ Region + X1 + X2 + X3, education, init="S") coef(fmS) fmS$scale ### Comparing m-s_descent implementations() {our C and R} : ------------------- ctrl <- control #ctrl$trace.lev <- 5 ctrl$k.max <- 1 mC <- m_s_descent (x1, x2, y, ctrl, mss2$b1, mss2$b2, mss2$scale+10) mR <- m_s_descent_Ronly(x1, x2, y, ctrl, mss2$b1, mss2$b2, mss2$scale+10) nm <- c("b1","b2", "scale", "res") stopifnot(all.equal(mC[nm], mR[nm], check.attributes = FALSE, tolerance = 4e-14)) # seen 5.567e-15 in OpenBLAS ^^^^^ ## control$k.m_s <- 100 res3 <- vector("list", 100) time <- system.time(for (i in seq_along(res3)) { ri <- res[[i]] res3[[i]] <- unlist(m_s_descent(x1, x2, y, control, ri[1:4], ri[5:7], ri[8])) }) cat('Time elapsed in descent proc: ', time,'\n') ## show a summary of the results {"FIXME": output is platform dependent} res4 <- do.call(rbind, res3) summary(res4[,1:8]) stopifnot(all.equal( # 'test', not only plot: res1[, "scale"], res4[,"scale"], tol = 0.03), res1[, "scale"] >= res4[,"scale"] - 1e-7 ) # 1e-7 just in case plot(res1[, "scale"], res4[,"scale"]) abline(0,1, col=adjustcolor("gray", 0.5)) ## Test lmrob.M.S x <- model.matrix(fmS) control$trace.lev <- 3 ## --------- -- set.seed(1003) fMS <- lmrob.M.S(x, y, control, fmS$model) resid <- drop(y - x %*% fMS$coef) assert.EQ(resid, fMS$resid, check.attributes=FALSE, tol = 1e-12) ## Test direct call to lmrob ## 1. 
trace_lev output: set.seed(17) fMS <- lmrob(Y ~ Region + X1 + X2 + X3, education, init = "M-S", trace.lev=2) set.seed(13) fiMS <- lmrob(Y ~ Region + X1 + X2 + X3, education, init = "M-S") out2 <- capture.output(summary(fiMS)) writeLines(out2) set.seed(13) fiM.S <- lmrob(Y ~ Region + X1 + X2 + X3, education, init=lmrob.M.S) out3 <- capture.output(summary(fiM.S)) ## must be the same {apart from the "init=" in the call}: i <- 3 stopifnot(identical(out2[-i], out3[-i])) ## the difference: c(rbind(out2[i], out3[i])) ### "Skipping design matrix equilibration" warning can arise for reasonable designs ----- set.seed(1) x2 <- matrix(rnorm(2*30), 30, 2) data <- data.frame(y = rnorm(30), group = rep(letters[1:3], each=10), x2) obj <- lmrob(y ~ ., data, init="M-S", trace.lev=1) ## illustration: the zero row is introduced during the orthogonalization of x2 wrt x1 ## l1 regression always produces p zero residuals ## by chance, the zero residuals of multiple columns happen to be on the same row sf <- splitFrame(obj$model) x1 <- sf$x1 x2 <- sf$x2 control <- obj$control ## orthogonalize x2.tilde <- x2 for(i in 1:ncol(x2)) { tmp <- lmrob.lar(x1, x2[,i], control) x2.tilde[,i] <- tmp$resid } x2.tilde == 0 ## Specifying init="M-S" for a model without categorical variables ## used to cause a segfault; now uses "S" lmrob(LNOx ~ LNOxEm, NOxEmissions[1:10,], init="M-S") ## Now an ANOVA model with *only* categorical variables n <- 64 # multiple of 16 stopifnot(n %% 16 == 0) d.AOV <- data.frame(y = round(100*rnorm(64)), A=gl(4,n/4), B=gl(2,8, n), C=gl(2,4,n)) fm <- lmrob(y ~ A*B*C, data = d.AOV, init = "M-S", trace.lev=2) ## lmrob_M_S(n = 64, nRes = 500, (p1,p2)=(16,0), (orth,subs,desc)=(1,1,1)) ## Starting subsampling procedure.. Error in lmrob.M.S(x, y, control, mf) : ## 'Calloc' could not allocate memory (18446744073709551616 of 4 bytes) ## BTW: Can we compute an M-estimate (instead of MM-*) as we ## --- cannot have any x-outliers in such an ANOVA! robustbase/tests/wgt-himed.Rout.save0000644000176200001440000001072414004270422017261 0ustar liggesusers R version 4.0.3 Patched (2021-01-08 r79818) -- "Bunny-Wunnies Freak Out" Copyright (C) 2021 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > himed <- function(x) { n2 <- 1 + length(x) %/% 2; sort(x, partial = n2)[n2] } > > ## Tolerance 2e-7 {13 * higher than default 1.49e-8 } > is.all.equal <- function(x,y, tol = 2e-7) { + is.logical(r <- all.equal(x,y, tolerance = tol)) && r } > > > library(robustbase) > > stopifnot(is.na(wgt.himedian(numeric()))) > ## hi-median() seg.faulted or inf.looped till Jan.3, 2021 > > options(digits = 7)# single precision! 
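[Editorial aside, not part of the saved transcript:] the simulation loop below rests on the
identity that, for positive integer weights, the weighted high median equals the ordinary
high median of the sample with each value replicated according to its weight.  A minimal
hand-checkable instance (object names 'x.ex', 'w.ex' are used only for this illustration):
x.ex <- c(1, 2, 3, 4)
w.ex <- c(1, 3, 1, 1)
## rep(x.ex, w.ex) is 1 2 2 2 3 4, whose high median (the 4th of 6 ordered values) is 2:
c(naive = himed(rep(x.ex, w.ex)), weighted = wgt.himedian(x.ex, w.ex)) ## both should be 2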
> set.seed(15) > > cat(" n | range(x) | wgt.Himed\n", + "------------------------------\n",sep="") n | range(x) | wgt.Himed ------------------------------ > for(i in 1:100) { + n <- rpois(1, lam = 10) + cat(formatC(n,wid=3)," ") + x <- round(rnorm(n),3) + iw <- 1 + rpois(n, lam = 2) + him <- himed(rep(x, iw)) ## == naive R solution + whim <- wgt.himedian (x, iw) + if(!is.all.equal(whim, him)) + cat("whim != him: ", whim, "!=", him,"\n") + cat(formatC(range(x), wid = 6, flag="-"), "", + formatC(whim, wid = 6, flag="+"), "\n") + } 10 -1.255 1.831 +0.488 9 -1.167 1.412 +0.032 18 -2.253 1.611 +0.471 10 -1.589 0.754 +0.004 11 0.2 1.575 +0.712 16 -2.587 1.552 -0.04 8 -1.499 1.055 +0.041 15 -1.069 1.966 +0.621 15 -1.829 1.193 -0.177 13 -2.088 1.33 +0.022 15 -1.905 2.1 +0.104 18 -1.827 1.43 -0.338 15 -1.494 1.409 +0.378 6 -1.266 1.263 -0.162 11 -1.243 1.458 +0.227 9 -1.609 1.146 +0.609 6 -0.898 1.625 -0.659 14 -1.379 1.898 -0.123 6 -1.928 0.656 -0.44 9 -1.286 2.61 +0.442 8 -1.196 1.169 -0.376 5 -1.871 0.875 +0.004 10 -1.618 1.87 -0.023 11 -0.979 1.337 +0.156 6 -0.357 1.723 +0.519 8 -2.566 2.334 -0.416 14 -1.909 2.04 -0.051 15 -2.229 2.505 +0.377 9 -0.409 1.553 +0.452 7 -0.924 1.009 -0.645 13 -2.14 0.711 -0.348 9 -1.697 1.601 +0.369 12 -2.227 1.793 +0.508 14 -2.308 1.808 +0.444 11 -1.85 3.437 +0.285 9 -1.312 0.601 +0.017 7 -1.792 0.005 -0.247 10 -2.178 1.51 -0.905 10 -1.121 1.464 -0.133 8 -0.246 1.299 +0.885 14 -1.666 2.306 +0.234 9 -0.752 2.056 +0.151 13 -0.472 1.625 -0.055 6 -1.795 0.449 +0.122 10 -2.023 2.992 +0.141 8 -1.265 1.476 +0.083 11 -1.715 0.966 +0.137 8 -1.943 0.374 -0.215 8 -2.377 1.483 +0.029 8 -0.659 2.699 +1.052 8 -0.671 1.426 +0.033 12 -1.462 2.075 -0.13 14 -1.865 1.406 -0.478 6 -0.324 2.014 +1.453 12 -1.519 1.072 -0.106 12 -1.511 1.232 +0.055 14 -0.516 1.865 +0.31 8 -2.402 0.218 -0.319 10 -2.724 0.983 -0.47 5 -1.566 1.034 +0.676 7 -1.98 1.7 -0.002 11 -2.203 1.736 -0.209 8 -1.782 0.435 +0.035 15 -0.835 1.668 +0.166 12 -1.938 0.838 -0.228 8 -1.257 1.542 -0.527 11 -2.394 2.062 -0.007 14 -2.574 2.356 +0.095 10 -1.691 1.387 -0.217 14 -0.601 2.453 +0.951 9 -0.631 0.953 +0.601 9 -1.501 1.146 -0.337 7 -1.826 1.32 +0.621 6 -0.859 0.343 +0.009 4 -1.038 1.396 +1.375 9 -1.325 0.892 +0.399 10 -0.632 1.347 +0.026 4 -0.926 0.666 -0.555 11 -1.67 2.158 -0.355 17 -0.818 1.842 +0.054 6 -1.296 1.066 +0.384 11 -1.494 1.224 -0.222 11 -2.397 2.254 +0.165 11 -1.76 2.115 +0.008 9 -0.542 1.41 +0.412 7 -1.078 1.596 -0.43 9 -1.161 1.527 -0.12 7 0.112 1.656 +0.929 13 -1.175 2.39 +0.627 11 -1.177 1.838 +0.359 8 -1.601 0.792 +0.313 12 -2.57 1.827 +0.019 5 -0.463 0.505 +0.49 11 -2.077 1.694 +0.032 7 -1.139 1.232 -0.141 9 -0.814 1.482 +0.331 5 -0.542 0.405 +0.244 13 -2.127 1.754 -0.424 7 -1.082 1.015 -0.1 11 -0.773 2.253 -0.011 > > proc.time() user system elapsed 0.163 0.034 0.188 robustbase/tests/glmrob-specials.R0000644000176200001440000000151112271657124017000 0ustar liggesuserslibrary(robustbase) ## Model without coefficients [ print.glmrob() tests for this ..] 
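## (Editorial note, added for illustration only; 'y0' is an ad-hoc name:) for an
## intercept-only Poisson model the classical MLE is exactly log(mean(y)), so the robust
## fits below should stay close to log(4) ~= 1.386 even after the outliers are introduced.
## Without outliers, the classical estimate and log(mean(y)) agree up to convergence tolerance:
y0 <- rpois(500, lambda = 4)
c(glm = unname(coef(glm(y0 ~ 1, family = poisson))), log.mean = log(mean(y0)))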
### very simple model [with outliers] set.seed(1) y <- rpois(1000, lambda = 4) ## without outliers m0o <- glm(y ~ 0, family = poisson, epsilon = 1e-12) m1o <- glm(y ~ 1, family = poisson, epsilon = 1e-12) y[1:3] <- 99:101 # outliers m0 <- glm(y ~ 0, family = poisson, epsilon = 1e-12) m1 <- glm(y ~ 1, family = poisson, epsilon = 1e-12) ## these both failed in version 0.1-2: rm0 <- glmrob(y ~ 0, family = poisson, acc = 1e-12) rm1 <- glmrob(y ~ 1, family = poisson, acc = 1e-12) rm0 rm1 (s0 <- summary(rm0)) (s1 <- summary(rm1)) str(s1) stopifnot(all.equal(c(coef(s1)), c(1.390672035557, 0.016213613600955, 85.77187478275, 0), tolerance = 1e-13))# 32-b: 4.7e-15 cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/lmrob-methods.Rout.save0000644000176200001440000004135614440373076020172 0ustar liggesusers R version 4.3.1 beta (2023-06-07 r84521) -- "Beagle Scouts" Copyright (C) 2023 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > ### tests methods argument of lmrob.control > > library(robustbase) > > data(stackloss) > cat("doExtras:", doExtras <- robustbase:::doExtras(),"\n") doExtras: FALSE > > str(ctrl2 <- lmrob.control(trace.lev = if(doExtras) 2 else 0)) List of 34 $ setting : NULL $ seed : int(0) $ nResample : num 500 $ psi : chr "bisquare" $ tuning.chi : num 1.55 $ bb : num 0.5 $ tuning.psi : num 4.69 $ max.it : num 50 $ groups : num 5 $ n.group : num 400 $ best.r.s : int 2 $ k.fast.s : int 1 $ k.max : int 200 $ maxit.scale : int 200 $ k.m_s : int 20 $ refine.tol : num 1e-07 $ rel.tol : num 1e-07 $ scale.tol : num 1e-10 $ solve.tol : num 1e-07 $ zero.tol : num 1e-10 $ trace.lev : num 0 $ mts : int 1000 $ subsampling : chr "nonsingular" $ compute.rd : logi FALSE $ method : chr "MM" $ numpoints : int 10 $ cov : chr ".vcov.avar1" $ split.type : chr "f" $ fast.s.large.n : num 2000 $ eps.outlier :function (nobs) $ eps.x :function (maxx) $ compute.outlier.stats: chr "SM" $ warn.limit.reject : num 0.5 $ warn.limit.meanrw : num 0.5 - attr(*, "class")= chr "lmrobCtrl" > > ## S > set.seed(0) > summary(m0 <- lmrob(stack.loss ~ ., data = stackloss, method = "S", + compute.outlier.stats = "S")) Call: lmrob(formula = stack.loss ~ ., data = stackloss, method = "S", compute.outlier.stats = "S") \--> method = "S" Residuals: Min 1Q Median 3Q Max -9.46226 -0.82076 0.02249 0.80806 8.31829 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -36.92542 5.41708 -6.816 3.0e-06 *** Air.Flow 0.84957 0.07892 10.765 5.2e-09 *** Water.Temp 0.43047 0.19507 2.207 0.0414 * Acid.Conc. -0.07354 0.07216 -1.019 0.3224 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 1.912 Multiple R-squared: 0.9863, Adjusted R-squared: 0.9839 Robustness weights: 5 observations c(1,3,4,13,21) are outliers with |weight| = 0 ( < 0.0048); one weight is ~= 1. The remaining 15 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 
0.4126 0.7595 0.8726 0.8270 0.9718 0.9986 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 4.762e-03 1.692e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.w" compute.outlier.stats "S" seed : int(0) > set.seed(0) > m0a <- lmrob.S(m0$x, stack.loss, ctrl2) > > all.equal(m0 [c('coefficients', 'scale', 'rweights')], + m0a[c('coefficients', 'scale', 'rweights')]) [1] TRUE > > ## MM > set.seed(0) > summary(m1 <- lmrob(stack.loss ~ ., data = stackloss, method = "MM", + compute.outlier.stats = "S")) Call: lmrob(formula = stack.loss ~ ., data = stackloss, method = "MM", compute.outlier.stats = "S") \--> method = "MM" Residuals: Min 1Q Median 3Q Max -10.50974 -1.43819 -0.09134 1.02503 7.23113 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -41.52462 5.29780 -7.838 4.82e-07 *** Air.Flow 0.93885 0.11743 7.995 3.68e-07 *** Water.Temp 0.57955 0.26296 2.204 0.0416 * Acid.Conc. -0.11292 0.06989 -1.616 0.1246 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 1.912 Multiple R-squared: 0.9593, Adjusted R-squared: 0.9521 Convergence in 17 IRWLS iterations Robustness weights: observation 21 is an outlier with |weight| = 0 ( < 0.0048); 2 weights are ~= 1. The remaining 18 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.1215 0.8757 0.9428 0.8721 0.9797 0.9978 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 4.762e-03 1.692e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.avar1" compute.outlier.stats "S" seed : int(0) > > set.seed(0) > m2 <- update(m1, method = "SM") > > all.equal(m1[c('coefficients', 'scale', 'cov')], + m2[c('coefficients', 'scale', 'cov')]) [1] TRUE > > set.seed(0) > m3 <- update(m0, method = "SM", cov = '.vcov.w') > > ## SMD > set.seed(0) > summary(m4 <- lmrob(stack.loss ~ ., data = stackloss, method = "SMD", psi = 'bisquare', + compute.outlier.stats = "S")) Call: lmrob(formula = stack.loss ~ ., data = stackloss, method = "SMD", psi = "bisquare", compute.outlier.stats = "S") \--> method = "SMD" Residuals: Min 1Q Median 3Q Max -10.50974 -1.43819 -0.09134 1.02503 7.23113 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -41.5246 8.9525 -4.638 0.000235 *** Air.Flow 0.9388 0.1175 7.990 3.71e-07 *** Water.Temp 0.5796 0.3199 1.812 0.087756 . Acid.Conc. -0.1129 0.1176 -0.960 0.350512 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 2.651 Multiple R-squared: 0.9593, Adjusted R-squared: 0.9521 Convergence in 17 IRWLS iterations Robustness weights: observation 21 is an outlier with |weight| = 0 ( < 0.0048); 2 weights are ~= 1. The remaining 18 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 
0.1215 0.8757 0.9428 0.8721 0.9797 0.9978 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 4.762e-03 1.692e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.w" compute.outlier.stats "S" seed : int(0) > summary(m4a <- lmrob..D..fit(m3)) Call: lmrob(formula = stack.loss ~ ., data = stackloss, method = "SMD", compute.outlier.stats = "S", cov = ".vcov.w") \--> method = "MM" Residuals: Min 1Q Median 3Q Max -10.50974 -1.43819 -0.09134 1.02503 7.23113 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -41.5246 9.3676 -4.433 0.000365 *** Air.Flow 0.9388 0.1230 7.636 6.84e-07 *** Water.Temp 0.5796 0.3348 1.731 0.101505 Acid.Conc. -0.1129 0.1231 -0.917 0.371736 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 2.651 Multiple R-squared: 0.9593, Adjusted R-squared: 0.9521 Convergence in 17 IRWLS iterations Robustness weights: observation 21 is an outlier with |weight| = 0 ( < 0.0048); 2 weights are ~= 1. The remaining 18 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.1215 0.8757 0.9428 0.8721 0.9797 0.9978 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 4.762e-03 1.692e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.w" compute.outlier.stats "S" seed : int(0) > > ## rearrange m4a and update call > m4a <- m4a[names(m4)] > class(m4a) <- class(m4) > m4a$call <- m4$call > > all.equal(m4, m4a, check.environment = FALSE) [1] "Component \"control\": Component \"method\": 1 string mismatch" [2] "Component \"init\": Component \"control\": Component \"method\": 1 string mismatch" [3] "Component \"cov\": Attributes: < Component \"corrfact\": Mean relative difference: 0.1167673 >" [4] "Component \"cov\": Attributes: < Component \"scorr\": Mean relative difference: 0.01959345 >" [5] "Component \"cov\": Mean relative difference: 0.09488594" > > ## SMDM > set.seed(0) > summary(m5 <- lmrob(stack.loss ~ ., data = stackloss, method = "SMDM", psi = 'bisquare', + compute.outlier.stats = "S")) Call: lmrob(formula = stack.loss ~ ., data = stackloss, method = "SMDM", psi = "bisquare", compute.outlier.stats = "S") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -9.6746 -1.7721 0.1346 1.2041 6.6080 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -41.9398 9.7719 -4.292 0.000494 *** Air.Flow 0.8747 0.1231 7.107 1.76e-06 *** Water.Temp 0.8099 0.3363 2.408 0.027656 * Acid.Conc. -0.1188 0.1284 -0.926 0.367655 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 2.651 Multiple R-squared: 0.9384, Adjusted R-squared: 0.9275 Convergence in 17 IRWLS iterations Robustness weights: 2 weights are ~= 1. The remaining 19 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 
0.1546 0.9139 0.9597 0.8874 0.9866 0.9966 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 4.762e-03 1.692e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.w" compute.outlier.stats "S" seed : int(0) > summary(m5a <- lmrob..M..fit(obj=m4)) Call: lmrob(formula = stack.loss ~ ., data = stackloss, method = "SMDM", psi = "bisquare", compute.outlier.stats = "S") \--> method = "SMD" Residuals: Min 1Q Median 3Q Max -9.6746 -1.7721 0.1346 1.2041 6.6080 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -41.9398 9.7719 -4.292 0.000494 *** Air.Flow 0.8747 0.1231 7.107 1.76e-06 *** Water.Temp 0.8099 0.3363 2.408 0.027656 * Acid.Conc. -0.1188 0.1284 -0.926 0.367655 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 2.651 Multiple R-squared: 0.9384, Adjusted R-squared: 0.9275 Convergence in 17 IRWLS iterations Robustness weights: 2 weights are ~= 1. The remaining 19 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.1546 0.9139 0.9597 0.8874 0.9866 0.9966 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 4.762e-03 1.692e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.w" compute.outlier.stats "S" seed : int(0) > > ## rearrange m5a > m5a <- m5a[names(m5)] > class(m5a) <- class(m5) > > all.equal(m5, m5a, check.environment = FALSE) #-> 3 string mismatch [1] "Component \"control\": Component \"method\": 1 string mismatch" [2] "Component \"init\": Component \"control\": Component \"method\": 1 string mismatch" [3] "Component \"init\": Component \"init\": Component \"control\": Component \"method\": 1 string mismatch" > > ## Fast S large n strategy (sped up) > model <- model.frame(LNOx ~ . ,data = NOxEmissions) > control <- lmrob.control(fast.s.large.n = 10, n.group = 341, groups = 2) > set.seed(0) > try(ret <- lmrob.S(model.matrix(model, NOxEmissions)[1:682,], NOxEmissions$LNOx[1:682], control)) Error in lmrob.S(model.matrix(model, NOxEmissions)[1:682, ], NOxEmissions$LNOx[1:682], : Fast S large n strategy failed. Use control parameter 'fast.s.large.n = Inf'. In addition: Warning message: In lmrob.S(model.matrix(model, NOxEmissions)[1:682, ], NOxEmissions$LNOx[1:682], : 'control$n.group' is not much larger than 'p', probably too small > ## do what the error says > control <- lmrob.control(fast.s.large.n = Inf) > try(ret <- lmrob.S(model.matrix(model, NOxEmissions)[1:682,], NOxEmissions$LNOx[1:682], control)) Error in lmrob.S(model.matrix(model, NOxEmissions)[1:682, ], NOxEmissions$LNOx[1:682], : DGEEQU: column 30 of the design matrix is exactly zero. > ##-> Error ...... DGEEQU: column 30 of the design matrix is exactly zero. 
> ## > ## still fails, but this error is to be expected since only a part > ## of the design matrix is given > > proc.time() user system elapsed 0.253 0.085 0.376 robustbase/tests/lmrob-psifns.R0000644000176200001440000001513714241260162016330 0ustar liggesusers#### Tests psi(), chi(),... etc and tuning.psi, tuning.chi : library(robustbase) source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) source(system.file("xtraR/test-tools.R", package = "robustbase")) # assert.EQ ### (1) Test the functions themselves -------------------------------- if(!dev.interactive(orNone=TRUE)) pdf("rob-psifns.pdf") ## Simple version, no error checking, no derivative, nothing: psiGGW <- function(x, a,b,c) { ifelse((ax <- abs(x)) < c, x, ifelse((ea <- -((ax-c)^b)/(2*a)) < -708.4, 0, x * exp(ea))) } assert.EQ(Mpsi (5:9, cc=c(0, a=1/8,b=2,c=1/8, NA), "GGW"), psiGGW(5:9, a=1/8,b=2,c=1/8), tol = 1e-13) ## Check that psi() |-> works; ditto for +-Inf, NA,.. cG <- c(-.5, 1, .95, NA) # one of the 6 "builtin"s d0 <- numeric() IoI <- c(-Inf, 0, Inf) NN <- c(NaN, NA) cGs <- list( c(-.4, 1.5, 0.85, NA) , c(-.4, 1.5 , 0.90, NA) , c(-.4, 1.5 , 0.95, NA) , c(-.4, 1.5, 0.975, NA) , c(-.4, 1.5, 0.99 , NA) , c(-.4, 1.5, 0.995, NA) ## , c(-.4, 1.25, 0.975, NA) , c(-.4, 1.1, 0.975, NA) , c(-.4, 1.025, 0.975, NA) , c(-.4, 1.0125, 0.975, NA) ## ## FIXME , c(-.1, 1.25, 0.95, NA) ## FIXME , c(-.1, 1.25, 0.99, NA) ) st <- system.time( cG.cnst <- lapply(cGs, function(cc) lmrob.control(psi = "ggw", tuning.psi = cc)$tuning.psi) ) cat('Time for constants computation of tuning.psi: ', st,'\n') cGct <- t(sapply(cG.cnst, attr, "constants"))[,-1] colnames(cGct) <- c("a","b","c", "rhoInf") signif(cGct, 4) assert.EQ(sapply(cG.cnst, function(cc) MrhoInf(cc, "ggw")), cGct[,"rhoInf"], tol = 1e-8) ## Do these checks for a *list* of (c.par, psi) combinations: c.psi.list <- list( list(1.345, "Huber"), list(1.8, "Huber"), list(cG, "GGW"), list(c(2,4,8), "Hampel"), list(c(1.5,3.5,8)*0.90, "Hampel"), list(par=c(-.5,1.5,.95,NA), "lqq"), list(bcs=c(1, 1, 1.25), "lqq"), list(1.1, "optimal"), list(0.1, "optimal"), list(2.3, "Welsh") ) for(c.psi in c.psi.list) { tPar <- c.psi[[1]]; psi <- c.psi[[2]] stopifnot(is.numeric(tPar), is.character(psi)) cat("Psi function ", psi,"; tuning par. c[]= (", paste(formatC(tPar, width=1), collapse=", "),")\n") for(FUN in list(Mpsi, Mchi, Mwgt)) stopifnot(identical(d0, FUN(d0, tPar, psi=psi)), identical(NN, FUN(NN, tPar, psi=psi))) stopifnot(identical(c(0,1,0), Mwgt(IoI, tPar,psi=psi))) if(isPsi.redesc(psi)) stopifnot(identical(c(0,0,0), Mpsi(IoI, tPar,psi=psi)), identical(c(1,0,1), Mchi(IoI, tPar,psi=psi))) else if(psi == "Huber") { stopifnot(identical(c(-tPar,0,tPar), Mpsi(IoI, tPar,psi=psi)), identical(c( Inf,0, Inf), Mchi(IoI, tPar,psi=psi))) } cat("chkPsi..(): ") isHH <- psi %in% c("Huber", "Hampel") # not differentiable tol <- switch(tolower(psi), "huber"=, "hampel"= c(.001, 1.0), "optimal" = .008, "ggw" = c(5e-5, 5e-3, 1e-12), "lqq" = c(1e-5, 5e-5, 1e-5, .08)) # .08 needed for bcs=c(1, 1, 1.25) if(is.null(tol)) tol <- 1e-4 # default otherwise cc <- chkPsi..(c(-5, 10), psi=psi, par=tPar, doD2 = !isHH, tol=tol) ## -------- cc. <- cc[!is.na(cc)] if(is.logical(cc) && all(cc.)) cat(" [Ok]\n") else { cat(" not all Ok:\n") print(cc.[cc. != "TRUE"]) } cat("------------------------\n\n") } ## Nice plots -- and check derivatives ---- head(x. 
<- seq(-5, 10, length=1501)) ## [separate lines, for interactive "play": ] stopifnot(chkPsiDeriv(p.psiFun(x., "LQQ", par=c(-.5,1.5,.95,NA)))) stopifnot(chkPsiDeriv(p.psiFun(x., "GGW", par= cG))) stopifnot(chkPsiDeriv(p.psiFun(x., "optimal", par=2))) stopifnot(chkPsiDeriv(p.psiFun(x., "Hampel", par = ## Default, but rounded: round(c(1.5, 3.5, 8) * 0.9016085, 1)), tol = 1e-3)) stopifnot(chkPsiDeriv(p.psiFun(x., "biweight", par = 4))) stopifnot(chkPsiDeriv(p.psiFun(x., "Welsh", par = 1.5))) stopifnot(chkPsiDeriv(p.psiFun(x., "huber", par = 1.5), tol = c(1e-10, 5e-3))) ## "huber"-rho via Mpsi(*, deriv=-1) was badly wrong till 2018-06 ## The same 6, all in one plot: op <- par(mfrow=c(3,2), mgp = c(1.5, .6, 0), mar = .1+c(3,3,2,.5)) p.psiFun2(x., "LQQ", par=c(-.5,1.5,.95,NA)) p.psiFun2(x., "GGW", par= cG) p.psiFun2(x., "optimal", par=1.3) p.psiFun2(x., "Hampel", par = round(c(1.5, 3.5, 8) * 0.9016085, 1)) p.psiFun2(x., "biweight", par = 4) p.psiFun2(x., "Welsh", par = 1.5) par(op) ### (2) Test them as arguments of lmrob() or lmrob.control(): ----- data(aircraft) set.seed(1) summary(mp0 <- lmrob(Y ~ ., data = aircraft, psi = 'bisquare', method = 'SMDM')) set.seed(2) summary(mp1 <- update(mp0, psi = 'optimal')) set.seed(3) summary(mp2 <- update(mp0, psi = 'ggw')) set.seed(4) summary(mp3 <- update(mp0, psi = 'welsh')) set.seed(5) summary(mp4 <- update(mp0, psi = 'ggw', tuning.psi = c(-.5, 1.5, 0.85, NA), tuning.chi = c(-0.5, 1.5, NA, 0.5))) set.seed(6) summary(mp5 <- update(mp0, psi = 'ggw', tuning.psi = c(-0.5, 1.0, 0.95, NA), tuning.chi = c(-0.5, 1.0, NA, 0.5))) set.seed(7) summary(mp6 <- update(mp0, psi = 'hampel')) set.seed(8) ctr7 <- lmrob.control(psi = 'ggw', tuning.psi = c(-0.3, 1.4, 0.95, NA), tuning.chi = c(-0.3, 1.4, NA, 0.5)) ctr7$tuning.psi ## -> "constants" ctr7$tuning.chi summary(mp7 <-lmrob(Y ~ ., data = aircraft, control = ctr7)) # *not* converging in k.max=200 set.seed(9) summary(mp8 <- update(mp0, psi = 'lqq')) set.seed(10) ## c(.) drops attributes : ctr9 <- lmrob.control(psi = 'lqq', tuning.psi = c(ctr7$tuning.psi), tuning.chi = c(ctr7$tuning.chi)) ctr9$tuning.psi ctr9$tuning.chi ## Confirm these constants above (against the ones we got earlier) ## by recomputing them using higher accuracy : (tpsi. <- do.call(.psi.lqq.findc, c(ctr9$tuning.psi, list(rel.tol=1e-11, tol=1e-8)))) (tchi. <- do.call(.psi.lqq.findc, c(ctr9$tuning.chi, list(rel.tol=1e-11, tol=1e-8)))) (tol4 <- .Machine$double.eps^.25) Rver <- getRversion() integr.bug <- "2.12.0" <= Rver && Rver <= "3.0.1" integr.bug if(integr.bug) tol4 <- 8*tol4 assert.EQ(attr(ctr9$tuning.psi, "constants"), tpsi., tol=tol4, giveRE=TRUE) assert.EQ(attr(ctr9$tuning.chi, "constants"), tchi., tol=tol4, giveRE=TRUE) summary(mp9 <- lmrob(Y ~ ., data = aircraft, control = ctr9)) cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/small-sample.Rout.save0000644000176200001440000001415012271657124017774 0ustar liggesusers R version 3.0.2 Patched (2014-01-26 r64896) -- "Frisbee Sailing" Copyright (C) 2014 The R Foundation for Statistical Computing Platform: x86_64-unknown-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. 
Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > library(robustbase) > ## testing functions: > source(system.file("xtraR/ex-funs.R", package = "robustbase")) > > set.seed(152) > > Nmax <- 12 > nn <- length(nset <- c(2:Nmax, 20, 50))## NOTA BENE: n == 1 etc are NOT YET TREATED! > Sim <- 2^9 # = 512 > > sn <- qn <- numeric(Sim) > cpu <- numeric(nn) > names(cpu) <- as.character(nset) > > for(n in nset) { + nS <- Sim ## if(n < 20) Sim else round(10*Sim/n) + cat("\nn = ",n,"\n------\nno.Sim. = ",nS,"\n") + cpu[as.character(n)] <- system.time(for(i in 1:nS) { + x <- rnorm(n) + sn[i] <- Sn0R(x) + qn[i] <- Qn0R(x) + Sn.x <- Sn(x, const = 1) + Qn.x <- Qn(x, const = 1) + if(!is.all.equal(Sn.x, sn[i], tol = 1e-5)) + cat("i=",i," Sn() != Sn0R(): ", Sn.x, "!=", sn[i],"\n") + if(!is.all.equal(Qn.x, qn[i], tol = 1e-5)) + cat("i=",i," Qn() != Qn0R(): ", Qn.x, "!=", qn[i],"\n") + })[1] + cat("Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...):\n") + print(c(mean(sn), sd(sn)/sqrt(nS), quantile(sn, p = (1:3)/4))) + print(c(mean(qn), sd(qn)/sqrt(nS), quantile(qn, p = (1:3)/4))) + } n = 2 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 1.17177829 0.03695327 0.52951343 1.05306039 1.62748640 25% 50% 75% 1.17177829 0.03695327 0.52951343 1.05306039 1.62748640 n = 3 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.46255834 0.01739738 0.15475882 0.36551414 0.65869811 25% 50% 75% 0.46255834 0.01739738 0.15475882 0.36551414 0.65869811 n = 4 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.84170614 0.02015695 0.51981874 0.77630479 1.12005284 25% 50% 75% 0.84170614 0.02015695 0.51981874 0.77630479 1.12005284 n = 5 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.64309841 0.01561957 0.35812751 0.60223019 0.87200727 25% 50% 75% 0.5591093 0.0133655 0.3273456 0.5337290 0.7412843 n = 6 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.81292713 0.01603854 0.54635157 0.78437781 1.03135301 25% 50% 75% 0.70787958 0.01251911 0.47540581 0.70464628 0.88664634 n = 7 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.71007470 0.01209913 0.52623935 0.69253904 0.88255673 25% 50% 75% 0.533802144 0.009128141 0.376988927 0.520001355 0.663751417 n = 8 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.82650425 0.01419433 0.58883395 0.79562656 1.00574823 25% 50% 75% 0.6645775 0.0103031 0.4938057 0.6323886 0.8027008 n = 9 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.75759768 0.01136142 0.57868634 0.74677447 0.91852270 25% 50% 75% 0.52259364 0.00736039 0.39618164 0.51826059 0.63137960 n = 10 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.83182112 0.01239199 0.62462885 0.80885273 1.01920116 25% 50% 75% 0.623637627 0.008403906 0.474993989 0.613887533 0.750270402 n = 11 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.76832479 0.01118554 0.59020462 0.76690177 0.93425844 25% 50% 75% 0.512979423 0.007139196 0.388824484 0.506239908 0.621525112 n = 12 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. 
x_n) and Qn(...): 25% 50% 75% 0.85538366 0.01139366 0.66084900 0.84512561 1.01552717 25% 50% 75% 0.607337007 0.007056424 0.496338090 0.596697510 0.703213097 n = 20 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.84113700 0.00812507 0.71508462 0.82864901 0.94684680 25% 50% 75% 0.542464499 0.004534141 0.473622699 0.541929347 0.612841843 n = 50 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.846902031 0.004784579 0.780334229 0.846165240 0.917926909 25% 50% 75% 0.4860477 0.0023965 0.4492734 0.4853898 0.5220596 > > rbind("Time (CPU) used:" = summary(cpu)) Min. 1st Qu. Median Mean 3rd Qu. Max. Time (CPU) used: 0.284 0.321 0.385 0.4803 0.462 1.515 > > proc.time() user system elapsed 7.506 0.077 7.725 robustbase/tests/weights.R0000644000176200001440000002333114435170063015367 0ustar liggesusers## test handing of weights and offset argument require(robustbase) ## generate simple example data (extension of the one in ./NAcoef.R ) data <- expand.grid(x1=letters[1:3], x2=LETTERS[1:4], rep=1:3, KEEP.OUT.ATTRS = FALSE) ## generate offset column data$os <- 1:nrow(data) set.seed(1) data$y <- data$os + rnorm(nrow(data)) ## add collinear variables data$x3 <- rnorm(nrow(data)) data$x4 <- rnorm(nrow(data)) data$x5 <- data$x3 + data$x4 ## lm() will have 'x5' "aliased" (and give coef = NA) ## add some NA terms data$y[1] <- NA data$x4[2:3] <- NA ## to test anova ## generate weights ## some obs with weight 0 data$weights <- as.numeric(with(data, x1 != 'c' | (x2 != 'B' & x2 != 'C'))) ## some obs with weight 2 data$weights[data$x1 == 'b'] <- 2 ## data2 := {data + weights}, encoded in "data2" (-> "ok" for coef(), not for SE) data2 <- rbind(subset(data, weights > 0), subset(data, weights == 2)) ## using these parameters we're essentially forcing lmrob() to ## fit a classic model --> easier to compare to lm() ctrl <- lmrob.control(psi="optimal", tuning.chi = 20, bb = 0.0003846154, tuning.psi=20, method="SM", cov=".vcov.w") ## SM = MM == the case where .vcov.avar1 was also defined for ## Classical models start with 'cm', robust just with 'rm' (or just 'm'), ## "." - models drop 'x5' (which is aliased / extraneous by construction) : (cm0 <- lm (y ~ x1*x2 + x3 + x4 + x5 + offset(os), data)) (cm0.<- lm (y ~ x1*x2 + x3 + x4 + offset(os), data)) (cm1 <- lm (y ~ x1*x2 + x3 + x4 + x5 + offset(os), data, weights=weights)) (cm1.<- lm (y ~ x1*x2 + x3 + x4 + offset(os), data, weights=weights)) (cm2 <- lm (y ~ x1*x2 + x3 + x4 + x5, data2, offset=os)) (cm2.<- lm (y ~ x1*x2 + x3 + x4, data2, offset=os)) (rm0 <- lmrob(y ~ x1*x2 + x3 + x4 + x5 + offset(os), data, control=ctrl)) (rm0.<- lmrob(y ~ x1*x2 + x3 + x4 + offset(os), data, control=ctrl)) set.seed(2) (rm1 <- lmrob(y ~ x1*x2 + x3 + x4 + x5 + offset(os), data, weights=weights, control=ctrl)) set.seed(2) (rm1.<- lmrob(y ~ x1*x2 + x3 + x4 + offset(os), data, weights=weights, control=ctrl)) set.seed(2) (rm2 <- lmrob(y ~ x1*x2 + x3 + x4 + x5, data2, offset=os, control=ctrl)) set.seed(2) (rm2.<- lmrob(y ~ x1*x2 + x3 + x4, data2, offset=os, control=ctrl)) sc0 <- summary(cm0) sc1 <- summary(cm1) sc2 <- summary(cm2) (sr0 <- summary(rm0)) (sr1 <- summary(rm1)) (sr2 <- summary(rm2)) ## test Estimates, Std. Errors, ... nc <- names(coef(cm1)) nc. 
<- setdiff(nc, "x5") # those who are "valid" stopifnot(exprs = { all.equal(coef(cm0.),coef(cm0)[nc.]) all.equal(coef(cm1.),coef(cm1)[nc.]) all.equal(coef(cm2.),coef(cm2)[nc.]) all.equal(coef(cm1), coef(cm2)) all.equal(coef(rm1), coef(rm2)) all.equal(coef(sc0), coef(sr0)) all.equal(coef(sc1), coef(sr1)) all.equal(coef(sc2), coef(sr2)) }) ## test class "lm" methods that do not depend on weights meths1 <- c("family", "formula", "labels", "model.matrix", "na.action", "terms") for (meth in meths1) stopifnot(all.equal(do.call(meth, list(rm0)), do.call(meth, list(rm1)))) ## class "lm" methods that depend on weights ## FIXME: meths2 <- c(#"AIC", "alias", #"BIC", "case.names", "coef", "confint", #"cooks.distance", #"deviance", "df.residual", #"dfbeta", #"dfbetas", #"drop1", "dummy.coef", #"effects", #"extractAIC", #"hatvalues", #"influence", "kappa", #"logLik", #"model.frame", ## disable because of zero.weights attribute "nobs", "predict", #"proj", #"rstandard", #"rstudent", #"simulate", ##"summary", ## see above "variable.names", ##"vcov", ## see below "weights") op <- options(warn = 1)# print immediately for (meth in meths2) { cat(meth,":") .SW. <- if(meth == "weights") suppressWarnings else identity # for suppressing ## No weights defined for this object. Use type="robustness" .... stopifnot(all.equal(do.call(meth, list(cm1)), do.call(meth, list(rm1))), all.equal(do.call(meth, list(cm2)), .SW.(do.call(meth, list(rm2))))) cat("\n") } options(op)# reverting ## further tests: anova(rm1, update(rm1, ~ . - x4 - x5)) anova(rm2, update(rm2, ~ . - x4 - x5)) stopifnot(all.equal(fitted(cm0), fitted(rm0)), all.equal(fitted(cm1), fitted(rm1)), all.equal(fitted(cm2), fitted(rm2))) nd <- expand.grid(x1=letters[1:3], x2=LETTERS[1:4]) set.seed(3) nd$x3 <- rnorm(nrow(nd)) nd$x4 <- rnorm(nrow(nd)) nd$x5 <- rnorm(nrow(nd)) # (*not* the sum x3+x4 !) 
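## (this new x5 is deliberately *non*-collinear: the fitted models above all carry
##  an NA coefficient for x5, and predict() on this new data must still work, with
##  classical and robust predictions agreeing -- see the all.equal() checks below)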
nd$os <- nrow(nd):1 wts <- runif(nrow(nd)) stopifnot(exprs = { all.equal(predict(cm0, nd, interval="prediction"), predict(rm0, nd, interval="prediction")) all.equal(predict(cm1, nd, interval="prediction"), predict(rm1, nd, interval="prediction")) all.equal(predict(cm2, nd, interval="prediction"), predict(rm2, nd, interval="prediction")) all.equal(predict(cm0, nd, interval="prediction", weights=wts), predict(rm0, nd, interval="prediction", weights=wts)) all.equal(predict(cm1, nd, interval="prediction", weights=wts), predict(rm1, nd, interval="prediction", weights=wts)) all.equal(predict(cm2, nd, interval="prediction", weights=wts), predict(rm2, nd, interval="prediction", weights=wts), tolerance=1e-7) }) ## Padding can lead to differing values here ## so test only full rank part qrEQ <- function(m1, m2) { q1 <- qr(m1) q2 <- qr(m2) r <- 1:q1$rank stopifnot(q1$rank == q2$rank, all.equal(q1$pivot, q2$pivot), all.equal(q1$qraux[r],q2$qraux[r]), all.equal(q1$qr[r,r], q2$qr[r,r])) } qrEQ(cm0, rm0) qrEQ(cm1, rm1) qrEQ(cm2, rm2) stopifnot(all.equal(residuals(cm0), residuals(rm0)), all.equal(residuals(cm1), residuals(rm1)), all.equal(residuals(cm2), residuals(rm2)), all.equal(resid(cm0, type="pearson"), resid(rm0, type="pearson")), all.equal(resid(cm1, type="pearson"), resid(rm1, type="pearson")), all.equal(resid(cm2, type="pearson"), resid(rm2, type="pearson"))) ## R 3.5.0: vcov(*, complete=TRUE) new default ==> same NA's as coef() if(interactive()) withAutoprint({ op <- options(width = 130, digits = 2) # --> vcov() rows fit on 1 line vcov(cm0) # 'x5' is NA vcov(cm2) # 'x5', 'x1c:2B', 'x1c:2C' rows & columns are NA options(op) }) (no.C <- is.na(match("complete", names(formals(stats:::vcov.lm))))) ## temporary _FIXME_ vcovC <- if(no.C) function(M, ...) vcov(M, complete=FALSE, ...) else vcov # (complete=TRUE) stopifnot(all.equal(vcov(cm0), vcovC(rm0), check.attributes=FALSE), all.equal(vcov(cm1), vcovC(rm1), check.attributes=FALSE), all.equal(vcov(cm2), vcovC(rm2), check.attributes=FALSE)) ## "clean": cln <- function(vc) structure(vc, weights=NULL, eigen=NULL) ## .vcov.avar1() is not recommended here, but also should work with singular / NA coef case: ok0 <- !is.na(coef(rm0)) vr0.NA<- vcov(rm0, cov=".vcov.avar1", complete=NA) # "almost singular" warning vr0.T <- vcov(rm0, cov=".vcov.avar1", complete=TRUE) vr0.F <- vcov(rm0, cov=".vcov.avar1", complete=FALSE) stopifnot(identical(dim(vr0.NA), dim(vr0.T)), identical(dim(vr0.F), dim(vr0.T) - 1L), dim(vr0.F) == 14, all.equal(cln(vr0.F), vr0.T[ok0,ok0], tol = 1e-15)) if(!no.C) { vc0.T <- vcov(cm0, complete=TRUE) vc0.F <- vcov(cm0, complete=FALSE) } ok1 <- !is.na(coef(rm1)) ## cannot work because init/fit residuals are not of full length tools::assertError(vr1.NA<- vcov(rm1, cov=".vcov.avar1", complete=NA)) tools::assertError(vr1.T <- vcov(rm1, cov=".vcov.avar1", complete=TRUE )) tools::assertError(vr1.F <- vcov(rm1, cov=".vcov.avar1", complete=FALSE)) ## instead, must refit rm1. <- update(rm1, control = within(ctrl, cov <- ".vcov.avar1")) vr1.NA<- vcov(rm1., complete=NA) vr1.T <- vcov(rm1., complete=TRUE) vr1.F <- vcov(rm1., complete=FALSE) stopifnot(identical(vr1.F, vr1.NA), # in this case identical(dim(vr1.F), dim(vr1.T) - 3L), dim(vr1.F) == 12, isSymmetric(vr1.T), identical(rownames(vr1.F), rownames(vr1.T)[ok1]), all.equal(cln(vr1.F), vr1.T[ok1,ok1], tol=1e-15)) if(FALSE) ## ERROR "exact singular" (probably *NOT* to fix, as TRUE/FALSE do work !) 
vr2.NA<- vcov(rm2, cov=".vcov.avar1", complete=NA) # "almost singular" warning vr2.T <- vcov(rm2, cov=".vcov.avar1", complete=TRUE) vr2.F <- vcov(rm2, cov=".vcov.avar1", complete=FALSE) stopifnot(TRUE, # identical(dim(vr2.NA), dim(vr2.T)), identical(dim(vr2.F), dim(vr2.T) - 3L), dim(vr2.F) == 12, identical(rownames(vr2.F), rownames(vr1.F)), identical(rownames(vr2.T), rownames(vr1.T)), all.equal(cln(vr2.F), vr2.T[ok1,ok1], tol=1e-15)) ## Hmm, the supposedly heteroscedastic-robust estimates *are* very different: all.equal(vcov(cm0), vcovC(rm0, cov = ".vcov.avar1"), check.attributes=FALSE) # rel.diff. 0.5367564 if(FALSE) # does not make sense all.equal(vcov(cm1), vcovC(rm1, cov = ".vcov.avar1"), check.attributes=FALSE) all.equal(vcov(cm2), vcovC(rm2, cov = ".vcov.avar1"), check.attributes=FALSE) # rel.diff. 0.5757642 ## Null fits (rank(X)==0) are tested in NAcoef.R ## testing weight=0 bug lmrob(y ~ x3, data, weights=weights) robustbase/tests/wgt-himed-xtra.R0000644000176200001440000001034714055507253016564 0ustar liggesusers library(robustbase) ## testing functions: source(system.file("xtraR/ex-funs.R", package = "robustbase")) x <- c(0.26, 0.161, 1.33, -0.925, 0.199, -1.476, 0.489) iw <- c(5, 4, 4, 1, 5, 1, 5) stopifnot(0.26 == (himR <- weighted.median(rep(x,iw))), himR == wgt.himedian(x, iw), ## (once gave infinite loop) himR == wgt.himedian(x, as.integer(iw))) ## same result, but *different wweigted.median() debug output! ##-- even when having EXACT data (& exact differences!) all.equal(Qn(c(2:1,4:3)), 1.1376128) ###--- another inifinite loop {solved}: (z4 <- round(qnorm(ppoints(4)), 2)) # -1.05 -0.30 0.30 1.05 ## both the same (also wweigted.median debug output) (all.equal(weighted.median(z4, 4:1), print(wgt.himedian (z4, 4:1))))# 3.97e-8 (all.equal(weighted.median(z4, c(4,2,3,17)), print(wgt.himedian (z4, c(4,2,3,17)))))# 4.54e-8 Sn (z4)## = 0.8533053 Sn (z4, const = 1)# = 0.75 ##-- now Qn (z4)# --> gave (another) infinite loop ##--> now "works" after (float) rounding of differences! ##--- DIFFERENT whimed() output! stopifnot(all.equal(Qn(z4, const = 1), print(Qn0R(z4)))) set.seed(1) n <- length(x <- round(2048*runif(16))) n*(n-1)/2 # 120 xDiff <- local({y <- sort(x); m <- outer(y,y,"-"); m[lower.tri(m)] }) xDsrt <- unique(sort(xDiff)) stopifnot(exprs = { ## so all pairwise differences differ : length(xDsrt) == n*(n-1)/2 # 120 Qn0R(x, k=1:120) == xDsrt ## *all* the "quantiles" = order stats do differ indeed Qn(x, constant=1) == 374 }) ## Bugs in Qn(x, k = ) sapply(1:120, function(k) Qn(x, 1, k=k)) ## ends with 0 0 0 ... for the last ones if(FALSE) ## e.g. Qn(x, k=1:10) # enters infinite loop after k[4] = 5 ## yet another problem: Sn0R(c(1.1, -0.93, -0.11, -0.74))# 0.82 Sn (c(1.1, -0.93, -0.11, -0.74))# 0.9329471 ## gave segmentation fault at Sat Mar 16 23:54:30 2002 ## not anymore but 0.9329471 ### Check validity of basic algorithm few times set.seed(471) for(sim in 1:100) { # had '500' cat(".") x <- rnorm(rpois(1, lam=80))# not too large the *n0R() use time! 
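    ## compare Sn()/Qn() (with const = 1) against the simple R reference
    ## implementations Sn0R() / Qn0R() sourced from ex-funs.R above: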
##--> Sn0R() "fails" for odd n stopifnot(all.equal(Sn(x, const = 1), Sn0R(x)), all.equal(Qn(x, const = 1), Qn0R(x), tolerance = 7e-8)) x <- round(x,2) stopifnot(all.equal(Sn(x, const = 1), Sn0R(x)), all.equal(Qn(x, const = 1), Qn0R(x), tolerance = 7e-8)) if(sim %% 50 == 0) cat(sim, "\n") } ###---- Last series of problems: when n^2 > max.integer: ## Large x with 1% outliers N <- 1e5 n.o <- round(0.01 * N) nSim <- 24## interesting nSim <- 4 ## for package testing estim.lst <- c("mad", "Sn", "Qn") Res <- array(NA, dim = c(nSim, length(estim.lst), 1 + 2), dimnames= list(NULL,estim.lst, c("Tx","cpu1", "cpu3"))) set.seed(101) for(i in 1:nSim) { x <- sample(c(rnorm(N), 10*min(1, abs(rt(1, df=2))) + rnorm(n.o))) cat(i) for(S in estim.lst) { cpu <- system.time(Tx <- get(S)(x))[1:3] Res[i, S,] <- c(Tx, cpu[c(1,3)]) } cat(" ") }; cat("\n") options(digits = 5) (Tx <- Res[,, "Tx"]) stopifnot(abs(range(Tx - 1)) < 0.03) q()##============================================================================ ### -- Rest: rather for demo -- keep here for reference apply(Res, c(2,3), mean) ## Variation: robust or not: 1000* apply(Tx, 2, sd)#-> Qn < Sn < mad 1000* apply(Tx, 2, Qn)#-> Qn > Sn > mad if(dev.interactive(orNone=TRUE)) { boxplot(Tx, main = sprintf("n=%d x N(0,1) + %d (1%%) outliers to the right", N,n.o)) abline(h = 1, lty = 3, lwd = 2, col = "gray") } if(interactive()) { ## i.e. not when package testing .. N <- 500 set.seed(101) str(iw <- 1L+ as.integer(rpois(N, 1))); str(w <- as.double(iw)) cr <- ci <- numeric(50) for(nn in seq_along(ci)) { x <- round(rnorm(N),1) cat(".") cr[nn] <- system.time(for(i in 1:1000) rr <- wgt.himedian(x, w))[1] ci[nn] <- system.time(for(i in 1:1000) ri <- wgt.himedian(x, iw))[1] stopifnot(rr == ri) };cat("\n") ## Or rather (as correctly) a "paired" comparsion: boxplot(cr - ci, notch=TRUE) ## rather t.test( cr, ci, paired = TRUE) ##-> P-value of 0.0219 t.test(log(cr), log(ci), paired = TRUE) ##-> P-value of 0.0088 wilcox.test(cr, ci, paired = TRUE) ##-> P-value of 2.23e-5 (!!) } robustbase/tests/MCD-specials.Rout.save0000644000176200001440000003002414432640641017604 0ustar liggesusers R version 4.3.0 Patched (2023-05-17 r84446) -- "Already Tomorrow" Copyright (C) 2023 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > #### Test special cases for covMcd() > > library(robustbase) > > ### 1) p = 1 ---------------------------------------------------- > set.seed(1) > x <- c(rnorm(50),100, 1e10) > (r1 <- covMcd(x)) Minimum Covariance Determinant (MCD) estimator approximation. Method: Univariate Fast MCD(alpha=0.5 ==> h=27); nsamp = 500; (n,k)mini = (300,5) Call: covMcd(x = x) Log(Det.): -2.13 Robust Estimate of Location: x 0.1922 Robust Estimate of Covariance: x x 0.5978 > str(r1) List of 15 $ call : language covMcd(x = x) $ nsamp : num 500 $ method : chr "Univariate Fast MCD(alpha=0.5 ==> h=27); nsamp = 500; (n,k)mini = (300,5)" $ cov : num [1, 1] 0.598 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "x" .. 
..$ : chr "x" $ center : Named num 0.192 ..- attr(*, "names")= chr "x" $ n.obs : int 52 $ alpha : num 0.5 $ quan : num 27 $ raw.cov : num [1, 1] 0.839 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "x" .. ..$ : chr "x" $ raw.center: Named num 0.325 ..- attr(*, "names")= chr "x" $ crit : num -2.13 $ mcd.wt : num [1:52] 1 1 1 1 1 1 1 1 1 1 ... $ X : num [1:52, 1] -0.626 0.184 -0.836 1.595 0.33 ... ..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:52] "1" "2" "3" "4" ... .. ..$ : NULL $ raw.cnp2 : num [1:2] 6.45 1.14 $ cnp2 : num [1:2] 1.17 1.01 - attr(*, "class")= chr "mcd" > summary(r1) Minimum Covariance Determinant (MCD) estimator approximation. Method: Univariate Fast MCD(alpha=0.5 ==> h=27); nsamp = 500; (n,k)mini = (300,5) Call: covMcd(x = x) Log(Det.): -2.13 Robust Estimate of Location: x 0.1922 Robust Estimate of Covariance: x x 0.5978 Eigenvalues: [1] 0.5978 Robustness weights: 4 observations c(14,24,51,52) are outliers with |weight| = 0 ( < 0.0019); 48 weights are ~= 1. > ## with alpha = 1 > (r1.1 <- covMcd(x, alpha = 1)) Minimum Covariance Determinant (MCD) estimator approximation. Method: MCD(alpha=1 ==> h=52) alpha = 1: The minimum covariance determinant estimates based on 52 observations are equal to the classical estimates. Call: covMcd(x = x, alpha = 1) Log(Det.): 42.1 Robust Estimate of Location: x 2.059 Robust Estimate of Covariance: x x 223.9 > str(r1.1) List of 15 $ call : language covMcd(x = x, alpha = 1) $ nsamp : num 500 $ method : chr "MCD(alpha=1 ==> h=52) \nalpha = 1: The minimum covariance determinant estimates based on 52 observations \nare "| __truncated__ $ cov : num [1, 1] 224 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "x" .. ..$ : chr "x" $ center : Named num 2.06 ..- attr(*, "names")= chr "x" $ n.obs : int 52 $ alpha : num 1 $ quan : num 52 $ raw.cov : num [1, 1] 1.92e+18 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "x" .. ..$ : chr "x" $ raw.center: Named num 1.92e+08 ..- attr(*, "names")= chr "x" $ crit : num 42.1 $ mcd.wt : num [1:52] 1 1 1 1 1 1 1 1 1 1 ... $ X : num [1:52, 1] -0.626 0.184 -0.836 1.595 0.33 ... ..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:52] "1" "2" "3" "4" ... .. ..$ : NULL $ raw.cnp2 : num [1:2] 1 1 $ cnp2 : num [1:2] 1.14 1 - attr(*, "class")= chr "mcd" > summary(r1.1) Minimum Covariance Determinant (MCD) estimator approximation. Method: MCD(alpha=1 ==> h=52) alpha = 1: The minimum covariance determinant estimates based on 52 observations are equal to the classical estimates. Call: covMcd(x = x, alpha = 1) Log(Det.): 42.1 Robust Estimate of Location: x 2.059 Robust Estimate of Covariance: x x 223.9 Eigenvalues: [1] 223.9 Robustness weights: 2 observations c(51,52) are outliers with |weight| = 0 ( < 0.0019); 50 weights are ~= 1. > > ### 1b) p = 1, constant scale > (rc <- covMcd(rep(1,12))) Minimum Covariance Determinant (MCD) estimator approximation. Method: Univariate Fast MCD(alpha=0.5 ==> h=7); nsamp = 500; (n,k)mini = (300,5) Call: covMcd(x = rep(1, 12)) Initial scale 0 because more than 'h' (=7) observations are identical. Log(Det.): -Inf Robust Estimate of Location: rep(1, 12) 1 Robust Estimate of Covariance: rep(1, 12) rep(1, 12) 0 Warning message: In covMcd(rep(1, 12)) : Initial scale 0 because more than 'h' (=7) observations are identical. 
> str(rc) List of 16 $ call : language covMcd(x = rep(1, 12)) $ nsamp : num 500 $ method : chr "Univariate Fast MCD(alpha=0.5 ==> h=7); nsamp = 500; (n,k)mini = (300,5)" $ singularity:List of 2 ..$ kind: chr "identicalObs" ..$ q : num 7 $ cov : num [1, 1] 0 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "rep(1, 12)" .. ..$ : chr "rep(1, 12)" $ raw.cov : num [1, 1] 0 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "rep(1, 12)" .. ..$ : chr "rep(1, 12)" $ center : Named num 1 ..- attr(*, "names")= chr "rep(1, 12)" $ raw.center : Named num 1 ..- attr(*, "names")= chr "rep(1, 12)" $ n.obs : int 12 $ alpha : num 0.5 $ quan : num 7 $ crit : num -Inf $ mcd.wt : num [1:12] 1 1 1 1 1 1 1 1 1 1 ... $ X : num [1:12, 1] 1 1 1 1 1 1 1 1 1 1 ... ..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:12] "1" "2" "3" "4" ... .. ..$ : NULL $ raw.cnp2 : num [1:2] 4.97 1.41 $ cnp2 : num [1:2] 1 1 - attr(*, "class")= chr "mcd" > summary(rc) Minimum Covariance Determinant (MCD) estimator approximation. Method: Univariate Fast MCD(alpha=0.5 ==> h=7); nsamp = 500; (n,k)mini = (300,5) Call: covMcd(x = rep(1, 12)) Initial scale 0 because more than 'h' (=7) observations are identical. Log(Det.): -Inf Robust Estimate of Location: rep(1, 12) 1 Robust Estimate of Covariance: rep(1, 12) rep(1, 12) 0 Eigenvalues: [1] 0 Robustness weights: All 12 weights are ~= 1. > ## with alpha = 1 > (rc1 <- covMcd(rep(1,12), alpha = 1)) Minimum Covariance Determinant (MCD) estimator approximation. Method: MCD(alpha=1 ==> h=12) alpha = 1: The minimum covariance determinant estimates based on 12 observations are equal to the classical estimates. Call: covMcd(x = rep(1, 12), alpha = 1) The classical covariance matrix is singular. Log(Det.): -Inf Robust Estimate of Location: rep(1, 12) 1 Robust Estimate of Covariance: rep(1, 12) rep(1, 12) 0 > str(rc1) List of 16 $ call : language covMcd(x = rep(1, 12), alpha = 1) $ nsamp : num 500 $ method : chr "MCD(alpha=1 ==> h=12) \nalpha = 1: The minimum covariance determinant estimates based on 12 observations \nare "| __truncated__ $ cov : num [1, 1] 0 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "rep(1, 12)" .. ..$ : chr "rep(1, 12)" $ center : Named num 1 ..- attr(*, "names")= chr "rep(1, 12)" $ n.obs : int 12 $ singularity:List of 1 ..$ kind: chr "classical" $ alpha : num 1 $ quan : num 12 $ raw.cov : num [1, 1] 0 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "rep(1, 12)" .. ..$ : chr "rep(1, 12)" $ raw.center : Named num 1 ..- attr(*, "names")= chr "rep(1, 12)" $ crit : num -Inf $ mcd.wt : num [1:12] 1 1 1 1 1 1 1 1 1 1 ... $ X : num [1:12, 1] 1 1 1 1 1 1 1 1 1 1 ... ..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:12] "1" "2" "3" "4" ... .. ..$ : NULL $ raw.cnp2 : num [1:2] 1 1 $ cnp2 : num [1:2] 1 1 - attr(*, "class")= chr "mcd" > summary(rc1) Minimum Covariance Determinant (MCD) estimator approximation. Method: MCD(alpha=1 ==> h=12) alpha = 1: The minimum covariance determinant estimates based on 12 observations are equal to the classical estimates. Call: covMcd(x = rep(1, 12), alpha = 1) The classical covariance matrix is singular. Log(Det.): -Inf Robust Estimate of Location: rep(1, 12) 1 Robust Estimate of Covariance: rep(1, 12) rep(1, 12) 0 Eigenvalues: [1] 0 Robustness weights: All 12 weights are ~= 1. 
> > ### 2) constant observations { multivariate scale == 0 } ----------- > (X <- matrix(rep(2*(1:4), 12), nrow = 12, byrow = TRUE)) [,1] [,2] [,3] [,4] [1,] 2 4 6 8 [2,] 2 4 6 8 [3,] 2 4 6 8 [4,] 2 4 6 8 [5,] 2 4 6 8 [6,] 2 4 6 8 [7,] 2 4 6 8 [8,] 2 4 6 8 [9,] 2 4 6 8 [10,] 2 4 6 8 [11,] 2 4 6 8 [12,] 2 4 6 8 > (rC <- covMcd(X)) Minimum Covariance Determinant (MCD) estimator approximation. Method: Fast MCD(alpha=0.5 ==> h=8); nsamp = 500; (n,k)mini = (300,5) Call: covMcd(x = X) The covariance matrix of the data is singular. There are 12 observations (in the entire dataset of 12 obs.) lying on the hyperplane with equation a_1*(x_i1 - m_1) + ... + a_p*(x_ip - m_p) = 0 with (m_1, ..., m_p) the mean of these observations and coefficients a_i from the vector a <- c(1, 0, 0, 0) Log(Det.): -Inf Robust Estimate of Location: [1] 2 4 6 8 Robust Estimate of Covariance: [,1] [,2] [,3] [,4] [1,] 0 0 0 0 [2,] 0 0 0 0 [3,] 0 0 0 0 [4,] 0 0 0 0 Warning message: In covMcd(X) : The covariance matrix of the data is singular. There are 12 observations (in the entire dataset of 12 obs.) lying on the hyperplane with equation a_1*(x_i1 - m_1) + ... + a_p*(x_ip - m_p) = 0 with (m_1, ..., m_p) the mean of these observations and coefficients a_i from the vector a <- c(1, 0, 0, 0) > summary(rC) Minimum Covariance Determinant (MCD) estimator approximation. Method: Fast MCD(alpha=0.5 ==> h=8); nsamp = 500; (n,k)mini = (300,5) Call: covMcd(x = X) The covariance matrix of the data is singular. There are 12 observations (in the entire dataset of 12 obs.) lying on the hyperplane with equation a_1*(x_i1 - m_1) + ... + a_p*(x_ip - m_p) = 0 with (m_1, ..., m_p) the mean of these observations and coefficients a_i from the vector a <- c(1, 0, 0, 0) Log(Det.): -Inf Robust Estimate of Location: [1] 2 4 6 8 Robust Estimate of Covariance: [,1] [,2] [,3] [,4] [1,] 0 0 0 0 [2,] 0 0 0 0 [3,] 0 0 0 0 [4,] 0 0 0 0 Eigenvalues: [1] 0 0 0 0 Robustness weights: All 12 weights are ~= 1. > (rC1 <- covMcd(X, alpha = 1)) Minimum Covariance Determinant (MCD) estimator approximation. Method: MCD(alpha=1 ==> h=12) alpha = 1: The minimum covariance determinant estimates based on 12 observations are equal to the classical estimates. Call: covMcd(x = X, alpha = 1) The classical covariance matrix is singular. Log(Det.): -Inf Robust Estimate of Location: [1] 2 4 6 8 Robust Estimate of Covariance: [,1] [,2] [,3] [,4] [1,] 0 0 0 0 [2,] 0 0 0 0 [3,] 0 0 0 0 [4,] 0 0 0 0 > summary(rC1) Minimum Covariance Determinant (MCD) estimator approximation. Method: MCD(alpha=1 ==> h=12) alpha = 1: The minimum covariance determinant estimates based on 12 observations are equal to the classical estimates. Call: covMcd(x = X, alpha = 1) The classical covariance matrix is singular. Log(Det.): -Inf Robust Estimate of Location: [1] 2 4 6 8 Robust Estimate of Covariance: [,1] [,2] [,3] [,4] [1,] 0 0 0 0 [2,] 0 0 0 0 [3,] 0 0 0 0 [4,] 0 0 0 0 Eigenvalues: [1] 0 0 0 0 Robustness weights: All 12 weights are ~= 1. 
> > ### 3) alpha = 1 : classical estimates --- for general cases -------- > > > cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' Time elapsed: 0.179 0.043 0.231 0.003 0.004 > > proc.time() user system elapsed 0.182 0.047 0.231 robustbase/tests/binom-ni-small.Rout.save0000644000176200001440000002756212553432042020227 0ustar liggesusers R Under development (unstable) (2015-07-18 r68693) -- "Unsuffered Consequences" Copyright (C) 2015 The R Foundation for Statistical Computing Platform: x86_64-unknown-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > library(robustbase) > > ### Binomial example with *small* ni > > N <- 51 > set.seed(123) > table(ni <- rpois(N, lam=4))# has 4 '1's, (no '0') 1 2 3 4 5 6 7 8 10 4 8 10 9 7 4 5 3 1 > n0 <- ni; n0[print(which(ni == 1)[1:2])] <- 0 # has two '0's [1] 6 18 > x <- seq(0,1, length=N) > pr.x <- plogis(5*(x - 1/2)) > k <- rbinom(N, size = ni, prob = pr.x) > k0 <- rbinom(N, size = n0, prob = pr.x) > cbind(k,ni, k0,n0) k ni k0 n0 [1,] 0 3 0 3 [2,] 1 6 2 6 [3,] 0 3 0 3 [4,] 1 6 2 6 [5,] 0 7 2 7 [6,] 0 1 0 0 [7,] 1 4 1 4 [8,] 2 7 1 7 [9,] 0 4 0 4 [10,] 1 4 2 4 [11,] 0 8 1 8 [12,] 0 4 0 4 [13,] 0 5 3 5 [14,] 2 4 1 4 [15,] 0 2 0 2 [16,] 3 7 2 7 [17,] 2 3 2 3 [18,] 1 1 0 0 [19,] 1 3 1 3 [20,] 4 8 2 8 [21,] 3 7 3 7 [22,] 3 5 1 5 [23,] 0 5 2 5 [24,] 4 10 3 10 [25,] 1 5 2 5 [26,] 2 5 5 5 [27,] 2 4 3 4 [28,] 3 4 4 4 [29,] 3 3 3 3 [30,] 2 2 1 2 [31,] 4 8 5 8 [32,] 5 7 3 7 [33,] 3 5 3 5 [34,] 6 6 3 6 [35,] 1 1 1 1 [36,] 1 4 3 4 [37,] 3 5 3 5 [38,] 1 2 1 2 [39,] 3 3 1 3 [40,] 2 2 2 2 [41,] 2 2 2 2 [42,] 3 3 3 3 [43,] 2 3 3 3 [44,] 3 3 3 3 [45,] 2 2 1 2 [46,] 1 2 2 2 [47,] 2 2 2 2 [48,] 4 4 4 4 [49,] 3 3 3 3 [50,] 5 6 5 6 [51,] 1 1 1 1 > g1 <- glm(cbind(k , ni-k ) ~ x, family = binomial) > coef(summary(g1))[,1:2] Estimate Std. Error (Intercept) -2.515884 0.3784211 x 5.123650 0.7344629 > g0 <- glm(cbind(k0, n0-k0) ~ x, family = binomial)# works too > g0. <- glm(cbind(k0, n0-k0) ~ x, family = binomial, subset = n0 > 0) > ## all.equal(g0, g0.) > stopifnot(all.equal(print(coef(summary(g0))), coef(summary(g0.)))) Estimate Std. Error z value Pr(>|z|) (Intercept) -1.913157 0.3346560 -5.716786 1.085574e-08 x 4.061024 0.6512647 6.235596 4.500620e-10 > > > rg1 <- glmrob(cbind(k , ni-k ) ~ x, family = binomial) > rg1. <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, + acc = 1e-10) # default is just 1e-4 > > stopifnot(all.equal(unname(coef(rg1.)), c(-2.37585864, 4.902389143), tolerance=1e-9), + all.equal(coef(rg1), coef(rg1.), tolerance=1e-4), + all.equal(vcov(rg1.), vcov(rg1), tolerance = 1e-4)) > rg1$iter [1] 5 > which(rg1.$w.r != 1) ## 7 of them : [1] 11 18 23 29 34 36 46 > str(rg1.["family" != names(rg1.)]) List of 27 $ coefficients : Named num [1:2] -2.38 4.9 ..- attr(*, "names")= chr [1:2] "(Intercept)" "x" $ residuals : Named num [1:51] -0.528 0.622 -0.582 0.435 -0.981 ... ..- attr(*, "names")= chr [1:51] "1" "2" "3" "4" ... $ fitted.values : Named num [1:51] 0.085 0.093 0.102 0.111 0.121 ... ..- attr(*, "names")= chr [1:51] "1" "2" "3" "4" ... $ w.r : num [1:51] 1 1 1 1 1 1 1 1 1 1 ... 
$ w.x : num [1:51] 1 1 1 1 1 1 1 1 1 1 ... $ ni : num [1:51] 3 6 3 6 7 1 4 7 4 4 ... $ dispersion : num 1 $ cov : num [1:2, 1:2] 0.144 -0.253 -0.253 0.55 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:2] "(Intercept)" "x" .. ..$ : chr [1:2] "(Intercept)" "x" $ matM : num [1:2, 1:2] 0.625 0.287 0.287 0.163 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:2] "(Intercept)" "x" .. ..$ : chr [1:2] "(Intercept)" "x" $ matQ : num [1:2, 1:2] 0.551 0.252 0.252 0.143 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:2] "(Intercept)" "x" .. ..$ : chr [1:2] "(Intercept)" "x" $ tcc : num 1.34 $ linear.predictors: Named num [1:51] -2.38 -2.28 -2.18 -2.08 -1.98 ... ..- attr(*, "names")= chr [1:51] "1" "2" "3" "4" ... $ deviance : NULL $ iter : int 11 $ y : Named num [1:51] 0 0.167 0 0.167 0 ... ..- attr(*, "names")= chr [1:51] "1" "2" "3" "4" ... $ converged : logi TRUE $ model :'data.frame': 51 obs. of 2 variables: ..$ cbind(k, ni - k): int [1:51, 1:2] 0 1 0 1 0 0 1 2 0 1 ... .. ..- attr(*, "dimnames")=List of 2 .. .. ..$ : NULL .. .. ..$ : chr [1:2] "k" "" ..$ x : num [1:51] 0 0.02 0.04 0.06 0.08 0.1 0.12 0.14 0.16 0.18 ... ..- attr(*, "terms")=Classes 'terms', 'formula' language cbind(k, ni - k) ~ x .. .. ..- attr(*, "variables")= language list(cbind(k, ni - k), x) .. .. ..- attr(*, "factors")= int [1:2, 1] 0 1 .. .. .. ..- attr(*, "dimnames")=List of 2 .. .. .. .. ..$ : chr [1:2] "cbind(k, ni - k)" "x" .. .. .. .. ..$ : chr "x" .. .. ..- attr(*, "term.labels")= chr "x" .. .. ..- attr(*, "order")= int 1 .. .. ..- attr(*, "intercept")= int 1 .. .. ..- attr(*, "response")= int 1 .. .. ..- attr(*, ".Environment")= .. .. ..- attr(*, "predvars")= language list(cbind(k, ni - k), x) .. .. ..- attr(*, "dataClasses")= Named chr [1:2] "nmatrix.2" "numeric" .. .. .. ..- attr(*, "names")= chr [1:2] "cbind(k, ni - k)" "x" $ call : language glmrob(formula = cbind(k, ni - k) ~ x, family = binomial, acc = 1e-10) $ formula :Class 'formula' language cbind(k, ni - k) ~ x .. ..- attr(*, ".Environment")= $ terms :Classes 'terms', 'formula' language cbind(k, ni - k) ~ x .. ..- attr(*, "variables")= language list(cbind(k, ni - k), x) .. ..- attr(*, "factors")= int [1:2, 1] 0 1 .. .. ..- attr(*, "dimnames")=List of 2 .. .. .. ..$ : chr [1:2] "cbind(k, ni - k)" "x" .. .. .. ..$ : chr "x" .. ..- attr(*, "term.labels")= chr "x" .. ..- attr(*, "order")= int 1 .. ..- attr(*, "intercept")= int 1 .. ..- attr(*, "response")= int 1 .. ..- attr(*, ".Environment")= .. ..- attr(*, "predvars")= language list(cbind(k, ni - k), x) .. ..- attr(*, "dataClasses")= Named chr [1:2] "nmatrix.2" "numeric" .. .. ..- attr(*, "names")= chr [1:2] "cbind(k, ni - k)" "x" $ data : $ offset : NULL $ control :List of 4 ..$ acc : num 1e-10 ..$ test.acc: chr "coef" ..$ maxit : num 50 ..$ tcc : num 1.34 $ method : chr "Mqle" $ prior.weights : num [1:51] 1 1 1 1 1 1 1 1 1 1 ... 
$ contrasts : NULL $ xlevels : Named list() > > rg2 <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, + acc = 1e-10, tcc = 3) # large cutoff: almost classical > vcov(rg2) # << already close to limit (Intercept) x (Intercept) 0.1430407 -0.2501886 x -0.2501886 0.5388665 > rg10 <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, tcc = 10) > rgL <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, tcc = 100) > > no.comp <- - match(c("call", "data", "family", "control", "tcc"), names(rg10)) > stopifnot(all.equal(rg10[no.comp], rgL[no.comp], tolerance= 1e-14)) > > vcov(rgL) # is now the same as the following: (Intercept) x (Intercept) 0.1432102 -0.2504843 x -0.2504843 0.5394659 > if(FALSE) { ## tcc=Inf fails: non-convergence / singular matrix from GOTO/Atlas3 + rgI <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, tcc = Inf) + ## tcc = Inf still *FAILS* (!) + stopifnot(all.equal(rgL[no.comp], rgI[no.comp], tolerance= 0)) + ## and is quite close to the classic one: + (all.equal(vcov(rgI), vcov(g1))) + } > > rg0 <- glmrob(cbind(k0, n0-k0) ~ x, family = binomial) Warning message: In glmrobMqle(X = X, y = Y, weights = weights, start = start, offset = offset, : fitted probabilities numerically 0 or 1 occurred > ## --> warning.. > rg0. <- glmrob(cbind(k0, n0-k0) ~ x, family = binomial, subset = n0 > 0) > > coef(summary(rg0)) # not yet good (cf. 'g0' above!) -- but the one of rg0. is Estimate Std. Error z value Pr(>|z|) (Intercept) -1.852918 NaN NaN NaN x 3.847520 NaN NaN NaN > stopifnot(all.equal(coef(rg0), coef(rg0.))) > > > ### Example where all ni >= 3 -- works better, now also correct as.var. !! > ### ----------------- ======= > > min(n3 <- ni + 2)# = 3 [1] 3 > k3 <- rbinom(N, size = n3, prob = pr.x) > g3 <- glm(cbind(k3 , n3-k3) ~ x, family = binomial) > (cfg <- coef(summary(g3))[,1:2]) Estimate Std. Error (Intercept) -2.945565 0.3420351 x 5.546417 0.6259260 > stopifnot(all.equal(sqrt(diag(vcov(g3))), cfg[,2])) > > rg3 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial) > (s3 <- summary(rg3)) Call: glmrob(formula = cbind(k3, n3 - k3) ~ x, family = binomial) Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) -2.8901 0.3499 -8.260 <2e-16 *** x 5.5039 0.6447 8.537 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robustness weights w.r * w.x: 48 weights are ~= 1. The remaining 3 ones are 42 44 51 0.5127 0.7846 0.7388 Number of observations: 51 Fitted by method 'Mqle' (in 5 iterations) (Dispersion parameter for binomial family taken to be 1) No deviance values available Algorithmic parameters: acc tcc 0.0001 1.3450 maxit 50 test.acc "coef" > summary(rg3$w.r) Min. 1st Qu. Median Mean 3rd Qu. Max. 0.5127 1.0000 1.0000 0.9811 1.0000 1.0000 > rg3.5 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial, tcc = 5) > (s3.5 <- summary(rg3.5)) Call: glmrob(formula = cbind(k3, n3 - k3) ~ x, family = binomial, tcc = 5) Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) -2.9454 0.3420 -8.611 <2e-16 *** x 5.5461 0.6259 8.861 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robustness weights w.r * w.x: All 51 weights are ~= 1. Number of observations: 51 Fitted by method 'Mqle' (in 1 iterations) (Dispersion parameter for binomial family taken to be 1) No deviance values available Algorithmic parameters: acc 1e-04 maxit tcc 50 5 test.acc "coef" > summary(rg3.5$w.r)# all 1 Min. 1st Qu. Median Mean 3rd Qu. Max. 
1 1 1 1 1 1 > stopifnot(all.equal(coef(s3)[,1:2], coef(s3.5)[,1:2], tolerance = 0.02)) > > rg3.15 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial, tcc = 15, acc=1e-10) > (s3.15 <- summary(rg3.15)) Call: glmrob(formula = cbind(k3, n3 - k3) ~ x, family = binomial, tcc = 15, acc = 1e-10) Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) -2.9456 0.3420 -8.612 <2e-16 *** x 5.5464 0.6259 8.861 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robustness weights w.r * w.x: All 51 weights are ~= 1. Number of observations: 51 Fitted by method 'Mqle' (in 1 iterations) (Dispersion parameter for binomial family taken to be 1) No deviance values available Algorithmic parameters: acc 1e-10 maxit tcc 50 15 test.acc "coef" > > stopifnot(all.equal(coef(s3.15)[,1:2], cfg, tolerance = 1e-5),# 2e-6 + all.equal(cfg[,"Estimate"], rg3.15$coeff, tolerance= 1e-8) # 6.05e-10 + ) > ##rg3.15$eff # == 1 > > ## doesn't change any more: > rg3.1000 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial, tcc = 1000, + acc=1e-10) > stopifnot(all.equal(rg3.1000[no.comp], + rg3.15 [no.comp], tol = 1e-13)) > > cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' Time elapsed: 0.389 0.082 0.561 0 0.008 > > proc.time() user system elapsed 0.389 0.090 0.561 robustbase/tests/binom-ni-small.R0000644000176200001440000000644312271657124016544 0ustar liggesuserslibrary(robustbase) ### Binomial example with *small* ni N <- 51 set.seed(123) table(ni <- rpois(N, lam=4))# has 4 '1's, (no '0') n0 <- ni; n0[print(which(ni == 1)[1:2])] <- 0 # has two '0's x <- seq(0,1, length=N) pr.x <- plogis(5*(x - 1/2)) k <- rbinom(N, size = ni, prob = pr.x) k0 <- rbinom(N, size = n0, prob = pr.x) cbind(k,ni, k0,n0) g1 <- glm(cbind(k , ni-k ) ~ x, family = binomial) coef(summary(g1))[,1:2] g0 <- glm(cbind(k0, n0-k0) ~ x, family = binomial)# works too g0. <- glm(cbind(k0, n0-k0) ~ x, family = binomial, subset = n0 > 0) ## all.equal(g0, g0.) stopifnot(all.equal(print(coef(summary(g0))), coef(summary(g0.)))) rg1 <- glmrob(cbind(k , ni-k ) ~ x, family = binomial) rg1. <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, acc = 1e-10) # default is just 1e-4 stopifnot(all.equal(unname(coef(rg1.)), c(-2.37585864, 4.902389143), tolerance=1e-9), all.equal(coef(rg1), coef(rg1.), tolerance=1e-4), all.equal(vcov(rg1.), vcov(rg1), tolerance = 1e-4)) rg1$iter which(rg1.$w.r != 1) ## 7 of them : str(rg1.["family" != names(rg1.)]) rg2 <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, acc = 1e-10, tcc = 3) # large cutoff: almost classical vcov(rg2) # << already close to limit rg10 <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, tcc = 10) rgL <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, tcc = 100) no.comp <- - match(c("call", "data", "family", "control", "tcc"), names(rg10)) stopifnot(all.equal(rg10[no.comp], rgL[no.comp], tolerance= 1e-14)) vcov(rgL) # is now the same as the following: if(FALSE) { ## tcc=Inf fails: non-convergence / singular matrix from GOTO/Atlas3 rgI <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, tcc = Inf) ## tcc = Inf still *FAILS* (!) stopifnot(all.equal(rgL[no.comp], rgI[no.comp], tolerance= 0)) ## and is quite close to the classic one: (all.equal(vcov(rgI), vcov(g1))) } rg0 <- glmrob(cbind(k0, n0-k0) ~ x, family = binomial) ## --> warning.. rg0. <- glmrob(cbind(k0, n0-k0) ~ x, family = binomial, subset = n0 > 0) coef(summary(rg0)) # not yet good (cf. 'g0' above!) -- but the one of rg0. 
is stopifnot(all.equal(coef(rg0), coef(rg0.))) ### Example where all ni >= 3 -- works better, now also correct as.var. !! ### ----------------- ======= min(n3 <- ni + 2)# = 3 k3 <- rbinom(N, size = n3, prob = pr.x) g3 <- glm(cbind(k3 , n3-k3) ~ x, family = binomial) (cfg <- coef(summary(g3))[,1:2]) stopifnot(all.equal(sqrt(diag(vcov(g3))), cfg[,2])) rg3 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial) (s3 <- summary(rg3)) summary(rg3$w.r) rg3.5 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial, tcc = 5) (s3.5 <- summary(rg3.5)) summary(rg3.5$w.r)# all 1 stopifnot(all.equal(coef(s3)[,1:2], coef(s3.5)[,1:2], tolerance = 0.02)) rg3.15 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial, tcc = 15, acc=1e-10) (s3.15 <- summary(rg3.15)) stopifnot(all.equal(coef(s3.15)[,1:2], cfg, tolerance = 1e-5),# 2e-6 all.equal(cfg[,"Estimate"], rg3.15$coeff, tolerance= 1e-8) # 6.05e-10 ) ##rg3.15$eff # == 1 ## doesn't change any more: rg3.1000 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial, tcc = 1000, acc=1e-10) stopifnot(all.equal(rg3.1000[no.comp], rg3.15 [no.comp], tol = 1e-13)) cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/NAcoef.R0000644000176200001440000001400114236516416015047 0ustar liggesusers## test handing of NA coefficients / singular fits ## also check: ## -- what would have to be done if class "lm" was added. ## -- general compatibility to class lm. require(robustbase) options(digits = 5)# -> higher chance of platform independence ## generate simple example data (almost as in ./weights.R ) data <- expand.grid(x1=letters[1:3], x2=LETTERS[1:3], rep=1:3) set.seed(1) data$y <- rnorm(nrow(data)) ## drop all combinations of one interaction: data <- subset(data, x1 != 'c' | (x2 != 'B' & x2 != 'C')) ## add collinear variables data$x3 <- rnorm(nrow(data)) data$x4 <- rnorm(nrow(data)) data$x5 <- data$x3 + data$x4 ## add some NA terms data$y[1] <- NA data$x4[2:3] <- NA ## to test anova ## Classical models start with 'cm', robust just with 'rm' (or just 'm'): cm0 <- lm (y ~ x1*x2 + x3, data) cm1 <- lm (y ~ x1*x2 + x3 + x4 + x5, data) set.seed(2) rm1 <- lmrob(y ~ x1*x2 + x3 + x4 + x5, data) m3 <- lmrob(y ~ x1*x2 + x3 + x4, data) # same column space as rm1 rm0 <- lmrob(y ~ x1*x2 + x3, data) ## clean version of rm1 (to check predict) data2 <- data.frame(y=data$y[-(1:3)], rm1$x[,!is.na(rm1$coef)]) set.seed(2) rm1c <- lmrob(y ~ x1b + x1c + x2B + x2C + x3 + x4 + x1b:x2B + x1b:x2C, data2) ## add class lm to rm1 (for now) class(rm1) <- c(class(rm1), "lm") class(rm0) <- c(class(rm0), "lm") ## the full matrix (data) should be returned by model matrix (frame) stopifnot(all.equal(model.matrix(cm1), model.matrix(rm1)), all.equal(model.frame (cm1), model.frame (rm1))) ## qr decomposition should be for the full data and pivots identical lm result qr.cm1 <- qr(cm1)$qr qr.rm1 <- rm1$qr$qr stopifnot(NCOL(qr.rm1) == NCOL(qr.cm1), NROW(qr.rm1) == NROW(qr.cm1), length(rm1$qr$qraux) == length(qr(cm1)$qraux), all.equal(rm1$qr$pivot, qr(cm1)$pivot), all.equal(dimnames(qr.rm1),dimnames(qr.cm1))) ## the alias function should return the same result stopifnot(all.equal(alias(cm1), alias(rm1))) #### ## these helper functions should print NAs for the dropped coefficients print(rm1) summary(rm1) -> s1 confint(rm1) -> ci1 stopifnot(identical(is.na(coef(cm1)), apply(ci1, 1L, anyNA)), identical(sigma(rm1), s1$ sigma), identical(vcov(rm1, complete=FALSE), s1$ cov ), TRUE) print(s1, showAlgo=FALSE) ci1 ## drop1 should return df = 0 #drop1(rm1) ## drop.lm does not return valid results (yet)! 
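## Quick illustration (not one of the formal checks here; guarded so that the
## saved *.Rout.save output is unaffected): the terms made redundant by
## x5 = x3 + x4 and by the dropped x1:x2 cells show up as NA entries of coef():
if(interactive())
    print(names(coef(rm1))[is.na(coef(rm1))]) # "x5"  "x1c:x2B"  "x1c:x2C"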
#### ## methods that should just drop the NA coefficients ## m3 is actually the same as rm1, so anova should raise an error tools::assertError(anova(rm1, m3, test="Wald")) tools::assertError(anova(rm1, m3, test="Deviance")) ## but comparing rm1 and rm0 should be ok anova(rm1, rm0, test="Wald") anova(rm1, rm0, test="Deviance") ## commands with single #: ## they do (or might) not return sensible results for robust fits ## and need to be checked again #cooks.distance(rm1) #deviance(rm1) #dfbeta(rm1) #dfbetas(rm1) #effects(rm1) ## fails #extractAIC(rm1) #influence(rm1) stopifnot(all.equal(hv1 <- hatvalues(rm1), .lmrob.hat(wqr=rm1$qr), tol=1e-15), all.equal(hv1, stats:::hatvalues.lm(rm1), tol=1e-15), all.equal(hat(cm1$qr), unname(hatvalues(cm1)), tol=1e-15), all.equal(unname(hv1), hat(rm1$qr), tol=1e-15), ## ditto : all.equal(hv1c <- hatvalues(rm1c), stats:::hatvalues.lm(rm1c), tol=1e-15)) ## kappa() & labels() : stopifnot(is.infinite(kr1 <- kappa(rm1)), kr1 == kappa(cm1), # = +Inf both identical(labels(rm1), labels(cm1))) logLik(rm1)# well, and what does it mean? ## plot(rm1, which=1) ## plot.lmrob() fails "singular covariance" .. FIXME! par(mfrow=c(2,2)) plot(rm1, which=2:4) stopifnot(all.equal(predict(rm1), predict(rm1c), tol=1e-15), all.equal(predict(rm1, se.fit=TRUE, interval="confidence"), predict(rm1c, se.fit=TRUE, interval="confidence"), tol=4e-15)) # seen 1.3e-15 (ATLAS) predict(rm1, type="terms", se.fit=TRUE, interval="confidence") #proj(rm1) ## fails "FIXME" residuals(rm1) #rstandard(rm1) #rstudent(rm1) #simulate(rm1) ## just $weights needs to be changed to prior weights V1 <- vcov(rm1, complete=FALSE) ## but don't show the "eigen" part {vectors may flip sign}: attributes(V1) <- attributes(V1)[c("dim","dimnames", "weights")]; V1 set.seed(12); sc <- simulate(cm1, 64) set.seed(12); rc <- simulate(rm1, 64) stopifnot(all.equal(sqrt(diag(V1)), coef(summary(rm1))[,"Std. Error"], tol=1e-15), all.equal(sc, rc, tolerance = 0.08),# dimension *and* approx. values (no NA) identical(variable.names(rm1), variable.names(cm1)), all.equal(residuals(rm1), residuals(cm1), tolerance = 0.05),# incl. names all.equal(rstudent (rm1), rstudent (cm1), tolerance = 0.06), identical(dimnames(rm1), dimnames(cm1)), all.equal(dummy.coef(rm1), dummy.coef(cm1), tolerance= .5)) ## check mostly structure ## other helper functions stopifnot(identical(case.names(rm1), case.names(cm1)), all.equal(family(rm1), family(cm1)),# identical() upto environment identical(formula(rm1), formula(cm1)), nobs(rm1) == nobs(cm1)) #add1(rm0, ~ . + x3 + x4 + x5) ## does not return valid results (yet)! 
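## Side note on anova(rm1, rm0, test="Wald") above: since x5 is aliased, the
## comparison effectively tests only x4 (one df), so its statistic is just the
## squared robust t value of x4; a quick interactive check (output not saved):
if(interactive())
    print(coef(summary(rm1))["x4", "t value"]^2) # ~ 1.62, as in the Wald table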
## test other initial estimators lmrob(y ~ x1*x2 + x3 + x4 + x5, data, init="M-S") lmrob(y ~ x1*x2 + x3 + x4 + x5, data, init=lmrob.lar) ## test all zero design matrix data <- data.frame(y=1:10,x1=0,x2=0,os=2,w=c(0.5, 1)) (m5 <- lmrob(y ~ 1+x1+x2+offset(os), data, weights=w)) (sm5 <- summary(m5)) (m6 <- lmrob(y ~ 0+x1+x2+offset(os), data, weights=w)) (sm6 <- summary(m6)) sc5 <- summary(cm5 <- lm(y ~ 1+x1+x2+offset(os), data, weights=w)) sc6 <- summary(cm6 <- lm(y ~ 0+x1+x2+offset(os), data, weights=w)) if(getRversion() <= "3.5.1" && as.numeric(R.version$`svn rev`) < 74993) ## in the past, lm() returned logical empty matrix storage.mode(sc6$coefficients) <- "double" stopifnot(all.equal(coef(m5), coef(cm5), tolerance = 0.01), all.equal(coef(m6), coef(cm6), tolerance = 1e-14), all.equal(coef(sm5), coef(sc5), tolerance = 0.05), all.equal(coef(sm6), coef(sc6), tolerance = 1e-14), identical(sm5$df, sc5$df), identical(sm6$df, sc6$df)) robustbase/tests/NAcoef.Rout.save0000644000176200001440000004662314437610457016556 0ustar liggesusers R version 4.3.1 beta (2023-06-05 r84504) -- "Beagle Scouts" Copyright (C) 2023 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > ## test handing of NA coefficients / singular fits > ## also check: > ## -- what would have to be done if class "lm" was added. > ## -- general compatibility to class lm. 
> require(robustbase) Loading required package: robustbase > options(digits = 5)# -> higher chance of platform independence > > ## generate simple example data (almost as in ./weights.R ) > data <- expand.grid(x1=letters[1:3], x2=LETTERS[1:3], rep=1:3) > set.seed(1) > data$y <- rnorm(nrow(data)) > ## drop all combinations of one interaction: > data <- subset(data, x1 != 'c' | (x2 != 'B' & x2 != 'C')) > ## add collinear variables > data$x3 <- rnorm(nrow(data)) > data$x4 <- rnorm(nrow(data)) > data$x5 <- data$x3 + data$x4 > ## add some NA terms > data$y[1] <- NA > data$x4[2:3] <- NA ## to test anova > > ## Classical models start with 'cm', robust just with 'rm' (or just 'm'): > cm0 <- lm (y ~ x1*x2 + x3, data) > cm1 <- lm (y ~ x1*x2 + x3 + x4 + x5, data) > set.seed(2) > rm1 <- lmrob(y ~ x1*x2 + x3 + x4 + x5, data) > m3 <- lmrob(y ~ x1*x2 + x3 + x4, data) # same column space as rm1 > rm0 <- lmrob(y ~ x1*x2 + x3, data) > > ## clean version of rm1 (to check predict) > data2 <- data.frame(y=data$y[-(1:3)], rm1$x[,!is.na(rm1$coef)]) > set.seed(2) > rm1c <- lmrob(y ~ x1b + x1c + x2B + x2C + x3 + x4 + x1b:x2B + x1b:x2C, data2) > > ## add class lm to rm1 (for now) > class(rm1) <- c(class(rm1), "lm") > class(rm0) <- c(class(rm0), "lm") > > ## the full matrix (data) should be returned by model matrix (frame) > stopifnot(all.equal(model.matrix(cm1), model.matrix(rm1)), + all.equal(model.frame (cm1), model.frame (rm1))) > ## qr decomposition should be for the full data and pivots identical lm result > qr.cm1 <- qr(cm1)$qr > qr.rm1 <- rm1$qr$qr > stopifnot(NCOL(qr.rm1) == NCOL(qr.cm1), + NROW(qr.rm1) == NROW(qr.cm1), + length(rm1$qr$qraux) == length(qr(cm1)$qraux), + all.equal(rm1$qr$pivot, qr(cm1)$pivot), + all.equal(dimnames(qr.rm1),dimnames(qr.cm1))) > ## the alias function should return the same result > stopifnot(all.equal(alias(cm1), alias(rm1))) > > #### > ## these helper functions should print NAs for the dropped coefficients > print(rm1) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data) \--> method = "MM" Coefficients: (Intercept) x1b x1c x2B x2C x3 0.4381 0.5968 0.0344 0.2012 0.1789 -0.1320 x4 x5 x1b:x2B x1c:x2B x1b:x2C x1c:x2C -0.2155 NA -1.8763 NA -0.8651 NA > summary(rm1) -> s1 > confint(rm1) -> ci1 > stopifnot(identical(is.na(coef(cm1)), apply(ci1, 1L, anyNA)), + identical(sigma(rm1), s1$ sigma), + identical(vcov(rm1, complete=FALSE), s1$ cov ), + TRUE) > > print(s1, showAlgo=FALSE) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -1.4584 -0.3556 0.0246 0.3651 1.0296 Coefficients: (3 not defined because of singularities) Estimate Std. Error t value Pr(>|t|) (Intercept) 0.4381 0.5443 0.80 0.44 x1b 0.5968 0.6423 0.93 0.38 x1c 0.0344 0.6880 0.05 0.96 x2B 0.2012 0.7164 0.28 0.79 x2C 0.1789 0.6871 0.26 0.80 x3 -0.1320 0.4155 -0.32 0.76 x4 -0.2155 0.1694 -1.27 0.24 x5 NA NA NA NA x1b:x2B -1.8763 1.2153 -1.54 0.16 x1c:x2B NA NA NA NA x1b:x2C -0.8651 0.7466 -1.16 0.28 x1c:x2C NA NA NA NA Robust residual standard error: 0.927 (3 observations deleted due to missingness) Multiple R-squared: 0.338, Adjusted R-squared: -0.251 Convergence in 15 IRWLS iterations Robustness weights: 2 weights are ~= 1. The remaining 16 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 
0.787 0.937 0.985 0.952 0.988 0.994 > ci1 2.5 % 97.5 % (Intercept) -0.79333 1.66946 x1b -0.85607 2.04973 x1c -1.52188 1.59076 x2B -1.41948 1.82189 x2C -1.37549 1.73320 x3 -1.07182 0.80783 x4 -0.59863 0.16756 x5 NA NA x1b:x2B -4.62539 0.87283 x1c:x2B NA NA x1b:x2C -2.55391 0.82381 x1c:x2C NA NA > ## drop1 should return df = 0 > #drop1(rm1) ## drop.lm does not return valid results (yet)! > > #### > ## methods that should just drop the NA coefficients > ## m3 is actually the same as rm1, so anova should raise an error > tools::assertError(anova(rm1, m3, test="Wald")) > tools::assertError(anova(rm1, m3, test="Deviance")) > ## but comparing rm1 and rm0 should be ok > anova(rm1, rm0, test="Wald") Robust Wald Test Table Model 1: y ~ x1 * x2 + x3 + x4 + x5 Model 2: y ~ x1 * x2 + x3 Largest model fitted by lmrob(), i.e. SM pseudoDf Test.Stat Df Pr(>chisq) 1 6 2 10 1.62 1 0.2 > anova(rm1, rm0, test="Deviance") Robust Deviance Table Model 1: y ~ x1 * x2 + x3 + x4 + x5 Model 2: y ~ x1 * x2 + x3 Largest model fitted by lmrob(), i.e. SM pseudoDf Test.Stat Df Pr(>chisq) 1 6 2 10 1.4 1 0.24 > ## commands with single #: > ## they do (or might) not return sensible results for robust fits > ## and need to be checked again > #cooks.distance(rm1) > #deviance(rm1) > #dfbeta(rm1) > #dfbetas(rm1) > #effects(rm1) ## fails > #extractAIC(rm1) > #influence(rm1) > stopifnot(all.equal(hv1 <- hatvalues(rm1), .lmrob.hat(wqr=rm1$qr), tol=1e-15), + all.equal(hv1, stats:::hatvalues.lm(rm1), tol=1e-15), + all.equal(hat(cm1$qr), unname(hatvalues(cm1)), tol=1e-15), + all.equal(unname(hv1), hat(rm1$qr), tol=1e-15), + ## ditto : + all.equal(hv1c <- hatvalues(rm1c), stats:::hatvalues.lm(rm1c), tol=1e-15)) > > ## kappa() & labels() : > stopifnot(is.infinite(kr1 <- kappa(rm1)), kr1 == kappa(cm1), # = +Inf both + identical(labels(rm1), labels(cm1))) > logLik(rm1)# well, and what does it mean? 'log Lik.' -17.67 (df=10) > ## plot(rm1, which=1) ## plot.lmrob() fails "singular covariance" .. FIXME! 
> par(mfrow=c(2,2)) > plot(rm1, which=2:4) > stopifnot(all.equal(predict(rm1), predict(rm1c), tol=1e-15), + all.equal(predict(rm1, se.fit=TRUE, interval="confidence"), + predict(rm1c, se.fit=TRUE, interval="confidence"), tol=4e-15)) # seen 1.3e-15 (ATLAS) > predict(rm1, type="terms", se.fit=TRUE, interval="confidence") $fit x1 x2 x3 x4 x5 x1:x2 4 -0.26908 0.074520 -0.166290 0.17233795 0 0.45689 5 0.32774 0.074520 0.026620 -0.03309916 0 -1.41939 7 -0.26908 0.052168 -0.038119 0.28384254 0 0.45689 8 0.32774 0.052168 0.020155 -0.26844357 0 -0.40816 10 -0.26908 -0.126688 0.194821 -0.38642275 0 0.45689 11 0.32774 -0.126688 0.067831 0.11957373 0 0.45689 12 -0.23465 -0.126688 0.065098 0.26547275 0 0.45689 13 -0.26908 0.074520 0.020882 -0.08237063 0 0.45689 14 0.32774 0.074520 -0.132148 0.06953345 0 -1.41939 16 -0.26908 0.052168 -0.087685 -0.47721028 0 0.45689 17 0.32774 0.052168 0.034769 0.04888197 0 -0.40816 19 -0.26908 -0.126688 0.046496 -0.10823918 0 0.45689 20 0.32774 -0.126688 -0.078945 0.03438888 0 0.45689 21 -0.23465 -0.126688 -0.060426 0.20062634 0 0.45689 22 -0.26908 0.074520 0.103967 -0.00026715 0 0.45689 23 0.32774 0.074520 0.106440 0.42945756 0 -1.41939 25 -0.26908 0.052168 -0.035072 -0.27545520 0 0.45689 26 0.32774 0.052168 -0.088392 0.00739277 0 -0.40816 attr(,"constant") [1] 0.32347 $se.fit x1 x2 x3 x4 x5 x1:x2 4 0.35192 0.42010 0.523390 0.13540939 0 0.29013 5 0.29582 0.42010 0.083786 0.02600668 0 0.95012 7 0.35192 0.40345 0.119979 0.22302078 0 0.29013 8 0.29582 0.40345 0.063436 0.21092151 0 0.53827 10 0.35192 0.40191 0.613190 0.30362011 0 0.29013 11 0.29582 0.40191 0.213494 0.09395148 0 0.29013 12 0.40411 0.40191 0.204892 0.20858727 0 0.29013 13 0.35192 0.42010 0.065724 0.06472026 0 0.29013 14 0.29582 0.42010 0.415930 0.05463383 0 0.95012 16 0.35192 0.40345 0.275984 0.37495370 0 0.29013 17 0.29582 0.40345 0.109434 0.03840755 0 0.53827 19 0.35192 0.40191 0.146343 0.08504570 0 0.29013 20 0.29582 0.40191 0.248476 0.02702003 0 0.29013 21 0.40411 0.40191 0.190187 0.15763614 0 0.29013 22 0.35192 0.42010 0.327230 0.00020991 0 0.29013 23 0.29582 0.42010 0.335015 0.33743343 0 0.95012 25 0.35192 0.40345 0.110386 0.21643068 0 0.29013 26 0.29582 0.40345 0.278210 0.00580864 0 0.53827 $lwr x1 x2 x3 x4 x5 x1:x2 4 -1.06517 -0.87582 -1.35028 -0.1339794 0 -0.19943 5 -0.34144 -0.87582 -0.16292 -0.0919303 0 -3.56872 7 -1.06517 -0.86049 -0.30953 -0.2206655 0 -0.19943 8 -0.34144 -0.86049 -0.12335 -0.7455812 0 -1.62581 10 -1.06517 -1.03588 -1.19231 -1.0732591 0 -0.19943 11 -0.34144 -1.03588 -0.41513 -0.0929593 0 -0.19943 12 -1.14880 -1.03588 -0.39840 -0.2063844 0 -0.19943 13 -1.06517 -0.87582 -0.12780 -0.2287780 0 -0.19943 14 -0.34144 -0.87582 -1.07305 -0.0540569 0 -3.56872 16 -1.06517 -0.86049 -0.71200 -1.3254145 0 -0.19943 17 -0.34144 -0.86049 -0.21279 -0.0380019 0 -1.62581 19 -1.06517 -1.03588 -0.28455 -0.3006259 0 -0.19943 20 -0.34144 -1.03588 -0.64104 -0.0267347 0 -0.19943 21 -1.14880 -1.03588 -0.49066 -0.1559714 0 -0.19943 22 -1.06517 -0.87582 -0.63628 -0.0007420 0 -0.19943 23 -0.34144 -0.87582 -0.65142 -0.3338699 0 -3.56872 25 -1.06517 -0.86049 -0.28478 -0.7650554 0 -0.19943 26 -0.34144 -0.86049 -0.71775 -0.0057473 0 -1.62581 attr(,"constant") [1] 0.32347 $upr x1 x2 x3 x4 x5 x1:x2 4 0.52701 1.02486 1.01770 0.47865527 0 1.11321 5 0.99693 1.02486 0.21616 0.02573203 0 0.72993 7 0.52701 0.96483 0.23329 0.78835059 0 1.11321 8 0.99693 0.96483 0.16366 0.20869402 0 0.80949 10 0.52701 0.78250 1.58195 0.30041366 0 1.11321 11 0.99693 0.78250 0.55079 0.33210673 0 1.11321 12 0.67950 0.78250 0.52860 0.73732993 
0 1.11321 13 0.52701 1.02486 0.16956 0.06403677 0 1.11321 14 0.99693 1.02486 0.80875 0.19312376 0 0.72993 16 0.52701 0.96483 0.53663 0.37099391 0 1.11321 17 0.99693 0.96483 0.28233 0.13576588 0 0.80949 19 0.52701 0.78250 0.37755 0.08414755 0 1.11321 20 0.99693 0.78250 0.48315 0.09551244 0 1.11321 21 0.67950 0.78250 0.36981 0.55722407 0 1.11321 22 0.52701 1.02486 0.84421 0.00020769 0 1.11321 23 0.99693 1.02486 0.86430 1.19278501 0 0.72993 25 0.52701 0.96483 0.21464 0.21414502 0 1.11321 26 0.99693 0.96483 0.54096 0.02053283 0 0.80949 attr(,"constant") [1] 0.32347 $df [1] 9 $residual.scale [1] 0.92726 > #proj(rm1) ## fails "FIXME" > residuals(rm1) 4 5 7 8 10 11 12 13 1.003436 1.029645 -0.321738 0.691394 -0.498376 0.342960 -0.359752 -1.145548 14 16 17 19 20 21 22 23 -1.458427 -0.043483 -0.395061 0.498376 -0.342960 0.359752 0.092640 0.232325 25 26 0.366908 -0.270349 > #rstandard(rm1) > #rstudent(rm1) > #simulate(rm1) ## just $weights needs to be changed to prior weights > V1 <- vcov(rm1, complete=FALSE) > ## but don't show the "eigen" part {vectors may flip sign}: > attributes(V1) <- attributes(V1)[c("dim","dimnames", "weights")]; V1 (Intercept) x1b x1c x2B x2C x3 (Intercept) 0.296312 -0.321429 -0.338842 -0.238010 -0.3289125 0.1357438 x1b -0.321429 0.412501 0.369763 0.253038 0.3616767 -0.1594475 x1c -0.338842 0.369763 0.473317 0.274811 0.3497592 -0.1464335 x2B -0.238010 0.253038 0.274811 0.513277 0.2342086 -0.0640599 x2C -0.328913 0.361677 0.349759 0.234209 0.4721185 -0.2294044 x3 0.135744 -0.159448 -0.146434 -0.064060 -0.2294044 0.1726038 x4 -0.035258 0.039598 0.060587 0.035359 0.0024864 0.0037187 x1b:x2B 0.305162 -0.399754 -0.273260 -0.557840 -0.5219539 0.3087350 x1b:x2C 0.321423 -0.414159 -0.349092 -0.233097 -0.4394078 0.2039253 x4 x1b:x2B x1b:x2C (Intercept) -0.0352579 0.305162 0.321423 x1b 0.0395980 -0.399754 -0.414159 x1c 0.0605871 -0.273260 -0.349092 x2B 0.0353593 -0.557840 -0.233097 x2C 0.0024864 -0.521954 -0.439408 x3 0.0037187 0.308735 0.203925 x4 0.0286797 0.063743 -0.012435 x1b:x2B 0.0637434 1.476860 0.498060 x1b:x2C -0.0124347 0.498060 0.557368 attr(,"weights") 4 5 7 8 10 11 12 13 14 16 0.89614 0.89081 0.98906 0.94998 0.97385 0.98757 0.98633 0.86577 0.78729 0.99980 17 19 20 21 22 23 25 26 0.98353 0.97385 0.98757 0.98633 0.99909 0.99429 0.98578 0.99227 > set.seed(12); sc <- simulate(cm1, 64) > set.seed(12); rc <- simulate(rm1, 64) > > stopifnot(all.equal(sqrt(diag(V1)), coef(summary(rm1))[,"Std. Error"], tol=1e-15), + all.equal(sc, rc, tolerance = 0.08),# dimension *and* approx. values (no NA) + identical(variable.names(rm1), variable.names(cm1)), + all.equal(residuals(rm1), residuals(cm1), tolerance = 0.05),# incl. names + all.equal(rstudent (rm1), rstudent (cm1), tolerance = 0.06), + identical(dimnames(rm1), dimnames(cm1)), + all.equal(dummy.coef(rm1), dummy.coef(cm1), tolerance= .5)) ## check mostly structure > > ## other helper functions > stopifnot(identical(case.names(rm1), case.names(cm1)), + all.equal(family(rm1), family(cm1)),# identical() upto environment + identical(formula(rm1), formula(cm1)), + nobs(rm1) == nobs(cm1)) > #add1(rm0, ~ . + x3 + x4 + x5) ## does not return valid results (yet)! 
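> ## Note: the fits below exercise alternative *initial* estimators for lmrob():
> ## init="M-S" requests an M-S estimate (intended for designs mixing factor and
> ## continuous terms) and init=lmrob.lar an L1 (least absolute residuals) fit;
> ## each is then refined by the usual M-step, as the reported 'method' shows.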
> > > ## test other initial estimators > lmrob(y ~ x1*x2 + x3 + x4 + x5, data, init="M-S") Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data, init = "M-S") \--> method = "M-SM" Coefficients: (Intercept) x1b x1c x2B x2C x3 0.4358 0.5996 0.0346 0.2005 0.1877 -0.1395 x4 x5 x1b:x2B x1c:x2B x1b:x2C x1c:x2C -0.2185 NA -1.8957 NA -0.8698 NA Warning message: In lmrob.M.S(x, y, control, mf = mf) : Skipping design matrix equilibration (DGEEQU): row 12 is exactly zero. > lmrob(y ~ x1*x2 + x3 + x4 + x5, data, init=lmrob.lar) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data, init = lmrob.lar) \--> method = "lM" Coefficients: (Intercept) x1b x1c x2B x2C x3 0.561131 0.444339 0.000184 0.530303 -0.251794 0.236541 x4 x5 x1b:x2B x1c:x2B x1b:x2C x1c:x2C -0.082680 NA -1.298418 NA -0.597602 NA > > ## test all zero design matrix > data <- data.frame(y=1:10,x1=0,x2=0,os=2,w=c(0.5, 1)) > (m5 <- lmrob(y ~ 1+x1+x2+offset(os), data, weights=w)) Call: lmrob(formula = y ~ 1 + x1 + x2 + offset(os), data = data, weights = w) \--> method = "MM" Coefficients: (Intercept) x1 x2 3.64 NA NA > (sm5 <- summary(m5)) Call: lmrob(formula = y ~ 1 + x1 + x2 + offset(os), data = data, weights = w) \--> method = "MM" Weighted Residuals: Min 1Q Median 3Q Max -3.6412 -1.8110 -0.0473 2.0093 4.3588 Coefficients: (2 not defined because of singularities) Estimate Std. Error t value Pr(>|t|) (Intercept) 3.64 1.03 3.53 0.0064 ** x1 NA NA NA NA x2 NA NA NA NA --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 3.24 Convergence in 8 IRWLS iterations Robustness weights: 1 2 3 4 5 6 7 8 9 10 0.909 0.889 0.970 0.977 0.998 0.999 0.992 0.952 0.952 0.842 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.55e+00 5.00e-01 4.69e+00 1.00e-07 rel.tol scale.tol solve.tol zero.tol 1.00e-07 1.00e-10 1.00e-07 1.00e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 1.00e-02 1.82e-12 5.00e-01 5.00e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) > (m6 <- lmrob(y ~ 0+x1+x2+offset(os), data, weights=w)) Call: lmrob(formula = y ~ 0 + x1 + x2 + offset(os), data = data, weights = w) \--> method = "MM" Coefficients: x1 x2 NA NA > (sm6 <- summary(m6)) Call: lmrob(formula = y ~ 0 + x1 + x2 + offset(os), data = data, weights = w) \--> method = "MM" Weighted Residuals: Min 1Q Median 3Q Max -3.83 -1.37 1.09 3.54 6.00 Coefficients: (2 not defined because of singularities) Estimate Std. 
Error t value Pr(>|t|) x1 NA NA NA NA x2 NA NA NA NA Robust residual standard error: NA Convergence in 0 IRWLS iterations Robustness weights: [1] NA NA NA NA NA NA NA NA NA NA Algorithmic parameters: tuning.psi rel.tol scale.tol solve.tol 4.69e+00 1.00e-07 1.00e-10 1.00e-07 zero.tol eps.outlier warn.limit.reject warn.limit.meanrw 1.00e-10 1.00e-02 5.00e-01 5.00e-01 max.it maxit.scale trace.lev compute.rd fast.s.large.n 50 200 0 0 2000 eps.x 0 psi cov compute.outlier.stats "bisquare" ".vcov.avar1" "SM" seed : int(0) > > sc5 <- summary(cm5 <- lm(y ~ 1+x1+x2+offset(os), data, weights=w)) > sc6 <- summary(cm6 <- lm(y ~ 0+x1+x2+offset(os), data, weights=w)) > > if(getRversion() <= "3.5.1" && as.numeric(R.version$`svn rev`) < 74993) + ## in the past, lm() returned logical empty matrix + storage.mode(sc6$coefficients) <- "double" > > stopifnot(all.equal(coef(m5), coef(cm5), tolerance = 0.01), + all.equal(coef(m6), coef(cm6), tolerance = 1e-14), + all.equal(coef(sm5), coef(sc5), tolerance = 0.05), + all.equal(coef(sm6), coef(sc6), tolerance = 1e-14), + identical(sm5$df, sc5$df), + identical(sm6$df, sc6$df)) > > proc.time() user system elapsed 0.730 0.133 0.901 robustbase/tests/weights.Rout.save0000644000176200001440000006101114437610457017061 0ustar liggesusers R version 4.3.0 Patched (2023-06-03 r84496) -- "Already Tomorrow" Copyright (C) 2023 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > ## test handing of weights and offset argument > require(robustbase) Loading required package: robustbase > > ## generate simple example data (extension of the one in ./NAcoef.R ) > data <- expand.grid(x1=letters[1:3], x2=LETTERS[1:4], rep=1:3, + KEEP.OUT.ATTRS = FALSE) > ## generate offset column > data$os <- 1:nrow(data) > set.seed(1) > data$y <- data$os + rnorm(nrow(data)) > ## add collinear variables > data$x3 <- rnorm(nrow(data)) > data$x4 <- rnorm(nrow(data)) > data$x5 <- data$x3 + data$x4 ## lm() will have 'x5' "aliased" (and give coef = NA) > ## add some NA terms > data$y[1] <- NA > data$x4[2:3] <- NA ## to test anova > ## generate weights > ## some obs with weight 0 > data$weights <- as.numeric(with(data, x1 != 'c' | (x2 != 'B' & x2 != 'C'))) > ## some obs with weight 2 > data$weights[data$x1 == 'b'] <- 2 > ## data2 := {data + weights}, encoded in "data2" (-> "ok" for coef(), not for SE) > data2 <- rbind(subset(data, weights > 0), + subset(data, weights == 2)) > ## using these parameters we're essentially forcing lmrob() to > ## fit a classic model --> easier to compare to lm() > ctrl <- lmrob.control(psi="optimal", tuning.chi = 20, bb = 0.0003846154, + tuning.psi=20, method="SM", cov=".vcov.w") > ## SM = MM == the case where .vcov.avar1 was also defined for > > ## Classical models start with 'cm', robust just with 'rm' (or just 'm'), > ## "." 
- models drop 'x5' (which is aliased / extraneous by construction) : > (cm0 <- lm (y ~ x1*x2 + x3 + x4 + x5 + offset(os), data)) Call: lm(formula = y ~ x1 * x2 + x3 + x4 + x5 + offset(os), data = data) Coefficients: (Intercept) x1b x1c x2B x2C x2D 0.01008 -1.14140 0.48156 0.01357 0.86985 0.15178 x3 x4 x5 x1b:x2B x1c:x2B x1b:x2C -0.01655 -0.02388 NA 1.05416 -0.32889 0.69954 x1c:x2C x1b:x2D x1c:x2D -0.73949 1.08478 -1.31578 > (cm0.<- lm (y ~ x1*x2 + x3 + x4 + offset(os), data)) Call: lm(formula = y ~ x1 * x2 + x3 + x4 + offset(os), data = data) Coefficients: (Intercept) x1b x1c x2B x2C x2D 0.01008 -1.14140 0.48156 0.01357 0.86985 0.15178 x3 x4 x1b:x2B x1c:x2B x1b:x2C x1c:x2C -0.01655 -0.02388 1.05416 -0.32889 0.69954 -0.73949 x1b:x2D x1c:x2D 1.08478 -1.31578 > (cm1 <- lm (y ~ x1*x2 + x3 + x4 + x5 + offset(os), data, weights=weights)) Call: lm(formula = y ~ x1 * x2 + x3 + x4 + x5 + offset(os), data = data, weights = weights) Coefficients: (Intercept) x1b x1c x2B x2C x2D -0.002961 -1.132857 0.492904 0.017959 0.858031 0.208510 x3 x4 x5 x1b:x2B x1c:x2B x1b:x2C -0.021632 -0.079147 NA 1.040529 NA 0.736944 x1c:x2C x1b:x2D x1c:x2D NA 1.099090 -1.371953 > (cm1.<- lm (y ~ x1*x2 + x3 + x4 + offset(os), data, weights=weights)) Call: lm(formula = y ~ x1 * x2 + x3 + x4 + offset(os), data = data, weights = weights) Coefficients: (Intercept) x1b x1c x2B x2C x2D -0.002961 -1.132857 0.492904 0.017959 0.858031 0.208510 x3 x4 x1b:x2B x1c:x2B x1b:x2C x1c:x2C -0.021632 -0.079147 1.040529 NA 0.736944 NA x1b:x2D x1c:x2D 1.099090 -1.371953 > (cm2 <- lm (y ~ x1*x2 + x3 + x4 + x5, data2, offset=os)) Call: lm(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data2, offset = os) Coefficients: (Intercept) x1b x1c x2B x2C x2D -0.002961 -1.132857 0.492904 0.017959 0.858031 0.208510 x3 x4 x5 x1b:x2B x1c:x2B x1b:x2C -0.021632 -0.079147 NA 1.040529 NA 0.736944 x1c:x2C x1b:x2D x1c:x2D NA 1.099090 -1.371953 > (cm2.<- lm (y ~ x1*x2 + x3 + x4, data2, offset=os)) Call: lm(formula = y ~ x1 * x2 + x3 + x4, data = data2, offset = os) Coefficients: (Intercept) x1b x1c x2B x2C x2D -0.002961 -1.132857 0.492904 0.017959 0.858031 0.208510 x3 x4 x1b:x2B x1c:x2B x1b:x2C x1c:x2C -0.021632 -0.079147 1.040529 NA 0.736944 NA x1b:x2D x1c:x2D 1.099090 -1.371953 > (rm0 <- lmrob(y ~ x1*x2 + x3 + x4 + x5 + offset(os), data, control=ctrl)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5 + offset(os), data = data, control = ctrl) \--> method = "MM" Coefficients: (Intercept) x1b x1c x2B x2C x2D 0.01008 -1.14140 0.48156 0.01357 0.86985 0.15178 x3 x4 x5 x1b:x2B x1c:x2B x1b:x2C -0.01655 -0.02388 NA 1.05416 -0.32889 0.69954 x1c:x2C x1b:x2D x1c:x2D -0.73949 1.08478 -1.31578 > (rm0.<- lmrob(y ~ x1*x2 + x3 + x4 + offset(os), data, control=ctrl)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + offset(os), data = data, control = ctrl) \--> method = "MM" Coefficients: (Intercept) x1b x1c x2B x2C x2D 0.01008 -1.14140 0.48156 0.01357 0.86985 0.15178 x3 x4 x1b:x2B x1c:x2B x1b:x2C x1c:x2C -0.01655 -0.02388 1.05416 -0.32889 0.69954 -0.73949 x1b:x2D x1c:x2D 1.08478 -1.31578 > set.seed(2) > (rm1 <- lmrob(y ~ x1*x2 + x3 + x4 + x5 + offset(os), data, weights=weights, control=ctrl)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5 + offset(os), data = data, weights = weights, control = ctrl) \--> method = "MM" Coefficients: (Intercept) x1b x1c x2B x2C x2D -0.002961 -1.132857 0.492904 0.017959 0.858031 0.208510 x3 x4 x5 x1b:x2B x1c:x2B x1b:x2C -0.021632 -0.079147 NA 1.040529 NA 0.736944 x1c:x2C x1b:x2D x1c:x2D NA 1.099090 -1.371953 > set.seed(2) > (rm1.<- lmrob(y ~ x1*x2 + x3 + 
x4 + offset(os), data, weights=weights, control=ctrl)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + offset(os), data = data, weights = weights, control = ctrl) \--> method = "MM" Coefficients: (Intercept) x1b x1c x2B x2C x2D -0.002961 -1.132857 0.492904 0.017959 0.858031 0.208510 x3 x4 x1b:x2B x1c:x2B x1b:x2C x1c:x2C -0.021632 -0.079147 1.040529 NA 0.736944 NA x1b:x2D x1c:x2D 1.099090 -1.371953 > set.seed(2) > (rm2 <- lmrob(y ~ x1*x2 + x3 + x4 + x5, data2, offset=os, control=ctrl)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data2, offset = os, control = ctrl) \--> method = "MM" Coefficients: (Intercept) x1b x1c x2B x2C x2D -0.002961 -1.132857 0.492904 0.017959 0.858031 0.208510 x3 x4 x5 x1b:x2B x1c:x2B x1b:x2C -0.021632 -0.079147 NA 1.040529 NA 0.736944 x1c:x2C x1b:x2D x1c:x2D NA 1.099090 -1.371953 > set.seed(2) > (rm2.<- lmrob(y ~ x1*x2 + x3 + x4, data2, offset=os, control=ctrl)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4, data = data2, offset = os, control = ctrl) \--> method = "MM" Coefficients: (Intercept) x1b x1c x2B x2C x2D -0.002961 -1.132857 0.492904 0.017959 0.858031 0.208510 x3 x4 x1b:x2B x1c:x2B x1b:x2C x1c:x2C -0.021632 -0.079147 1.040529 NA 0.736944 NA x1b:x2D x1c:x2D 1.099090 -1.371953 > > sc0 <- summary(cm0) > sc1 <- summary(cm1) > sc2 <- summary(cm2) > (sr0 <- summary(rm0)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5 + offset(os), data = data, control = ctrl) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -1.50524 -0.48219 0.01663 0.42714 1.59122 Coefficients: (1 not defined because of singularities) Estimate Std. Error t value Pr(>|t|) (Intercept) 0.01008 0.76421 0.013 0.990 x1b -1.14140 1.02228 -1.117 0.278 x1c 0.48156 1.01891 0.473 0.642 x2B 0.01357 0.95276 0.014 0.989 x2C 0.86985 0.94762 0.918 0.370 x2D 0.15178 0.99480 0.153 0.880 x3 -0.01655 0.22284 -0.074 0.942 x4 -0.02388 0.25629 -0.093 0.927 x5 NA NA NA NA x1b:x2B 1.05416 1.30705 0.807 0.430 x1c:x2B -0.32889 1.30044 -0.253 0.803 x1b:x2C 0.69954 1.37279 0.510 0.616 x1c:x2C -0.73949 1.30141 -0.568 0.577 x1b:x2D 1.08478 1.32102 0.821 0.422 x1c:x2D -1.31578 1.33335 -0.987 0.336 Robust residual standard error: 1.007 (3 observations deleted due to missingness) Multiple R-squared: 0.9933, Adjusted R-squared: 0.9887 Convergence in 1 IRWLS iterations Robustness weights: All 33 weights are ~= 1. Algorithmic parameters: bb refine.tol rel.tol scale.tol 3.846e-04 1.000e-07 1.000e-07 1.000e-10 solve.tol zero.tol eps.outlier eps.x 1.000e-07 1.000e-10 3.030e-03 4.369e-12 warn.limit.reject warn.limit.meanrw 5.000e-01 5.000e-01 nResample tuning.chi tuning.psi max.it best.r.s 500 20 20 50 2 k.fast.s k.max maxit.scale trace.lev mts 1 200 200 0 1000 compute.rd fast.s.large.n 0 2000 psi subsampling cov "optimal" "nonsingular" ".vcov.w" compute.outlier.stats "SM" seed : int(0) > (sr1 <- summary(rm1)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5 + offset(os), data = data, weights = weights, control = ctrl) \--> method = "MM" Weighted Residuals: Min 1Q Median 3Q Max -2.0956 -0.5369 0.0000 0.3925 2.0381 Coefficients: (3 not defined because of singularities) Estimate Std. 
Error t value Pr(>|t|) (Intercept) -0.002961 0.977109 -0.003 0.998 x1b -1.132857 1.133342 -1.000 0.333 x1c 0.492904 1.297399 0.380 0.709 x2B 0.017959 1.213927 0.015 0.988 x2C 0.858031 1.204169 0.713 0.487 x2D 0.208510 1.275792 0.163 0.872 x3 -0.021632 0.284226 -0.076 0.940 x4 -0.079147 0.324629 -0.244 0.811 x5 NA NA NA NA x1b:x2B 1.040529 1.443384 0.721 0.482 x1c:x2B NA NA NA NA x1b:x2C 0.736944 1.530596 0.481 0.637 x1c:x2C NA NA NA NA x1b:x2D 1.099090 1.461384 0.752 0.464 x1c:x2D -1.371953 1.698858 -0.808 0.432 Robust residual standard error: 1.281 (3 observations deleted due to missingness) Multiple R-squared: 0.9923, Adjusted R-squared: 0.9866 Convergence in 1 IRWLS iterations Robustness weights: All 27 weights are ~= 1. Algorithmic parameters: bb refine.tol rel.tol scale.tol 3.846e-04 1.000e-07 1.000e-07 1.000e-10 solve.tol zero.tol eps.outlier eps.x 1.000e-07 1.000e-10 3.704e-03 5.094e-12 warn.limit.reject warn.limit.meanrw 5.000e-01 5.000e-01 nResample tuning.chi tuning.psi max.it best.r.s 500 20 20 50 2 k.fast.s k.max maxit.scale trace.lev mts 1 200 200 0 1000 compute.rd fast.s.large.n 0 2000 psi subsampling cov "optimal" "nonsingular" ".vcov.w" compute.outlier.stats "SM" seed : int(0) > (sr2 <- summary(rm2)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data2, offset = os, control = ctrl) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -1.52261 -0.51773 0.06925 0.38640 1.61986 Coefficients: (3 not defined because of singularities) Estimate Std. Error t value Pr(>|t|) (Intercept) -0.002961 0.742168 -0.004 0.997 x1b -1.132857 0.860835 -1.316 0.200 x1c 0.492904 0.985445 0.500 0.621 x2B 0.017959 0.922044 0.019 0.985 x2C 0.858031 0.914632 0.938 0.357 x2D 0.208510 0.969033 0.215 0.831 x3 -0.021632 0.215885 -0.100 0.921 x4 -0.079147 0.246574 -0.321 0.751 x5 NA NA NA NA x1b:x2B 1.040529 1.096329 0.949 0.351 x1c:x2B NA NA NA NA x1b:x2C 0.736944 1.162571 0.634 0.532 x1c:x2C NA NA NA NA x1b:x2D 1.099090 1.110001 0.990 0.331 x1c:x2D -1.371953 1.290375 -1.063 0.297 Robust residual standard error: 0.9728 (4 observations deleted due to missingness) Multiple R-squared: 0.9923, Adjusted R-squared: 0.989 Convergence in 1 IRWLS iterations Robustness weights: All 38 weights are ~= 1. Algorithmic parameters: bb refine.tol rel.tol scale.tol 3.846e-04 1.000e-07 1.000e-07 1.000e-10 solve.tol zero.tol eps.outlier eps.x 1.000e-07 1.000e-10 2.632e-03 4.369e-12 warn.limit.reject warn.limit.meanrw 5.000e-01 5.000e-01 nResample tuning.chi tuning.psi max.it best.r.s 500 20 20 50 2 k.fast.s k.max maxit.scale trace.lev mts 1 200 200 0 1000 compute.rd fast.s.large.n 0 2000 psi subsampling cov "optimal" "nonsingular" ".vcov.w" compute.outlier.stats "SM" seed : int(0) > > ## test Estimates, Std. Errors, ... > nc <- names(coef(cm1)) > nc. 
<- setdiff(nc, "x5") # those who are "valid" > stopifnot(exprs = { + all.equal(coef(cm0.),coef(cm0)[nc.]) + all.equal(coef(cm1.),coef(cm1)[nc.]) + all.equal(coef(cm2.),coef(cm2)[nc.]) + all.equal(coef(cm1), coef(cm2)) + all.equal(coef(rm1), coef(rm2)) + all.equal(coef(sc0), coef(sr0)) + all.equal(coef(sc1), coef(sr1)) + all.equal(coef(sc2), coef(sr2)) + }) > > ## test class "lm" methods that do not depend on weights > meths1 <- c("family", + "formula", + "labels", + "model.matrix", + "na.action", + "terms") > for (meth in meths1) + stopifnot(all.equal(do.call(meth, list(rm0)), + do.call(meth, list(rm1)))) > > ## class "lm" methods that depend on weights > ## FIXME: > meths2 <- c(#"AIC", + "alias", + #"BIC", + "case.names", + "coef", + "confint", + #"cooks.distance", + #"deviance", + "df.residual", + #"dfbeta", + #"dfbetas", + #"drop1", + "dummy.coef", + #"effects", + #"extractAIC", + #"hatvalues", + #"influence", + "kappa", + #"logLik", + #"model.frame", ## disable because of zero.weights attribute + "nobs", + "predict", + #"proj", + #"rstandard", + #"rstudent", + #"simulate", + ##"summary", ## see above + "variable.names", + ##"vcov", ## see below + "weights") > op <- options(warn = 1)# print immediately > for (meth in meths2) { + cat(meth,":") + .SW. <- if(meth == "weights") suppressWarnings else identity # for suppressing + ## No weights defined for this object. Use type="robustness" .... + stopifnot(all.equal(do.call(meth, list(cm1)), + do.call(meth, list(rm1))), + all.equal(do.call(meth, list(cm2)), + .SW.(do.call(meth, list(rm2))))) + cat("\n") + } alias : case.names : coef : confint : df.residual : dummy.coef : kappa : nobs : predict : variable.names : weights : > options(op)# reverting > > ## further tests: > anova(rm1, update(rm1, ~ . - x4 - x5)) Robust Wald Test Table Model 1: y ~ x1 * x2 + x3 + x4 + x5 + offset(os) Model 2: y ~ x1 + x2 + x3 + x1:x2 + offset(os) Largest model fitted by lmrob(), i.e. SM pseudoDf Test.Stat Df Pr(>chisq) 1 18 2 22 0.059442 1 0.8074 > anova(rm2, update(rm2, ~ . - x4 - x5)) Robust Wald Test Table Model 1: y ~ x1 * x2 + x3 + x4 + x5 Model 2: y ~ x1 + x2 + x3 + x1:x2 Largest model fitted by lmrob(), i.e. SM pseudoDf Test.Stat Df Pr(>chisq) 1 23 2 27 0.10303 1 0.7482 > > stopifnot(all.equal(fitted(cm0), fitted(rm0)), + all.equal(fitted(cm1), fitted(rm1)), + all.equal(fitted(cm2), fitted(rm2))) > > nd <- expand.grid(x1=letters[1:3], x2=LETTERS[1:4]) > set.seed(3) > nd$x3 <- rnorm(nrow(nd)) > nd$x4 <- rnorm(nrow(nd)) > nd$x5 <- rnorm(nrow(nd)) # (*not* the sum x3+x4 !) 
> nd$os <- nrow(nd):1 > wts <- runif(nrow(nd)) > stopifnot(exprs = { + all.equal(predict(cm0, nd, interval="prediction"), + predict(rm0, nd, interval="prediction")) + all.equal(predict(cm1, nd, interval="prediction"), + predict(rm1, nd, interval="prediction")) + all.equal(predict(cm2, nd, interval="prediction"), + predict(rm2, nd, interval="prediction")) + all.equal(predict(cm0, nd, interval="prediction", weights=wts), + predict(rm0, nd, interval="prediction", weights=wts)) + all.equal(predict(cm1, nd, interval="prediction", weights=wts), + predict(rm1, nd, interval="prediction", weights=wts)) + all.equal(predict(cm2, nd, interval="prediction", weights=wts), + predict(rm2, nd, interval="prediction", weights=wts), + tolerance=1e-7) + }) There were 14 warnings (use warnings() to see them) > > ## Padding can lead to differing values here > ## so test only full rank part > qrEQ <- function(m1, m2) { + q1 <- qr(m1) + q2 <- qr(m2) + r <- 1:q1$rank + stopifnot(q1$rank == q2$rank, + all.equal(q1$pivot, q2$pivot), + all.equal(q1$qraux[r],q2$qraux[r]), + all.equal(q1$qr[r,r], q2$qr[r,r])) + } > qrEQ(cm0, rm0) > qrEQ(cm1, rm1) > qrEQ(cm2, rm2) > > stopifnot(all.equal(residuals(cm0), residuals(rm0)), + all.equal(residuals(cm1), residuals(rm1)), + all.equal(residuals(cm2), residuals(rm2)), + all.equal(resid(cm0, type="pearson"), resid(rm0, type="pearson")), + all.equal(resid(cm1, type="pearson"), resid(rm1, type="pearson")), + all.equal(resid(cm2, type="pearson"), resid(rm2, type="pearson"))) > > ## R 3.5.0: vcov(*, complete=TRUE) new default ==> same NA's as coef() > if(interactive()) withAutoprint({ + op <- options(width = 130, digits = 2) # --> vcov() rows fit on 1 line + vcov(cm0) # 'x5' is NA + vcov(cm2) # 'x5', 'x1c:2B', 'x1c:2C' rows & columns are NA + options(op) + }) > > (no.C <- is.na(match("complete", names(formals(stats:::vcov.lm))))) ## temporary _FIXME_ [1] FALSE > vcovC <- if(no.C) function(M, ...) vcov(M, complete=FALSE, ...) else vcov # (complete=TRUE) > stopifnot(all.equal(vcov(cm0), vcovC(rm0), check.attributes=FALSE), + all.equal(vcov(cm1), vcovC(rm1), check.attributes=FALSE), + all.equal(vcov(cm2), vcovC(rm2), check.attributes=FALSE)) > > ## "clean": > cln <- function(vc) structure(vc, weights=NULL, eigen=NULL) > ## .vcov.avar1() is not recommended here, but also should work with singular / NA coef case: > ok0 <- !is.na(coef(rm0)) > vr0.NA<- vcov(rm0, cov=".vcov.avar1", complete=NA) # "almost singular" warning Warning messages: 1: In lf.cov(object, complete = complete, ...) : X'WX is almost singular. Consider using cov = ".vcov.w" 2: In lf.cov(object, complete = complete, ...) : .vcov.avar1: negative diag() fixed up; consider 'cov=".vcov.w."' instead > vr0.T <- vcov(rm0, cov=".vcov.avar1", complete=TRUE) > vr0.F <- vcov(rm0, cov=".vcov.avar1", complete=FALSE) > stopifnot(identical(dim(vr0.NA), dim(vr0.T)), + identical(dim(vr0.F), dim(vr0.T) - 1L), dim(vr0.F) == 14, + all.equal(cln(vr0.F), vr0.T[ok0,ok0], tol = 1e-15)) > > if(!no.C) { + vc0.T <- vcov(cm0, complete=TRUE) + vc0.F <- vcov(cm0, complete=FALSE) + } > > ok1 <- !is.na(coef(rm1)) > ## cannot work because init/fit residuals are not of full length > tools::assertError(vr1.NA<- vcov(rm1, cov=".vcov.avar1", complete=NA)) > tools::assertError(vr1.T <- vcov(rm1, cov=".vcov.avar1", complete=TRUE )) > tools::assertError(vr1.F <- vcov(rm1, cov=".vcov.avar1", complete=FALSE)) > ## instead, must refit > rm1. 
<- update(rm1, control = within(ctrl, cov <- ".vcov.avar1")) > vr1.NA<- vcov(rm1., complete=NA) > vr1.T <- vcov(rm1., complete=TRUE) > vr1.F <- vcov(rm1., complete=FALSE) > > stopifnot(identical(vr1.F, vr1.NA), # in this case + identical(dim(vr1.F), dim(vr1.T) - 3L), dim(vr1.F) == 12, isSymmetric(vr1.T), + identical(rownames(vr1.F), rownames(vr1.T)[ok1]), + all.equal(cln(vr1.F), vr1.T[ok1,ok1], tol=1e-15)) > > if(FALSE) ## ERROR "exact singular" (probably *NOT* to fix, as TRUE/FALSE do work !) + vr2.NA<- vcov(rm2, cov=".vcov.avar1", complete=NA) # "almost singular" warning > vr2.T <- vcov(rm2, cov=".vcov.avar1", complete=TRUE) > vr2.F <- vcov(rm2, cov=".vcov.avar1", complete=FALSE) > stopifnot(TRUE, # identical(dim(vr2.NA), dim(vr2.T)), + identical(dim(vr2.F), dim(vr2.T) - 3L), dim(vr2.F) == 12, + identical(rownames(vr2.F), rownames(vr1.F)), + identical(rownames(vr2.T), rownames(vr1.T)), + all.equal(cln(vr2.F), vr2.T[ok1,ok1], tol=1e-15)) > > ## Hmm, the supposedly heteroscedastic-robust estimates *are* very different: > all.equal(vcov(cm0), vcovC(rm0, cov = ".vcov.avar1"), check.attributes=FALSE) # rel.diff. 0.5367564 [1] "Mean relative difference: 0.5367564" > if(FALSE) # does not make sense + all.equal(vcov(cm1), vcovC(rm1, cov = ".vcov.avar1"), check.attributes=FALSE) > all.equal(vcov(cm2), vcovC(rm2, cov = ".vcov.avar1"), check.attributes=FALSE) # rel.diff. 0.5757642 [1] "Mean relative difference: 0.5757642" > > > ## Null fits (rank(X)==0) are tested in NAcoef.R > > ## testing weight=0 bug > lmrob(y ~ x3, data, weights=weights) Call: lmrob(formula = y ~ x3, data = data, weights = weights) \--> method = "MM" Coefficients: (Intercept) x3 18.7474 0.1751 > > proc.time() user system elapsed 0.883 0.193 1.100 robustbase/tests/lmrob-data.R0000644000176200001440000002124014241260162015727 0ustar liggesusers### lmrob() with "real data" ----------------------- ## testing functions: source(system.file("xtraR/test-tools.R", package = "robustbase")) (doExtras <- robustbase:::doExtras()) showProc.time() library(robustbase) ##' short form to get "pure" robustness weights rw <- function(fm) unname(weights(fm, type="robustness")) set.seed(0) data(salinity) summary(m0.sali <- lmrob(Y ~ . , data = salinity)) (A1 <- anova(m0.sali, Y ~ X1 + X3)) ## -> X2 is not needed (m1.sali <- lmrob(Y ~ X1 + X3, data = salinity)) (A2 <- anova(m0.sali, m1.sali)) # the same as before stopifnot(all.equal(A1[2,"Pr(>chisq)"], A2[2,"Pr(>chisq)"], tolerance=1e-14)) anova(m0.sali, m1.sali, test = "Deviance") ## whereas 'X3' is highly significant: m2 <- update(m0.sali, ~ . -X3) (A3 <- anova(m0.sali, m2)) (A4 <- anova(m0.sali, m2, test = "Deviance")) cX3 <- c(Estimate = -0.627327396, `Std. Error` = 0.15844971, `t value` = -3.9591577, `Pr(>|t|)` = 0.000584156) stopifnot(all.equal(cX3, coef(summary(m0.sali))["X3",], tolerance = 1e-6)) showProc.time() ## example(lmrob) set.seed(7) data(coleman) summary( m1 <- lmrob(Y ~ ., data=coleman) ) stopifnot(identical(which(rw(m1) < 0.2), c(3L, 18L))) if(FALSE) # to find out *why setting = "KS201x" fails trace(lmrob.S, exit = quote({cat("coef:\n"); print(b$coefficients)})) if(FALSE) # to find out via setting = "KS201x" fails here in the *initial* estimate debug(lmrob.S) data(starsCYG) lmST <- lm(log.light ~ log.Te, data = starsCYG) (RlmST <- lmrob(log.light ~ log.Te, data = starsCYG, control=lmrob.control(trace = 1))) summary(RlmST) ## Least Sq. 
w/ negative slope, where robust has slope ~= 2.2 : stopifnot(exprs = { coef(lmST)[["log.Te"]] < 0 all.equal(coef(RlmST), c("(Intercept)" = -4.969, log.Te=2.253), tol = 1e-3) identical(which(rw(RlmST) < 0.01), as.integer( c(11,20,30,34) )) }) showProc.time() ## ==> Now see that "KS2011" and "KS2014" both break down -- and it is the fault of "lqq" *only* : (RlmST.11 <- update(RlmST, control = lmrob.control("KS2011", trace= 1))) (RlmST.14 <- update(RlmST, control = lmrob.control("KS2014", trace= 1))) (RlmSTM11 <- update(RlmST, control = lmrob.control("KS2011", method="MM", trace= 1))) (RlmSTM14 <- update(RlmST, control = lmrob.control("KS2014", method="MM", trace= 1))) ## using "biweight" instead of "lqq" fixes the problem : (RlmSTbM11 <- update(RlmST,control = lmrob.control("KS2011", method="MM", psi="biweight",trace= 1))) (RlmSTbM14 <- update(RlmST,control = lmrob.control("KS2014", method="MM", psi="biweight",trace= 1))) (RlmSTb.11 <- update(RlmST,control = lmrob.control("KS2011", psi="biweight",trace= 1))) (RlmSTb.14 <- update(RlmST,control = lmrob.control("KS2014", psi="biweight",trace= 1))) ## NB: RlmST has component 'init.S' the others have "init" -- documented in ?lmrob.fit == ../man/lmrob.fit.Rd R.ini.cf <- t(sapply(mget(ls(patt = "^RlmST")), function(OB) OB$init$coef)) R..cf <- t(sapply(mget(ls(patt = "^RlmST")), coef)) cbind(R.ini.cf, R..cf) ##---> "lqq" is *NOT* robust enough here -- but "biweight" is !! showProc.time() options(digits = 5)# less platform dependence ## Directly look at init.S(): x.s <- model.matrix(~ log.Te, data = starsCYG) y.s <- model.response(model.frame(log.light ~ log.Te, data = starsCYG)) ini.df <- lmrob.S(x.s, y.s, control=lmrob.control()) ini.11 <- lmrob.S(x.s, y.s, control=lmrob.control("KS2011")) ini.14 <- lmrob.S(x.s, y.s, control=lmrob.control("KS2014")) ## but these are fine !! : rbind(deflt = ini.df$coef, KS.11 = ini.11$coef, KS.14 = ini.14$coef) ##==> No, it is *not* the init.S() ini.14$scale # 0.48144 ## More clearly shows how M-estimate is converging to *WRONG* solution: (RlmST.lqq <- update(RlmST, init=ini.14, control = lmrob.control(method="MM", psi="lqq", trace= 4))) ## --> break down ## The 10 largest residuals from the robust init. 
S-estim: (i10 <- head(order(abs(residuals(ini.14)), decreasing=TRUE), 10)) residuals(ini.14)[i10] ## ==> and their weights for the different psi() and their default (95% efficiency) tuning: PSIs <- names(.Mchi.tuning.defaults) sapply(PSIs, function(PSI) Mwgt(residuals(ini.14)[i10], cc = .Mpsi.tuning.defaults[[PSI]], psi=PSI)) ## All MM: RlmST.MM <- lapply(setNames(,PSIs), function(PSI) update(RlmST, init=ini.14, control = lmrob.control(method="MM", psi = PSI))) cf.MM <- t(sapply(RlmST.MM, coef)) cf.MM[order(cf.MM[,1], cf.MM[,2]),] ## only 'bisquare' and 'optimal' are robust enough showProc.time() ##=== Werner's analysis: Sensitivity curves for the most-left obs ========================================= dd <- starsCYG dd <- dd[order(dd[,"log.Te"]),] # ==> leverage points come first (and easier plotting) (rr <- lmrob(log.light ~ log.Te, data = dd)) (rr14 <- update(rr, control = lmrob.control("KS2014"))) dd[1,2] # 6.05 will be replaced for sensitivity curve leg.s <- c("default, biweight" ,"KS14, lqq" ,"KS14, biweight" ,"KS14, optimal" ,"KS14, Hampel" ,"KS14, GGW" ,"KS14, Welsh" ) nEst <- length(leg.s) # == number of estimators used below nn <- length(y1 <- c(NA, seq(2,9, length=if(doExtras) 64 else 8))) nCf <- length(coef(rr)) + 1 # +1: sigma r.coef <- matrix(NA, length(y1), nEst*nCf) t.d <- dd oo <- options(warn = 1) showProc.time() ## vary the left-most observation and fit all three for (i in seq_along(y1)) { cat(sprintf("i=%3d, y1[i]=%11.6g: -- ", i, y1[i])) t.d[1,2] <- y1[i] ## the (old) default does not converge in 4 cases lr <- update(rr, data=t.d, control = lmrob.control(maxit=500)) ; cat(" 1") lr14 <- update(rr14, data=t.d, control = lmrob.control("KS2014", psi="lqq") ) ; cat(" 2") lr14b <- update(rr14, data=t.d, control = lmrob.control("KS2014", psi="biweight") ); cat(" 3") lr14o <- update(rr14, data=t.d, control = lmrob.control("KS2014", psi="optimal" ) ); cat(" 4") lr14h <- update(rr14, data=t.d, control = lmrob.control("KS2014", psi="hampel" ) ); cat(" 5") lr14g <- update(rr14, data=t.d, control = lmrob.control("KS2014", psi="ggw" ) ); cat(" 6") lr14a <- update(rr14, data=t.d, control = lmrob.control("KS2014", psi="welsh" ) ); cat(" 7") r.coef[i,] <- c(coef(lr ), sigma(lr), coef(lr14 ), sigma(lr14), coef(lr14b), sigma(lr14b), coef(lr14o), sigma(lr14o), coef(lr14h), sigma(lr14h), coef(lr14g), sigma(lr14g), coef(lr14a), sigma(lr14a)) cat("\n") } showProc.time() options(oo) ## cbind(y=y.1, r.coef) ## y1[1] = where the NA is pMat <- function(j, main, x.legend, col = 1:8, lty=1:6, lwd = 2, ylab=NA, ...) { stopifnot(j %in% seq_len(ncol(r.coef))) matplot(y1, r.coef[, j], type="l", xlab = quote("varying obs." ~ ~ y[1]), ylab=ylab, main=main, col=col, lty=lty, lwd=lwd, ...) 
xx <- par("usr")[1:2]; yL <- .99* xx[1] + .01*xx[2] matpoints(yL, r.coef[1, j, drop=FALSE], pch = 20, col=col, lwd=lwd) abline(h = r.coef[1, j, drop=FALSE], col = col, lwd=1, lty=3) legend(x.legend, leg.s, lty=lty, col=col, lwd=lwd, bty = "n") abline(v = dd[1,2], col=adjustcolor("tomato", 1/4)) # true y value } (jj0 <- nCf*(seq_len(nEst)-1L)) op <- { if(requireNamespace("sfsmisc", quietly=TRUE)) sfsmisc::mult.fig(2)$old.par else par(mfrow = 2:1, mar = .1+ c(4,4,2,1), mgp = c(1.5, 0.6, 0)) } pMat(j = 2+jj0, main = quote("slope" ~~ hat(beta[2])), "bottomleft") pMat(j = 3+jj0, main = quote(hat(sigma)), "topleft") par(op) showProc.time() ## -------------------------------- set.seed(47) data(hbk) m.hbk <- lmrob(Y ~ ., data = hbk) summary(m.hbk) stopifnot(1:10 == which(rw(m.hbk) < 0.01)) data(heart) summary(mhrt <- lmrob(clength ~ ., data = heart)) # -> warning 'maxit.scale=200' too small stopifnot(8 == which(rw(mhrt) < 0.15), 11 == which(0.61 < rw(mhrt) & rw(mhrt) < 0.62), c(1:7,9:10,12) == which(rw(mhrt) > 0.90)) iN <- c(3,5,7,11) heartN <- heart; heartN[iN, "clength"] <- NA lmN <- lm (clength ~ ., data = heartN) # default na.action=na.omit mhN <- lmrob(clength ~ ., data = heartN) # default na.action=na.omit # ==> everything just uses the n=8 complete obs summary(mhN) # now *does* note the 4 omitted obs. mhNex <- lmrob(clength ~ ., data = heartN, na.action=na.exclude) summary(mhNex) mhNx1 <- update(mhNex, ~ . - weight) mhNx0 <- update(mhNex, ~ 1) stopifnot( length(r.mNex <- resid(mhNex)) == nrow(heartN) , iN == which(iNAr <- is.na(r.mNex)) , identical(iNAr, is.na(r.mN1 <- residuals(mhNx1))) , identical(iNAr, is.na(r.mN0 <- residuals(mhNx0))) ) showProc.time() data(stackloss) mSL <- lmrob(stack.loss ~ ., data = stackloss) summary(mSL) showProc.time() robustbase/tests/tlts.Rout.save0000644000176200001440000003505113162677266016407 0ustar liggesusers R Under development (unstable) (2017-09-26 r73351) -- "Unsuffered Consequences" Copyright (C) 2017 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. 
> library(robustbase) > ## library(MASS)## MASS::lqs > > source(system.file("xtraR/test_LTS.R", package = "robustbase")) > ## ../inst/test_LTS.R > > y20 <- c(2:4, 8, 12, 22, 28, 29, 33, 34, 38, 40, 41, 47:48, 50:51, 54, 56, 59) > > test_location <- function() { + ## Improve: print less, and test equality explicitly + Y <- y20 + print(ltsReg(y=Y)) + print(ltsReg(y=Y, intercept=TRUE)) + print(ltsReg(y=Y, intercept=FALSE)) + print(ltsReg(y=Y, alpha=1)) + print(ltsReg(Y ~ 1)) + print(ltsReg(Y ~ 0))# = Y ~ 1 - 1 : empty model (no coefficients) + print(ltsReg(Y ~ 1, alpha=1)) + } > > test_rsquared <- function() { + x1 <- y20 + y1 <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 3.5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5) + ll1 <- ltsReg(x1,y1, alpha = 0.8) + ## print() ing is platform-dependent, since only ~= 0 + stopifnot(all.equal(unname(coef(ll1)), c(1,0), tolerance=1e-12), + ll1$scale < 1e-14) + print(ltsReg(y1,x1, alpha = 0.8)) + print(ltsReg(y1,x1, alpha = 0.8, intercept = FALSE)) + } > > options(digits = 5) > set.seed(101) # <<-- sub-sampling algorithm now based on R's RNG and seed > > doLTSdata() Call: doLTSdata() ======================================================== Data Set n p Half obj Time [ms] ======================================================== heart 12 2 8 0.065810 Best subsample: [1] 1 2 4 5 6 7 11 12 Outliers: 4 [1] 3 8 9 10 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept height weight 63.353 -1.227 0.688 Scale estimate 1.52 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): 1 2 3 4 5 6 7 8 9 10 11 -1.393 0.169 0.000 0.443 -0.341 0.165 -0.115 0.000 0.000 0.000 0.666 12 0.404 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept 63.3528 4.0227 15.75 1.9e-05 *** height -1.2265 0.1403 -8.74 0.00032 *** weight 0.6884 0.0528 13.04 4.7e-05 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 0.765 on 5 degrees of freedom Multiple R-Squared: 0.991, Adjusted R-squared: 0.988 F-statistic: 286 on 2 and 5 DF, p-value: 6.99e-06 -------------------------------------------------------- starsCYG 47 1 25 1.880169 Best subsample: [1] 2 4 6 10 13 15 17 19 21 22 25 27 28 29 33 35 36 38 39 41 42 43 44 45 46 Outliers: 6 [1] 7 9 11 20 30 34 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept log.Te -8.50 3.05 Scale estimate 0.456 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -0.784 -0.214 0.000 0.227 0.592 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept -8.500 1.926 -4.41 7.8e-05 *** log.Te 3.046 0.437 6.97 2.4e-08 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 0.341 on 39 degrees of freedom Multiple R-Squared: 0.554, Adjusted R-squared: 0.543 F-statistic: 48.5 on 1 and 39 DF, p-value: 2.39e-08 -------------------------------------------------------- phosphor 18 2 11 0.245377 Best subsample: [1] 1 2 3 4 6 7 11 12 14 15 18 Outliers: 1 [1] 17 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept inorg organic 60.9149 1.2110 0.0883 Scale estimate 13.5 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -30.297 -3.591 -0.692 4.251 17.116 Coefficients: Estimate Std. 
Error t value Pr(>|t|) Intercept 60.9149 10.1995 5.97 3.4e-05 *** inorg 1.2110 0.3549 3.41 0.0042 ** organic 0.0883 0.2574 0.34 0.7366 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 12.7 on 14 degrees of freedom Multiple R-Squared: 0.519, Adjusted R-squared: 0.45 F-statistic: 7.55 on 2 and 14 DF, p-value: 0.00597 -------------------------------------------------------- stackloss 21 3 13 0.083378 Best subsample: [1] 5 6 7 8 9 10 11 12 15 16 17 18 19 Outliers: 4 [1] 1 3 4 21 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept Air.Flow Water.Temp Acid.Conc. -37.6525 0.7977 0.5773 -0.0671 Scale estimate 1.92 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -2.506 -0.424 0.000 0.576 1.934 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept -37.6525 4.7321 -7.96 2.4e-06 *** Air.Flow 0.7977 0.0674 11.83 2.5e-08 *** Water.Temp 0.5773 0.1660 3.48 0.0041 ** Acid.Conc. -0.0671 0.0616 -1.09 0.2961 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 1.25 on 13 degrees of freedom Multiple R-Squared: 0.975, Adjusted R-squared: 0.969 F-statistic: 169 on 3 and 13 DF, p-value: 1.16e-10 -------------------------------------------------------- coleman 20 5 13 0.028344 Best subsample: [1] 1 2 6 7 8 9 11 13 14 15 16 19 20 Outliers: 2 [1] 3 18 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept salaryP fatherWc sstatus teacherSc motherLev 29.7577 -1.6985 0.0851 0.6662 1.1840 -4.0668 Scale estimate 1.12 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -1.216 -0.389 0.000 0.306 0.984 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept 29.7577 5.5322 5.38 0.00017 *** salaryP -1.6985 0.4660 -3.64 0.00336 ** fatherWc 0.0851 0.0208 4.09 0.00149 ** sstatus 0.6662 0.0382 17.42 6.9e-10 *** teacherSc 1.1840 0.1643 7.21 1.1e-05 *** motherLev -4.0668 0.8487 -4.79 0.00044 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 0.782 on 12 degrees of freedom Multiple R-Squared: 0.988, Adjusted R-squared: 0.983 F-statistic: 203 on 5 and 12 DF, p-value: 3.65e-11 -------------------------------------------------------- salinity 28 3 16 0.065610 Best subsample: [1] 2 3 4 6 7 12 14 15 17 18 19 20 21 22 26 27 Outliers: 4 [1] 5 16 23 24 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept X1 X2 X3 38.063 0.443 -0.206 -1.373 Scale estimate 1.23 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -2.482 -0.390 0.000 0.339 1.701 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept 38.063 5.172 7.36 4.1e-07 *** X1 0.443 0.086 5.15 4.9e-05 *** X2 -0.206 0.138 -1.50 0.15 X3 -1.373 0.195 -7.06 7.7e-07 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 Residual standard error: 1.03 on 20 degrees of freedom Multiple R-Squared: 0.899, Adjusted R-squared: 0.884 F-statistic: 59.3 on 3 and 20 DF, p-value: 3.92e-10 -------------------------------------------------------- aircraft 23 4 14 0.298554 Best subsample: [1] 1 5 6 7 8 9 10 11 13 14 15 17 20 23 Outliers: 2 [1] 16 22 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept X1 X2 X3 X4 9.500740 -3.048797 1.210033 0.001381 -0.000555 Scale estimate 5.69 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -6.67 -2.43 0.00 2.79 6.79 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept 9.500740 5.577532 1.70 0.1078 X1 -3.048797 0.919147 -3.32 0.0044 ** X2 1.210033 0.649230 1.86 0.0808 . X3 0.001381 0.000392 3.52 0.0028 ** X4 -0.000555 0.000328 -1.69 0.1102 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 4.35 on 16 degrees of freedom Multiple R-Squared: 0.826, Adjusted R-squared: 0.782 F-statistic: 19 on 4 and 16 DF, p-value: 6.47e-06 -------------------------------------------------------- delivery 25 2 14 0.112945 Best subsample: [1] 2 5 6 7 8 10 12 13 14 15 17 21 22 25 Outliers: 3 [1] 1 9 24 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept n.prod distance 3.7196 1.4058 0.0163 Scale estimate 2.38 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -5.0321 -1.0306 -0.0124 0.3474 4.2371 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept 3.71959 0.91011 4.09 0.00063 *** n.prod 1.40578 0.13128 10.71 1.7e-09 *** distance 0.01625 0.00301 5.40 3.3e-05 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 2.32 on 19 degrees of freedom Multiple R-Squared: 0.962, Adjusted R-squared: 0.958 F-statistic: 243 on 2 and 19 DF, p-value: 2.9e-14 -------------------------------------------------------- wood 20 5 13 0.070258 Best subsample: [1] 2 3 9 10 11 12 13 14 15 16 17 18 20 Outliers: 4 [1] 4 6 8 19 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept x1 x2 x3 x4 x5 0.377 0.217 -0.085 -0.564 -0.400 0.607 Scale estimate 0.0124 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -0.00928 -0.00177 0.00000 0.00115 0.01300 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept 0.3773 0.0540 6.99 3.8e-05 *** x1 0.2174 0.0421 5.16 0.00042 *** x2 -0.0850 0.1977 -0.43 0.67634 x3 -0.5643 0.0435 -12.98 1.4e-07 *** x4 -0.4003 0.0654 -6.12 0.00011 *** x5 0.6074 0.0786 7.73 1.6e-05 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 Residual standard error: 0.00745 on 10 degrees of freedom Multiple R-Squared: 0.958, Adjusted R-squared: 0.937 F-statistic: 46 on 5 and 10 DF, p-value: 1.4e-06 -------------------------------------------------------- hbk 75 3 40 3.724554 Best subsample: [1] 11 12 14 16 17 18 20 25 26 30 31 32 33 34 35 36 37 39 40 41 42 44 45 46 48 [26] 50 55 56 58 59 60 61 63 64 66 67 69 71 72 74 Outliers: 10 [1] 1 2 3 4 5 6 7 8 9 10 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept X1 X2 X3 -0.1805 0.0814 0.0399 -0.0517 Scale estimate 0.744 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -0.926 -0.396 0.000 0.397 1.011 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept -0.1805 0.1044 -1.73 0.089 . X1 0.0814 0.0667 1.22 0.227 X2 0.0399 0.0405 0.99 0.328 X3 -0.0517 0.0354 -1.46 0.149 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 0.557 on 61 degrees of freedom Multiple R-Squared: 0.0428, Adjusted R-squared: -0.00429 F-statistic: 0.909 on 3 and 61 DF, p-value: 0.442 -------------------------------------------------------- ======================================================== > if(FALSE) { ## FIXME: These *FAIL* ! + doLTSdata(nrep = 12, time = FALSE) + doLTSdata(nrep = 12, time = FALSE, method = "MASS") + } > > test_rsquared() Call: ltsReg.default(x = y1, y = x1, alpha = 0.8) Coefficients: Intercept y1 25.9 5.3 Scale estimate 18 Call: ltsReg.default(x = y1, y = x1, intercept = FALSE, alpha = 0.8) Coefficients: y1 31.4 Scale estimate 24.6 Warning messages: 1: In covMcd(X, alpha = alpha, use.correction = use.correction) : Initial scale 0 because more than 'h' (=16) observations are identical. 2: In covMcd(X, alpha = alpha, use.correction = use.correction) : Initial scale 0 because more than 'h' (=16) observations are identical. > test_location() Call: ltsReg.default(y = Y) Coefficients: [1] 44.6 Scale estimate 19.7 Call: ltsReg.default(y = Y, intercept = TRUE) Coefficients: [1] 44.6 Scale estimate 19.7 Call: ltsReg.default(y = Y, intercept = FALSE) Coefficients: [1] 44.6 Scale estimate 20 Call: ltsReg.default(y = Y, alpha = 1) Coefficients: [1] 33 Scale estimate 19.3 Call: ltsReg.formula(formula = Y ~ 1) Coefficients: [1] 44.6 Scale estimate 19.7 Call: ltsReg.formula(formula = Y ~ 0) No coefficients Call: ltsReg.formula(formula = Y ~ 1, alpha = 1) Coefficients: [1] 33 Scale estimate 19.3 > > if(length(W <- warnings())) print(if(getRversion() >= "3.5") summary(W) else W) 2 identical warnings: In covMcd(X, alpha = alpha, use.correction = use.correction) : Initial scale 0 because more than 'h' (=16) observations are identical. > > cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' Time elapsed: 0.332 0.058 0.463 0.002 0.001 > > proc.time() user system elapsed 0.334 0.059 0.463 robustbase/tests/psi-rho-etc.Rout.save0000644000176200001440000002031612271657124017540 0ustar liggesusers R version 3.0.2 Patched (2014-01-26 r64896) -- "Frisbee Sailing" Copyright (C) 2014 The R Foundation for Statistical Computing Platform: x86_64-unknown-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. 
Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > require(robustbase) Loading required package: robustbase > ## see also ./lmrob-psifns.R <<<<<<<< > source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) > > EQ <- function(x,y) all.equal(x,y, tolerance = 1e-13) > > ## Demonstrate that one of tukeyChi() / tukeyPsi1() is superfluous > x <- seq(-4,4, length=201) > suppressWarnings(## as tukeyPsi1(), tukeyChi() are deprecated + for(c. in c(0.1, 1:2, pi, 100)) { + ix <- abs(x) != c. + stopifnot(EQ(tukeyChi(x, c.), + 6/c.^2* tukeyPsi1(x, c., deriv=-1)), + EQ(tukeyChi(x, c., deriv= 1), + 6/c.^2* tukeyPsi1(x, c., deriv= 0)), + EQ(tukeyChi(x, c., deriv= 2), + 6/c.^2* tukeyPsi1(x, c., deriv= 1)), + ## Now show equivalence with Mpsi(): + EQ(tukeyPsi1(x, c.), Mpsi(x, c., "tukey")), + EQ(tukeyPsi1(x, c., d=1), Mpsi(x, c., "tukey", d=1)), + EQ(tukeyPsi1(x[ix], c., d=2), Mpsi(x[ix], c., "tukey", d=2)) + ) + } + ) > ## Test if default arguments are used > h2Psi <- chgDefaults(huberPsi, k = 2) > > x <- 1:10 > stopifnot(h2Psi@ rho(x, k=2) == h2Psi@ rho(x), + h2Psi@ psi(x, k=2) == h2Psi@ psi(x), + h2Psi@Dpsi(x, k=2) == h2Psi@Dpsi(x), + h2Psi@ wgt(x, k=2) == h2Psi@ wgt(x), + h2Psi@Dwgt(x, k=2) == h2Psi@Dwgt(x)) > > ## Test default arguments for E... slots > stopifnot(EQ(h2Psi@Erho (), 0.49423127328548), + EQ(h2Psi@Epsi2(), 0.920536925636323), + EQ(h2Psi@EDpsi(), 0.954499736103642)) > > stopifnot(EQ(1, huberPsi@psi(1, k = 1e16)), + huberPsi@wgt(0.1591319494080224, 0.5 + 1/13) <= 1) > ## both used to fail because of numeric instability in pmin2/pmax2 > > f1 <- function(.) rep.int(1, length(.)) > F1 <- function(x, .) rep.int(1, length(x)) > ## correct "classical psi": > cPs <- psiFunc(rho = function(x,.) x^2 / 2, psi = function(x, .) x, + wgt = F1, Dpsi = F1, Erho = function(.) rep.int(1/2, length(.)), + Epsi2 = f1, EDpsi = f1, . = Inf) > validObject(cPs); cPs [1] TRUE psi function > ## incorrect dummy psi > cP <- psiFunc(rho = F1, psi = F1, wgt = F1, Dpsi = F1, + Erho = f1, Epsi2 = f1, EDpsi = f1, . 
= Inf) > cP psi function > ## Check the autogenerated Dwgt(): > x <- seq(0,2, by=1/4) > stopifnot(## strict symmetry { including Dwgt(0) == 0 } : + huberPsi @Dwgt(-x) == -huberPsi @Dwgt(x), + hampelPsi@Dwgt(-x) == -hampelPsi@Dwgt(x), + huberPsi @Dwgt(x)[x < 1.345] == 0, + hampelPsi@Dwgt(x)[x < 1.487] == 0, + EQ(huberPsi @Dwgt(x[x >= 1.5]), + c(-0.597777777777778, -0.439183673469388, -0.33625)), + EQ(hampelPsi@Dwgt(x[x >= 1.5]), + c(-0.660883932259397, -0.485547378802822, -0.371747211895911)) + ) > > .defDwgt <- robustbase:::.defDwgt > (ddd <- .defDwgt(psi = function(u, k) pmin.int(k, pmax.int(-k, u)), + Dpsi = function(u, k) abs(u) <= k)) function (u, k) { y <- u u <- u[not0 <- u != 0] y[not0] <- (Dpsi(u, k) - psi(u, k)/u)/u y } > stopifnot(is.function(ddd), names(formals(ddd)) == c("u","k"), + EQ(ddd(x, 1.345), huberPsi@Dwgt(x))) > > ## TODO: Provide some functionality of this as a Plot+Check function > ## ---- and then call the function for all our psiFunc objects (with different 'k') > kk <- c(1.5, 3, 8) > psiH.38 <- chgDefaults(hampelPsi, k = kk) > c1 <- curve(psiH.38@psi(x), -10, 10, n=512, col=2) > abline(h=0, v=0, lty=3, lwd=.5, col="gray25") > c2 <- curve(x * psiH.38@wgt(x), add=TRUE, n=512, col=adjustcolor("blue", .5), lwd=2) > title("psi_Hampel_(1.5, 3, 8) : psi(x) = x * wgt(x)") > axis(1, at=kk, expression(k[1], k[2], k[3]), pos=0) > axis(2, at=kk[1], quote(k[1]), pos=0, las=1) > stopifnot(all.equal(c1,c2, tolerance= 1e-15)) > > r1 <- curve(psiH.38@rho(x), -10, 10, col=2, + main = quote(rho(x) == integral(phi(t) * dt, 0, x))) > axis(1, at=kk, expression(k[1], k[2], k[3]), pos=0) > curve(psiH.38@psi(x), add=TRUE, n=512, col=adjustcolor("blue", .5), lwd=2) > abline(h=0, v=0, lty=3, lwd=.5, col="gray25") > ## check rho(x) = \int_0^x psi(x) dx {slightly *more* than rho' = psi !} > rhoH.38.int <- function(x) integrate(function(u) psiH.38@psi(u), 0, x, rel.tol=1e-10)$value > r2 <- curve(sapply(x, rhoH.38.int), add = TRUE, + lwd=4, col=adjustcolor("red", 1/4)) > ## numerical integration == "formula" : > stopifnot(all.equal(r1,r2, tolerance=1e-10)) > > curve(psiH.38@Dpsi(x), -10, 10, n=512, col=2, + main = quote(psi*minute(x))) > abline(h=0, v=0, lty=3, lwd=.5, col="gray25") > > ## check rho'(x) = phi(x) etc {TODO: for all our psiFun.} > head(xx <- seq(-10, 10, length=1024)) [1] -10.000000 -9.980450 -9.960899 -9.941349 -9.921799 -9.902248 > FrhoH.38 <- splinefun(xx, rho.x <- psiH.38@rho (xx)) > FpsiH.38 <- splinefun(xx, psi.x <- psiH.38@psi (xx)) > F1psH.38 <- splinefun(xx, Dps.x <- psiH.38@Dpsi(xx)) > > curve(FpsiH.38(x, deriv=1), -10,10, n=512) > curve(F1psH.38, add=TRUE, col=4, n=512) > stopifnot(all.equal(FpsiH.38(xx, deriv=1), Dps.x, + tolerance = 0.02))# not better because of discontinuities > > curve(FrhoH.38(x, deriv=1), -10,10, n=512) > curve(FpsiH.38, add=TRUE, col=4, n=512) > stopifnot(all.equal(FrhoH.38(xx, deriv=1), psi.x, tolerance = 1e-4)) > > E.norm <- function(FUN, tol=1e-12, ...) { + integrate(function(x) FUN(x) * dnorm(x), -Inf, Inf, + rel.tol=tol, ...)$value + } > > ##' asymptotic efficiency -- both integrate + "formula"(@Epsi, @EDpsi) version > aeff.P <- function(psiF, k, ...) { + stopifnot(is(psiF, "psi_func")) + if(!missing(k)) + psiF <- chgDefaults(psiF, k = k) + ## E[ psi'(X) ] ^2 / E[ psi(X) ^ 2 ] : + c(int = E.norm(psiF@Dpsi, ...)^2 / E.norm(function(x) psiF@psi(x)^2, ...), + form= psiF@EDpsi()^2 / psiF@Epsi2()) + } > > > ## Breakdown Point --- for redescenders only, > ## both integrate + "formula"(@Erho) version > bp.P <- function(psiF, k, ...) 
{ + stopifnot(is(psiF, "psi_func")) + if(!missing(k)) + psiF <- chgDefaults(psiF, k = k) + if(!is.finite( rhoInf <- psiF@rho(Inf) )) + stop("rho(Inf) is not finite: ", rhoInf) + integ <- function(x) psiF@rho(x) + c(int = E.norm(integ, ...), form= psiF@Erho()) / rhoInf + } > > ## Print & Check the result of aeff.P() or bp.P() > chkP <- function(rp, tol = 1e-9) { + print(rp) + ae <- all.equal(rp[[1]], rp[[2]], tolerance=tol) + if(isTRUE(ae)) invisible(rp) else stop(ae) + } > > chkP(aeff.P(huberPsi)) int form 0.9500003 0.9500003 > chkP(aeff.P(huberPsi, k = 1.5)) int form 0.9642358 0.9642358 > chkP(aeff.P(huberPsi, k = 2)) int form 0.9897156 0.9897156 > chkP(aeff.P(huberPsi, k = 2.5)) int form 0.9977041 0.9977041 > > chkP(aeff.P(hampelPsi)) int form 0.9613126 0.9613126 > chkP(aeff.P(hampelPsi, k = c(1.5, 3, 8))) int form 0.9632396 0.9632396 > chkP(aeff.P(hampelPsi, k = c(2, 4, 8), tol=1e-10),# fails with tol=1e-11 + tol = 1e-4) int form 0.989679 0.989679 > > ## Now works too: > chkP(bp.P(hampelPsi)) int form 0.08615786 0.08615786 > chkP(bp.P(hampelPsi, k = c(1.5, 3, 8))) int form 0.06696027 0.06696027 > chkP(bp.P(hampelPsi, k = c(2, 4, 8))) int form 0.04942297 0.04942297 > > > ## test derivatives (adapted from ./lmrob-psifns.R) > head(x. <- seq(-5, 10, length=1501)) [1] -5.00 -4.99 -4.98 -4.97 -4.96 -4.95 > ## [separate lines, for interactive "play": ] > stopifnot(chkPsiDeriv(plot(huberPsi, x.))) > ## ToDo: improve accuracy of derivative check > stopifnot(chkPsiDeriv(plot(hampelPsi, x.), tol=c(1e-4, 1e-1))) > > > proc.time() user system elapsed 1.167 0.079 1.301 robustbase/tests/OGK-ex.Rout.save0000644000176200001440000001160412271657124016440 0ustar liggesusers R version 2.4.0 Patched (2006-10-03 r39576) Copyright (C) 2006 The R Foundation for Statistical Computing ISBN 3-900051-07-0 R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > library(robustbase) > > ## minimal testing only > data(ruspini, package = "cluster") > > rub1 <- covOGK(ruspini, 1, scaleTau2, covGK, hard.rejection, consistency=FALSE) > rub2 <- covOGK(ruspini, 2, scaleTau2, covGK, hard.rejection, consistency=FALSE) > > AE <- function(x,y) all.equal(x,y, tolerance = 2e-15) > ## The following test is already fulfilled by Kjell Konis' original code: > stopifnot(AE(c(rub1$wcov)[c(1,3:4)], + c(917.99893333333, 94.9232, 2340.319288888888)), + all.equal(rub1$wcov, rub2$wcov, tolerance=0) + , + AE(c(rub1$cov)[c(1,3:4)], + c(923.5774514441657, 91.5385216376565, 2342.4556232436971)) + , + AE(c(rub2$cov)[c(1,3:4)], + c(927.2465953711782, 91.8009184487779, 2346.5790105548940)) + ) > > data(milk) > cM1 <- covOGK(milk, 1, sigmamu = scaleTau2, weight.fn = hard.rejection) > cM2 <- covOGK(milk, 2, sigmamu = scaleTau2, weight.fn = hard.rejection) > > symnum(cov2cor(cM1 $cov)) [1,] 1 [2,] 1 [3,] . . 1 [4,] . * 1 [5,] . . * * 1 [6,] . . * * * 1 [7,] . . . . . . 1 [8,] . , . . . . 1 attr(,"legend") [1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1 > symnum(cov2cor(cM2 $cov)) [1,] 1 [2,] 1 [3,] . . 1 [4,] . . B 1 [5,] . . * * 1 [6,] . . B * * 1 [7,] . , . . . . 1 [8,] . . . . . . 
1 attr(,"legend") [1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1 > symnum(cov2cor(cM1 $wcov)) X1 X2 X3 X4 X5 X6 X7 X8 X1 1 X2 1 X3 1 X4 B 1 X5 * * 1 X6 * * * 1 X7 . . 1 X8 . . . . . . 1 attr(,"legend") [1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1 > symnum(cov2cor(cM2 $wcov)) X1 X2 X3 X4 X5 X6 X7 X8 X1 1 X2 1 X3 1 X4 . B 1 X5 * B 1 X6 B B B 1 X7 . , . . . . 1 X8 . . . . . . 1 attr(,"legend") [1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1 > > cMQn <- covOGK(milk, sigmamu = s_Qn, weight.fn = hard.rejection) > cMSn <- covOGK(milk, sigmamu = s_Sn, weight.fn = hard.rejection) > cMiqr <- covOGK(milk, sigmamu = s_IQR, weight.fn = hard.rejection) > cMmad <- covOGK(milk, sigmamu = s_mad, weight.fn = hard.rejection) > > as.dist(round(cov2cor(cMQn$wcov), 3)) X1 X2 X3 X4 X5 X6 X7 X2 0.091 X3 0.227 0.187 X4 0.288 0.176 0.964 X5 0.256 0.132 0.943 0.952 X6 0.241 0.196 0.954 0.956 0.957 X7 0.445 0.634 0.360 0.372 0.377 0.370 X8 0.014 0.452 0.440 0.380 0.340 0.350 0.479 > as.dist(round(cov2cor(cMSn$wcov), 3)) X1 X2 X3 X4 X5 X6 X7 X2 0.096 X3 0.242 0.219 X4 0.305 0.200 0.960 X5 0.269 0.142 0.945 0.952 X6 0.260 0.233 0.948 0.953 0.964 X7 0.445 0.636 0.391 0.399 0.395 0.408 X8 0.020 0.448 0.453 0.384 0.331 0.360 0.484 > as.dist(round(cov2cor(cMiqr$wcov), 3)) X1 X2 X3 X4 X5 X6 X7 X2 0.162 X3 0.181 0.215 X4 0.225 0.199 0.964 X5 0.210 0.140 0.945 0.954 X6 0.187 0.239 0.950 0.951 0.954 X7 0.453 0.660 0.350 0.354 0.355 0.367 X8 0.111 0.454 0.470 0.407 0.345 0.404 0.516 > as.dist(round(cov2cor(cMmad$wcov), 3)) X1 X2 X3 X4 X5 X6 X7 X2 0.077 X3 0.228 0.175 X4 0.289 0.159 0.962 X5 0.257 0.092 0.945 0.952 X6 0.238 0.189 0.954 0.956 0.962 X7 0.451 0.588 0.345 0.358 0.353 0.358 X8 -0.003 0.392 0.488 0.412 0.353 0.380 0.439 > > > cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' Time elapsed: 1.925 0.07 2.512 0 0 > robustbase/tests/nlrob-tst.R0000644000176200001440000004144014410263765015647 0ustar liggesuserscommandArgs() library(robustbase) ## for now: ---------------------------------------- if(file.exists(fil <- system.file("xtraR/test-tools.R", package = "robustbase"))) { source(fil) } else { identical3 <- function(x,y,z) identical(x,y) && identical (y,z) identical4 <- function(a,b,c,d) identical(a,b) && identical3(b,c,d) assert.EQ <- function(target, current, tol = if(showOnly) 0 else 1e-15, giveRE = FALSE, showOnly = FALSE, ...) { ## Purpose: check equality *and* show non-equality ## ---------------------------------------------------------------------- ## showOnly: if TRUE, return (and hence typically print) all.equal(...) T <- isTRUE(ae <- all.equal(target, current, tolerance = tol, ...)) if(showOnly) return(ae) else if(giveRE && T) { ## don't show if stop() later: ae0 <- if(tol == 0) ae else all.equal(target, current, tolerance = 0, ...) if(!isTRUE(ae0)) writeLines(ae0) } if(!T) stop("all.equal() |-> ", paste(ae, collapse=sprintf("%-19s","\n"))) else if(giveRE) invisible(ae0) } } if(FALSE) ## in future: ---------------------------------------- source(system.file("xtraR/test-tools.R", package = "robustbase")) ## ## -> assert.EQ(), identical3(), .. ## From /tests/reg-tests-1d.R (2022-08-09): ## A good guess if we have _not_ translated error/warning/.. messages: ## (should something like this be part of package tools ?) englishMsgs <- { ## 1. LANGUAGE takes precedence over locale settings: if(nzchar(lang <- Sys.getenv("LANGUAGE"))) lang == "en" else { ## 2. 
Query the locale if(!onWindows) { ## sub() : lc.msgs <- sub("\\..*", "", print(Sys.getlocale("LC_MESSAGES"))) lc.msgs == "C" || substr(lc.msgs, 1,2) == "en" } else { ## Windows lc.type <- sub("\\..*", "", sub("_.*", "", print(Sys.getlocale("LC_CTYPE")))) lc.type == "English" || lc.type == "C" } } } cat(sprintf("English messages: %s\n", englishMsgs)) DNase1 <- DNase[ DNase$Run == 1, ] Y <- DNase1[,"density"] # for convenience below ## classical fm1 <- nls(density ~ Asym/(1 + exp(( xmid - log(conc) )/scal ) ), data = DNase1, start = list(Asym = 3, xmid = 0, scal = 1), trace=TRUE) summary(fm1) wm1 <- update(fm1, weights = sqrt(conc)) # (weights as function of ) ## robust rm1 <- nlrob(formula(fm1), data = DNase1, trace = TRUE, start = list(Asym = 3, xmid = 0, scal = 1)) (sm1 <- summary(rm1)) stopifnot(all.equal(Y, fitted(fm1) + residuals(fm1), check.attributes=FALSE), ## fitted() has "label" attribute identical3(c(fitted(fm1)), predict(fm1), predict(fm1, newdata=DNase1)), ## robust fit : identical3(fitted(rm1), predict(rm1), predict(rm1, newdata=DNase1)), all.equal(Y, unname(fitted(rm1) + residuals(rm1)))) print(coef(rm1), digits=12) ## 2.35963008460 1.49945088410 1.04506391722 F19 Lx 64b ## 2.35963008460 1.49945088410 1.04506391722 Win(Serv.2003) 64b ## 2.35963008613 1.49945088600 1.04506391793 F19 Lx 32b ## 2.35963008613 1.49945088600 1.04506391793 Win(Serv.2003) 32b assert.EQ(coef(rm1), giveRE=TRUE, c(Asym=2.35963008, xmid=1.49945088, scal=1.04506392), tol = 4e-8) assert.EQ(sqrt(diag(sm1$cov)), giveRE=TRUE, ## 32b 0.08626872273, 0.0902194541, 0.03503833759 c(Asym=0.0862687305, xmid=0.0902194608, scal=0.0350383389), tol = 7e-7) ## examples with weights: rm. <- update(rm1, weights = NULL)# 'NULL' but not missing() ww <- sqrt(DNase1[,"conc"]) wr1 <- update(rm1, weights = sqrt(conc), trace=FALSE) wr1. 
<- update(rm1, weights = ww, trace=FALSE) ii <- names(rm1) != "call" stopifnot(all.equal(rm1[ii], rm.[ii], tol = 1e-15), all.equal(wr1[ii],wr1.[ii], tol = 1e-15)) ## When y has NA's: Add them to the end of DNase1 n <- nrow(DNase1) DNAnase <- DNase1[c(1:n,1:3), ] DNAnase$density[n+(1:3)] <- NA naExp <- expression(na.fail = na.fail, # << the default in model.frame() which most use na.omit = na.omit, na.pass = na.pass, na.exclude= na.exclude) noAction <- function(fm) { if(("call" %in% names(fm)) && !is.null(fm$call)) fm$call <- noAction(fm$call) if(!is.na(H <- match("na.action", names(fm)))) fm[-H] else fm } rNls <- lapply(naExp, function(naAct) tryCatch(error = identity, nls(formula(fm1), data = DNAnase, na.action = eval(naAct), start = list(Asym = 3, xmid = 0, scal = 1)) ) ) str(sapply(rNls, class)) ## $ na.fail : chr [1:3] "simpleError" "error" "condition" ## $ na.omit : chr "nls" ## $ na.pass : chr [1:3] "simpleError" "error" "condition" ## $ na.exclude: chr "nls" stopifnot(exprs = { inherits(rNls$na.pass, "simpleError") inherits(rNls$na.fail, "simpleError") }) if(englishMsgs) stopifnot(exprs = { conditionMessage(rNls$na.fail) == "missing values in object" grepl("NA.*foreign function call", conditionMessage(rNls$na.pass)) }) sO <- summary(rNls$na.omit) sX <- summary(rNls$na.exclude) stopifnot(exprs = { identical(noAction(sO), noAction(sX)) all.equal(coef(sm1), coef(sX), tol = 0.10) # 0.0857 }) robNls <- lapply(c(noCov=FALSE,doCov=TRUE), function(doCov) lapply(naExp, function(naAct) tryCatch(error = identity, nlrob(formula(fm1), data = DNAnase, na.action = eval(naAct), doCov = doCov, ## <-- start = list(Asym = 3, xmid = 0, scal = 1)) ) ) ) ## gives ## Warning messages: ## In old - new : ## longer object length is not a multiple of shorter object length str(sapply(robNls[["noCov"]], class)) ## List of 4 ## $ na.fail : chr [1:3] "simpleError" "error" "condition" ## $ na.omit : chr [1:3] "simpleError" "error" "condition" <<< FIXME, should work as nls() does ## $ na.pass : chr [1:3] "simpleError" "error" "condition" ## $ na.exclude: chr [1:2] "nlrob" "nls" stopifnot(identical(sapply(robNls[["noCov"]], class), sapply(robNls[["doCov"]], class))) ## same checks as for nls(): lapply(robNls, function(LL) stopifnot(exprs = { inherits(LL$na.pass, "simpleError") inherits(LL$na.fail, "simpleError") })) -> .tmp if(englishMsgs) lapply(robNls, function(LL) stopifnot(exprs = { conditionMessage(LL$na.fail) == "missing values in object" ## different message than nls(): grepl("missing.*weights not allowed", conditionMessage(LL$na.pass)) }) ) -> .tmp ## the only one which works currently: robNxcl <- robNls[["noCov"]]$na.exclude ## Fix these : =================== .t <- try( s.no <- summary(robNxcl) ) .t <- try( v.no <- vcov(robNxcl) ) ## both give *same* error: ## Error in .vcov.m(object, Scale = sc, resid.sc = as.vector(object$residuals)/sc) : ## length(resid.sc) == nobs(object) is not TRUE ## Fix these : =================== str( s.do <- summary(robNls[["doCov"]]$na.exclude) ) try(## the "doCov" -- fails "only" when printing: print(s.do) #-> error ) -> .tmp ## Call: ## .... 
## Residuals: ## Error in if (rdf > 5L) { : missing value where TRUE/FALSE needed vcov(robNls[["doCov"]]$na.exclude) ## Asym xmid scal ## Asym NA NA NA ## xmid NA NA NA ## scal NA NA NA try( ## debug this one rmNAo <- nlrob(formula(fm1), data = DNAnase, na.action = na.omit, trace = TRUE, start = list(Asym = 3, xmid = 0, scal = 1)) ### fails ## Error in (function (..., row.names = NULL, check.rows = FALSE, check.names = TRUE, : ## arguments imply differing number of rows: 19, 16 ) ## From: "Pascal A. Niklaus" ## To: ## Subject: nlrob problem ## Date: Tue, 20 Dec 2011 07:04:38 +0100 ## For psi-functions that can become zero (e.g. psi.bisquare), weights in ## the internal call to nls can become zero. ## Was ## psiTuk <- robustbase:::psi.bisquare ## psiHamp <- robustbase:::psi.hampel lmrob.control(psi="bisquare")$tuning.psi psiTuk <- function(x, der=0) { ## cc: dput( lmrob.control(psi="bisquare")$tuning.psi ) if(der == 0) Mwgt(x, cc=4.685061, psi="Tukey") else Mpsi(x, cc=4.685061, psi="Tukey", deriv=1) } c.Ha <- lmrob.control(psi="hampel"); c.Ha$tuning.psi psiHamp <- function(x, der=0) { ## cc: dput( lmrob.control(psi="hampel")$tuning.psi ) if(der == 0) Mwgt(x, cc=c(1.35241275, 3.15562975, 7.212868), psi="Hampel") else Mpsi(x, cc=c(1.35241275, 3.15562975, 7.212868), psi="Hampel", deriv=1) } d <- data.frame(x = -6:9, y = 43 + c(7, 52, 21, 12, 10, -4, -5, -4, 0, -77, -8, -10, 22, 33, 38, 51)) nlr1 <- nlrob(y ~ a*(x + b*exp(-c*x)), start=list(a= 4, b= 1, c= 1.2), data = d, maxit = 50, # default 20 is *not* sufficient model = TRUE, trace=TRUE) ## These failed in robustbase version 0.8-0 and earlier nlr2 <- update(nlr1, psi = psiTuk) # now *does* converge... ## check 'model' and dataClasses stopifnot(is.list(mod <- nlr2$model), is.data.frame(mod), inherits(attr(mod, "terms"), "terms"), identical(dCl <- attr(attr(mod, "terms"),"dataClasses"), nlr2$dataClasses), identical(dCl, c(y = "numeric", x = "numeric"))) ## 'port' ditto: nlr2. <- update(nlr2, algorithm= "port") nlr3 <- update(nlr1, psi = psiHamp) # *does* converge, too... nlr3. <- update(nlr3, algorithm= "port") summary(nlr2.) summary(nlr3.) i. 
<- -c(2, 15) # <- drop 'call' and 'iter' components stopifnot(all.equal(nlr2[i.], nlr2.[i.], tolerance = 2e-5, check.environment = FALSE), all.equal(nlr3[i.], nlr3.[i.], tolerance = 1e-4, check.environment = FALSE), ## The redescending psi() give some exact 0 weights : identical(which(abs(nlr2$rweights) < 1e-9), c(1L, 10 :12)), identical(which(abs(nlr3$rweights) < 1e-9), c(1L, 10L,12L)) ) ## Different example with more data: pp <- list(a=10, b=4, c=1/4) x <- seq(-6,9, by = 1/8) f.x <- with(pp, a*(x + b*exp(-c*x))) set.seed(6); y <- y0 <- f.x + 4*rnorm(x) iO <- c(2:3,20,70:71,90); y[iO] <- y[iO] + 32*c(-1,-1,1)*(2+rlnorm(iO)); y <- round(y) plot(x,y); lines(x, f.x, col="tomato", lty = 2) dd <- data.frame(x,y) nlc1 <- nls(formula(nlr1), start = coef(nlr1), data=dd, trace=TRUE) nlR1 <- update(nlr1, data = dd)# update the model with the new data summary(nlR1) lines(x, predict(nlc1), col=3) lines(x, predict(nlR1), col=4) legend("top", c("f(x)", "least squares", "robust"), col=c("tomato", palette()[3:4]), lty=c(2,1,1)) ## These both now *do* converge, but failed earlier (nlbi <- update(nlR1, psi = psiTuk)) (nlFH <- update(nlR1, psi = psiHamp)) lines(x, predict(nlbi), col=5) lines(x, predict(nlFH), col=6) stopifnot(nlR1$status == "converged", nlbi$status == "converged", nlFH$status == "converged") assert.EQ(coef(nlR1), c(a=9.914874, b=3.98612416, c=0.250896252), tol = 4e-9) assert.EQ(coef(nlbi), c(a=9.947458207, b=3.954210623, c=0.2535835248), tol = 4e-9) ## This is suddently quite different : ???!?!?? ## assert.EQ(coef(nlFH), c(a=9.94242831, b=3.97370746, c=0.252907618)) assert.EQ(coef(nlFH), c(a=9.952893755,b=3.949047387,c=0.2536216541), tol = 1e-7) assert.EQ(1000*diag(vcov(nlR1)), c(a=16.167493, b=10.0986644, c=0.0200814189), tol = 7e-7, giveRE=TRUE) assert.EQ(1000*local({V <- vcov(nlFH); V[lower.tri(V, diag=TRUE)]}), c(16.33774615, -9.704702857, 0.3149189329, 10.03560556, -0.4079936961, 0.02039106329), tol = 7e-7) assert.EQ(predict(nlR1), predict(nlbi), tol = 0.05, giveRE=TRUE) assert.EQ(predict(nlR1), predict(nlFH), tol = 0.05, giveRE=TRUE) nlFH2 <- update(nlFH, psi = .Mwgt.psi1("Hampel", c(2,4,8))) ## TODO: and compare ## TODO: same with Tukey ##----- *Vector* parameters indexed by factor levels ------------- ##----- MM: ~/R/MM/Pkg-ex/robustbase/nlrob-vectorpar.R data(biomassTill)## see also smaller example in ../man/biomassTill.Rd if(!dev.interactive(orNone=TRUE)) pdf("nlrob-biomT.pdf") require(lattice) xyplot(Biomass ~ DVS | Tillage, data = biomassTill) xyplot(Biom.2 ~ DVS | Tillage, data = biomassTill) ## starting values m0.st <- list(Wm = rep(200, 3), a = rep( 1, 3), b = rep( 2, 3)) ##-> nls(), even with -expm1(.) fails to start properly (and hence nlrob() fails too): try( m.c0 <- nls(Biomass ~ Wm[Tillage] * (- expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m0.st, trace=TRUE) ) ## several other versions of the above fail similarly. 
This works: m00st <- list(Wm = rep(300, 3), a = rep( 1.5, 3), b = rep( 2.2, 3)) m.c00 <- nls(Biomass ~ Wm[Tillage] * (- expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE) ## These were the "true" beta for simulating in creation of {Biomass, Biom.2}: m1.st <- list(Wm = c(219.8, 265.9, 343.4), a = c(1.461, 1.493, 1.294), b = c(2.889, 2.838, 4.046)) m.cl <- nls(Biomass ~ Wm[Tillage] * (1 - exp(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE) ## this now fails to converge: try( # "singular gradient" m.c2 <- nls(Biom.2 ~ Wm[Tillage] * (1 - exp(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE) ) str(C1 <- nls.control(minFactor=1e-6, warnOnly=TRUE, printEval=TRUE, maxiter=500)) try( m.c2 <- nls(Biom.2 ~ Wm[Tillage] * (1 - exp(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE, control=C1) ) ## fails (!) too {numericDeriv() in iteration 129} even though we have ## 'warnOnly' ! ==> bug in nls() !!!!!!!!!!!!!!!!!!!!!!!!!!! ## -expm1(u) is better than (1 - exp(u)) : m.c00 <- nls(Biom.2 ~ Wm[Tillage] * (- expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE, control=C1) ## "fails" but returns .. very bad.. m.c00 ## Use better starting values, as we have such problems: m.c2 <- nls(Biom.2 ~ Wm[Tillage] * (- expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m1.st, trace=TRUE, control=C1) ## "fails" but returns at least: singular gradient iteration 126 m.c2 ## Robust: not converging in 20 steps (only warning) mrob <- nlrob(Biomass ~ Wm[Tillage] * (-expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE) stopifnot(identical(mrob$dataClasses, c(Biomass = "numeric", Tillage = "factor", DVS = "numeric"))) try(## now: singular gradient in nls mr.2 <- nlrob(Biom.2 ~ Wm[Tillage] * (-expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE) ) ## Compare coeffs: rbind(c.true = unlist(m1.st), cl0 = coef(m.c00), cl = coef(m.cl), rob = coef(mrob), c2 = coef(m.c2))#, r.2 = coef(mr.2)) ## Compare fit ## Now for plotting --- nice would be xyplot, but I don't easily see how: (yl2 <- range(biomassTill[,"Biom.2"])) (ylim <- range(biomassTill[,"Biomass"]))# --> *not* showing the two outliers! 
## or even a bit more robustly: ## sfsmisc::rrange(biomassTill[,"Biom.2"]) ##-> -201.3064 394.0914 ## using global data + fits from above p.biomass.fits <- function(ylim = c(-200, 400), n = 257, f.DVS = 0.1, leg.txt = c(outer(c("nls() ", "nlrob()"), c("", "[ + 2 outl.]"), paste)), col = c("blue2","blue3","tomato","red3"), lty = c(2,1,2,1), lwd = 2) { ## more and equispaced DVS values for nice plot: rr <- extendrange(biomassTill[,"DVS"], f=f.DVS) bbDVS <- seq(rr[1], rr[2], length = n) b.Till <- biomassTill[,"Tillage"] nP <- nlevels(b.Till) # == 3 m <- length(leg.txt) col <- rep_len(col, m) lwd <- rep_len(lwd, m) lty <- rep_len(lty, m) ## Prefer xyplot() - this is ugly but works (and tests predict(*, )): op <- par(mfrow = c(nP,1), mar = .1 + c(3, 3, 2, 1), mgp = c(1.25, 0.6, 0)) on.exit(par(op)) for(lev in levels(b.Till)) { cat(lev,":\n--------\n") dsub <- subset(biomassTill, Tillage == lev) plot(Biom.2 ~ DVS, data = dsub, ylim=ylim, main = paste("Tillage = ", lev)) grid() dd <- data.frame(Tillage = factor(rep.int(lev, n), levels=levels(b.Till)), DVS = bbDVS) lines(predict(m.cl, dd) ~ DVS, data=dd, col=col[1], lty=lty[1], lwd=lwd[1]) lines(predict(mrob, dd) ~ DVS, data=dd, col=col[2], lty=lty[2], lwd=lwd[2]) lines(predict(m.c2, dd) ~ DVS, data=dd, col=col[3], lty=lty[3], lwd=lwd[3]) ## lines(predict(mr.2, dd) ~ DVS, data=dd, col=col[4], lty=lty[4], lwd=lwd[4]) if(lev == "CA-") legend("top", leg.txt, col = col, lty=lty, lwd=lwd, inset=.02, bg = "gray96") #, bty="n") } } ## showing all data points: p.biomass.fits(ylim = yl2) ## more interesting: p.biomass.fits() cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/MT-tst.R0000644000176200001440000001527014236516416015055 0ustar liggesusersrequire("robustbase") ##---> ./poisson-ex.R ## ~~~~~~~~~~~~~~ for more glmrobMT() tests source(system.file("xtraR/ex-funs.R", package = "robustbase")) source(system.file("xtraR/test-tools.R", package = "robustbase"))## -> showSys.time(), assert.EQ() if(!require("sfsmisc")) { eaxis <- axis # so we can use eaxis() below } (doExtras <- robustbase:::doExtras()) ## Explore the espRho() function: --------------------------------------------- if(!dev.interactive(orNone=TRUE)) pdf("MT-E_rho.pdf") E.rho <- robustbase:::espRho lambdas <- ((1:10)/2)^2 cws <- c(1, 1.5, 1.75, 2, 2.25, 3) (gr <- expand.grid(lam = lambdas, cw = cws)) Egr <- apply(gr, 1, function(r) { lam <- r[["lam"]]; cw <- r[["cw"]]; sL <- sqrt(lam) xx <- seq(lam - 2*sL, lam + 2*sL, length=17) vapply(xx, function(X) E.rho(lam, xx=X, cw=cw), NA_real_) }) str(Egr)# 17 x 60 mLeg <- function(pos, type="o") legend(pos, legend=paste("lambda = ", format(lambdas, digits=2)), lty=1:5, col=1:6, pch= c(1:9, 0, letters, LETTERS), bty="n") matplot(Egr[, gr[,"cw"]== 1.0 ], type="o",main="c_w = 1.0" ); mLeg("bottomright") matplot(Egr[, gr[,"cw"]== 1.5 ], type="o",main="c_w = 1.5" ); mLeg("bottomright") matplot(Egr[, gr[,"cw"]== 1.75], type="o",main="c_w = 1.75"); mLeg("bottomright") matplot(Egr[, gr[,"cw"]== 2.0 ], type="o",main="c_w = 2.0" ); mLeg("bottomright") matplot(Egr[, gr[,"cw"]== 2.25], type="o",main="c_w = 2.25"); mLeg("bottomright") matplot(Egr[, gr[,"cw"]== 3.0 ], type="o",main="c_w = 3.0" ); mLeg("bottomright") dev.off() ## Explore the m() function: --------------------------------------------- if(!dev.interactive(orNone=TRUE)) pdf("MT-m_rho.pdf") mkM <- robustbase:::mk.m_rho # itself calling splinefun(*, "monoH.FC") getSpline.xy <- function(splfun) { ## Depending on the version of R, the ## environment of splinefun() 
slightly changes: stopifnot(is.function(splfun), length(e <- environment(splfun)) > 0) if("x0" %in% ls(e)) list(x = e$x0, y = e$y0) else list(x = e$x, y = e$y) } m21 <- mkM(2.1, recompute=TRUE)# the default 'cw = 2.1' m16 <- mkM(1.6, recompute=TRUE) p.m2 <- function(mrho, from = 0, to, col=2, addKnots=TRUE, pchK=4, cexK=1.5, ...) { stopifnot(is.function(mrho)) curve(mrho, from, to, col=col, ...) curve(sqrt(x), add=TRUE, col=adjustcolor("gray",.5), lwd=2) if(addKnots) points(getSpline.xy(mrho), pch=pchK, cex=cexK) } p.m.diff <- function(mrho, from = 0, to, col=2, addKnots=TRUE, pchK=4, cexK=1.5, ...) { stopifnot(is.function(mrho)) curve(mrho(x) - sqrt(x), from=from, to=to, n=512, col=col, ...) abline(h=0,lty=3) if(addKnots) { xy <- getSpline.xy(mrho) if(is.numeric(x <- xy$x)) points(x, xy$y - sqrt(x), pch=pchK, cex=cexK) else warning("'addKnots' not available: No knots in function's environment") } } p.m2(m21, to=10) p.m2(m16, to=10) p.m2(m21, to=50) p.m2(m21, to=120, cexK=.8) p.m.diff(m21, to=120, cex=.5)# pchK="." p.m.diff(m16, to=120, cex=.5)# pchK="." mm21 <- function(.) robustbase:::mm(., m21) environment(mm21) <- environment(m21)# <- for p.m() p.m2(mm21, to=120, cexK=.8) p.m.diff(mm21, to=120, cexK=.8)#-- discontinuity at 100 !! ## TODO: ways to improve! ## Here: look at "larger lambda" (and more cw) la2 <- 5*2^seq(0, 10, by = 0.25) c.s <- .25*c(1:10, 15, 50) mL <- lapply(c.s, function(cc) mkM(cc, lambda = la2, recompute=TRUE)) str(mL, max=1) # a list of functions.. assert.EQ(la2, getSpline.xy(mL[[1]])$x) mmL <- sapply(mL, function(F) getSpline.xy(F)$y) matplot(la2, mmL, type ="l") # "all the same" from very far ... mm.d. <- mmL - sqrt(la2) matplot(la2, mm.d., type ="l", xlab=quote(lambda)); abline(h=0, lty=3) legend("bottom", legend= paste("cw=",c.s), col=1:6, lty=1:5, ncol = 3, bty="n") matplot(la2, -mm.d., type ="l", xlab=quote(lambda), log = "xy", axes=FALSE) eaxis(1); eaxis(2) legend("bottom", legend= paste("cw=",c.s), col=1:6, lty=1:5, ncol = 3, bty="n") ## ok, that's the correct scale c.s2 <- c.s [c.s >= .75] mm.d2 <- mm.d.[, c.s >= .75] matplot(la2, -mm.d2, type ="l", xlab=quote(lambda), log = "xy", axes=FALSE) eaxis(1); eaxis(2) legend("bottomleft", legend= paste("cw=",c.s2), col=1:6, lty=1:5, ncol = 3, bty="n") ##-> log (sqrt(lam) - m(lam)) = a[c] - beta * log(lam) : dd2 <- data.frame(m.d = c(mm.d2), cw = rep(c.s2, each = length(la2)), lambda = rep(la2, length(c.s2))) ## gives a pretty nice picture: summary(fm <- lm(log(-m.d) ~ 0+factor(cw) + log(lambda), data = dd2, subset = lambda >= 50)) ##=> slope of log(lambda) = -1/2 dd3 <- within(dd2, { ld2 <- log(-m.d) + 1/2 * log(lambda) })[dd2[,"lambda"] >= 50,] plot(ld2 ~ cw, data = dd3, type = "b") plot(ld2 ~ cw, data = dd3, type = "b", log="x") coplot(ld2 ~ cw|lambda, data = dd3) coplot(ld2 ~ cw|log(lambda), data = dd3) coplot(ld2 ~ log10(cw) | log10(lambda), data = dd3) dev.off() ##-------------------------------------------------------- end m(.) 
------------- ## The simple intercept example from ./glmrob-1.R set.seed(113) y <- rpois(17, lambda = 4) y[1:2] <- 99:100 # outliers y.1 <- y x.1 <- cbind(rep(1, length(y))) options("robustbase:m_rho_recompute" = TRUE)#-> recompute in any case: showSys.time( r <- glmrob(y ~ 1, family = poisson, method = "MT", nsubm=100) )# some output str(r) ## was c(ini = 1.30833281965018, est = 1.29369680430613) ## then c(ini = 1.30833281965018, est = 1.29369680422799) ## c(ini = 1.30833281965018, est = 1.29369680430627) r.64b <- c(ini = 1.30833281965018, est = 1.29369680452016) stopifnot(r$converged) assert.EQ(r$initial, r.64b[["ini"]], check.attributes=FALSE, tol = 1e-13)# rel.diff: 3.394.e-16 assert.EQ(r$coefficients, r.64b[["est"]], check.attributes=FALSE, tol = 1e-09)# as long we use different optim()) ## now, as the algorithm has a random start: set.seed(7) nSim <- if(doExtras) 20 else 2 showSys.time(LL <- replicate(nSim, glmrob(y ~ 1, family = poisson, method = "MT"), simplify=FALSE)) ini <- sapply(LL, `[[`, "initial") est <- sapply(LL, `[[`, "coefficients") ## surprise: all the 20 initial estimators are identical: stopifnot(diff(range(ini)) == 0, diff(range(est)) == 0) ## probably too accurate ... but ok, for now assert.EQ(est[1], r.64b[["est"]], check.attributes=FALSE, tol = 1e-10)# Winbuilder needed ~ 2e-11 assert.EQ(ini[1], r.64b[["ini"]], check.attributes=FALSE, tol = 1e-10) ccvv <- sapply(LL, `[[`, "cov") stopifnot(ccvv[1] == ccvv) assert.EQ(print(ccvv[1]), 0.0145309081924157, tol = 1e-7, giveRE=TRUE) cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' ## "Platform" info (SysI <- Sys.info()[c("sysname", "release", "nodename", "machine")]) if(require("sfsmisc") && SysI[["sysname"]] == "Linux") ## not on the Mac (yet) c(SysI, MIPS=Sys.MIPS(), Sys.sizes()) else SysI robustbase/tests/Rsquared.R0000644000176200001440000000426613326360567015521 0ustar liggesusersrequire(robustbase) set.seed(17)# reproducibility! ## to check: ## - for the empty model summary(lmrob(Y ~ 0, coleman)) ## - with and without an intercept in the model summary(lmrob(Y ~ 1, coleman)) writeLines(sfm <- capture.output( summary(lmrob(Y ~ ., coleman)))) # and this must be "identical": sfm2 <- capture.output(summary(lmrob(Y ~ ., coleman, model=FALSE, x=FALSE, y=FALSE))) iCall <- grep("lmrob.*coleman", sfm)# the only line that differs stopifnot(sfm[-iCall] == sfm2[-iCall]) ## w/o intercept: summary(lmrob(Y ~ . - 1, coleman, model=FALSE, x=FALSE, y=FALSE)) ## - when prior-weights are included wts <- c(rep(0.05, 10), rep(2, 10)) summary(lmrob(Y ~ . - 1, coleman, model=FALSE, x=FALSE, y=FALSE, weights = wts)) ## - should work for object with NA in the coefficients, and ## - should work for object with NA in the observations --> both in ./NAcoef.R ## check equality with lm() for classical model test <- function(formula, data, items=c("coefficients", "residuals", "df", "scale", "r.squared", "adj.r.squared", "weights"), tol = 1e-4, ...) { lmrCtrl <- lmrob.control(psi = "hampel", tuning.psi = c(1000, 2000, 3000), method="SMDM", ...) 
sc <- summary(lm (formula, data)) sr <- summary(lmrob(formula, data, control= lmrCtrl)) names(sc)[names(sc) == "sigma"] <- "scale" if(sc$df[1] == 0 && getRversion() <= "3.5.1" && as.numeric(R.version$`svn rev`) < 74993) ## in the past, lm() returned logical empty matrix storage.mode(sc$coefficients) <- "double" ret <- all.equal(sc[items], sr[items], tolerance=tol) if (!isTRUE(ret)) { print(sr) for (i in seq_along(items)) { print(sc[items[i]]) print(sr[items[i]]) } print(ret) stop(sprintf("all.equal(sc[items], sr[items], tol.. = %g) are not all TRUE", tol)) } ret } set.seed(101) test(Y ~ 0, coleman, c("residuals", "df", "coefficients", "r.squared", "adj.r.squared"), tol=1e-10) test(Y ~ 1, coleman, tol = 2e-4) test(Y ~ ., coleman, tol = 4e-4) test(Y ~ . - 1, coleman, tol = 4e-4) robustbase/tests/tlts.R0000644000176200001440000000262113162677266014717 0ustar liggesuserslibrary(robustbase) ## library(MASS)## MASS::lqs source(system.file("xtraR/test_LTS.R", package = "robustbase")) ## ../inst/test_LTS.R y20 <- c(2:4, 8, 12, 22, 28, 29, 33, 34, 38, 40, 41, 47:48, 50:51, 54, 56, 59) test_location <- function() { ## Improve: print less, and test equality explicitly Y <- y20 print(ltsReg(y=Y)) print(ltsReg(y=Y, intercept=TRUE)) print(ltsReg(y=Y, intercept=FALSE)) print(ltsReg(y=Y, alpha=1)) print(ltsReg(Y ~ 1)) print(ltsReg(Y ~ 0))# = Y ~ 1 - 1 : empty model (no coefficients) print(ltsReg(Y ~ 1, alpha=1)) } test_rsquared <- function() { x1 <- y20 y1 <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 3.5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5) ll1 <- ltsReg(x1,y1, alpha = 0.8) ## print() ing is platform-dependent, since only ~= 0 stopifnot(all.equal(unname(coef(ll1)), c(1,0), tolerance=1e-12), ll1$scale < 1e-14) print(ltsReg(y1,x1, alpha = 0.8)) print(ltsReg(y1,x1, alpha = 0.8, intercept = FALSE)) } options(digits = 5) set.seed(101) # <<-- sub-sampling algorithm now based on R's RNG and seed doLTSdata() if(FALSE) { ## FIXME: These *FAIL* ! doLTSdata(nrep = 12, time = FALSE) doLTSdata(nrep = 12, time = FALSE, method = "MASS") } test_rsquared() test_location() if(length(W <- warnings())) print(if(getRversion() >= "3.5") summary(W) else W) cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/nlregrob-tst.R0000644000176200001440000004072014241260162016333 0ustar liggesusersstopifnot(require("robustbase")) source(system.file("xtraR", "platform-sessionInfo.R", # moreSessionInfo() etc package = "robustbase", mustWork=TRUE)) ## testing functions: source(system.file("xtraR/test-tools.R", package = "robustbase")) ## showProc.time(), showSys.time() ... S.time <- showSys.time # "back compatible" mS <- moreSessionInfo(print.=TRUE) ## as long as we don't export these (nor provide an nlrob(., method=.) interface: nlrob.MM <- robustbase:::nlrob.MM nlrob.tau <- robustbase:::nlrob.tau nlrob.CM <- robustbase:::nlrob.CM nlrob.mtl <- robustbase:::nlrob.mtl (doExtras <- robustbase:::doExtras()) if(doExtras) { NP <- 30 ; tol <- 1e-11 } else { ## "fast" NP <- 15 ; tol <- 1e-7 } start.from.true <- !doExtras # (but not necessarily ..) if(start.from.true) { # population size = NP (random) + 1 (true parameters) init_p <- c(1, 0.2) init_p_sigma <- c(1, 0.2, 1) } else { init_p <- init_p_sigma <- NULL } if(!dev.interactive(orNone=TRUE)) pdf("nlregrob-tst.pdf") RNGversion("3.5.0") # -- TODO once R >> 3.5.0 : update results !! ## Stromberg, Arnold J. (1993). ## Computation of high breakdown nonlinear regression parameters. ## J. Amer. Statist. Assoc. 88(421), 237-244. 
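## Overview of the simulation study below (summarizing the code that follows):
## data are generated from the exponential model  y_i = exp(a + b*x_i) + eps_i
## with true (a, b) = (1, 0.2), x_i ~ U(0, 10), eps_i ~ N(0, 1), n = 30; the robust
## fits (nlrob.MM / .tau / .CM / .mtl) are then compared via assert.EQ() against
## nls() on (i) clean data, (ii) data with ~40% y-outliers, and (iii) data with
## additional high-leverage x-outliers.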
## exponential regression Expo <- function(x, a, b) exp(a + b*x) set.seed(2345) # for reproducibility ## data without outliers: d.exp30 <- data.frame(x = sort( runif(30, 0, 10) ), err = rnorm(30)) d.exp30 <- transform(d.exp30, y = Expo(x, 1, 0.2) + err) ## classical (starting at truth .. hmm) op <- options(digits=12) Cfit <- nls(y ~ Expo(x, a, b), data = d.exp30, start = c(a = 1, b = 0.2), control = nls.control(tol = 8e-8, printEval = TRUE), trace=TRUE) showProc.time()# ---- OS X needing 6e-8 options(op) ## robust Rfit.MM.S.bisquare <- nlrob.MM(y ~ Expo(x, a, b), data = d.exp30, lower = c(a = -10, b = -2), upper = c(10, 2), NP = NP, tol = tol, add_to_init_pop = init_p ) if(doExtras) { Rfit.MM.S.lqq <- update(Rfit.MM.S.bisquare, psi = "lqq") Rfit.MM.S.optimal <- update(Rfit.MM.S.bisquare, psi = "optimal") Rfit.MM.S.hampel <- update(Rfit.MM.S.bisquare, psi = "hampel") } showProc.time() Rfit.MM.lts.bisquare <- update(Rfit.MM.S.bisquare, init = "lts") Rfit.MM.lts.lqq <- update(Rfit.MM.S.bisquare, init = "lts", psi = "lqq") Rfit.MM.lts.optimal <- update(Rfit.MM.S.bisquare, init = "lts", psi = "optimal") Rfit.MM.lts.hampel <- update(Rfit.MM.S.bisquare, init = "lts", psi = "hampel") showProc.time() S.time(Rfit.tau.bisquare <- nlrob.tau( y ~ Expo(x, a, b), data = d.exp30, lower = c(a = -10, b = -2), upper = c(10, 2), NP = NP, add_to_init_pop = init_p )) S.time(Rfit.tau.optimal <- update(Rfit.tau.bisquare, psi = "optimal")) S.time(Rfit.CM <- nlrob.CM( y ~ Expo(x, a, b), data = d.exp30, lower = c(a = -10, b = -2, sigma = 0), upper = c( 10, 2, 10), NP = NP, add_to_init_pop = init_p_sigma )) S.time(Rfit.mtl <- nlrob.mtl(y ~ Expo(x, a, b), data = d.exp30, lower = c(a = -10, b = -2, sigma = 0), upper = c( 10, 2, 3), NP = ceiling(NP*1.33), # <- higher prob. to get close tol = tol, trace=TRUE, details=TRUE, add_to_init_pop = init_p_sigma )) showProc.time() plot(y ~ x, d.exp30, main = "Data = d.exp30") cTr <- adjustcolor("red4", 0.5) cLS <- adjustcolor("blue2", 0.5) cE <- curve(Expo(x, a=1, b=0.2), 0, 10, n=1+2^9, col=cTr, lwd=2, lty=2, add=TRUE) lines(d.exp30$x, fitted(Cfit), col=cLS, lwd=3) ll <- length(m1 <- sapply(ls.str(patt="^Rfit"), get, simplify=FALSE)) .tmp <- lapply(m1, function(.) 
lines(d.exp30$x, fitted(.))) legend("topleft", c("true", "LS", names(m1)), lwd=c(2,3, rep(1,ll)), lty=c(2,1, rep(1,ll)), col=c(cTr,cLS, rep(par("fg"),ll)), bty="n", inset=.01) showProc.time() ## 40% outliers present {use different data name: seen in print() d.exp40out <- within(d.exp30, y[15:27] <- y[15:27] + 100) op <- options(digits=12) Cfit.40out <- update(Cfit, data = d.exp40out, trace=TRUE, control = nls.control(tol = Cfit$control$tol)) if(FALSE) ## this fails for "bad" non-R BLAS/LAPACK Cfit.no.out <- update(Cfit.40out, subset = -(15:27)) ## help giving it a good start *and* raise tolerance (from 8e-8): ## still fails for all three of {ATLAS, MKL, OpenBLAS} with ## Error in nls(formula = y ~ Expo(x, a, b), data = d.exp.Hlev, start = c(a = 1, : ## step factor 0.000488281 reduced below 'minFactor' of 0.000976562 Cfit.no.out <- tryCatch(error = function(e) e, update(Cfit.40out, subset = -(15:27), start = c(a = 1, b = 0.2), trace=TRUE, control = nls.control(maxiter = 1000, tol = 5e-7, printEval=TRUE)) ) Cfit.no..ok <- !inherits(Cfit.no.out, "error") options(op) if(doExtras) { Rf.out.MM.S.bisquare <- update(Rfit.MM.S.bisquare, data=d.exp40out) Rf.out.MM.S.lqq <- update(Rf.out.MM.S.bisquare, psi = "lqq") Rf.out.MM.S.optimal <- update(Rf.out.MM.S.bisquare, psi = "optimal") Rf.out.MM.S.hampel <- update(Rf.out.MM.S.bisquare, psi = "hampel") showProc.time() } Rf.out.MM.lts.bisquare <- update(Rfit.MM.S.bisquare, data=d.exp40out, init= "lts") Rf.out.MM.lts.lqq <- update(Rf.out.MM.lts.bisquare, psi= "lqq") #----------- Rf.out.MM.lts.optimal <- update(Rf.out.MM.lts.bisquare, psi= "optimal") Rf.out.MM.lts.hampel <- update(Rf.out.MM.lts.bisquare, psi= "hampel") showProc.time() Rf.out.tau.bisquare <- update(Rfit.tau.bisquare, data=d.exp40out) Rf.out.tau.optimal <- update(Rfit.tau.bisquare, data=d.exp40out, psi = "optimal") Rf.out.CM <- update(Rfit.CM, data=d.exp40out) Rf.out.mtl <- update(Rfit.mtl, data=d.exp40out) showProc.time() plot(y ~ x, d.exp40out, main = "Data = d.exp40out") cE <- curve(Expo(x, a=1, b=0.2), 0, 10, n=1+2^9, col=cTr, lwd=2, lty=2, add=TRUE) ll <- length(m1 <- sapply(ls.str(patt="^Rf.out"), get, simplify=FALSE)) .tmp <- lapply(m1, function(.) lines(d.exp40out$x, fitted(.))) xx <- local({p <- par("usr"); seq(p[1],p[2], length.out=256)}) if(Cfit.no..ok) lines(xx, predict(Cfit.no.out, list(x=xx)), col=cLS, lwd=3) lines(xx, predict(Cfit.40out , list(x=xx)), col=cLS, lty=2) legend("topleft", c("true", "LS [w/o outl]", "LS", names(m1)), lwd=c(2,3, rep(1,1+ll)), lty=c(2,1,2, rep(1,ll)), col=c(cTr,cLS,cLS, rep(par("fg"),ll)), bty="n", inset=.01) showProc.time() ## presence of high leverage point outliers d.exp.Hlev <- within(d.exp40out, { x[28:30] <- x[28:30] + 10 ## shift 10 y <- Expo(x, 1, 0.2) + err y[28:30] <- y[28:30] + 500 }) op <- options(digits=12) Cfit.Hlev <- tryCatch(error = function(e) e, update(Cfit.40out, data = d.exp.Hlev, start = c(a = 1, b = 0.2), trace=TRUE, control = nls.control(maxiter = 100, tol = 5e-7, printEval=TRUE)) ) if(Cfit.Hlev..ok <- !inherits(Cfit.Hlev, "error")) { Cfit.no.Hlev <- update(Cfit.Hlev, subset = -(28:30)) } else { ## substitute -- better? 
Cfit.no.Hlev <- update(Cfit, subset = -(28:30)) } showProc.time() options(op) if(doExtras) { Rf.Hlev.MM.S.bisquare <- update(Rfit.MM.S.bisquare, data = d.exp.Hlev) Rf.Hlev.MM.S.lqq <- update(Rf.Hlev.MM.S.bisquare, psi = "lqq") Rf.Hlev.MM.S.optimal <- update(Rf.Hlev.MM.S.bisquare, psi = "optimal") Rf.Hlev.MM.S.hampel <- update(Rf.Hlev.MM.S.bisquare, psi = "hampel") showProc.time() } Rf.Hlev.MM.lts.bisquare <- update(Rfit.MM.S.bisquare, data = d.exp.Hlev, init="lts") Rf.Hlev.MM.lts.lqq <- update(Rf.Hlev.MM.lts.bisquare, psi= "lqq") Rf.Hlev.MM.lts.optimal <- update(Rf.Hlev.MM.lts.bisquare, psi="optimal") Rf.Hlev.MM.lts.hampel <- update(Rf.Hlev.MM.lts.bisquare, psi= "hampel") showProc.time() Rf.Hlev.tau.bisquare <- update(Rfit.tau.bisquare, data = d.exp.Hlev) Rf.Hlev.tau.optimal <- update(Rf.Hlev.tau.bisquare, psi = "optimal") Rf.Hlev.CM <- update(Rfit.CM, data = d.exp.Hlev) Rf.Hlev.mtl <- update(Rfit.mtl, data = d.exp.Hlev) showProc.time() plot(y ~ x, d.exp.Hlev, main = "Data = d.exp.Hlev") cE <- curve(Expo(x, a=1, b=0.2), 0, par("usr")[2], n=1+2^9, col=cTr, lwd=2, lty=2, add=TRUE) x.H <- seq(par("usr")[1], par("usr")[2], length.out = 256) ll <- length(m1 <- sapply(ls.str(patt="^Rf.Hlev"), get, simplify=FALSE)) .tmp <- lapply(m1, function(.) lines(x.H, predict(., list(x=x.H)))) lines(x.H, predict(Cfit.no.Hlev, list(x=x.H)), col=cLS, lwd=3)## L.S.() if(Cfit.Hlev..ok) { lines(x.H, predict(Cfit.Hlev, list(x=x.H)), col=cLS, lty=2)## L.S. legend("topleft", c("true", "LS [w/o outl]", "LS", names(m1)), lwd=c(2,3, rep(1,1+ll)), lty=c(2,1,2, rep(1,ll)), col=c(cTr, cLS,cLS, rep(par("fg"),ll)), bty="n", inset=.01) } else { cat("no Cfit.Hlev lines as nls() failed there\n") cat(" : legend(...) !?\n") } showProc.time() cfcl <- coef(Cfit) if(Cfit.no..ok) { cfcl.n.o <- coef(Cfit.no.out) } else { cfcl.n.o <- cfcl } # use substitute - for code below cfcl.n.H <- coef(Cfit.no.Hlev) ## no outliers present assert.EQ(coef(Rfit.MM.S.bisquare), cfcl, tol = 0.01, giveRE=TRUE) if(doExtras) { assert.EQ(coef(Rfit.MM.S.lqq), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.MM.S.optimal), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.MM.S.hampel), cfcl, tol = 0.01, giveRE=TRUE) } assert.EQ(coef(Rfit.MM.lts.bisquare), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.MM.lts.lqq), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.MM.lts.optimal), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.MM.lts.hampel), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.tau.bisquare), cfcl, tol = 0.02, giveRE=TRUE)# 0.009873 assert.EQ(coef(Rfit.tau.optimal), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.CM)[-3], cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.mtl)[-3], cfcl, tol = 0.02, giveRE=TRUE) ## 40% outliers present -- compare with L.S.(good.data) if(doExtras) { assert.EQ(coef(Rf.out.MM.S.bisquare), cfcl.n.o, tol = 7e-4, giveRE=TRUE) assert.EQ(coef(Rf.out.MM.S.lqq), cfcl.n.o, tol = 1e-5, giveRE=TRUE) assert.EQ(coef(Rf.out.MM.S.optimal), cfcl.n.o, tol = 1e-5, giveRE=TRUE) assert.EQ(coef(Rf.out.MM.S.hampel), cfcl.n.o, tol = 1e-5, giveRE=TRUE) } assert.EQ(coef(Rf.out.MM.lts.bisquare), cfcl.n.o, tol = 6e-4, giveRE=TRUE) assert.EQ(coef(Rf.out.MM.lts.lqq), cfcl.n.o, tol = 1e-5, giveRE=TRUE) assert.EQ(coef(Rf.out.MM.lts.optimal), cfcl.n.o, tol = 1e-5, giveRE=TRUE) assert.EQ(coef(Rf.out.MM.lts.hampel), cfcl.n.o, tol = 1e-5, giveRE=TRUE) assert.EQ(coef(Rf.out.tau.bisquare), cfcl.n.o, tol = .007, giveRE=TRUE) assert.EQ(coef(Rf.out.tau.optimal), cfcl.n.o, tol = .002, giveRE=TRUE) assert.EQ(coef(Rf.out.CM)[-3], cfcl.n.o, 
tol = .012, giveRE=TRUE)# 0.00708,0.01079 assert.EQ(coef(Rf.out.mtl)[-3], cfcl.n.o, tol = .002, giveRE=TRUE)# better in 64b ## presence of high leverage point outliers -- compare with LS(good.data) if(doExtras) { assert.EQ(coef(Rf.Hlev.MM.S.bisquare), cfcl.n.H, tol = .01, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.MM.S.lqq), cfcl.n.H, tol = .02, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.MM.S.optimal), cfcl.n.H, tol = .005, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.MM.S.hampel), cfcl.n.H, tol = .02, giveRE=TRUE) } assert.EQ(coef(Rf.Hlev.MM.lts.bisquare),cfcl.n.H, tol = .01, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.MM.lts.lqq), cfcl.n.H, tol = .015, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.MM.lts.optimal), cfcl.n.H, tol = .002, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.MM.lts.hampel), cfcl.n.H, tol = .02, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.tau.bisquare), cfcl.n.H, tol = .05, giveRE=TRUE)# 0.0363, 0.0415 assert.EQ(coef(Rf.Hlev.tau.optimal), cfcl.n.H, tol = .03, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.CM)[-3], cfcl.n.H, tol = .12, giveRE=TRUE)# 0.032, 0.082 assert.EQ(coef(Rf.Hlev.mtl)[-3], cfcl.n.H, tol = .08, giveRE=TRUE) length(mods <- sapply(ls.str(patt="^Rf"), get, simplify=FALSE)) # 36 is.conv <- sapply(mods, `[[`, "status") == "converged" prblm <- mods[!is.conv] if(length(prblm)) { cat("\n*** NON-converged model fits:\n") print(prblm) mods <- mods[is.conv] } else cat("\n All models converged\n") ## Now, all mods are converged ----------- dKnd <- as.factor(vapply(mods, function(.m.) as.character(getCall(.m.)[["data"]]), "")) table(dKnd) ## (iKnd <- setNames(seq_len(nlevels(dKnd)), levels(dKnd))) ## Coefficients: Some have 'sigma', some not: pcf <- vapply(lcf <- lapply(mods, coef), length, 1) table(pcf) ## 2 and 3 stopifnot(min(pcf) + 1 == max(pcf)) # +1 : those which have 'sigma pp <- min(pcf) ccf <- t(simplify2array(lapply(lcf, `[`, 1:max(pcf)))) ## take the "Scale" for those that do not have 'sigma' among coef(): i.n <- is.na(ccf[,"sigma"]) ccf[i.n, "sigma"] <- vapply(mods[i.n], `[[`, 0, "Scale") ## not yet: vapply(mods[i.n], sigma, 0.) ccf ## well, the 'sigma's are definitely *not* unbiased estimates of ## true sqrt(var(eps)) ... [FIXME] ## --> indeed, this can be found in the CM paper [TODO: write more here] plot(ccf[,1:2], pch = as.integer(dKnd))## use 'method' to get color legend("topright", inset=.01, names(iKnd), pch = iKnd) points(rbind(cfcl.n.H, cfcl, cfcl.n.o), # <- order from iKind col=adjustcolor("tomato",.5), cex=2, pch=1:3, lwd=5) ## optional labs <- sub("^Rfit\\.", '', sub("^Rf\\.[A-Za-z]+\\.", '', rownames(ccf))) labs <- sub("hampel$", "Ham", sub("optimal$", "opt", sub("bisquare$", "biS", labs))) labs text(ccf[,1:2], labs, cex=0.75, col=adjustcolor(1, 0.5), adj= -1/5, srt=75, xpd=NA) points(rbind(cfcl), col=adjustcolor("tomato",.5), cex=2, pch=3, lwd=5) showProc.time() ###------- Extended Tests for the DNase1 example from >>>> ../man/nlrob-algos.Rd <<<< ### ===================== DNase1 <- DNase[DNase$Run == 1,] form <- density ~ Asym/(1 + exp(( xmid -log(conc) )/scal )) pnms <- c("Asym", "xmid", "scal") psNms <- c(pnms, "sigma") ##' a version that recycles x: setNames. <- function(x, nm) setNames(rep_len(x, length(nm)), nm) ## for comparisons, later: all.eq.mod <- function(m1, m2, sub=FALSE, excl = c("call", "ctrl"), ...) { nm1 <- names(m1) stopifnot(if(sub) nm1 %in% names(m2) else nm1 == names(m2)) ni <- if(sub) nm1[is.na(match(nm1, c("call","ctrl")))] else is.na(match(names(m1), excl))## <<- all but those with names in 'excl' all.equal(m1[ni], m2[ni], ...) 
} set.seed(47) # as these by default use randomized optimization: fMM <- robustbase:::nlrob.MM(form, data = DNase1, lower = setNames.(0, pnms), upper = 3, ## call to nlrob.control to pass 'optim.control': ctrl = nlrob.control("MM", optim.control = list(trace = 1), optArgs = list(trace = TRUE))) showProc.time() if(doExtras) { ftau <- robustbase:::nlrob.tau(form, data = DNase1, lower= setNames.(0, pnms), upper= 3, trace=TRUE) ## fCM <- robustbase:::nlrob.CM (form, data = DNase1, lower= setNames.(0, psNms), upper= 3, trace=TRUE) ## fmtl <- robustbase:::nlrob.mtl(form, data = DNase1, lower= setNames.(0, psNms), upper= 3, trace=TRUE) ## mods <- list(MM=fMM, tau=ftau, CM=fCM, MTL=fmtl) print(sts <- sapply(mods, `[[`, "status")) stopifnot(sts == "converged") print(sapply(mods, `[[`, "data")) # currently 'language' %% FIXME print(sapply(mods, `[[`, "coefficients")) # nice matrix showProc.time() } ## Compare with traditional M-estimate, a) started robustly b) psi = Tukey's: fM <- nlrob(formula(fMM), data=eval(fMM$data), start = coef(fMM), psi = .Mwgt.psi1("bisquare"), trace = TRUE) rbind(M=coef(fM), MM=coef(fMM)) # "similar" ... well, no: the sigma's get much different ## stopifnot(%%____FIXME___ all.equal(coef(fM), coef(fMM), tolerance = 1e-4) ## ) # had 3.26e-5 ## FIXME: nlrob( "M") should allow to keep specify an initial sigma *and* keep that fixed showProc.time() if(doExtras) { ### Now call the above methods via nlrob(): set.seed(47) # (same as above) ## without "sigma" gMM <- nlrob(form, data = DNase1, method = "MM", lower = setNames(c(0,0,0), pnms), upper = 3) gtau <- nlrob(form, data = DNase1, method = "tau", lower = setNames(c(0,0,0), pnms), upper = 3) ## those with "sigma" -> must be in (lower, upper), too : gCM <- nlrob(form, data = DNase1, method = "CM", lower = setNames(c(0,0,0,0), psNms), upper = 3) gmtl <- nlrob(form, data = DNase1, method = "mtl", lower = setNames(c(0,0,0,0), psNms), upper = 3) showProc.time() ## list {and test print() for these}: (mod2 <- list(MM=gMM, tau=gtau, CM=gCM, MTL=gmtl)) stopifnot(mapply(all.eq.mod, mods, mod2, sub=TRUE)) }# only if(doExtras) robustbase/tests/exact-fit-categorical.R0000644000176200001440000000145511726415440020060 0ustar liggesusers## recreating exact fit problem for categorical data require(robustbase) ## some simple balanced dataset with one grouping variable ngrp <- 10 nrep <- 10 set.seed(2) data <- data.frame(y = rnorm(ngrp*nrep), grp=rep(letters[1:ngrp], each=nrep)) ## this works fine m1 <- lmrob(y ~ grp, data) ## now contaminate the dataset data2 <- data data2$y[1:48] <- 1e10 try(m2 <- lmrob(y ~ grp, data2, trace.lev = 3)) ## All observations of group "e" get rob. 
weight of 0: weights <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ## from trace output weights %*% m1$x robustbase/tests/mc-strict.R0000644000176200001440000003313714236516416015634 0ustar liggesusers #### Testing medcouple mc() and related functions ### here, we do "strict tests" -- hence no *.Rout.save ### hence, can also produce non-reproducible output such as timing library(robustbase) for(f in system.file("xtraR", c("mcnaive.R", # -> mcNaive() "styleData.R", # -> smallD list of small datasets "platform-sessionInfo.R"), # -> moreSessionInfo() package = "robustbase", mustWork=TRUE)) { cat("source(",f,"):\n", sep="") source(f) } ## instead of relying on system.file("test-tools-1.R", package="Matrix"): source(system.file("xtraR/test-tools.R", package = "robustbase")) # assert.EQ() etc assertEQm12 <- function(x,y, giveRE=TRUE, ...) assert.EQ(x,y, tol = 1e-12, giveRE=giveRE, ...) ## ^^ shows *any* difference ("tol = 0") unless there is no difference at all ## c.time <- function(...) cat('Time elapsed: ', ..., '\n') S.time <- function(expr) c.time(system.time(expr)) DO <- function(...) S.time(stopifnot(...)) ## from {sfsmisc}: lseq <- function(from, to, length) exp(seq(log(from), log(to), length.out = length)) mS <- moreSessionInfo(print.=TRUE) (doExtras <- robustbase:::doExtras())# TRUE if interactive() or activated by envvar if(!dev.interactive(orNone=TRUE)) pdf("mc-strict.pdf") tools::assertCondition(mc(1:11), "message") # change of default to doScale=FALSE smlMC <- vapply(smallD, mc, pi) smlMCo <- vapply(smallD, mc, pi, doScale=TRUE, c.huberize=Inf) yI <- c("yI", "yI."); notI <- setdiff(names(smallD), yI) yI2 <- c(yI, "x3I"); notI2 <- setdiff(names(smallD), yI2) assert.EQ(smlMC [notI], smlMCo[notI], tol = 4e-11, giveRE=TRUE) ## above small diff. 
is from 'x3I'; dropping that, too, leaves no differences table(smlMC [notI2] == smlMCo[notI2]) n.set <- c(1:99, 1e5L+ 0:1) # large n gave integer overflow in earlier versions DO(0 == sapply(n.set, function(n) mc(seq_len(n)))) DO(0 == sapply(n.set, function(n) mc(seq_len(n), doRefl=FALSE))) DO(0 == sapply(1:100, function(n) mcNaive(seq_len(n), "simple"))) DO(0 == sapply(1:100, function(n) mcNaive(seq_len(n), "h.use" ))) x1 <- c(1, 2, 7, 9, 10) mcNaive(x1) # = -1/3 assertEQm12(-1/3, mcNaive(x1, "simple")) assertEQm12(-1/3, mcNaive(x1, "h.use")) assertEQm12(-1/3, mc(x1)) x2 <- c(-1, 0, 0, 0, 1, 2) mcNaive(x2, meth="simple") # = 0 - which is wrong mcNaive(x2, meth="h.use") # = 1/6 = 0.16666 assertEQm12(1/6, mc(x2)) assertEQm12(1/6, mcNaive(x2, "h.use")) x4 <- c(1:5,7,10,15,25, 1e15) ## - bombed in original algo mcNaive(x4,"h.use") # 0.5833333 assertEQm12( 7/12, mcNaive(x4, "h.use")) assertEQm12( 7/12, mcNaive(x4, "simple")) assertEQm12( 7/12, mc( x4, doRefl= FALSE)) assertEQm12(-7/12, mc(-x4, doRefl= FALSE)) xx <- c(-3, -3, -2, -2, -1, rep(0, 6), 1, 1, 1, 2, 2, 3, 3, 5) stopifnot(exprs = { mc(xx, doScale=TRUE , c.huberize = Inf) == 0 ## old mc() mc(xx) == 0 mc(xx, doReflect=FALSE) == 0 -mc(-xx, doReflect=FALSE) == 0 mcNaive(xx, "h.use" ) == 0 mcNaive(xx, "simple") == 0 }) set.seed(17) for(n in 3:50) { cat(" ") for(k in 1:5) { x <- rlnorm(n) mc1 <- mc(x) mc2 <- mcNaive(x, method = "simple") mc3 <- mcNaive(x, method = "h.use" ) stopifnot(all.equal(mc1, mc3, tolerance = 1e-10),# 1e-12 not quite ok mc2 == mc3) cat(".") } }; cat("\n") ###---- Strict tests of adjOutlyingness(): ### ================= changed after long-standing bug fix in Oct.2014 ## For longley, note n < 4p and hence "random nonsense" numbers set.seed(1); S.time(a1.1 <- adjOutlyingness(longley)) set.seed(11); S.time(a1.2 <- adjOutlyingness(longley)) ## set.seed(2); S.time(a2 <- adjOutlyingness(hbk)) # 75 x 4 set.seed(3); S.time(a3 <- adjOutlyingness(hbk[, 1:3]))# the 'X' space set.seed(4); S.time(a4 <- adjOutlyingness(milk)) # obs.63 = obs.64 set.seed(5); S.time(a5 <- adjOutlyingness(wood)) # 20 x 6 ==> n < 4p set.seed(6); S.time(a6 <- adjOutlyingness(wood[, 1:5]))# ('X' space) 20 x 5: n = 4p (ok!) ## 32-bit <-> 64-bit different results {tested on Linux only} is32 <- .Machine$sizeof.pointer == 4 ## <- should work for Linux/MacOS/Windows isMac <- Sys.info()[["sysname"]] == "Darwin" isSun <- Sys.info()[["sysname"]] == "SunOS" Rnk <- function(u) rank(unname(u), ties.method = "first") ## to use for testing below: cat("\nRnk(a3 $ adjout): "); dput(Rnk(a3$adjout), control= {}) cat("\nRnk(a4 $ adjout): "); dput(Rnk(a4$adjout), control= {}) (i.a4Out <- which( ! a4$nonOut)) # the outliers -- varies "wildly" stopifnot(70 %in% i.a4Out) { if(is32 && !isMac) all.equal(i.a4Out, c(1, 2, 41, 70)) ## and this is "typically" true, but not for a 64-bit Linux version bypassing BLAS in matprod else if(isSun || isMac) TRUE else if(grepl("^Fedora", osVersion) && !is32) identical(i.a4Out, 70L) # since Dec 2020 (F 32) else all.equal(i.a4Out, c(9:19, 23:27,57, 59, 70, 77)) } ## only for ATLAS (BLAS/Lapack), not all are TRUE; which ones [but n < 4p] if(!all(a5$nonOut)) print(which(!a5$nonOut)) # if we know, enable check below stopifnot(exprs = { which(!a2$nonOut) == 1:14 which(!a3$nonOut) == 1:14 ## 'longley', 'wood' have no outliers in the "adjOut" sense: if(doExtras && !isMac) { ## longley also has n < 4p (!) if(mS$ strictR) sum(a1.2$nonOut) >= 15 # sum(.) 
= 16 [nb-mm3, Oct.2014] else ## however, openBLAS Fedora Linux /usr/bin/R gives sum(a1.2$nonOut) = 13 sum(a1.2$nonOut) >= 13 } else TRUE if(doExtras) { ## have n < 4p (!) if(mS$ strictR) a5$nonOut else ## not for ATLAS sum(a5$nonOut) >= 18 # 18: OpenBLAS } else TRUE a6$nonOut[-20] ## hbk (n = 75, p = 3) should be "stable" (but isn't quite) abs(Rnk(a3$adjout) - c(62, 64, 69, 71, 70, 66, 65, 63, 68, 67, 73, 75, 72, 74, 35, 60, 55, 4, 22, 36, 6, 33, 34, 28, 53, 16, 13, 9, 27, 31, 49, 39, 20, 50, 14, 2, 24, 40, 54, 21, 17, 37, 52, 23, 58, 19, 61, 11, 25, 8, 46, 59, 48, 47, 29, 44, 43, 42, 7, 30, 18, 51, 41, 15, 10, 38, 3, 56, 57, 5, 1, 12, 26, 32, 45) ) <= 3 ## all 0 on 64-bit (F 32) Linux }) ## milk (n = 86) : -- Quite platform dependent! r <- Rnk(a4$adjout) r64 <- ## the 64-bit (ubuntu 14.04, nb-mm3) values: c(65, 66, 61, 56, 47, 51, 19, 37, 74, 67, 79, 86, 83, 84, 85, 82, 81, 73, 80, 55, 27, 3, 70, 68, 78, 76, 77, 53, 48, 8, 29, 33, 6, 32, 28, 31, 36, 40, 22, 58, 64, 52, 39, 63, 44, 30, 57, 46, 43, 45, 25, 54, 12, 1, 9, 2, 71, 14, 75, 23, 4, 10, 34, 35, 17, 24, 15, 20, 38, 72, 42, 13, 50, 60, 62, 26, 69, 18, 5, 21, 7, 49, 11, 41, 59, 16) r32 <- ## Linux 32bit (florence: 3.14.8-100.fc19.i686.PAE) c(78, 79, 72, 66, 52, 61, 22, 41, 53, 14, 74, 85, 82, 83, 84, 80, 81, 56, 73, 65, 30, 3, 16, 17, 68, 57, 58, 63, 54, 8, 32, 37, 6, 36, 31, 35, 40, 44, 25, 69, 77, 62, 43, 76, 48, 34, 67, 51, 47, 49, 28, 64, 12, 1, 9, 2, 33, 15, 59, 26, 4, 10, 38, 39, 20, 27, 18, 23, 42, 86, 46, 13, 60, 71, 75, 29, 50, 21, 5, 24, 7, 55, 11, 45, 70, 19) d <- (r - if (is32) r32 else r64) cbind(r, d) table(abs(d)) cumsum(table(abs(d))) # <=> unscaled ecdf(d) ## For the biggest part (79 out of 86), the ranks are "close": ## 2014: still true, but in a different sense.. ## ^ typically, but e.g., *not* when using non-BLAS matprod(): sum(abs(d) <= 17) >= 78 sum(abs(d) <= 13) >= 75 ## check of adjOutlyingness *free* bug ## reported by Kaveh Vakili set.seed(-37665251) X <- matrix(rnorm(100*5), 100, 5) Z <- matrix(rnorm(10*5)/100, 10, 5) Z[,1] <- Z[,1] + 5 X[91:100,] <- Z # if anything these should be outliers, but ... for (i in 1:10) { ## this would produce an error in the 6th iteration aa <- adjOutlyingness(x=X, ndir=250) if(any(is.out <- !aa$nonOut)) cat("'outliers' at obs.", paste(which(is.out), collapse=", "),"\n") stopifnot(1/4 < aa$adjout & aa$adjout < 16) } ## Check "high"-dimensional Noise ... typically mc() did *not* converge for some re-centered columns ## Example by Valentin Todorov: n <- 50 p <- 30 set.seed(1) # MM a <- matrix(rnorm(n * p), nrow=n, ncol=p) str(a) kappa(a) # 20.42 (~ 10--20 or so; definitely not close to singular) a.a <- adjOutlyingness(a, mcScale=FALSE, # <- my own recommendation trace.lev=1) a.s <- adjOutlyingness(a, mcScale=TRUE, trace.lev=1) ## a.a : str(a.a) # high 'adjout' values "all similar" -> no outliers .. hmm .. ??? (hdOut <- which( ! a.a$nonOut)) ## indices of "outlier" -- very platform dependent ! a.a$MCadjout; all.equal(a.a$MCadjout, 0.136839766177, tol = 1e-12) # seen 7.65e-14 and "big" differences on non-default platforms ## a.s : which(! a.s$nonOut ) # none [all TRUE] a.s$MCadjout # platform dependent; saw all.equal(a.s$MCadjout, 0.32284906741568, tol = 1e-13) # seen 2.2e-15 .. # and big diffs on non-default platforms ## ## The adjout values are all > 10^15 !!! why ?? ## Now (2021) I know: n < 4*p ==> can find 1D-projection where 1 of the 2 {Q3-Q2, Q2-Q1} is 0 ! 
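## Toy illustration of the above remark (a sketch for exposition only, *not* one of
## the checks; 'z.tie' is a made-up vector): once enough projected values coincide
## -- which a suitable direction can force when n < 4*p -- one of the two quartile
## gaps {Q3-Q2, Q2-Q1} is exactly 0, and a ratio scaled by that gap blows up:
z.tie <- c(-1, rep(0, 6), 1, 2)   # 6 of the 9 values tie at the median
quantile(z.tie, (1:3)/4)          # Q1 = Q2 = Q3 = 0  ==>  both gaps are 0
(min(z.tie) - median(z.tie)) / (median(z.tie) - quantile(z.tie, 1/4)) # -1/0 = -Inf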
##--------------------------------------------------------------------------------------------- ###-- Back to mc() checks for "hard" cases ### ===== ----------------------- ## "large n" (this did overflow sum_p, sum_q earlier ==> had inf.loop): set.seed(3); x <- rnorm(2e5) (mx <- mc(x, trace.lev=3)) stopifnot(print(abs(mx - -0.000772315846101988)) < 1e-15) # 3.252e-19, 64b Linux # 1.198e-16, 32b Windows ### Some platform info : local({ nms <- names(Si <- Sys.info()) dropNms <- c("nodename", "machine", "login") structure(Si[c("nodename", nms[is.na(match(nms, dropNms))])], class="simple.list") }) if(identical(1L, grep("linux", R.version[["os"]]))) { ##----- Linux - only ---- ## Sys.procinfo <- function(procfile) { l2 <- strsplit(readLines(procfile),"[ \t]*:[ \t]*") r <- sapply(l2[sapply(l2, length) == 2], function(c2)structure(c2[2], names= c2[1])) attr(r,"Name") <- procfile class(r) <- "simple.list" r } ## Scpu <- Sys.procinfo("/proc/cpuinfo") Smem <- Sys.procinfo("/proc/meminfo") print(Scpu[c("model name", "cpu MHz", "cache size", "bogomips")]) print(Smem[c("MemTotal", "SwapTotal")]) } ##' Checking the breakdown point of mc() --- Hubert et al. theory said : 25% ##' using non-default doReflect=FALSE as that corresponds to original Hubert et al. ##' ##' @title Medcouple mc() checking ##' @param x ##' @param Xfun ##' @param eps ##' @param NAiferror ##' @param doReflect ##' @param ... ##' @return mc(*,..) or NaN in case mc() signals an error [non-convergence] ##' @author Martin Maechler mcX <- function(x, Xfun, eps=0, NAiferror=FALSE, doReflect=FALSE, ...) { stopifnot(is.numeric(x), is.function(Xfun), "eps" %in% names(formals(Xfun))) myFun <- if(NAiferror) function(u) tryCatch(mc(Xfun(u, eps=eps), doReflect=doReflect, ...), error = function(e) NaN) else function(u) mc(Xfun(u, eps=eps), doReflect=doReflect, ...) vapply(x, myFun, 1.) } X1. <- function(u, eps=0) c(1,2,3, 7+(-10:10)*eps, u + (-1:1)*eps) ## ==> This *did* breakdown [but points not "in general position"]: ## but now is stable: r.mc1 <- curve(mcX(x, X1.), 10, 1e35, log="x", n=1001) stopifnot(r.mc1$y == 0) # now stable if(FALSE) { rt1 <- uniroot(function(x) mcX(exp(x), X1.) - 1/2, lower=0, upper=500) exp(rt1$root) # 4.056265e+31 } ## eps > 0 ==> No duplicated points ==> theory says breakdown point = 0.25 ## ------- but get big numerical problems: if(FALSE) { # ==> convergence problem [also in maxit = 1e5] .. really an *inf* loop! r.mc1.1 <- curve(mcX(x, X1., eps= .1 ), 10, 1e35, log="x", n=1001) r.mc1.2 <- curve(mcX(x, X1., eps= .01 ), 10, 1e35, log="x", n=1001) r.mc1.3 <- curve(mcX(x, X1., eps= .001), 10, 1e35, log="x", n=1001) r.mc1.5 <- curve(mcX(x, X1., eps= 1e-5), 10, 1e35, log="x", n=1001) r.mc1.8 <- curve(mcX(x, X1., eps= 1e-8), 10, 1e35, log="x", n=1001) r.mc1.15 <- curve(mcX(x, X1., eps=1e-15), 10, 1e35, log="x", n=1001)# still! } ## practically identical to eps = 0 where we have breakdown (see above) r.mc1.16 <- curve(mcX(x, X1., eps=1e-16), 10, 1e35, log="x", n=1001) all.equal(r.mc1, r.mc1.16, tol=1e-15)#-> TRUE ## Quite bad case: Non convergence X2. <- function(u) c(1:3, seq(6, 8, by = 1/8), u, u, u) ## try(mc(X2.(4.3e31)))## -> error: no convergence ## but now stopifnot(exprs = { all.equal(1/30, mc(X2.(4.3e31)), tol=1e-12) all.equal(1/30, mc(X2.(4.3e31), eps1=1e-7, eps2=1e-100), tol=1e-12) }) ## related, more direct: X3. <- function(u) c(10*(1:3), 60:80, (4:6)*u) stopifnot(0 == mc(X3.(1e31), trace=5)) # fine convergence in one iter. 
stopifnot(0 == mc(X3.(1e32), trace=3)) # did *not* converge ### TODO : find example with *smaller* sample size -- with no convergence X4. <- function(u, eps, ...) c(10, 70:75, (2:3)*u) mc(X4.(1e34))# "fine" ## now stable too: r.mc4 <- curve(mcX(x, X4.), 100, 1e35, log="x", n=2^12) stopifnot(abs(1/3 - r.mc4$y) < 1e-15) X5. <- function(u) c(10*(1:3), 70:78, (4:6)*u) stopifnot(all.equal(4/15, mc(X5.(1e32), maxit=1000))) X5. <- function(u, eps,...) c(5*(1:12), (4:6)*u) str(r.mc5 <- mc(X5.(1e32), doReflect=FALSE, full.result = TRUE)) ## Now, stable: stopifnot(all.equal(1/5, c(r.mc5))) ## was 1; platform dependent .. stopifnot(all.equal(4/15, mc(X5.(5e31)))) # had no convergence w/ maxit=10000 r.mc5Sml <- curve(mcX(x, X5.), 1, 100, log="x", n=1024) ## quite astonishing x <- lseq(1, 1e200, 2^11) mc5L <- mcX(x, X5.) table(err <- abs(0.2 - mc5L[x >= 24])) # I see all 0! stopifnot(abs(err) < 1e-15) c.time(proc.time()) summary(warnings()) # seen 15 x In mcComp(....) : ## maximal number of iterations (100 =? 100) reached prematurely robustbase/tests/wgt-himed.R0000644000176200001440000000163414004270422015574 0ustar liggesusershimed <- function(x) { n2 <- 1 + length(x) %/% 2; sort(x, partial = n2)[n2] } ## Tolerance 2e-7 {13 * higher than default 1.49e-8 } is.all.equal <- function(x,y, tol = 2e-7) { is.logical(r <- all.equal(x,y, tolerance = tol)) && r } library(robustbase) stopifnot(is.na(wgt.himedian(numeric()))) ## hi-median() seg.faulted or inf.looped till Jan.3, 2021 options(digits = 7)# single precision! set.seed(15) cat(" n | range(x) | wgt.Himed\n", "------------------------------\n",sep="") for(i in 1:100) { n <- rpois(1, lam = 10) cat(formatC(n,wid=3)," ") x <- round(rnorm(n),3) iw <- 1 + rpois(n, lam = 2) him <- himed(rep(x, iw)) ## == naive R solution whim <- wgt.himedian (x, iw) if(!is.all.equal(whim, him)) cat("whim != him: ", whim, "!=", him,"\n") cat(formatC(range(x), wid = 6, flag="-"), "", formatC(whim, wid = 6, flag="+"), "\n") } robustbase/tests/glmrob-1.R0000644000176200001440000001666014236516416015351 0ustar liggesuserslibrary(robustbase) ## instead of relying on system.file("test-tools-1.R", package="Matrix"): source(system.file("xtraR/test-tools.R", package = "robustbase")) ###>> 1 ------------------- family = poisson ------------------------------------ ### very simple model [with outliers] set.seed(113) y <- rpois(17, lambda = 4) ## -> target: beta_0 = log(E[Y]) = log(4) = 1.386294 y[1:2] <- 99:100 # outliers y rm1 <- glmrob(y ~ 1, family = poisson, trace = TRUE, acc = 1e-13) # default is just 1e-4 ## and check the robustness weights: assert.EQ(c(0.0287933850640724, 0.0284930623638766, 0.950239140568007, 0.874115394740014), local({w <- rm1$w.r; w[ w != 1 ] }), tol = 1e-14) assert.EQ(coef(rm1), c("(Intercept)" = 1.41710946076738),tol = 1e-14) cm1 <- glm (y ~ 1, family = poisson, trace = TRUE) rmMT <- glmrob(y ~ 1, family = poisson, trace = TRUE, method="MT") (sMT <- summary(rmMT)) if(FALSE) # for manual digging: debug(robustbase:::glmrobMqle) allresid <- function(obj, types = c("deviance", "pearson", "working", "response")) { sapply(types, residuals, object = obj) } okFit <- function(obj, check.attr=FALSE, ...) { all.equal(obj$y, obj$fitted.values + residuals(obj, "response"), check.attributes=check.attr, ...) } ## check validity of several methods simultaneously: y. <- model.response(model.frame(rm1)) stopifnot(okFit(cm1), okFit(rm1), y. 
== y) alr.c <- allresid(cm1) alr.r <- allresid(rm1) ## MM --- just for now -- plot(resid(cm1), resid(rm1), asp=1); abline(0,1, col=2) plot(resid(cm1,type="pearson"), resid(rm1, type="pearson"), asp=1); abline(0,1, col=2) plot(resid(cm1,type="working"), resid(rm1, type="working"), asp=1); abline(0,1, col=2) ## leave away the outliers -- cm0 <- glm (y ~ 1, family = poisson, trace = TRUE, subset = -(1:2)) plot(resid(cm0), resid(rm1)[-(1:2)], asp=1); abline(0,1, col=2) plot(resid(cm0,type="pearson"), resid(rm1, type="pearson")[-(1:2)], asp=1); abline(0,1, col=2) plot(resid(cm0,type="working"), resid(rm1, type="working")[-(1:2)], asp=1); abline(0,1, col=2) plot(resid(cm0,type="response"), resid(rm1, type="response")[-(1:2)], asp=1); abline(0,1, col=2) ## Use weights (slow convergence !) w2 <- c(rep(1,8), rep(10,9)) rm2 <- glmrob(y ~ 1, family = poisson, trace = TRUE, weights = w2, maxit = 500, acc = 1e-10) # default is just 1e-4 ## slow convergence stopifnot(okFit(rm2)) ###>> 2 ------------------- family = binomial ----------------------------------- ## Using *factor* y ... x <- seq(0,5, length = 120) summary(px <- plogis(-5 + 2*x)) set.seed(7) (f <- factor(rbinom(length(x), 1, prob=px))) summary(m.c0 <- glm (f ~ x, family = binomial)) summary(m.r0 <- glmrob(f ~ x, family = binomial)) ## add outliers --- in y: f. <- f f.[i1 <- 2:3] <- 1 f.[i0 <- 110+c(1,7)] <- 0 m.c1 <- glm (f. ~ x, family = binomial) summary(m.r1 <- glmrob(f. ~ x, family = binomial)) ## hmm, not so robust? stopifnot(m.r1$w.r[c(i0,i1)] < 1/3, # well, at least down weighted ## and coefficients change less : (coef(m.r1) - coef(m.c0)) / (coef(m.c1) - coef(m.c0)) < 1) assert.EQ(c("(Intercept)" = -3.10817337603974, x = 1.31618564057790), coef(m.r1), tol= 1e-14, giveRE=TRUE) y <- as.numeric(as.character(f.)) m.r2 <- BYlogreg(x0=x, y=y, trace=TRUE, maxhalf= 10) m.r2A <- BYlogreg(x0=x, y=y, trace= 2 , maxhalf= 15) ## different.. but not so much: iB <- 1:5 assert.EQ(m.r2A[iB], m.r2[iB], tol = .003, giveRE=TRUE) assert.EQ(c("Intercept" = -2.9554950286, x = 1.2574679132), ## 32-bit{ada-5} -2.95549502890363 1.25746791332613 m.r2$coef, tol=8e-10, giveRE=TRUE)# seen 5.316e-10 for --disable-long-double assert.EQ( c(0.685919891749065, 0.256419206157062), ## 32-bit{ada-5}: ## 0.685919891858219, 0.256419206203016) m.r2$sterror, tol=4e-9)# seen 1.025e-9 for --disable-long-double data(foodstamp) str(foodstamp) ## Model with 'income' instead of log(income+1) is "interesting" ## because BYlogreg() needs maxhalf > 10 for convergence! m.fs0 <- glm (participation ~ ., family=binomial, data=foodstamp) m.fs0QL <- glmrob(participation ~ ., family=binomial, data=foodstamp) y.fs <- foodstamp[,"participation"] X.fs0 <- model.matrix(m.fs0) head(X.fs0) ## (former default) maxhalf = 10 leads to too early convergence: m.fsWBY. <- BYlogreg(x0=X.fs0, y=y.fs, addIntercept=FALSE, trace=TRUE, maxhalf=10) m.fs.BY. 
<- BYlogreg(x0=X.fs0, y=y.fs, initwml=FALSE, addIntercept=FALSE, trace=TRUE, maxhalf=10) m.fsWBY <- BYlogreg(x0=X.fs0, y=y.fs, addIntercept=FALSE, trace=TRUE, maxhalf=18) m.fs.BY <- BYlogreg(x0=X.fs0, y=y.fs, initwml=FALSE, addIntercept=FALSE, trace=TRUE, maxhalf=18) assert.EQ(m.fsWBY.[iB], m.fsWBY[iB], tol= 0.07)## almost 7% different assert.EQ(m.fs.BY.[iB], m.fs.BY[iB], tol= 0.08) foodSt <- within(foodstamp, { logInc <- log(1 + income) ; rm(income) }) m.fsML <- glm (participation ~ ., family=binomial, data=foodSt) m.fsQL <- glmrob(participation ~ ., family=binomial, data=foodSt) X.fs <- model.matrix(m.fsML) stopifnot(dim(X.fs) == c(150, 4)) # including intercept! try(## FIXME -- Mahalanobis fails with singular matrix, here: m.fsWBY <- BYlogreg(x0=X.fs, y=y.fs, addIntercept=FALSE, trace=TRUE, maxhalf=18) ) ## maxhalf=18 is too much --> no convergence (in 1000 steps) m.fs.BY <- BYlogreg(x0=X.fs, y=y.fs, initwml=FALSE, addIntercept=FALSE, trace=TRUE, maxhalf=18) signif( rbind(ML = coef(m.fsML), QL =coef(m.fsQL), WBY0=coef(m.fsWBY.), BY0=coef(m.fs.BY.), WBY =coef(m.fsWBY ), BY =coef(m.fs.BY) ) , 4) if(FALSE) { ## *scaling* of X ( ?? <==> ?? 'sigma1' ) ------------------ ## no "W" (Mahalanobis fail because of *singular* X): m.fs.BY100 <- BYlogreg(x0=100*X.fs, initwml=FALSE, y=y.fs, addIntercept=FALSE, trace=TRUE, maxhalf=18) ## ==> no convergence X1c <- cbind(1, 100*X.fs[,-1]) m.fsWBY1c <- BYlogreg(x0=X1c, y=y.fs, addIntercept=FALSE, trace=TRUE, maxhalf=18) ## ==> illegal singularity$kind }## not yet ###-------- Gamma ------------ ## Realistic "data" {from help(glmrob)}: mu <- c(122.131, 53.0979, 39.9039, 33.9232, 28.007, 24.923, 21.5747, 19.6971, 18.4516) ns.resid <- c(-0.0338228, 0.0923228, 0.0525284, 0.0317426, -0.035954, 0.00308925, -0.026637, -0.0353932, -0.0244761) Vmu <- c(14915.9, 2819.38, 1592.32, 1150.78, 784.39, 621.156, 465.467, 387.978, 340.462) Hp2 <- robustbase:::Huberprop2 ## Hp2. <- robustbase:::Huberprop2. ## was: phis <- 2^(-70:-1) -- but that was *not* reliable (on 32-bit e.g.) 
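## i.e., a log2-spaced grid of 42 phi values, from 2^-42 ~= 2.3e-13 up to 0.5 :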
phis <- 2^(-42:-1) H1 <- sapply(phis, function(phi) Hp2(phi, ns.resid=ns.resid, mu=mu, Vmu=Vmu, tcc = 1.345)) ## H2 <- sapply(phis, function(phi) ## Hp2.(phi, ns.resid=ns.resid, mu=mu, Vmu=Vmu, tcc = 1.345)) dput(signif(H1)) H2 <- c(9.91741, 9.88674, 9.89438, 9.88674, 9.88961, 9.88961, 9.88961, 9.88984, 9.88973, 9.88964, 9.8897, 9.88975, 9.88976, 9.88975, 9.88974, 9.88974, 9.88974, 9.88974, 9.88974, 9.88974, 9.88974, 9.88974, 9.88975, 9.88975, 9.88975, 9.33161, 8.70618, 8.39347, 8.23714, 8.15902, 8.12006, 7.16275, 3.38703, -0.0879886, -2.3322, -4.16929, -5.26821, -5.80526, -6.04822, -6.11538, -6.02613, -5.66718) all.equal(H1,H2, tolerance = 0) # -> see 8.869e-7 stopifnot(all.equal(H1,H2, tolerance = 1e-5)) if(dev.interactive(TRUE)) # shows that phi < 1e-12 is doubtful matplot(phis, cbind(H1,H2), log="x", ylim = rrange(H1), type="o") robustbase/tests/MCD-specials.R0000644000176200001440000000141410377104102016107 0ustar liggesusers#### Test special cases for covMcd() library(robustbase) ### 1) p = 1 ---------------------------------------------------- set.seed(1) x <- c(rnorm(50),100, 1e10) (r1 <- covMcd(x)) str(r1) summary(r1) ## with alpha = 1 (r1.1 <- covMcd(x, alpha = 1)) str(r1.1) summary(r1.1) ### 1b) p = 1, constant scale (rc <- covMcd(rep(1,12))) str(rc) summary(rc) ## with alpha = 1 (rc1 <- covMcd(rep(1,12), alpha = 1)) str(rc1) summary(rc1) ### 2) constant observations { multivariate scale == 0 } ----------- (X <- matrix(rep(2*(1:4), 12), nrow = 12, byrow = TRUE)) (rC <- covMcd(X)) summary(rC) (rC1 <- covMcd(X, alpha = 1)) summary(rC1) ### 3) alpha = 1 : classical estimates --- for general cases -------- cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/lmrob-methods.R0000644000176200001440000000413414440373076016476 0ustar liggesusers### tests methods argument of lmrob.control library(robustbase) data(stackloss) cat("doExtras:", doExtras <- robustbase:::doExtras(),"\n") str(ctrl2 <- lmrob.control(trace.lev = if(doExtras) 2 else 0)) ## S set.seed(0) summary(m0 <- lmrob(stack.loss ~ ., data = stackloss, method = "S", compute.outlier.stats = "S")) set.seed(0) m0a <- lmrob.S(m0$x, stack.loss, ctrl2) all.equal(m0 [c('coefficients', 'scale', 'rweights')], m0a[c('coefficients', 'scale', 'rweights')]) ## MM set.seed(0) summary(m1 <- lmrob(stack.loss ~ ., data = stackloss, method = "MM", compute.outlier.stats = "S")) set.seed(0) m2 <- update(m1, method = "SM") all.equal(m1[c('coefficients', 'scale', 'cov')], m2[c('coefficients', 'scale', 'cov')]) set.seed(0) m3 <- update(m0, method = "SM", cov = '.vcov.w') ## SMD set.seed(0) summary(m4 <- lmrob(stack.loss ~ ., data = stackloss, method = "SMD", psi = 'bisquare', compute.outlier.stats = "S")) summary(m4a <- lmrob..D..fit(m3)) ## rearrange m4a and update call m4a <- m4a[names(m4)] class(m4a) <- class(m4) m4a$call <- m4$call all.equal(m4, m4a, check.environment = FALSE) ## SMDM set.seed(0) summary(m5 <- lmrob(stack.loss ~ ., data = stackloss, method = "SMDM", psi = 'bisquare', compute.outlier.stats = "S")) summary(m5a <- lmrob..M..fit(obj=m4)) ## rearrange m5a m5a <- m5a[names(m5)] class(m5a) <- class(m5) all.equal(m5, m5a, check.environment = FALSE) #-> 3 string mismatch ## Fast S large n strategy (sped up) model <- model.frame(LNOx ~ . 
,data = NOxEmissions) control <- lmrob.control(fast.s.large.n = 10, n.group = 341, groups = 2) set.seed(0) try(ret <- lmrob.S(model.matrix(model, NOxEmissions)[1:682,], NOxEmissions$LNOx[1:682], control)) ## do what the error says control <- lmrob.control(fast.s.large.n = Inf) try(ret <- lmrob.S(model.matrix(model, NOxEmissions)[1:682,], NOxEmissions$LNOx[1:682], control)) ##-> Error ...... DGEEQU: column 30 of the design matrix is exactly zero. ## ## still fails, but this error is to be expected since only a part ## of the design matrix is given robustbase/tests/subsample.R0000644000176200001440000003110014440373076015706 0ustar liggesusers### test subsample ### LU decomposition and singular subsamples handling require(robustbase) source(system.file("xtraR/subsample-fns.R", package = "robustbase", mustWork=TRUE)) ## instead of relying on system.file("test-tools-1.R", package="Matrix"): source(system.file("xtraR/test-tools.R", package = "robustbase")) # assert.EQ(), showProc.time() .. options(nwarnings = 4e4) cat("doExtras:", doExtras <- robustbase:::doExtras(),"\n") showProc.time() A <- rbind(c(0.001, 1), c(1, 2)) set.seed(11) str(sa <- tstSubsample(A)) A <- rbind(c(3, 17, 10), c(2, 4, -2), c(6, 18, 12)) tstSubsample(A) ## test some random matrix set.seed(1002) A <- matrix(rnorm(100), 10) tstSubsample(A) ## test singular matrix handling A <- rbind(c(1, 0, 0), c(0, 1, 0), c(0, 1, 0), c(0, 0, 1)) tstSubsample(A) ## test subsample with mts > 0 data <- data.frame(y = rnorm(9), expand.grid(A = letters[1:3], B = letters[1:3])) x <- model.matrix(y ~ ., data) y <- data$y ## this should produce a warning and return status == 2 showSys.time(z <- Rsubsample(x, y, mts=2)) stopifnot(z$status == 2) ## test equilibration ## columns only X <- rbind(c(1e-7, 1e-10), c(2 , 0.2)) y <- 1:2 tstSubsample(t(X), y) ## rows only X <- rbind(c(1e-7, 1e+10), c(2 , 0.2)) y <- 1:2 tstSubsample(X, y) ## both X <- rbind(c(1e-7, 2 ), c(1e10, 2e12)) y <- 1:2 tstSubsample(X, y) showProc.time() ## test real data example data(possumDiv)## 151 * 9; the last two variables are factors with(possumDiv, table(eucalyptus, aspect)) mf <- model.frame(Diversity ~ .^2, possumDiv) X <- model.matrix(mf, possumDiv) y <- model.response(mf) stopifnot(qr(X)$rank == ncol(X)) ## this used to fail: different pivots in step 37 str(s1 <- tstSubsample(X, y)) s2 <- tstSubsample(X / max(abs(X)), y / max(abs(X))) s3 <- tstSubsample(X * 2^-50, y * 2^-50) ## all components *BUT* x, y, lu, Dr, Dc, rowequ, colequ : nm <- names(s1); nm <- nm[is.na(match(nm, c("x","y","lu", "Dr", "Dc", "rowequ", "colequ")))] stopifnot(all.equal(s1[nm], s2[nm], tolerance=1e-10), all.equal(s1[nm], s3[nm], tolerance=1e-10)) showProc.time() set.seed(10) nsing <- sum(replicate(if(doExtras) 200 else 20, tstSubsampleSing(X, y))) stopifnot(nsing == 0) showProc.time() ## test example with many categorical predictors - 2 different random seeds: set.seed(10) ; r1 <- lmrob(Diversity ~ .^2 , data = possumDiv, cov="none") set.seed(108); r2 <- lmrob(Diversity ~ .^2 , data = possumDiv, cov="none")# lmrob.S() failed (i1 <- r1$init) # print() (i2 <- r1$init) # ... 
and they are "somewhat" close: stopifnot(all.equal(r1[names(r1) != "init.S"], r2[names(r2) != "init.S"], tol = 0.40)) c1 <- coef(r1) c2 <- coef(r2) relD <- (c1-c2)*2/(c1+c2) xCf <- which(abs(relD) >= 10) stopifnot(exprs = { identical(xCf, c(`Bark:aspectSW-NW` = 46L)) all.equal(c1[-xCf], c2[-xCf], tol = 0.35) # 0.3418 sign(c1[-xCf]) == sign(c2[-xCf]) }) showProc.time() ## investigate problematic subsample: idc <- 1 + c(140, 60, 12, 13, 89, 90, 118, 80, 17, 134, 59, 94, 36, 43, 46, 93, 107, 62, 57, 116, 11, 45, 35, 38, 120, 34, 29, 33, 147, 105, 115, 92, 61, 91, 104, 141, 138, 129, 130, 84, 119, 132, 6, 135, 112, 16, 67, 41, 102, 76, 111, 82, 148, 24, 131, 10, 96, 0, 87, 21, 127, 56, 124) rc <- lm(Diversity ~ .^2 , data = possumDiv, subset = idc) X <- model.matrix(rc) y <- possumDiv$Diversity[idc] tstSubsample(X, y)## have different pivots ... could not find non-singular lu <- LU.gaxpy(t(X)) stopifnot(lu$sing) zc <- Rsubsample(X, y) stopifnot(zc$status > 0) ## column 52 is linearly dependent and should have been discarded ## qr(t(X))$pivot image(as(round(zc$lu - (lu$L + lu$U - diag(nrow(lu$U))), 10), "Matrix")) image(as( sign(zc$lu) - sign(lu$L + lu$U - diag(nrow(lu$U))), "Matrix")) showProc.time() ## test equilibration ## colequ only X <- matrix(c(1e-7, 2, 1e-10, 0.2), 2) y <- 1:2 tstSubsample(t(X), y) ## rowequ only X <- matrix(c(1e-7, 2, 1e10, 0.2), 2) y <- 1:2 tstSubsample(X, y) ## both X <- matrix(c(1e-7, 1e10, 2, 2e12), 2) y <- 1:2 tstSubsample(X, y) showProc.time() ### real data, see MM's ~/R/MM/Pkg-ex/robustbase/hedlmrob.R ## close to singular cov(): attach(system.file("external", "d1k27.rda", package="robustbase", mustWork=TRUE)) fm1 <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k27) ## ^^^^^ gave error, earlier, now with a warning -- use ".vcov.w" ## --> cov = ".vcov.w" fm2 <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k27, cov = ".vcov.w", trace = TRUE) showProc.time()# 2.77 if(doExtras) withAutoprint({##--------------------------------------------------------- ## Q: does it change to use numeric instead of binary factors ? ## A: not really .. d1k.n <- d1k27 d1k.n[-(1:5)] <- lapply(d1k27[,-(1:5)], as.numeric) fm1.n <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k.n) fm2.n <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k.n, cov = ".vcov.w", trace = 2) summary(weights(fm1, type="robustness")) hist(weights(fm1, type="robustness"), main="robustness weights of fm1") rug(weights(fm1, type="robustness")) showProc.time()## 2.88 ## fmc <- lm (y ~ poly(a,2)-a + poly(tf, 2)-tf + poly(A, 2)-A + . , data = d1k27) summary(fmc) ## -> has NA's for 'a, tf, A' --- bad that it did *not* work to remove them nform <- update(formula(fm1), ~ . +poly(A,2) -A -I(A^2) +poly(a,2) -a -I(a^2) +poly(tf,2) -tf -I(tf^2)) fm1. <- lmrob(nform, data = d1k27)# now w/o warning !? !! fm2. <- lmrob(nform, data = d1k27, cov = ".vcov.w", trace = TRUE) ## now lmrob takes care of NA coefficients automatically lmrob(y ~ poly(a,2)-a + poly(tf, 2)-tf + poly(A, 2)-A + . 
, data = d1k27) showProc.time() ## 4.24 }) ## only if(doExtras) ----------------------------------------------------- ## test exact fit property set.seed(20) data <- data.frame(y=c(rep.int(0, 20), round(100*rnorm(5))), group=rep(letters[1:5], each=5)) x <- model.matrix(y ~ group, data) (ini <- lmrob.S(x, data$y, lmrob.control())) (ret <- lmrob(y ~ group, data)) summary(ret) showProc.time() ## 4.24 ##--- continuous x -- exact fit -- inspired by Thomas Mang's real data example mkD9 <- function(iN, dN = 1:m) { stopifnot((length(iN) -> m) == length(dN), 1 <= m, m <= 5, iN == as.integer(iN), is.numeric(dN), !is.na(dN)) x <- c(-3:0,0:1,1:3) # {n=9; sorted; x= 0, 1 are "doubled"} y <- x+5 y[iN] <- y[iN] + dN data.frame(x,y) } mkRS <- function(...) { set.seed(...); .Random.seed } d <- mkD9(c(1L,3:4, 7L)) rs2 <- mkRS(2) Se <- tryCatch(error = identity, with(d, lmrob.S(cbind(1,x), y, lmrob.control("KS2014", seed=rs2)))) ## gave DGELS rank error {for lmrob.c+wg..} if(inherits(Se, "error")) { cat("Caught ") print(Se) } else withAutoprint({ ## no error coef(Se) stopifnot(coef(Se) == c(5, 1)) # was (0 0) residuals(Se) # was == y ---- FIXME }) ## try 100 different seeds repS <- lapply(1:100, function(ii) tryCatch(error = identity, with(d, lmrob.S(cbind(1,x), y, lmrob.control("KS2014", seed = mkRS(ii)))))) if(FALSE) ## was str(unique(repS))## ==> 100 times the same error ## now completely different: *all* returned properly str(cfS <- t(sapply(repS, coef))) # all numeric -- not *one* error -- ## even all the *same* (5 1) solution: (ucfS <- unique(cfS)) stopifnot(identical(ucfS, array(c(5, 1), dim = 1:2, dimnames = list(NULL, c("", "x"))))) ## *Not* "KS2014" but the defaults works *all the time* (!) repS0 <- lapply(1:100, function(ii) tryCatch(error = identity, with(d, lmrob.S(cbind(1,x), y, lmrob.control(seed = mkRS(ii)))))) summary(warnings()) ## 100 identical warnings: ## In lmrob.S(cbind(1, x), y, lmrob.control(seed = mkRS(ii))) : ## S-estimated scale == 0: Probably exact fit; check your data str(cfS0 <- t(sapply(repS0, coef))) # all numeric -- not *one* error ## even all the same *and* the same as "KS2014" (ucfS0 <- unique(cfS0)) stopifnot(nrow(ucfS0) == 1L, ucfS0 == c(5,1)) d9L <- list( mkD9(c(1L,3L, 5L, 7L)) , mkD9(c(1L,3L, 8:9)) , mkD9(2L*(1:4)) ) if(doExtras) { sfsmisc::mult.fig(length(d9L)); invisible(lapply(d9L, function(d) plot(y ~ x, data=d))) } dorob <- function(dat, control=lmrob.control(...), meth = c("S", "MM"), doPl=interactive(), cex=1, ...) { meth <- match.arg(meth) stopifnot(is.data.frame(dat), c("x","y") %in% names(dat), is.list(control)) if(doPl) plot(y ~ x, data=dat) ## with(dat, n.plot(x, y, cex=cex)) ans <- tryCatch(error = identity, switch(meth , "S" = with(dat, lmrob.S(cbind(1,x), y, control)) , "MM"= lmrob(y ~ x, data = dat, control=control) , stop("invalid 'meth'"))) if(!doPl) return(ans) ## else if(!inherits(ans, "error")) { abline(coef(ans)) } else { # error mtext(paste(paste0("lmrob.", meth), "Error:", conditionMessage(ans))) } invisible(ans) } ## a bad case -- much better new robustbase >= 0.99-0 Se <- dorob(d9L[[1]], lmrob.control("KS2014", mkRS(2), trace.lev=4)) ## was really bad -- ended returning coef = (0 0); fitted == 0, residuals == 0 !! if(doExtras) sfsmisc::mult.fig(length(d9L)) r0 <- lapply(d9L, dorob, seed=rs2, doPl=doExtras) # 3 x ".. 
exact fit" warning if(doExtras) print(r0) ## back to 3 identical fits: (5 1) (cf0 <- sapply(r0, coef)) stopifnot(cf0 == c(5,1)) if(doExtras) sfsmisc::mult.fig(length(d9L)) ### Here, all 3 were "0-models" r14 <- lapply(d9L, dorob, control=lmrob.control("KS2014", seed=rs2), doPl=doExtras) ## --> 3 (identical) warnings: In lmrob.S(cbind(1, x), y, control) :# ## S-estimated scale == 0: Probably exact fit; check your data ## now *does* plot if(doExtras) print(r14) ## all 3 are "identical" (cf14 <- sapply(r14, coef)) identical(cf0, cf14) # see TRUE; test a bit less: stopifnot(all.equal(cf0, cf14, tol=1e-15)) ## use "large n" ctrl.LRG.n <- lmrob.control("KS2014", seed=rs2, trace.lev = if(doExtras) 2 else 1, # 3: too much (for now), nResample = 60, fast.s.large.n = 7, n.group = 3, groups = 2) rLrg.n <- lapply(d9L, \(d) lmrob.S(cbind(1,d$x), d$y, ctrl.LRG.n)) summary(warnings()) sapply(rLrg.n, coef) ## currently ... .... really would want always (5 1) ## [,1] [,2] [,3] ## [1,] 5 5 7.333333 ## [2,] 1 1 1.666667 ## ==> use lmrob() instead of lmrob.S(): mm0 <- lapply(d9L, dorob, meth = "MM", seed=rs2, doPl=doExtras) # looks all fine -- no longer: error in [[3]] if(doExtras) print(mm0) ## now, the 3rd one errors (on Linux, not on M1 mac!) (cm0 <- sapply(mm0, function(.) if(inherits(.,"error")) noquote(paste("Caught", as.character(.))) else coef(.))) ## no longer needed c0.12 <- rbind(`(Intercept)` = c(5.7640215, 6.0267156), x = c(0.85175883, 1.3823841)) if(is.list(cm0)) { ## after error {was on Linux+Win, not on M1 mac}: ## NB: This does *not* happen on Macbuilder -- there the result it cf = (5 1) !! stopifnot(all.equal(tol = 1e-8, # seen 4.4376e-9 c0.12, simplify2array(cm0[1:2]))) print(cm0[[3]]) ## FIXME?: Caught Error in eigen(ret, symmetric = TRUE): infinite or missing values in 'x'\n } else if(is.matrix(cm0)) { # when no error happened k <- ncol(cm0) stopifnot(all.equal(tol = 1e-8, rbind(`(Intercept)` = rep(5,k), "x" = rep(1,k)), cm0)) } else warning("not yet encountered this case {and it should not happen}") se3 <- lmrob(y ~ x, data=d9L[[3]], init = r0[[3]], seed=rs2, trace.lev=6) if(doExtras) sfsmisc::mult.fig(length(d9L)) ### Here, all 3 were "0-models" ## now, have 3 *different* cases {with this seed} ## [1] : init fails (-> r14[[1]] above) ## [2] : init s=0, b=(5,1) .. but residuals(),fitted() wrong ## [3] : init s=0, b=(5,1) ..*and* residuals(),fitted() are good cm14 <- lapply(d9L, dorob, meth = "MM", control=lmrob.control("KS2014", seed=rs2), doPl=doExtras) ## now, first is error; for others, coef = (5, 1) are correct: stopifnot(exprs = { sapply(cm14[-1], coef) == c(5,1) sapply(cm14[-1], sigma) == 0 }) m2 <- cm14[[2]] summary(m2) # prints quite nicely; and this is perfect (for scale=0), too: ## {residual != 0 <==> weights = 0}: cbind(rwgt = weights(m2, "rob"), res = residuals(m2), fit = fitted(m2), y = d9L[[2]][,"y"]) sapply(cm14, residuals) ## now, [2] is good; [3] still wrong - FIXME sapply(cm14, fitted) sapply(cm14, weights, "robust")## [2]: 0 1 0 1 1 1 1 0 0; [3]: all 0 ## (unfinished ... do *test* once we've checked platform consistency) summary(warnings()) showProc.time() robustbase/tests/Rsquared.Rout.save0000644000176200001440000002464114437610457017205 0ustar liggesusers R version 4.3.0 Patched (2023-06-03 r84496) -- "Already Tomorrow" Copyright (C) 2023 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. 
Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > require(robustbase) Loading required package: robustbase > > set.seed(17)# reproducibility! > ## to check: > ## - for the empty model > summary(lmrob(Y ~ 0, coleman)) Call: lmrob(formula = Y ~ 0, data = coleman) \--> method = "" Residuals: Min 1Q Median 3Q Max 22.70 32.78 35.86 39.95 43.10 No Coefficients > ## - with and without an intercept in the model > summary(lmrob(Y ~ 1, coleman)) Call: lmrob(formula = Y ~ 1, data = coleman) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -12.8605 -2.7855 0.2945 4.3895 7.5395 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 35.560 1.342 26.5 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.48 Convergence in 9 IRWLS iterations Robustness weights: one weight is ~= 1. The remaining 19 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.5611 0.8986 0.9553 0.9044 0.9918 0.9987 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 5.000e-03 1.819e-12 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) > writeLines(sfm <- capture.output( + summary(lmrob(Y ~ ., coleman)))) # and this must be "identical": Call: lmrob(formula = Y ~ ., data = coleman) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -4.16181 -0.39226 0.01611 0.55619 7.22766 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 30.50232 6.71260 4.544 0.000459 *** salaryP -1.66615 0.43129 -3.863 0.001722 ** fatherWc 0.08425 0.01467 5.741 5.10e-05 *** sstatus 0.66774 0.03385 19.726 1.30e-11 *** teacherSc 1.16778 0.10983 10.632 4.35e-08 *** motherLev -4.13657 0.92084 -4.492 0.000507 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 1.134 Multiple R-squared: 0.9814, Adjusted R-squared: 0.9747 Convergence in 11 IRWLS iterations Robustness weights: observation 18 is an outlier with |weight| = 0 ( < 0.005); The remaining 19 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.1491 0.9412 0.9847 0.9279 0.9947 0.9982 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 5.000e-03 1.569e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) > sfm2 <- capture.output(summary(lmrob(Y ~ ., coleman, model=FALSE, x=FALSE, y=FALSE))) > iCall <- grep("lmrob.*coleman", sfm)# the only line that differs > stopifnot(sfm[-iCall] == sfm2[-iCall]) > ## w/o intercept: > summary(lmrob(Y ~ . - 1, coleman, model=FALSE, x=FALSE, y=FALSE)) Call: lmrob(formula = Y ~ . 
- 1, data = coleman, model = FALSE, x = FALSE, y = FALSE) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -4.86146 -0.59195 -0.04679 0.87826 5.40639 Coefficients: Estimate Std. Error t value Pr(>|t|) salaryP -1.97540 0.45262 -4.364 0.000555 *** fatherWc 0.03388 0.02220 1.526 0.147749 sstatus 0.55922 0.07590 7.367 2.34e-06 *** teacherSc 1.60446 0.19039 8.427 4.51e-07 *** motherLev -0.48903 0.90805 -0.539 0.598097 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 1.344 Multiple R-squared: 0.9987, Adjusted R-squared: 0.9983 Convergence in 14 IRWLS iterations Robustness weights: 3 weights are ~= 1. The remaining 17 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.06899 0.89030 0.95860 0.82750 0.98700 0.99820 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 5.000e-03 1.569e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) > > ## - when prior-weights are included > wts <- c(rep(0.05, 10), rep(2, 10)) > summary(lmrob(Y ~ . - 1, coleman, model=FALSE, x=FALSE, y=FALSE, + weights = wts)) Call: lmrob(formula = Y ~ . - 1, data = coleman, weights = wts, model = FALSE, x = FALSE, y = FALSE) \--> method = "MM" Weighted Residuals: Min 1Q Median 3Q Max -1.01509 -0.04288 0.04892 0.38289 9.00119 Coefficients: Estimate Std. Error t value Pr(>|t|) salaryP -2.68644 0.05871 -45.761 < 2e-16 *** fatherWc 0.04761 0.00721 6.603 8.39e-06 *** sstatus 0.58362 0.00314 185.842 < 2e-16 *** teacherSc 1.77115 0.07918 22.369 6.20e-13 *** motherLev -1.03171 0.34154 -3.021 0.0086 ** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 0.423 Multiple R-squared: 0.9985, Adjusted R-squared: 0.998 Convergence in 5 IRWLS iterations Robustness weights: 3 observations c(12,16,18) are outliers with |weight| = 0 ( < 0.005); 5 weights are ~= 1. The remaining 12 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.5441 0.9257 0.9833 0.9303 0.9956 0.9985 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol zero.tol 1.000e-07 1.000e-10 1.000e-07 1.000e-10 eps.outlier eps.x warn.limit.reject warn.limit.meanrw 5.000e-03 2.219e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) > ## - should work for object with NA in the coefficients, and > ## - should work for object with NA in the observations --> both in ./NAcoef.R > > ## check equality with lm() for classical model > test <- function(formula, data, + items=c("coefficients", "residuals", "df", "scale", + "r.squared", "adj.r.squared", "weights"), + tol = 1e-4, ...) + { + lmrCtrl <- lmrob.control(psi = "hampel", tuning.psi = c(1000, 2000, 3000), + method="SMDM", ...) 
+ sc <- summary(lm (formula, data)) + sr <- summary(lmrob(formula, data, control= lmrCtrl)) + names(sc)[names(sc) == "sigma"] <- "scale" + if(sc$df[1] == 0 && getRversion() <= "3.5.1" && as.numeric(R.version$`svn rev`) < 74993) + ## in the past, lm() returned logical empty matrix + storage.mode(sc$coefficients) <- "double" + ret <- all.equal(sc[items], sr[items], tolerance=tol) + if (!isTRUE(ret)) { + print(sr) + for (i in seq_along(items)) { + print(sc[items[i]]) + print(sr[items[i]]) + } + print(ret) + stop(sprintf("all.equal(sc[items], sr[items], tol.. = %g) are not all TRUE", + tol)) + } + ret + } > > set.seed(101) > > test(Y ~ 0, coleman, c("residuals", "df", "coefficients", + "r.squared", "adj.r.squared"), tol=1e-10) [1] TRUE > test(Y ~ 1, coleman, tol = 2e-4) [1] TRUE > test(Y ~ ., coleman, tol = 4e-4) [1] TRUE > test(Y ~ . - 1, coleman, tol = 4e-4) [1] TRUE > > > proc.time() user system elapsed 0.723 0.152 0.894 robustbase/tests/huber-etc.R0000644000176200001440000000165414222041416015570 0ustar liggesuserslibrary(robustbase) source(system.file("xtraR/styleData.R", package = "robustbase")) # -> smallD list of small datasets str(smallD,, 20) lx <- lapply(smallD, function(x) { m <- mad(x) hx <- if(!is.na(m) && m > 0 && m != Inf) # in all these cases, MASS::huber() fails MASS::huber(x) else list(m=NA, s=NA) hMx <- huberM(x) list(loc = c(median = median(x), huber = hx$m, huberM = hMx$m), scale= c(mad = m, huber = hx$s, huberM = hMx$s)) }) r <- list(mu = sapply(lx, function(x) x$loc), s = sapply(lx, function(x) x$scale)) r cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/psi-rho-etc.R0000644000176200001440000001464712271657124016065 0ustar liggesusersrequire(robustbase) ## see also ./lmrob-psifns.R <<<<<<<< source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) EQ <- function(x,y) all.equal(x,y, tolerance = 1e-13) ## Demonstrate that one of tukeyChi() / tukeyPsi1() is superfluous x <- seq(-4,4, length=201) suppressWarnings(## as tukeyPsi1(), tukeyChi() are deprecated for(c. in c(0.1, 1:2, pi, 100)) { ix <- abs(x) != c. stopifnot(EQ(tukeyChi(x, c.), 6/c.^2* tukeyPsi1(x, c., deriv=-1)), EQ(tukeyChi(x, c., deriv= 1), 6/c.^2* tukeyPsi1(x, c., deriv= 0)), EQ(tukeyChi(x, c., deriv= 2), 6/c.^2* tukeyPsi1(x, c., deriv= 1)), ## Now show equivalence with Mpsi(): EQ(tukeyPsi1(x, c.), Mpsi(x, c., "tukey")), EQ(tukeyPsi1(x, c., d=1), Mpsi(x, c., "tukey", d=1)), EQ(tukeyPsi1(x[ix], c., d=2), Mpsi(x[ix], c., "tukey", d=2)) ) } ) ## Test if default arguments are used h2Psi <- chgDefaults(huberPsi, k = 2) x <- 1:10 stopifnot(h2Psi@ rho(x, k=2) == h2Psi@ rho(x), h2Psi@ psi(x, k=2) == h2Psi@ psi(x), h2Psi@Dpsi(x, k=2) == h2Psi@Dpsi(x), h2Psi@ wgt(x, k=2) == h2Psi@ wgt(x), h2Psi@Dwgt(x, k=2) == h2Psi@Dwgt(x)) ## Test default arguments for E... slots stopifnot(EQ(h2Psi@Erho (), 0.49423127328548), EQ(h2Psi@Epsi2(), 0.920536925636323), EQ(h2Psi@EDpsi(), 0.954499736103642)) stopifnot(EQ(1, huberPsi@psi(1, k = 1e16)), huberPsi@wgt(0.1591319494080224, 0.5 + 1/13) <= 1) ## both used to fail because of numeric instability in pmin2/pmax2 f1 <- function(.) rep.int(1, length(.)) F1 <- function(x, .) rep.int(1, length(x)) ## correct "classical psi": cPs <- psiFunc(rho = function(x,.) x^2 / 2, psi = function(x, .) x, wgt = F1, Dpsi = F1, Erho = function(.) rep.int(1/2, length(.)), Epsi2 = f1, EDpsi = f1, . = Inf) validObject(cPs); cPs ## incorrect dummy psi cP <- psiFunc(rho = F1, psi = F1, wgt = F1, Dpsi = F1, Erho = f1, Epsi2 = f1, EDpsi = f1, . 
= Inf) cP ## Check the autogenerated Dwgt(): x <- seq(0,2, by=1/4) stopifnot(## strict symmetry { including Dwgt(0) == 0 } : huberPsi @Dwgt(-x) == -huberPsi @Dwgt(x), hampelPsi@Dwgt(-x) == -hampelPsi@Dwgt(x), huberPsi @Dwgt(x)[x < 1.345] == 0, hampelPsi@Dwgt(x)[x < 1.487] == 0, EQ(huberPsi @Dwgt(x[x >= 1.5]), c(-0.597777777777778, -0.439183673469388, -0.33625)), EQ(hampelPsi@Dwgt(x[x >= 1.5]), c(-0.660883932259397, -0.485547378802822, -0.371747211895911)) ) .defDwgt <- robustbase:::.defDwgt (ddd <- .defDwgt(psi = function(u, k) pmin.int(k, pmax.int(-k, u)), Dpsi = function(u, k) abs(u) <= k)) stopifnot(is.function(ddd), names(formals(ddd)) == c("u","k"), EQ(ddd(x, 1.345), huberPsi@Dwgt(x))) ## TODO: Provide some functionality of this as a Plot+Check function ## ---- and then call the function for all our psiFunc objects (with different 'k') kk <- c(1.5, 3, 8) psiH.38 <- chgDefaults(hampelPsi, k = kk) c1 <- curve(psiH.38@psi(x), -10, 10, n=512, col=2) abline(h=0, v=0, lty=3, lwd=.5, col="gray25") c2 <- curve(x * psiH.38@wgt(x), add=TRUE, n=512, col=adjustcolor("blue", .5), lwd=2) title("psi_Hampel_(1.5, 3, 8) : psi(x) = x * wgt(x)") axis(1, at=kk, expression(k[1], k[2], k[3]), pos=0) axis(2, at=kk[1], quote(k[1]), pos=0, las=1) stopifnot(all.equal(c1,c2, tolerance= 1e-15)) r1 <- curve(psiH.38@rho(x), -10, 10, col=2, main = quote(rho(x) == integral(phi(t) * dt, 0, x))) axis(1, at=kk, expression(k[1], k[2], k[3]), pos=0) curve(psiH.38@psi(x), add=TRUE, n=512, col=adjustcolor("blue", .5), lwd=2) abline(h=0, v=0, lty=3, lwd=.5, col="gray25") ## check rho(x) = \int_0^x psi(x) dx {slightly *more* than rho' = psi !} rhoH.38.int <- function(x) integrate(function(u) psiH.38@psi(u), 0, x, rel.tol=1e-10)$value r2 <- curve(sapply(x, rhoH.38.int), add = TRUE, lwd=4, col=adjustcolor("red", 1/4)) ## numerical integration == "formula" : stopifnot(all.equal(r1,r2, tolerance=1e-10)) curve(psiH.38@Dpsi(x), -10, 10, n=512, col=2, main = quote(psi*minute(x))) abline(h=0, v=0, lty=3, lwd=.5, col="gray25") ## check rho'(x) = phi(x) etc {TODO: for all our psiFun.} head(xx <- seq(-10, 10, length=1024)) FrhoH.38 <- splinefun(xx, rho.x <- psiH.38@rho (xx)) FpsiH.38 <- splinefun(xx, psi.x <- psiH.38@psi (xx)) F1psH.38 <- splinefun(xx, Dps.x <- psiH.38@Dpsi(xx)) curve(FpsiH.38(x, deriv=1), -10,10, n=512) curve(F1psH.38, add=TRUE, col=4, n=512) stopifnot(all.equal(FpsiH.38(xx, deriv=1), Dps.x, tolerance = 0.02))# not better because of discontinuities curve(FrhoH.38(x, deriv=1), -10,10, n=512) curve(FpsiH.38, add=TRUE, col=4, n=512) stopifnot(all.equal(FrhoH.38(xx, deriv=1), psi.x, tolerance = 1e-4)) E.norm <- function(FUN, tol=1e-12, ...) { integrate(function(x) FUN(x) * dnorm(x), -Inf, Inf, rel.tol=tol, ...)$value } ##' asymptotic efficiency -- both integrate + "formula"(@Epsi, @EDpsi) version aeff.P <- function(psiF, k, ...) { stopifnot(is(psiF, "psi_func")) if(!missing(k)) psiF <- chgDefaults(psiF, k = k) ## E[ psi'(X) ] ^2 / E[ psi(X) ^ 2 ] : c(int = E.norm(psiF@Dpsi, ...)^2 / E.norm(function(x) psiF@psi(x)^2, ...), form= psiF@EDpsi()^2 / psiF@Epsi2()) } ## Breakdown Point --- for redescenders only, ## both integrate + "formula"(@Erho) version bp.P <- function(psiF, k, ...) 
{ stopifnot(is(psiF, "psi_func")) if(!missing(k)) psiF <- chgDefaults(psiF, k = k) if(!is.finite( rhoInf <- psiF@rho(Inf) )) stop("rho(Inf) is not finite: ", rhoInf) integ <- function(x) psiF@rho(x) c(int = E.norm(integ, ...), form= psiF@Erho()) / rhoInf } ## Print & Check the result of aeff.P() or bp.P() chkP <- function(rp, tol = 1e-9) { print(rp) ae <- all.equal(rp[[1]], rp[[2]], tolerance=tol) if(isTRUE(ae)) invisible(rp) else stop(ae) } chkP(aeff.P(huberPsi)) chkP(aeff.P(huberPsi, k = 1.5)) chkP(aeff.P(huberPsi, k = 2)) chkP(aeff.P(huberPsi, k = 2.5)) chkP(aeff.P(hampelPsi)) chkP(aeff.P(hampelPsi, k = c(1.5, 3, 8))) chkP(aeff.P(hampelPsi, k = c(2, 4, 8), tol=1e-10),# fails with tol=1e-11 tol = 1e-4) ## Now works too: chkP(bp.P(hampelPsi)) chkP(bp.P(hampelPsi, k = c(1.5, 3, 8))) chkP(bp.P(hampelPsi, k = c(2, 4, 8))) ## test derivatives (adapted from ./lmrob-psifns.R) head(x. <- seq(-5, 10, length=1501)) ## [separate lines, for interactive "play": ] stopifnot(chkPsiDeriv(plot(huberPsi, x.))) ## ToDo: improve accuracy of derivative check stopifnot(chkPsiDeriv(plot(hampelPsi, x.), tol=c(1e-4, 1e-1))) robustbase/tests/Qn-Sn-plots.R0000644000176200001440000000524114055507253016013 0ustar liggesuserslibrary(robustbase) ### Back-compatibility check of consistency & finite sample correction for Qn() : set.seed(153) x <- sort(c(rnorm(80), rt(20, df = 1))) ix <- c(27, 57, 15, 1, 26, 13, 23, 70, 9, 54, 6, 12, 8, 80, 11, 69, 41, 53, 10, 37, 50, 21, 48, 51, 71, 17, 30, 16, 45, 28, 55, 5, 59, 77, 61, 40, 63, 42, 72, 66) QnA. <- c(0, 0.72307295, 1.2926498, 1.596857, 1.0979815, 0.84209457, 1.0719335, 0.88620416, 1.0905118, 0.99056842, 1.2229216, 1.0626517, 1.1738174, 1.1433873, 1.2071829, 1.1562513, 1.2182886, 1.1587793, 1.1585524, 1.1555462, 1.1376428, 1.0532134, 1.0447343, 1.0200998, 1.0495224, 1.0120569, 1.0094172, 0.9749928, 0.9530458, 0.92767184, 0.90922667, 0.98987601, 0.98223857, 1.0053697, 0.98792848, 0.951908, 0.92226488, 0.92312857, 0.92313406, 0.92733413) QnAx <- sapply(seq_along(ix), function(n) Qn(x[ix[1:n]])) stopifnot( all.equal(QnA., QnAx) ) ### -------------- Plots ----------------------------------------------- if(!dev.interactive(orNone=TRUE)) pdf("Qn-Sn-plots.pdf") plot(QnA., type="o", main="Qn()") abline(h = 1, lty=2) n <- 1:50 (qnn <- sapply(n, function(n)Qn(1:n, const=1))) plot(n, qnn, type = 'b', col = 2, ylab = "Qn", main = "Qn(1:n) [unscaled]") (snn <- sapply(n, function(n)Sn(1:n, const=1))) plot(n, snn, type = 'b', col = 2, ylab = "Sn", main = "Sn(1:n) [unscaled]") matplot(n, cbind(qnn, snn), type = 'b', ylab = "Qn & Sn", main = "Qn(1:n) & Sn(1:n) [unscaled]") legend("topleft", c("Qn", "Sn"), col=1:2, lty=1:2, bty="n", pch=paste(1:2)) (sdn <- c(1, sapply(n[-1], function(n)sd(1:n)/n))) ## sd(1) => NA for(Sample in c(function(n) ppoints(n), function(n) qnorm(ppoints(n)))) { ##mult.fig(2) : op <- par(mfrow=c(2,1), mgp = c(1.5,.6,0), mar = .1 + c(4,4,2,1)) for(N in c(50, 200)) { n <- 1:N sdn <- c(1, sapply(n[-1], function(m)sd(Sample(m)))) r <- cbind(Qn = sapply(n, function(m)Qn(Sample(m))), Sn = sapply(n, function(m)Sn(Sample(m)))) / sdn matplot(n, r, type = 'b', col = 2:3, lty = 1, ylab = "Qn & Sn", main = "Qn(Sample(n)) & Sn(..) [consistently scaled]") legend(.85*N, 0.4, c("Qn()", "Sn()"), col = 2:3, lty = 1, pch = paste(1:2)) abline(h=1, col = "gray", lty = 2) } par(op) ## Hmm, the above does not look 100% consistent to *my* eyes... 
## Investigate: matplot(n, r, ylim = c(0.9, 1.1), type = 'b', col = 2:3, lty = 1) abline(h=1, col = "gray", lty = 2) matplot(n, r^2, ylim = c(0.7, 1.3), type = 'b', col = 2:3, lty = 1) abline(h=1, col = "gray", lty = 2) } rownames(r) <- paste(n) r robustbase/tests/LTS-specials.R0000644000176200001440000000351113774561564016175 0ustar liggesusers#### Test special cases for ltsReg() library(robustbase) ## Platform - and other such info -- so we find it in old saved outputs .libPaths() SysI <- Sys.info() structure(Sys.info()[c(4,5,1:3)], class="simple.list") library(lib.loc = .libPaths()[1]) # the "R CMD check specific tmp-library" sessionInfo() c(robustbase = packageDescription("robustbase")$Built, DEoptimR = packageDescription("DEoptimR")$Built) if(SysI[["sysname"]] == "Linux" && require("sfsmisc")) local({ nn <- names(.Sc <- sfsmisc::Sys.cpuinfo()) nn <- names(.Sc <- .Sc[nn != "flags"]) print(.Sc[grep("\\.[0-9]$", nn, invert=TRUE)]) }) ### 1) p = 1 ---------------------------------------------------- set.seed(1) x <- c(rnorm(50),100, 1e10) (r1 <- ltsReg(x ~ 1)) # failed in Valentin's 1.0-3 (pre-version) summary(r1) (r1. <- ltsReg(y = x)) i1 <- 15:17; ii <- (1:20)[-i1] UN <- function(lis) lapply(lis, unname) dimnames(r1.$X)[1] <- dimnames(r1$X)[1] stopifnot(all.equal( r1[ii], r1.[ii], tolerance= 1e-15), all.equal(UN(r1[i1]), UN(r1.[i1]), tolerance= 1e-15)) ## intercept=FALSE, p > 1 -- coefficients were switched once n <- 100; theta <- c(x=10, x2=40) set.seed(7) X <- cbind(x = rt(n, 4), x2 = rnorm(n)) dat <- data.frame(X, y = X %*% theta + rt(n, df=3)/10) summary(M <- ltsReg(y ~ . -1, data = dat)) stopifnot(all.equal(coef(M), theta, tolerance = 1e-3)) ## with alpha = 1 (r1.1 <- ltsReg(x ~ 1, alpha = 1)) summary(r1.1) ### 1b) p = 1, constant scale (rc <- ltsReg(y = rep(1,12))) str(rc) summary(rc) ## with alpha = 1 (rc1 <- ltsReg(y = rep(1,12), alpha = 1)) summary(rc1) stopifnot(residuals(rc) == 0, all.equal(unname(coef(rc )), 1), residuals(rc1) == 0, all.equal(unname(coef(rc1)), 1)) ### 2) alpha = 1 : classical estimates --- for general cases -------- cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/large-values.R0000644000176200001440000000503114222041416016272 0ustar liggesusers### Have had cases where differences between large numbers lose precision, or even give Inf, ### which lead to NA require(robustbase) source(system.file("xtraR/styleData.R", package = "robustbase"))# -> smallD, mkMx() stopifnot(exprs = { all.equal(scaleTau2(c(-4:4, 10000), consistency=FALSE), (scaleTau2(c(-4:4, 1e300), consistency=FALSE) -> sT), # <- gave NaN, now fine ! tol = 1e-15) # even 0 (exact equality; Linux 64b) all.equal(3.41103800034854, sT, tol = 1e-14) # seen 6.5e-16 }) exL <- c( list( xNA = c(NA, 1:6) , xMe9 = c(-4:4, 1e9) , xM = c(-4:4, .Machine$double.xmax) , xI = c(-4:4, Inf) , IxI = c(-Inf, -4:4, Inf) , IxI2 = c(-Inf, -4:4, Inf,Inf)) , mkMx(c(1e6, 1e9, 1e12, 1e14, 1e16, 1e20, 1e40, .Machine$double.xmax, Inf)) ) madL <- vapply(exL, mad, pi) ## Initially, scaleTau2() "works" but gives NaN everywhere -- now fine! 
sT2.L <- vapply(exL, scaleTau2, FUN.VALUE=1, sigma0 = 1, consistency=FALSE) sT2.i2.L <- vapply(exL, scaleTau2, FUN.VALUE=1, sigma0 = 1, consistency=FALSE, iter = 2) sT2.i5.L <- vapply(exL, scaleTau2, FUN.VALUE=1, sigma0 = 1, consistency=FALSE, iter = 5) cbind(madL, sT2.L) stopifnot(exprs = { is.na(madL [1]) is.na(sT2.L[1]) 2.3 < sT2.L[-1] sT2.L[-1] < 2.71 }) xI <- exL$xI stopifnot(exprs = { mad(exL$xI, constant = 1) == 2.5 }) stopifnot(exprs = { all.equal(3.5471741782, scaleTau2(xI)) # gave NaN ## These even gave Error in ..... : NA/NaN/Inf in foreign function call (arg 1) all.equal(3.5778, Sn(xI)) all.equal(3.1961829592, Qn(xI)) }) ## From example(mc) {by MM} : ## Susceptibility of the current algorithm to large outliers : dX10 <- function(X) c(1:5,7,10,15,25, X) # generate skewed size-10 with 'X' (Xs <- c(10,20,30, 60, 10^(2:10), 1000^(4:19), 1e6^c(10:20,10*(3:5)), Inf)) mc10x <- vapply(Xs, function(x) mc(dX10(x)), 1) ## now fixed: stopifnot(all.equal(c(4,6, rep(7,42))/12, mc10x)) plot(Xs, mc10x, type="b", main = "mc( c(1:5,7,10,15,25, X) )", xlab="X", log="x") ## so, Inf does work, indeed for mc() mcOld <- function(x, ..., doScale=TRUE) mc(x, doScale=doScale, c.huberize=Inf, ...) (x10I <- dX10(Inf)) set.seed(2020-12-04)# rlnorm(.) summary(xlN <- rlnorm(100)) xII <- c(-Inf, xlN, Inf) stopifnot(exprs = { all.equal(0.5, print(mcOld(x10I))) all.equal(7/12, print(mc (x10I, doScale=TRUE ))) # was 0.5 before huberization all.equal(7/12, print(mc (x10I, doScale=FALSE))) mcOld(xII) == 0 all.equal(0.3646680319, mc(xII)) }) robustbase/tests/lmrob-ex12.R0000644000176200001440000001677314555212477015632 0ustar liggesusers library(robustbase) set.seed(1) # since now .Random.seed is used by default! ## EX 1 data(coleman) ## "empty model" (not really a lot of sense) (m0 <- lmrob(Y ~ 0, data = coleman)) summary(m0) stopifnot(is.numeric(coef(m0)), length(coef(m0)) == 0, residuals(m0) == coleman[,"Y"]) ## "Intercept" only: robust mean (m1 <- lmrob(Y ~ 1, data = coleman)) summary(m1) stopifnot(all.equal(coef(m1), c("(Intercept)" = 35.56048875388), tol = 1e-11)) (mC <- lmrob(Y ~ ., data = coleman, control = lmrob.control(refine.tol = 1e-8, rel.tol = 1e-9))) summary(mC) ## Values will change once we use R's random number generator ! stopifnot( all.equal(unname(coef(mC)), c(30.50232, -1.666147, 0.08425381, 0.6677366, 1.167777, -4.136569), tolerance = 2e-7)# 6.112 e-8 (32-b) ) dput(signif(unname(coef(mC)), 7)) ## 64b(0.2-0): c(30.50232, -1.666147, 0.08425381, 0.6677366, 1.167777, -4.136569) ## 32b(0.2-0): "exactly" same ! 
## Full precision: dput(unname(coef(mC))) ## 2012-06-04: ## 32-bit:c(30.5023184450149, -1.66614687548007, 0.0842538074792178, 0.667736590070332, 1.16777744029117, -4.13656885405815) ## 64-bit:c(30.5023184450148, -1.66614687548008, 0.0842538074792178, 0.667736590070332, 1.16777744029117, -4.13656885405814) ## ## 32-bit:c(30.5023183940104, -1.66614687550933, 0.0842538074635567, 0.667736589938547, 1.16777744089398, -4.13656884777543) ## 64-bit:c(30.5023184150851, -1.66614687537736, 0.0842538074722959, 0.667736589980183, 1.16777744061092, -4.1365688503035) str(mC) ## EX 2 gen <- function(n,p, n0, y0, x0, beta = rep(1, p)) { stopifnot(n >= 1, p >= 1, n0 >= 0, length(beta) == p) x <- matrix(rnorm(n*p),n,p) # iid x's y <- x %*% beta + rnorm(n) xc <- matrix(0,n0,p) xc[,1] <- x0 xc <- xc + 0.1*matrix(rnorm(n0*p),n0,p) x[1:n0,] <- xc y[1:n0] <- y0 + .001*rnorm(n0) list(x=x, y=y) } ## generate --a sample of n observations with p variables ## and 10% of outliers near (x1,y) = (10,10) n <- 500 ; n0 <- n %/% 10 p <- 7 ## p = 20 is more impressive but too slow for "standard test" set.seed(17) a <- gen(n=n, p=p, n0= n0, y0=10, x0=10) plot(a$x[,1], a$y, col = c(rep(2, n0), rep(1, n-n0))) system.time( m1 <- lmrob(y~x, data = a, control = lmrob.control(compute.rd = TRUE, trace.lev=4))) plot(m1, ask=FALSE) ##-> currently 5 plots; MM:I don't like #3 (Response vs fitted) S1 <- m1$init.S resS1 <- drop(a$y - model.matrix(m1, data=a) %*% coef(S1)) all.equal(S1$residuals, resS1)## hmm, but still close ## "Mean relative difference: 2.655326e-07" ctr.t3 <- lmrob.control(trace.lev = 3) (mS <- lmrob.S(x=a$x, y=residuals(S1), only.scale=TRUE, control = ctr.t3)) all.equal(S1$scale, mS) ## "Mean relative difference: 0.003015849" -- too different, why? (mS. <- lmrob.S(x=a$x, y=resS1, only.scale=TRUE, control = ctr.t3)) all.equal(mS, mS., tol=0)# 2.401 e -10 -- ok/perfect stopifnot(all.equal(mS, mS.), all.equal(mS, S1$scale, tol = 0.008)) # at least that ## don't compute robust distances --> faster by factor of two: system.time(m2 <- lmrob(y~x, data = a, control = lmrob.control(compute.rd = FALSE))) ## ==> half of the CPU time is spent in covMcd()! (sm2 <- summary(m2)) l1 <- lm(y~x, data = a) cbind(robust = coef(sm2)[,1:2], lm = coef(summary(l1))[,1:2]) m2.S1 <- with(a, lmrob.S(cbind(1,x), y, trace.lev = 2, ## trace.lev = 2 : quite a bit of output control= lmrob.control(seed = .Random.seed, nRes = 80, k.max = 20, refine.tol = 1e-4))) S.ctrl <- lmrob.control(seed = .Random.seed,## << keeps .Random.seed unchanged nResample = 1000, best.r.s = 15, refine.tol = 1e-9) m2.S <- with(a, lmrob.S(cbind(1,x), y, control = S.ctrl, trace.lev = 1)) str(m2.S) ##--- Now use n > 2000 --> so we use C internal fast_s_large_n(...) n <- 2500 ; n0 <- n %/% 10 a2 <- gen(n=n, p = 3, n0= n0, y0=10, x0=10) plot(a2$x[,1], a2$y, col = c(rep(2, n0), rep(1, n-n0))) rs <- .Random.seed system.time( m3 <- lmrob(y~x, data = a2, trace.lev=2) ) m3 nrs <- .Random.seed # <-- to check that using 'seed' keeps .Random.seed system.time( m4 <- lmrob(y~x, data = a2, seed = rs, compute.rd = FALSE)) (sm4 <- summary(m4)) ## random seed must be the same because we used 'seed = *' : stopifnot(nrs == .Random.seed, identical(coef(m3), coef(m4))) dput(signif(cf <- unname(coef(m3)), 7)) ## 2012-06-04:c(-0.05108914, 1.005971, 1.003201, 0.9833263) - 32 AND 64 bit ## ## 0.2-0: c(0.007446546, 1.000712, 1.027921, 0.9896527) ## 0.2-1: c(0.03148659, 0.9980933, 1.016364, 1.03243) ## both for 32 and 64 bit dput(signif(100 * (sd <- unname(coef(sm4)[, "Std. 
Error"])), 7)) ## 2012-06-04:c(2.213815, 0.2864678, 2.202318, 2.180886) - 32 AND 64 bit ## ## 0.2-0: c(2.219388, 0.274644, 2.196982, 2.26253) ## 0.2-1: c(2.194914, 0.2737579, 2.371728, 2.206261) ## both for 32 and 64 bit stopifnot( all.equal(cf, c(-0.05108914, 1.00597115, 1.00320052, 0.98332632), tolerance= 7e-7) , # ... e-7 needed on 64b all.equal(100*sd,c(2.2138147, 0.2864678, 2.2023182, 2.1808862),tolerance= 7e-7) ) # 1.334 e-7 needed on 64b cat('Time elapsed: ', proc.time(),'\n') # "stats" ## rm(a,m1, m2, m3, m4, sm2, l1) ## Small examples from R-SIG-robust ## First example from René Locher : dat1 <- data.frame(lconc= log(c(21.8, 23.7, 12.2, 38.5, 21, 38.9)), dist = c( 100, 180, 280, 30, 220, 6)) m5 <- lmrob(lconc ~ dist, data = dat1) ## Warning messages: ## ... S refinements did not converge (to tol=1e-07) in 200 iterations ## " " " m5$init.S$converged # FALSE m5. <- lmrob(lconc ~ dist, data = dat1, control = lmrob.control(refine.tol = 1e-5)) m5.$init.S$converged # TRUE ## gives TRUE as the IRWLS iterations after the lmrob.S() have converged. ## 2nd example from René Locher , 6 Jun 2007 dat2 <- data.frame(lconc=log(c(29.5,40.1,21.1,25.3,27.3,25.2,26.9,19.1,16.4)), dist = c(520, 1480,1780, 740, 540,1050,1100,1640,1860)) res2 <- lmrob(lconc~dist, data = dat2) ## Used to give Warning messages: ## 1: rwls(): not converged in 1000 lambda iterations ## ... ## 4: rwls(): ............ res2 <- lmrob(lconc~dist, data = dat2, trace.lev = 3) ## ------------- summary(res2) stopifnot(dim(model.matrix(res2)) == c(9,2)) ## Check predict(): dd <- seq(300, 2000, by = 50) with(dat2, plot(dist, lconc, pch=20, cex=2, xlim = range(dd))) new.d <- data.frame(dist=dd) fit.dd <- predict(res2, new.d) lines(dd, fit.dd, col=2, type="o") predict(res2, new.d, se=TRUE)$se.fit matlines(dd, predict(res2, new.d, interval="confidence")[, 2:3], col=3) ## Check handling of X of not full rank test <- function(n, ...) { X <- cbind(rep_len(1:3, n), rnorm(n), rnorm(n), NA) y <- rnorm(n) X[,4] <- X[,2] + X[,3] X <- data.frame(X) X$X1 <- factor(X$X1) fm <- tryCatch(suppressWarnings(lmrob(y ~ ., X, ...)), error=identity) stopifnot(inherits(fm, "lmrob")) } set.seed(0) test(12) ## fast_S() test(2500) ## fast_S_large_n() test(200, trace.lev = TRUE) ## Check a case, where cov() matrix needs "posdefify": coleman16 <- coleman[ -c(2, 7, 16, 19),] (m16 <- lmrob(Y ~ ., data = coleman16, tuning.psi = 3.44, trace.lev = TRUE)) ## failed in 0.9_0 ctrl <- lmrob.control() tools::assertWarning(verbose = TRUE, lmrob(Y ~ ., data = coleman, setting = "KS2011", control = ctrl) ) ## perfect fit ex. 
from Thomas Wang, Jan.26, 2024: x <- c(8, 16, 4, 24) y <- c(3328, 6656, 1664, 9984) tools::assertWarning(verbose = TRUE, fmS <- lmrob.S(x, y, ctrl)# gave a bad error in robustbase 0.99-{0,1} ) stopifnot(all.equal(416, fmS$coeff, tolerance = 1e-15), fmS$scale == 0, fmS$residuals == 0) cat('Time elapsed: ', proc.time(),'\n') # "stats" robustbase/tests/OGK-ex.R0000644000176200001440000000300012271657124014742 0ustar liggesuserslibrary(robustbase) ## minimal testing only data(ruspini, package = "cluster") rub1 <- covOGK(ruspini, 1, scaleTau2, covGK, hard.rejection, consistency=FALSE) rub2 <- covOGK(ruspini, 2, scaleTau2, covGK, hard.rejection, consistency=FALSE) AE <- function(x,y) all.equal(x,y, tolerance = 2e-15) ## The following test is already fulfilled by Kjell Konis' original code: stopifnot(AE(c(rub1$wcov)[c(1,3:4)], c(917.99893333333, 94.9232, 2340.319288888888)), all.equal(rub1$wcov, rub2$wcov, tolerance=0) , AE(c(rub1$cov)[c(1,3:4)], c(923.5774514441657, 91.5385216376565, 2342.4556232436971)) , AE(c(rub2$cov)[c(1,3:4)], c(927.2465953711782, 91.8009184487779, 2346.5790105548940)) ) data(milk) cM1 <- covOGK(milk, 1, sigmamu = scaleTau2, weight.fn = hard.rejection) cM2 <- covOGK(milk, 2, sigmamu = scaleTau2, weight.fn = hard.rejection) symnum(cov2cor(cM1 $cov)) symnum(cov2cor(cM2 $cov)) symnum(cov2cor(cM1 $wcov)) symnum(cov2cor(cM2 $wcov)) cMQn <- covOGK(milk, sigmamu = s_Qn, weight.fn = hard.rejection) cMSn <- covOGK(milk, sigmamu = s_Sn, weight.fn = hard.rejection) cMiqr <- covOGK(milk, sigmamu = s_IQR, weight.fn = hard.rejection) cMmad <- covOGK(milk, sigmamu = s_mad, weight.fn = hard.rejection) as.dist(round(cov2cor(cMQn$wcov), 3)) as.dist(round(cov2cor(cMSn$wcov), 3)) as.dist(round(cov2cor(cMiqr$wcov), 3)) as.dist(round(cov2cor(cMmad$wcov), 3)) cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/small-sample.R0000644000176200001440000000216111757675047016322 0ustar liggesuserslibrary(robustbase) ## testing functions: source(system.file("xtraR/ex-funs.R", package = "robustbase")) set.seed(152) Nmax <- 12 nn <- length(nset <- c(2:Nmax, 20, 50))## NOTA BENE: n == 1 etc are NOT YET TREATED! Sim <- 2^9 # = 512 sn <- qn <- numeric(Sim) cpu <- numeric(nn) names(cpu) <- as.character(nset) for(n in nset) { nS <- Sim ## if(n < 20) Sim else round(10*Sim/n) cat("\nn = ",n,"\n------\nno.Sim. = ",nS,"\n") cpu[as.character(n)] <- system.time(for(i in 1:nS) { x <- rnorm(n) sn[i] <- Sn0R(x) qn[i] <- Qn0R(x) Sn.x <- Sn(x, const = 1) Qn.x <- Qn(x, const = 1) if(!is.all.equal(Sn.x, sn[i], tol = 1e-5)) cat("i=",i," Sn() != Sn0R(): ", Sn.x, "!=", sn[i],"\n") if(!is.all.equal(Qn.x, qn[i], tol = 1e-5)) cat("i=",i," Qn() != Qn0R(): ", Qn.x, "!=", qn[i],"\n") })[1] cat("Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...):\n") print(c(mean(sn), sd(sn)/sqrt(nS), quantile(sn, p = (1:3)/4))) print(c(mean(qn), sd(qn)/sqrt(nS), quantile(qn, p = (1:3)/4))) } rbind("Time (CPU) used:" = summary(cpu)) robustbase/src/0000755000176200001440000000000014555212627013223 5ustar liggesusersrobustbase/src/qn_sn.c0000644000176200001440000003535214435623207014512 0ustar liggesusers/* * Copyright (C) 2005--2023 Martin Maechler, ETH Zurich * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* This is a merge of the C version of original files qn.f and sn.f, * translated by f2c (version 20010821). ==== ==== * and then by f2c-clean,v 1.9 2000/01/13 13:46:53 * and further clean-edited manually by Martin Maechler. * * Further added interface functions to be called via .C() from R or S-plus * Note that Peter Rousseeuw has explicitely given permission to * use his code under the GPL for the R project. */ /* Original comments by the authors of the Fortran original code, * (merged for Qn & Sn in one file by M.M.): This file contains fortran functions for two new robust estimators of scale denoted as Qn and Sn, decribed in Rousseeuw and Croux (1993). These estimators have a high breakdown point and a bounded influence function. The implementation given here is very fast (running in O(n logn) time) and needs little storage space. Rousseeuw, P.J. and Croux, C. (1993) Alternatives to the Median Absolute Deviation", Journal of the American Statistical Association, Vol. 88, 1273-1283. For both estimators, implementations in the pascal language can be obtained from the original authors. This software may be used and copied freely for scientific and/or non-commercial purposes, provided reference is made to the abovementioned paper. Note by MM: We have explicit permission from P.Rousseeuw to licence it under the GNU Public Licence. See also ../inst/Copyrights */ #include /* ^^^^^^^^^^ is supposedly more common and standard than * #include * or #include */ /* --> int64_t ; if people don't have the above, they can forget about it.. */ /* #include "int64.h" */ #include /* -> and much more */ /* Interface routines to be called via .C() : */ #include "robustbase.h" /* ----------------- Further Declarations ------------------------------ */ /* sn0() and qn0() --- but also mc_C() in ./mc.c * ----- ---- ------ use pull(a,n,k): finds the k-th order statistic of an array a[] of length n (preserving a[]) */ /* whimed_i(a,iw,n): finds the weighted high median of an array a[] of length n, with positive int weights iw[] (using auxiliary arrays acand[], a_srt[] & iw_cand[] all of length n). 
*/ /* qn0() uses (and for C API:) */ /* Main routines for C API */ double qn(double *x, int n, int h, int finite_corr); double sn(double *x, int n, int is_sorted, int finite_corr); /* these have no extra factors (no consistency factor & finite_corr): */ void qn0(const double x[], int n, const int64_t k[], int len_k, /* ==> */ double *res); double sn0(double *x, int n, int is_sorted, double *a2); /* ----------- Implementations -----------------------------------*/ // === called from R ( ../R/qnsn.R ) via .C() : void Qn0(double *x, Sint *n, double *k, Sint *len_k, double *res) { int l_k = (int)*len_k; // "hack" as R / .C() have no int_64 : copy k[] to int64 ik[]: int64_t *ik = (int64_t *) R_alloc(l_k, sizeof(int64_t)); for(int i=0; i < l_k; i++) ik[i] = (int64_t) k[i]; qn0(x, (int)*n, ik, l_k, res); } void Sn0(double *x, Sint *n, Sint *is_sorted, double *res, double *a2) { char *vmax = vmaxget(); *res = sn0(x, (int)*n, (int)*is_sorted, a2); #ifdef DEBUG_Sno REprintf("Sn0(* -> res=%g)\n", *res); #endif vmaxset(vmax); } void qn0(const double x[], int n, const int64_t k[], int len_k, /* ==> */ double *res) { /*-------------------------------------------------------------------- Efficient algorithm for the scale estimator: Q*_n = { |x_i - x_j|; i= 2) */ double *y = (double *)R_alloc(n, sizeof(double)); double *work = (double *)R_alloc(n, sizeof(double)); double *a_srt = (double *)R_alloc(n, sizeof(double)); double *a_cand = (double *)R_alloc(n, sizeof(double)); int *left = (int *)R_alloc(n, sizeof(int)); int *right = (int *)R_alloc(n, sizeof(int)); int *p = (int *)R_alloc(n, sizeof(int)); int *q = (int *)R_alloc(n, sizeof(int)); int *weight = (int *)R_alloc(n, sizeof(int)); const int64_t nn2 = (int64_t) n * (n + 1) / 2, // = choose(n+1, 2) n2 = (int64_t) n * n, // k >= k_L ==> right[] = n (and smaller k can start with smaller right[], hence more efficiently) /* NB: The correct k_L seems hard to find, till only from largish simulations and "regression fit": * 2021, Jan. 3 -- 19 : k_L = 2 + ((int64_t)(n-1) * (n-2))/2; * 2021, Jan. 19 -- 27, 11:13: k_L = ((int64_t)(n-1) * (n-2))/2; * 2021, Jan. 27 -- ... : from .../Pkg-ex/robustbase/Qn-multi-k-debugging.R bnd1 <- function(n) 5 - 1.75*(n %% 2) + (0.3939 - 0.0067*(n %% 2)) * n*(n-1) */ k_L = (int64_t) (5 - 1.75*(n % 2) + (0.3939 - 0.0067*(n % 2)) * ((int64_t) n)*(n-1)); int h = n / 2 + 1; // really use, only to set right[] below ? for (int i = 0; i < n; ++i) y[i] = x[i]; R_qsort(y, 1, n); /* y := sort(x) */ #ifdef DEBUG_qn REprintf("qn0(x, n=%2d, .. |k|=%d): \n", n, len_k); #endif // k = (int64_t)h * (h - 1) / 2; for(int i_k=0; i_k < len_k; i_k++) { // compute k'th quantile k' := k[i_k] ------------------ /* Following should be `long long int' : they can be of order n^2 */ int64_t nl = nn2, nr = n2, knew = k[i_k] + nl;/* = k + (n+1 \over 2) */ #ifdef DEBUG_qn REprintf(" qn0(x, ..): nl,nr= %d, %d; k[%d] = %5d :\n", nl,nr, i_k, k[i_k]); #endif /* L200: */ Rboolean found = FALSE; double trial = R_NaReal;/* -Wall */ int i, j; for (int i = 0; i < n; ++i) left [i] = n - i + 1; if(k[i_k] >= k_L) { // for these large "quantiles", need "large" right[] boundary for (int i = 0; i < n; ++i) right[i] = n; } else { for (int i = 0; i < n; ++i) right[i] = (i <= h) ? n : n - (i - h); /* the n - (i-h) is from the paper (Cr. + Ro. '92 "Time-efficient"); original code had 'n' */ } while(!found && nr - nl > n) { j = 0; /* Truncation to float : try to make sure that the same values are got later (guard bits !) 
*/ for (i = 1; i < n; ++i) { if (left[i] <= right[i]) { weight[j] = right[i] - left[i] + 1; int jh = left[i] + weight[j] / 2; work[j] = (float)(y[i] - y[n - jh]); ++j; } } trial = whimed_i(work, weight, j, a_cand, a_srt, /*iw_cand*/ p); #ifdef DEBUG_qn REprintf(" ..!found: whimed("); # ifdef DEBUG_long REprintf("wrk=c("); for(i=0; i < j; i++) REprintf("%s%g", (i>0)? ", " : "", work[i]); REprintf("),\n wgt=c("); for(i=0; i < j; i++) REprintf("%s%d", (i>0)? ", " : "", weight[i]); REprintf("), j= %3d) -> trial= %7g\n", j, trial); # else REprintf("j=%3d) -> trial= %g:", j, trial); # endif #endif j = 0; for (i = n - 1; i >= 0; --i) { while (j < n && ((float)(y[i] - y[n - j - 1])) < trial) ++j; p[i] = j; } #ifdef DEBUG_qn REprintf(" f_1: j=%2d", j); #endif j = n + 1; for (i = 0; i < n; ++i) { while ((float)(y[i] - y[n - j + 1]) > trial) --j; q[i] = j; } int64_t sump = 0, sumq = 0; for (i = 0; i < n; ++i) { sump += p[i]; sumq += q[i] - 1; } #ifdef DEBUG_qn REprintf(" f_2 -> j=%2d, sump|q= %lld,%lld; ", j, sump,sumq); #endif if (knew <= sump) { for (i = 0; i < n; ++i) right[i] = p[i]; nr = sump; #ifdef DEBUG_qn REprintf("knew <= sump =: nr , new right[]\n"); #endif } else if (knew > sumq) { for (i = 0; i < n; ++i) left[i] = q[i]; nl = sumq; #ifdef DEBUG_qn REprintf("knew > sumq =: nl , new left[]\n"); #endif } else { /* sump < knew <= sumq */ found = TRUE; #ifdef DEBUG_qn REprintf("sump < knew <= sumq ---> FOUND\n"); #endif } } /* while */ if (found) res[i_k] = trial; else { #ifdef DEBUG_qn REprintf(".. not fnd -> new work[]"); #endif j = 0; for (i = 1; i < n; ++i) { for (int jj = left[i]; jj <= right[i]; ++jj) { work[j] = y[i] - y[n - jj]; j++; }/* j will be = sum_{i=2}^n (right[i] - left[i] + 1)_{+} */ } #ifdef DEBUG_qn REprintf(" of length %d; k' = knew-nl = %d\n", j, knew-nl); #endif /* return pull(work, j - 1, knew - nl) : */ knew -= (nl + 1); /* -1: 0-indexing */ if(knew > j-1) { // see this happening when the quantile number k[i_k] is close to the right end! knew = j-1; #ifdef DEBUG_qn REprintf("knew >= j should never happen, setting it to j-1=%d\n", knew); #endif } else if(knew < 0) { knew = 0; #ifdef DEBUG_qn REprintf("knew < 0 should never happen, setting it to 0\n"); #endif } rPsort(work, j, (int)knew); res[i_k] = work[knew]; } } // for(int i_k=0, i_k < len_k ...) { k_ = k[i_k] ; .... 
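/* At this point each res[i_k] holds the raw k[i_k]-th order statistic of the
 * pairwise differences, with no consistency or finite-sample factor applied
 * (those are added on the R side).  A minimal, purely illustrative C-level
 * sketch of computing one Qn kernel -- mirroring the default order statistic
 * k = choose(h, 2) with h = n/2 + 1 -- could look like:
 *
 *     int h = n / 2 + 1;
 *     int64_t k1 = (int64_t) h * (h - 1) / 2;
 *     double raw;
 *     qn0(x, n, &k1, 1, &raw);   // raw Qn kernel; scale factors applied later
 *
 * (qn0() allocates via R_alloc(), so this only makes sense inside an R session.)
 */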
return; } /* qn0 */ #ifdef __never__ever__ /* NB: "old version" -- with *wrong* constant and "inaccurate" finite-sample corr * -- equivalent to Qn.old() in ../R/qnsn.R , see also ../man/Qn.Rd * * (This is *not* called from our R code anyway) */ double qn(double *x, int n, int finite_corr) { /* Efficient algorithm for the scale estimator: Qn = dn * 2.2219 * {|x_i-x_j|; i= n) containing the observations n : number of observations (n>=2) is_sorted: logical indicating if x is already sorted a2 : to contain a2[i] := LOMED_{j != i} | x_i - x_j |, for i=1,...,n */ /* Local variables */ double medA, medB; int i, diff, half, Amin, Amax, even, length; int leftA,leftB, nA,nB, tryA,tryB, rightA,rightB; int n1_2; if(!is_sorted) R_qsort(x, 1, n); a2[0] = x[n / 2] - x[0]; n1_2 = (n + 1) / 2; /* first half for() loop : */ for (i = 2; i <= n1_2; ++i) { nA = i - 1; nB = n - i; diff = nB - nA; leftA = leftB = 1; rightA = rightB = nB; Amin = diff / 2 + 1; Amax = diff / 2 + nA; while (leftA < rightA) { length = rightA - leftA + 1; even = 1 - length % 2; half = (length - 1) / 2; tryA = leftA + half; tryB = leftB + half; if (tryA < Amin) { rightB = tryB; leftA = tryA + even; } else { if (tryA > Amax) { rightA = tryA; leftB = tryB + even; } else { medA = x[i - 1] - x[i - tryA + Amin - 2]; medB = x[tryB + i - 1] - x[i - 1]; if (medA >= medB) { rightA = tryA; leftB = tryB + even; } else { rightB = tryB; leftA = tryA + even; } } } } /* while */ if (leftA > Amax) { a2[i - 1] = x[leftB + i - 1] - x[i - 1]; } else { medA = x[i - 1] - x[i - leftA + Amin - 2]; medB = x[leftB + i - 1] - x[i - 1]; a2[i - 1] = fmin2(medA,medB); } } /* second half for() loop : */ for (i = n1_2 + 1; i <= n - 1; ++i) { nA = n - i; nB = i - 1; diff = nB - nA; leftA = leftB = 1; rightA = rightB = nB; Amin = diff / 2 + 1; Amax = diff / 2 + nA; while (leftA < rightA) { length = rightA - leftA + 1; even = 1 - length % 2; half = (length - 1) / 2; tryA = leftA + half; tryB = leftB + half; if (tryA < Amin) { rightB = tryB; leftA = tryA + even; } else { if (tryA > Amax) { rightA = tryA; leftB = tryB + even; } else { medA = x[i + tryA - Amin] - x[i - 1]; medB = x[i - 1] - x[i - tryB - 1]; if (medA >= medB) { rightA = tryA; leftB = tryB + even; } else { rightB = tryB; leftA = tryA + even; } } } } /* while */ if (leftA > Amax) { a2[i - 1] = x[i - 1] - x[i - leftB - 1]; } else { medA = x[i + leftA - Amin] - x[i - 1]; medB = x[i - 1] - x[i - leftB - 1]; a2[i - 1] = fmin2(medA,medB); } } a2[n - 1] = x[n - 1] - x[n1_2 - 1]; return pull(a2, n, n1_2); } /* sn0 */ // C API version -- *not* called from our R code : double sn(double *x, int n, int is_sorted, int finite_corr) { /* Efficient algorithm for the scale estimator: Sn = cn * 1.1926 * LOMED_{i} HIMED_{j} |x_i-x_j| which can equivalently be written as Sn = cn * 1.1926 * LOMED_{i} LOMED_{j != i} |x_i-x_j|*/ double cn, r; double *a2 = (double *)R_alloc(n, sizeof(double)); r = 1.1926 * /* asymptotic consistency for sigma^2 */ sn0(x, n, is_sorted, a2); /* === */ cn = 1.; /* n >= 10 even, or no finite_corr[ection] */ if (finite_corr) { if (n <= 9) { if (n == 2) cn = 0.743; else if (n == 3) cn = 1.851; else if (n == 4) cn = 0.954; else if (n == 5) cn = 1.351; else if (n == 6) cn = 0.993; else if (n == 7) cn = 1.198; else if (n == 8) cn = 1.005; else if (n == 9) cn = 1.131; } else if (n % 2 == 1) /* n odd, >= 11 */ cn = n / (n - 0.9); } return cn * r; } /* sn */ /* pull(): auxiliary routine for (Qn and) Sn * ====== ======== ------------------------ */ double pull(double *a_in, int n, int k) { /* Finds the k-th 
order statistic of an array a[] of length n * -------------------- */ int j; double *a, ax; char* vmax = vmaxget(); a = (double *)R_alloc(n, sizeof(double)); /* Copy a[] and use copy since it will be re-shuffled: */ for (j = 0; j < n; j++) a[j] = a_in[j]; k--; /* 0-indexing */ rPsort(a, n, k); ax = a[k]; vmaxset(vmax); return ax; } /* pull */ /* Local variables section * Local variables: * mode: c * kept-old-versions: 12 * kept-new-versions: 20 * End: */ robustbase/src/rob-utils.c0000644000176200001440000000574212440116711015303 0ustar liggesusers/* * Copyright (C) 2014 Martin Maechler, ETH Zurich * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, a copy is available at * http://www.r-project.org/Licenses/ */ #include /* ^^^^^^^^^^ is supposedly more common and standard than * #include * or #include */ /* --> int64_t ; if people don't have the above, they can forget about it.. */ /* #include "int64.h" */ #include /* -> and much more */ // Interface routines to be called via .C(), .Call() : #include "robustbase.h" //-> , -> XLENGTH, R_xlen_t /* Smooth Weighting Function -- typically for computing weights from large distances * ------------------------- * \ <-- quartic polynomial here * \ * --------------------------- * In fact a 2-parameter generalization of Tukey's 1-parameter "biweight" * * --- see also psi, rho, ... utilities in ./lmrob.c */ double wgt_flex(double x, double c, double h) { double h2 = h/2.; x = fabs(x); if (x >= c+h2) return 0. ; if (x <= c-h2) return 1. ; // non-trivial {biweight like} down weighting: x = (x - (c-h2)) / h; // is in (0, 1) x = 1 - x*x; return x*x; // = (1 - ((|x| - (c - h/2))/ h)^2)^2 {in original 'x'} } SEXP R_wgt_flex(SEXP x_, SEXP c_, SEXP h_) { // TODO?: add , SEXP keep_attributes /* * Calculate Flexible weight function for vectorized x */ int nprot = 1; if (isInteger(x_)) { x_ = PROTECT(coerceVector(x_, REALSXP)); nprot++; } if (isInteger(c_)) { c_ = PROTECT(coerceVector(c_, REALSXP)); nprot++; } if (isInteger(h_)) { h_ = PROTECT(coerceVector(h_, REALSXP)); nprot++; } if (!isReal(x_)) error(_("Argument '%s' must be numeric or integer"), "x"); if (!isReal(c_) || LENGTH(c_) != 1) error(_("Argument '%s' must be numeric or integer of length 1"), "c"); if (!isReal(h_) || LENGTH(h_) != 1) error(_("Argument '%s' must be numeric or integer of length 1"), "h"); R_xlen_t i, n = XLENGTH(x_); SEXP res = PROTECT(allocVector(REALSXP, n)); // the result double *x = REAL(x_), *r = REAL(res), c = asReal(c_), h = asReal(h_); for(i = 0; i < n; i++) r[i] = ISNAN(x[i]) ? x[i] : wgt_flex(x[i], c, h); /* if(asLogical(keep_attributes)) { */ // do the "no exception" version of copyMostAttrib() in ..R/src/main/attrib.c /* } */ UNPROTECT(nprot); return res; } robustbase/src/lmrob.c0000644000176200001440000026754114531132254014510 0ustar liggesusers/* -*- mode: c; kept-new-versions: 40; kept-old-versions: 40 -*- * Indentation (etc) style: C-c . 
gnu */ /* file lmrob.c * was roblm/src/roblm.c - version 0.6 by Matias Salibian-Barreras * Includes the stable correct asymptotic variance estimators * of Croux, Dhaene, Hoorelbeke * Includes the fast-s algorithm */ /* Robust MM regression estimates * * ------------------------------ */ /* comment code * * * adapt other sampler <<<<<<<<<< R's random number generator !!!! * replace abort for too many singular resamples by * returning the number of singular ones */ /* MM: - Done: fast_s[_large_n]() both had FIXED seed (= 37), and effectively discarded the seed_rand argument below - Done: drop 'register' : today's compilers do optimize well! - Done: using Calloc() / Free() instead of malloc()/free() */ /* Calls [BLAS 2] DGEMV("N", ..) == dgemv("N", ..), which computes y := alpha * A*x + beta* y, where here, always, alpha = -1 , beta = 1 ('dmone' or 'done', respectively), i.e., we do y := -A*x + y = y - A*x Now, call DGEMV =~= F77_CALL(dgemv) only via macro get_Res_Xb() */ /* kollerma: Added alternative psi functions callable via psifun, chifun and wgtfun. ipsi is used to distinguish between the different types: 0: huber, 1: biweight="bisquare", 2: GaussWeight="welsh", 3: optimal, 4: hampel, 5: GGW="ggw" (Generalized Gauss Weight), 6: LQQ="lqq"=lin.quadr.qu../ piecewise linear psi'() The ipsi argument works for the S-estimator as well as for the MM-estimator. - Added implementation of M-S algorithm. - Modified subsampling behaviour: avoiding singular resamples by using customized LU decomposition. - Replaced C style matrices with Fortran style matrices, with as little copying as possible. - Using LAPACK's DGELS instead of local lu() decomposition. - Code clean up: removed all subroutines that were unused. */ #ifndef USE_FC_LEN_T # define USE_FC_LEN_T #endif #include #include #include #ifndef FCONE # define FCONE #endif #include #include #include #include "robustbase.h" //-> , -> XLENGTH, R_xlen_t /* these will also move to "lmrob.h" --- * but first make many of these 'static' <<< FIXME! 
*/ void fast_s_large_n(double *X, double *y, double s_y, int n, int p, int nRes, int *max_it_scale, double *res, int groups, int n_group, int K, int *max_k, double rel_tol, double inv_tol, double scale_tol, double zero_tol, int *converged, int best_r, double bb, const double rrhoc[], int ipsi, double *bbeta, double *sscale, int trace_lev, int mts, Rboolean ss); void fast_s(double *X, double *y, double s_y, int n, int p, int nResample, int *max_it_scale, double *res, int K, int *max_k, double rel_tol, double inv_tol, double scale_tol, double zero_tol, int *converged, int best_r, double bb, const double rrhoc[], int ipsi, double *bbeta, double *sscale, int trace_lev, int mts, Rboolean ss); Rboolean rwls(const double X[], const double y[], int n, int p, double *estimate, double *i_estimate, double *resid, double *loss, double scale, double epsilon, int *max_it, const double rho_c[], const int ipsi, int trace_lev); double norm2 (const double x[], int n); double norm (const double x[], int n); double norm1(const double x[], int n); double mean_abs(const double y[], int n) { return norm1(y, n) / n ; } double norm_diff2(const double x[], const double y[], int n); double norm_diff (const double x[], const double y[], int n); double norm1_diff(const double x[], const double y[], int n); /* moved to robustbase.h * * double normcnst(const double c[], int ipsi); * double rho_inf (const double c[], int ipsi); * double rho(double x, const double c[], int ipsi); * double psi(double x, const double c[], int ipsi); * double psip(double x, const double c[], int ipsi);// psi' * double psi2(double x, const double c[], int ipsi);// psi'' * double wgt(double x, const double c[], int ipsi); */ double rho_huber(double x, const double c[]); double psi_huber(double x, const double c[]); double psip_huber(double x, const double c[]); double psi2_huber(double x, const double c[]); double wgt_huber(double x, const double c[]); double rho_biwgt(double x, const double c[]); double psi_biwgt(double x, const double c[]); double psip_biwgt(double x, const double c[]); double psi2_biwgt(double x, const double c[]); double wgt_biwgt(double x, const double c[]); double rho_gwgt(double x, const double c[]); double psi_gwgt(double x, const double c[]); double psip_gwgt(double x, const double c[]); double wgt_gwgt(double x, const double c[]); double rho_opt(double x, const double c[]); double psi_opt(double x, const double c[]); double psip_opt(double x, const double c[]); double wgt_opt(double x, const double c[]); double rho_hmpl(double x, const double c[]); double psi_hmpl(double x, const double c[]); double psip_hmpl(double x, const double c[]); double psi2_hmpl(double x, const double c[]); double wgt_hmpl(double x, const double c[]); double rho_ggw(double x, const double c[]); void psi_ggw_vec(double *x, int n, void *k); double psi_ggw(double x, const double c[]); double psip_ggw(double x, const double c[]); double wgt_ggw(double x, const double c[]); double rho_lqq(double x, const double c[]); double psi_lqq(double x, const double c[]); double psip_lqq(double x, const double c[]); double psi2_lqq(double x, const double c[]); double wgt_lqq(double x, const double c[]); double sum_rho_sc(const double r[], double scale, int n, int p, const double c[], int ipsi); int refine_fast_s(const double X[], double *wx, const double y[], double s_y, double *wy, double *weights, int n, int p, double *res, double *work, int lwork, const double beta_cand[], double *beta_j, Rboolean *conv, int kk, double rel_tol, double zero_tol, int 
trace_lev, double b, const double rrhoc[], int ipsi, double initial_scale, double *beta_ref, double *scale); void m_s_subsample(double *X1, double *y, int n, int p1, int p2, int nResample, int max_it_scale, double rel_tol, double inv_tol, double scale_tol, double zero_tol, double bb, const double rrhoc[], int ipsi, double *sscale, int trace_lev, double *b1, double *b2, double *t1, double *t2, double *y_tilde, double *res, double *x1, double *x2, int *NIT, int *K, int *KODE, double *SIGMA, double BET0, double *SC1, double *SC2, double *SC3, double *SC4, int mts, Rboolean ss); Rboolean m_s_descent(double *X1, double *X2, double *y, int n, int p1, int p2, int K_m_s, int max_k, int max_it_scale, double rel_tol, double scale_tol, double bb, const double rrhoc[], int ipsi, double *sscale, int trace_lev, double *b1, double *b2, double *t1, double *t2, double *y_tilde, double *res, double *res2, double *x1, double *x2, int *NIT, int *K, int *KODE, double *SIGMA, double BET0, double *SC1, double *SC2, double *SC3, double *SC4); int subsample(const double x[], const double y[], int n, int m, double *beta, int *ind_space, int *idc, int *idr, double *lu, double *v, int *p, double *Dr, double *Dc, int rowequ, int colequ, Rboolean sample, int mts, Rboolean ss, double tol_inv, Rboolean solve); int fast_s_with_memory(double *X, double *y, double s_y, double *res, int nn, int pp, int nRes, int *max_it_scale, int K, int *max_k, double rel_tol, double inv_tol, double scale_tol, double zero_tol, int trace_lev, int best_r, double bb, const double rrhoc[], int ipsi, int mts, Rboolean ss, // output ==> double **best_betas, double *best_scales); /* for "tracing" only : */ void disp_mat(const double **a, int n, int m); void disp_vec(const double a[], int n); void disp_veci(const int a[], int n); double kthplace(double *, int, int); int find_max(const double a[], int n); double find_scale(const double r[], double b, const double rrhoc[], int ipsi, double initial_scale, int n, int p, int* iter, // input: max_iter, output: #{iterations used} double scale_tol, Rboolean trace); double MAD(const double a[], int n, double center, double *tmp, double *tmp2); void zero_mat(double **a, int n, int m); // used in many F77_CALL()s : static const double dmone = -1., done = 1.; static const int one = 1; #define get_Res_Xb(_Mp,_Np,_A_,_x_,_Y_) \ F77_CALL(dgemv)("N", &_Mp, &_Np, &dmone, _A_, &_Mp, _x_, &one, &done, \ /*--> */ _Y_, &one FCONE) // additionally using info (is *modified), trace_lev: #define INIT_WLS(_X_, _y_, _n_, _p_) \ int lwork = -1; \ double work0; \ \ /* Determine optimal block size for work array */ \ F77_CALL(dgels)("N", &_n_, &_p_, &one, _X_, &_n_, _y_, \ &_n_, &work0, &lwork, &info FCONE); \ if (info) { \ warning(_("DGELS could not determine optimal block size, using minimum")); \ lwork = 2*_p_; \ } else \ lwork = (int)work0; \ \ if (trace_lev >= 4) \ Rprintf(" Optimal block size for DGELS: %d\n", lwork); \ \ /* allocate */ \ double *work = (double *) R_alloc(lwork, sizeof(double)), \ *weights = (double *) R_alloc(_n_, sizeof(double)); /* Solve weighted LS, called in a loop, from rwls(), fast_refine_s() & m_s_descent(): _x_ is a "work array" only, input(_X, _y_, _wts_); output _beta_ (_y_ is *modified*) also uses (trace_level, info, one, work, lwork) */ #define FIT_WLS(_X_, _wts_, _x_, _y_, _n_, _p_, _beta_) \ /* add weights to _y_ and _x_ */ \ for (int j=0; j<_n_; j++) { \ double wtmp = sqrt(_wts_[j]); \ _y_[j] *= wtmp; \ for (int k=0; k<_p_; k++) \ _x_[_n_*k+j] = _X_[_n_*k+j] * wtmp; \ } \ /* solve weighted 
least squares problem */ \ F77_CALL(dgels)("N", &_n_, &_p_, &one, _x_, &_n_, _y_, \ &_n_, work, &lwork, &info FCONE); \ if (info) { \ if (info < 0) { \ error(_("DGELS: illegal %i-th argument"), -info); \ } else { \ if (trace_lev >= 4) { \ Rprintf(" Robustness weights in failing step: "); \ disp_vec(_wts_, _n_); \ } \ error(_("DGELS: weighted design matrix not of full rank (column %d).\n" \ "Use control parameter 'trace.lev = 4' to get diagnostic output"), info); \ } \ } \ COPY(_y_, _beta_, _p_) // *modifying* 'info': #define SETUP_EQUILIBRATION(_n_, _p_, _X_, _large_n_) \ /* equilibration of matrix _X_ */ \ /* solve (Dr X Dc) b = Dr y with beta = Dc b instead of */ \ /* X beta = y */ \ /* see Demmel (1997) APPLIED NUMERICAL LINEAR ALGEBRA */ \ /* Section 2.5.2 Equilibration */ \ double *Dr, *Dc, *Xe, rowcnd, colcnd, amax; \ int rowequ = 0 , colequ = 0; \ Dr = (double *) R_alloc(_n_, sizeof(double)); \ Dc = (double *) R_alloc(_p_, sizeof(double)); \ Xe = (double *) R_alloc(_n_*_p_, sizeof(double)); \ COPY(_X_, Xe, _n_*_p_); \ F77_CALL(dgeequ)(&_n_, &_p_, Xe, &_n_, Dr, Dc, &rowcnd, \ &colcnd, &amax, &info); \ if (info) { \ if (info < 0) { \ error(_("DGEEQU: illegal %i-th argument"), - info); \ } else if (info > _n_) { \ if (_large_n_) { \ error(_("Fast S large n strategy failed. Use control parameter 'fast.s.large.n = Inf'.")); \ } else { \ error(_("DGEEQU: column %i of the design matrix is exactly zero."), info - _n_); \ } \ } else { \ /* FIXME: replace dgeequ by our own version */ \ /* that does not treat this as error */ \ warning(_(" Skipping design matrix equilibration (DGEEQU): row %i is exactly zero."), info); \ } \ } else { \ /* scale _X_ */ \ char equed = '?'; /* init to prevent warning */ \ F77_CALL(dlaqge)(&_n_, &_p_, Xe, &_n_, Dr, Dc, &rowcnd, \ &colcnd, &amax, &equed FCONE); \ rowequ = equed == 'B' || equed == 'R'; \ colequ = equed == 'B' || equed == 'C'; \ } #define SETUP_SUBSAMPLE(_n_, _p_, _X_, _large_n_) \ /* (Pointers to) Arrays - to be allocated */ \ int *ind_space = (int *) R_alloc(_n_, sizeof(int)), \ *idc = (int *) R_alloc(_n_, sizeof(int)), \ *idr = (int *) R_alloc(_p_, sizeof(int)), \ *pivot = (int *) R_alloc(_p_-1, sizeof(int)); \ double *lu = (double *) R_alloc(_p_*_p_, sizeof(double)), \ *v = (double *) R_alloc(_p_, sizeof(double)); \ SETUP_EQUILIBRATION(_n_, _p_, _X_, _large_n_); #define COPY(from, to, len) Memcpy(to, from, len) /* In theory BLAS should be fast, but this seems slightly slower, * particularly for non-optimized BLAS :*/ /* #define COPY(FROM, TO, _p_) \ */ /* F77_CALL(dcopy)(&_p_, FROM, &one, TO, &one); */ // NB: INFI is now combined with s_y (or SIGMA) in order to be equivariant (!) : #define INFI 1e+20 /* Called from R's lmrob.S() in ../R/lmrob.MM.R, * help() in ../man/lmrob.S.Rd, this function computes an S-regression estimator ~~~~~~~~~~~~~~~~~ */ void R_lmrob_S(double *X, double *y, // y[] on input; residuals[] on output int *n, int *P, int *nRes, // = nResample ( = 500, by default) double *scale, double *beta_s, double *rrhoc, int *iipsi, double *bb, int *best_r, int *Groups, int *N_group, int *K_s, int *max_k, int *max_it_scale, double *rel_tol, double *inv_tol, double *scale_tol, // <- new, was hardwired to EPS_SCALE := 1e-10 double *zero_tol, // <- very new, was hardwired to EPS_SCALE := 1e-10 int *converged, int *trace_lev, int *mts, int *ss, // subsampling int *cutoff // defining "large n" <==> using fast_s_large_n() ) { /* best_r = 't' of Salibian-Barrera_Yohai(2006), * = no. 
of best candidates to be iterated further ("refined") * = 2, by default */ if (*nRes > 0) { double *res = (double *) R_alloc(*n, sizeof(double)); // residuals double s_y = mean_abs(y, *n); if (*n > *cutoff) { if(*trace_lev > 0) Rprintf("lmrob_S(n = %d, nRes = %d): fast_s_large_n():\n", *n, *nRes); fast_s_large_n(X, y, s_y, *n, *P, *nRes, max_it_scale, res, *Groups, *N_group, *K_s, max_k, *rel_tol, *inv_tol, *scale_tol, *zero_tol, converged, *best_r, *bb, rrhoc, *iipsi, beta_s, scale, *trace_lev, *mts, (Rboolean)*ss); } else { if(*trace_lev > 0) Rprintf("lmrob_S(n = %d, nRes = %d): fast_s() [non-large n]:\n", *n, *nRes); fast_s(X, y, s_y, *n, *P, *nRes, max_it_scale, res, *K_s, max_k, *rel_tol, *inv_tol, *scale_tol, *zero_tol, converged, *best_r, *bb, rrhoc, *iipsi, beta_s, scale, *trace_lev, *mts, *ss); } COPY(res, y, *n); // return the 'residuals' in 'y' } else { // nRes[] <= 0 <==> 'only.scale = TRUE' if(*trace_lev > 0) Rprintf("lmrob_S(nRes = 0, n = %d): --> find_scale(*, scale=%g) only:", *n, *scale); *scale = find_scale(y, *bb, rrhoc, *iipsi, *scale, *n, *P, max_it_scale, *scale_tol, *trace_lev >= 3); if(*trace_lev > 0) Rprintf(" used %d iterations\n", *max_it_scale); } } /* Called from R, this function computes an M-S-regression estimator */ // not only called from ../R/lmrob.M.S.R, but also ../inst/xtraR/m-s_fns.R // ~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~ void R_lmrob_M_S(double *X1, double *X2, double *y, double *res, int *nn, int *pp1, int *pp2, int *nRes, int *max_it_scale, double *scale, double *b1, double *b2, double *rho_c, int *ipsi, double *bb, int *K_m_s, int *max_k, double *rel_tol, double *inv_tol, double *scale_tol, double *zero_tol, int *converged, int *trace_lev, int *orthogonalize, int *subsample, int *descent, int *mts, int *ss) { /* Initialize (some of the) memory here, * so that we have to do it only once */ int i, n = *nn, p1 = *pp1, p2 = *pp2; if(*trace_lev > 0) Rprintf( "lmrob_M_S(n = %d, nRes = %d, (p1,p2)=(%d,%d), (orth,subs,desc)=(%d,%d,%d))\n", n, *nRes, p1, p2, *orthogonalize, *subsample, *descent); double *t1 = (double *) R_alloc(n, sizeof(double)), /* size n needed for rllarsbi */ *t2 = (double *) R_alloc(p2, sizeof(double)), *ot1 = (double *) R_alloc(p1, sizeof(double)), *oT2 = (double *) R_alloc(p2*p1, sizeof(double)), *y_work = (double *) R_alloc(n, sizeof(double)), *y_tilde = (double *) R_alloc(n, sizeof(double)), *x1 = (double *) R_alloc(n*p1, sizeof(double)), *x2 = (double *) R_alloc(n*p2, sizeof(double)); COPY(y, y_work, n); COPY(X2, x2, n*p2); /* Variables required for rllarsbi() := L1 / least absolute residuals - estimate */ int NIT=0, K=0, KODE=0; double SIGMA = 0., *SC1 = (double *) R_alloc(n, sizeof(double)), *SC2 = (double *) R_alloc(p1, sizeof(double)), *SC3 = (double *) R_alloc(p1, sizeof(double)), *SC4 = (double *) R_alloc(p1, sizeof(double)); const double BET0 = 0.773372647623; /* = pnorm(0.75) */ /* STEP 1: Orthgonalize X2 and y from X1 */ if (*orthogonalize) { COPY(X1, x1, n*p1); F77_CALL(rllarsbi)(x1, y_work, &n, &p1, &n, &n, rel_tol, &NIT, &K, &KODE, &SIGMA, t1, y_tilde, SC1, SC2, SC3, SC4, &BET0); COPY(t1, ot1, p1); for (i=0; i < p2; i++) { COPY(X1, x1, n*p1); double *ptr = X2+i*n; COPY(ptr, y_work, n); F77_CALL(rllarsbi)(x1, y_work, &n, &p1, &n, &n, rel_tol, &NIT, &K, &KODE, &SIGMA, t1, x2+i*n, SC1, SC2, SC3, SC4, &BET0); ptr = oT2+i*p1; COPY(t1, ptr, p1); } COPY(y_tilde, y_work, n); /* compare with Maronna & Yohai 2000: * y_work and y_tilde now contain \tilde y, ot1 -> t_1, * x2 -> \tilde x2, oT2 -> T_2 */ if(*trace_lev >= 2) 
Rprintf(" orthogonalized: SIGMA=%g\n", SIGMA); } else { SIGMA = mean_abs(y, n); if(*trace_lev >= 2) Rprintf(" *no* orthog., SIGMA=mean(|y_i|)= %g\n", SIGMA); } /* STEP 2: Subsample */ if (*subsample) { m_s_subsample(X1, y_work, n, p1, p2, *nRes, *max_it_scale, *rel_tol, *inv_tol, *scale_tol, *zero_tol, *bb, rho_c, *ipsi, scale, *trace_lev, b1, b2, t1, t2, y_tilde, res, x1, x2, &NIT, &K, &KODE, &SIGMA, BET0, SC1, SC2, SC3, SC4, *mts, *ss); if (*scale < 0) error(_("m_s_subsample() stopped prematurely (scale < 0).")); } /* STEP 3: Transform back */ if (*orthogonalize) { /* t1 = ot1 + b1 - oT2 %*% b2 */ for(int i=0; i < p1; i++) t1[i] = ot1[i] + b1[i]; get_Res_Xb(p1,p2, oT2, b2, /*--> */ t1); COPY(t1, b1, p1); /* restore x2 */ COPY(X2, x2, n*p2); } /* update / calculate residuals res := y - X1*b1 - X2*b2 */ COPY(y, res, n); get_Res_Xb(n,p1, X1, b1, /*--> */ res); get_Res_Xb(n,p2, X2, b2, /*--> */ res); /* STEP 4: Descent procedure */ if (*descent) { *converged = m_s_descent( X1, X2, y, n, p1, p2, *K_m_s, *max_k, *max_it_scale, *rel_tol, *scale_tol, *bb, rho_c, *ipsi, scale, *trace_lev, b1, b2, t1, t2, y_tilde, res, y_work, x1, x2, &NIT, &K, &KODE, &SIGMA, BET0, SC1, SC2, SC3, SC4); } } /* This function performs RWLS iterations starting from * an S-regression estimator (and associated residual scale). * So, in itself, this is ``just'' an M-estimator -- called from R's * lmrob..M..fit() [ ../R/lmrob.MM.R ] * ~~~~~~~~~~~~~~~ * NOTE: rel_tol now controls the *relative* changes in beta, * instead of being hard-wired to EPS = 1e-7 and bounding the * absolute || beta_1 - beta_2 || */ void R_lmrob_MM(double *X, double *y, int *n, int *P, double *beta_initial, double *scale, double *beta_m, double *resid, int *max_it, double *rho_c, int *ipsi, double *loss, double *rel_tol, int *converged, int *trace_lev) { /* starting from the S-estimate (beta_initial), use * irwls to compute the MM-estimate (beta_m) */ if(*trace_lev > 0) Rprintf("lmrob_MM(): rwls():\n"); *converged = (int)rwls(X,y,*n,*P,beta_m, beta_initial, resid, loss, *scale, *rel_tol, max_it, rho_c, *ipsi, *trace_lev); if (!converged) COPY(beta_initial, beta_m, *P); } /* Call subsample() from R, for testing purposes only */ void R_subsample(const double x[], const double y[], int *n, int *m, double *beta, int *ind_space, int *idc, int *idr, double *lu, double *v, int *p, double *_Dr, double *_Dc, int *_rowequ, int *_colequ, int *status, int *sample, int *mts, int *ss, double *tol_inv, int *solve) { int info; /* set the seed */ GetRNGstate(); SETUP_EQUILIBRATION(*n, *m, x, 0); *status = subsample(Xe, y, *n, *m, beta, ind_space, idc, idr, lu, v, p, Dr, Dc, rowequ, colequ, (Rboolean)*sample, *mts, (Rboolean)*ss, *tol_inv, (Rboolean)*solve); COPY(Dr, _Dr, *n); COPY(Dc, _Dc, *m); *_rowequ = rowequ; *_colequ = colequ; PutRNGstate(); } //---- Psi(), Rho(), Functions----------------------------------------------------------- SEXP R_psifun(SEXP x_, SEXP c_, SEXP ipsi_, SEXP deriv_) { /* * Calculate psi for vectorized x, scaled to get psi'(0) = 1 * deriv -1: rho(x) {*not* normalized} * deriv 0: psi(x) = rho'(x) * deriv 1: psi'(x) = rho''(x) {we always have psip(0) == 1} * deriv 2: psi''(x)= rho'''(x) */ int nprot = 1, ipsi = asInteger(ipsi_), deriv = asInteger(deriv_); if (isInteger(x_)) { x_ = PROTECT(coerceVector(x_, REALSXP)); nprot++; } if (!isReal(x_)) error(_("Argument '%s' must be numeric or integer"), "x"); if (!isReal(c_)) error(_("Argument '%s' must be numeric or integer"), "cc"); R_xlen_t i, n = XLENGTH(x_); SEXP res = 
PROTECT(allocVector(REALSXP, n)); // the result double *x = REAL(x_), *r = REAL(res), *cc = REAL(c_); // put the for() loop *inside* the switch (--> speed for llength >> 1) : #define for_i_n_NA for(i = 0; i < n; i++) r[i] = ISNAN(x[i]) ? x[i] : switch(deriv) { // our rho() is rho~(), i.e., scaled to max = 1 case -1: if(is_redescender(ipsi)) { double rho_Inf = rho_inf(cc, ipsi); for_i_n_NA rho(x[i], cc, ipsi) * rho_Inf; } else { // huber, .. for_i_n_NA rho(x[i], cc, ipsi); } break; case 0: for_i_n_NA psi (x[i], cc, ipsi); break; case 1: for_i_n_NA psip(x[i], cc, ipsi); break; case 2: for_i_n_NA psi2(x[i], cc, ipsi); break; default: error(_("'deriv'=%d is invalid"), deriv); } UNPROTECT(nprot); return res; } SEXP R_chifun(SEXP x_, SEXP c_, SEXP ipsi_, SEXP deriv_) { /* * Calculate chi for vectorized x, i.e. rho~(.) with rho~(inf) = 1: * deriv 0: chi (x) = \rho(x) / \rho(Inf) =: \rho(x) * nc == our rho() C-function * deriv 1: chi'(x) = psi(x) * nc * deriv 2: chi''(x) = psi'(x) * nc */ int nprot = 1, ipsi = asInteger(ipsi_), deriv = asInteger(deriv_); if (isInteger(x_)) { x_ = PROTECT(coerceVector(x_, REALSXP)); nprot++; } if (!isReal(x_)) error(_("Argument '%s' must be numeric or integer"), "x"); if (!isReal(c_)) error(_("Argument '%s' must be numeric or integer"), "cc"); R_xlen_t i, n = XLENGTH(x_); SEXP res = PROTECT(allocVector(REALSXP, n)); // the result double *x = REAL(x_), *r = REAL(res), *cc = REAL(c_); // our rho() is rho~() == chi(), i.e., scaled to max = 1 double rI = (deriv > 0) ? rho_inf(cc, ipsi) : 0./* -Wall */; switch(deriv) { case 0: for_i_n_NA rho(x[i], cc, ipsi); break; case 1: for_i_n_NA psi (x[i], cc, ipsi) / rI; break; case 2: for_i_n_NA psip(x[i], cc, ipsi) / rI; break; case 3: for_i_n_NA psi2(x[i], cc, ipsi) / rI; break; default: error(_("'deriv'=%d is invalid"), deriv); } UNPROTECT(nprot); return res; } SEXP R_wgtfun(SEXP x_, SEXP c_, SEXP ipsi_) { /* * Calculate wgt(x) = psi(x)/x for vectorized x */ int nprot = 1, ipsi = asInteger(ipsi_); if (isInteger(x_)) { x_ = PROTECT(coerceVector(x_, REALSXP)); nprot++; } if (!isReal(x_)) error(_("Argument '%s' must be numeric or integer"), "x"); if (!isReal(c_)) error(_("Argument '%s' must be numeric or integer"), "cc"); R_xlen_t i, n = XLENGTH(x_); SEXP res = PROTECT(allocVector(REALSXP, n)); // the result double *x = REAL(x_), *r = REAL(res), *cc = REAL(c_); for_i_n_NA wgt(x[i], cc, ipsi); UNPROTECT(nprot); return res; } #undef for_i_n_NA SEXP R_rho_inf(SEXP cc, SEXP ipsi) { if (!isReal(cc)) error(_("Argument 'cc' must be numeric")); if (!isInteger(ipsi)) error(_("Argument 'ipsi' must be integer")); return ScalarReal(rho_inf(REAL(cc), INTEGER(ipsi)[0])); } double rho_inf(const double k[], int ipsi) { /* * Compute \rho(\infty) for psi functions * (Note that our C function rho() is "rho~" and has rho(Inf) = 1) */ double c = k[0]; switch(ipsi) { default: error(_("rho_inf(): ipsi=%d not implemented."), ipsi); case 0: return(R_PosInf); // huber case 1: return(c*c/6.); // biweight case 2: return(c*c); // GaussWeight / "Welsh" case 3: return(3.25*c*c); // Optimal case 4: return(0.5*k[0]*(k[1]+k[2]-k[0])); // Hampel case 5: // GGW (Generalized Gauss Weight) switch((int)c) { default: case 0: return(k[4]); break; // k[4] == cc[5] in R -- must be correct! 
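/* cases 1..6: hard-wired rho(Inf) values for the six pre-computed GGW tuning
 * sets (b = 1 and b = 1.5, each at 95% and 85% efficiency and at breakdown
 * point 0.5) -- same ordering as the coefficient table in rho_ggw() below */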
case 1: return(5.309853); break; case 2: return(2.804693); break; case 3: return(0.3748076); break; case 4: return(4.779906); break; case 5: return(2.446574); break; case 6: return(0.4007054); break; }; case 6: // LQQ aka 'lin psip' return (k[2]*k[1]*(3*k[1]+2*k[0]) + (k[0]+k[1])*(k[0]+k[1])) / (6.*(k[2]-1.)); } } // rho_inf() double normcnst(const double k[], int ipsi) { /* * return normalizing constant for psi functions := 1 / \rho(\infty) */ double c = k[0]; switch(ipsi) { default: error(_("normcnst(): ipsi=%d not implemented."), ipsi); case 0: return(0.); // huber {normcnst() should never be used for that!} case 1: return(6./(c*c)); // biweight case 2: return(1./(c*c)); // GaussWeight / "Welsh" case 3: return(1./3.25/(c*c)); // Optimal case 4: return(2./(k[0]*(k[1]+k[2]-k[0]))); // Hampel case 5: // GGW switch((int)c) { default: case 0: return(1./ k[4]); break; // k[4] == cc[5] in R -- must be correct! case 1: return(1./5.309853); break; case 2: return(1./2.804693); break; case 3: return(1./0.3748076); break; case 4: return(1./4.779906); break; case 5: return(1./2.446574); break; case 6: return(1./0.4007054); break; }; case 6: // LQQ aka 'lin psip' return((6*(k[2]-1))/(k[2]*k[1]*(3*k[1]+2*k[0])+(k[0]+k[1])*(k[0]+k[1]))); } } // normcnst() double rho(double x, const double c[], int ipsi) { /* * return the correct rho according to ipsi * This rho() is normalized to 1, called rho~() or chi() in other contexts */ switch(ipsi) { default: error(_("rho(): ipsi=%d not implemented."), ipsi); case 0: return(rho_huber(x, c)); // huber case 1: return(rho_biwgt(x, c)); // biweight case 2: return(rho_gwgt(x, c)); // GaussWeight / "Welsh" case 3: return(rho_opt(x, c)); // Optimal case 4: return(rho_hmpl(x, c)); // Hampel case 5: return(rho_ggw(x, c)); // GGW (Generalized Gauss Weight) case 6: return(rho_lqq(x, c)); // LQQ := Linear-Quadratic-Quadratic // was LGW := "lin psip" := piecewise linear psi'() } } double psi(double x, const double c[], int ipsi) { /* * return the correct psi according to ipsi * this is actually rho' and not psi */ switch(ipsi) { default: error(_("psi(): ipsi=%d not implemented."), ipsi); case 0: return(psi_huber(x, c)); // huber case 1: return(psi_biwgt(x, c)); // biweight case 2: return(psi_gwgt(x, c)); // GaussWeight / "Welsh" case 3: return(psi_opt(x, c)); // Optimal case 4: return(psi_hmpl(x, c)); // Hampel case 5: return(psi_ggw(x, c)); // GGW case 6: return(psi_lqq(x, c)); // LQQ (piecewise linear psi') } } double psip(double x, const double c[], int ipsi) { /* * return the correct ppsi according to ipsi * this is actually rho'' and not psip */ switch(ipsi) { default: error(_("psip(): ipsi=%d not implemented."), ipsi); case 0: return(psip_huber(x, c)); // huber case 1: return(psip_biwgt(x, c)); // biweight case 2: return(psip_gwgt(x, c)); // GaussWeight / "Welsh" case 3: return(psip_opt(x, c)); // Optimal case 4: return(psip_hmpl(x, c)); // Hampel case 5: return(psip_ggw(x, c)); // GGW case 6: return(psip_lqq(x, c)); // LQQ (piecewise linear psi') } } double psi2(double x, const double c[], int ipsi) { /* Compute psi''(x) == rho'''(x) */ switch(ipsi) { // default: error(_("psi2: ipsi=%d not implemented."), ipsi); case 0: return(psi2_huber(x, c)); // huber case 1: return(psi2_biwgt(x, c)); // biweight case 4: return(psi2_hmpl(x, c)); // Hampel case 6: return(psi2_lqq(x, c)); // LQQ (piecewise linear psi') default: error(_("psi2(): ipsi=%d not implemented."), ipsi); /* case 2: return(psi2_gwgt(x, c)); // GaussWeight / "Welsh" case 3: return(psi2_opt(x, c)); // Optimal 
case 5: return(psi2_ggw(x, c)); // GGW */ } } double wgt(double x, const double c[], int ipsi) { /* * return the correct wgt according to ipsi * wgt: rho'(x) / x */ switch(ipsi) { default: case 0: return(wgt_huber(x, c)); // huber case 1: return(wgt_biwgt(x, c)); // biweight case 2: return(wgt_gwgt(x, c)); // GaussWeight / "Welsh" case 3: return(wgt_opt(x, c)); // Optimal case 4: return(wgt_hmpl(x, c)); // Hampel case 5: return(wgt_ggw(x, c)); // GGW case 6: return(wgt_lqq(x, c)); // LQQ (piecewise linear psi') } } //--- Huber's rho / psi / ... //--- ------- /* Huber's rho(): contrary to all the redescenders below, this can NOT be scaled to rho(Inf)=1 : */ double rho_huber(double x, const double c[]) { return (fabs(x) <= c[0]) ? x*x*0.5 : c[0]*(fabs(x) - c[0]/2); } double psi_huber(double x, const double c[]) { // Huber's psi = rho'() return (x <= -c[0]) ? -c[0] : ((x < c[0]) ? x : c[0]); } double psip_huber(double x, const double c[]) { // psi' = rho'' : Second derivative of Huber's loss function return (fabs(x) >= c[0]) ? 0. : 1.; } double psi2_huber(double x, const double c[]) { // psi'' = rho''' : Third derivative of Huber's loss function return 0; // FIXME? return NaN when |x| == c ?? -- then also for psi2_hmpl() } double wgt_huber(double x, const double c[]) { /* * Weights for Huber's loss function w(x) = psi(x)/x */ return (fabs(x) >= c[0]) ? c[0]/fabs(x) : 1.; } //--- Biweight = Bisquare = Tukey's Biweight ... //--- -------------------------------------- double rho_biwgt(double x, const double c[]) { /* * Tukey's bisquare loss function == R's tukeyChi() */ if (fabs(x) > (*c)) return(1.); else { double t = x / (*c); t *= t; /* = t^2 */ return( t*(3. + t*(-3. + t)) ); } } double psi_biwgt(double x, const double c[]) { /* * First derivative of Tukey's bisquare loss function */ if (fabs(x) > (*c)) return(0.); else { double a = x / (*c), u = 1. - a*a; return( x * u * u ); } } double psip_biwgt(double x, const double c[]) { /* * Second derivative of Tukey's bisquare loss function */ if (fabs(x) > (*c)) return(0.); else { x /= *c; double x2 = x*x; return( (1. - x2) * (1 - 5 * x2)); } } double psi2_biwgt(double x, const double c[]) { /** 3rd derivative of Tukey's bisquare loss function rho() *= 2nd derivative of psi() : */ if (fabs(x) >= c[0]) // psi''() *is* discontinuous at x = c[0]: use "middle" value there: return (fabs(x) == c[0]) ? 4*x/c[0] : 0.; else { x /= c[0]; double x2 = x*x; return 4*x/c[0] * (5 * x2 - 3.); } } double wgt_biwgt(double x, const double c[]) { /* * Weights for Tukey's bisquare loss function */ if( fabs(x) > *c ) return(0.); else { double a = x / (*c); a = (1. - a)*(1. + a); return( a * a ); } } //---------- gwgt == Gauss Weight Loss function =: "Welsh" -------------------- double rho_gwgt(double x, const double c[]) { /* * Gauss Weight Loss function */ double ac = x / (*c); return(-expm1(-(ac*ac)/2)); } /*========= TODO --- these limits could be lowered for the (normal) case where we have sub-normal numbers, ---- i.e. 0 < x < .Machine$double.xmin min_x = 2^-1074 = 2^-(1022+52) ~= 4e-324 */ // Largest x such that exp(-x) does not underflow : static double MIN_Exp = -708.4; // ~ = M_LN2 * DBL_MIN_EXP = -log(2) * 1022 = -708.3964 */ // Largest x such that exp(-x^2/2) does not underflow : static double MAX_Ex2 = 37.7; // ~ = sqrt(- 2. 
* M_LN2 * DBL_MIN_EXP); /* max {x | exp(-x^2/2) < .Machine$double.xmin } = * min {x | x^2 > -2*log(2)* .Machine$double.min.exp } = * = sqrt(-2*log(2)* .Machine$double.min.exp) = {IEEE double} * = sqrt(log(2) * 2044) = 37.64031 */ double psi_gwgt(double x, const double c[]) { /* * Gauss Weight Psi() */ double a = x / (*c); if(fabs(a) > MAX_Ex2) return 0.; else return x*exp(-(a*a)/2); } double psip_gwgt(double x, const double c[]) { /* * Gauss Weight Psi'() */ x /= (*c); if(fabs(x) > MAX_Ex2) return 0.; else { double ac = x*x; return exp(-ac/2) * (1. - ac); } } double wgt_gwgt(double x, const double c[]) { /* * Gauss Weight Loss function */ double a = x / (*c); return(exp(-(a*a)/2)); } double rho_opt(double x, const double c[]) { /* * Optimal psi Function, thank you robust package */ double ac = x / (*c), // AX=S/XK ax = fabs(ac); // AX=ABST/XK if (ax > 3) // IF (AX .GT. 3.D0) THEN return(1); // rlRHOm2=3.25D0*XK*XK else if (ax > 2.) { const double R1 = -1.944/2., R2 = 1.728/4., R3 = -0.312/6., R4 = 0.016/8.; ax *= ax; // = |x/c| ^ 2 return (ax*(R1+ ax*(R2+ ax*(R3+ ax*R4))) +1.792)/3.25; // rlRHOm2=XK*XK*(R1*AX**2+R2*AX**4+R3*AX**6+R4*AX**8+1.792D0) } else return(ac*ac/6.5); // rlRHOm2=S2/2.D0 } double psi_opt(double x, const double c[]) { /* * Optimal psi Function, thank you robust package */ double R1 = -1.944, R2 = 1.728, R3 = -0.312, R4 = 0.016; double ax, ac; ac = x / (*c); // AX=S/XK ax = fabs(ac); // AX=ABST/XK if (ax > 3.) // IF (AX .GT. 3.D0) THEN return(0); // rlPSIm2=0.D0 else if (ax > 2.) { // ELSE IF(AX .GT. 2.D0) THEN double a2 = ac*ac; if (ac > 0.) // IF (AX .GT. 0.D0) THEN return fmax2(0., (*c)*((((R4*a2 +R3)*a2 +R2)*a2 +R1)*ac)); // rlPSIm2=DMAX1(0.D0,XK*(R4*AX**7+R3*AX**5+R2*AX**3+R1*AX)) else return -fabs((*c)*((((R4*a2 +R3)*a2 +R2)*a2 +R1)*ac)); // rlPSIm2=-DABS(XK*(R4*AX**7+R3*AX**5+R2*AX**3+R1*AX)) } else return x; } double psip_opt(double x, const double c[]) { /* * psi'() for Optimal psi Function, thank you robust package */ double ac = x / (*c), ax = fabs(ac); if (ax > 3.) return 0; else if (ax > 2.) { const double R1 = -1.944, R2 = 1.728, R3 = -0.312, R4 = 0.016; ax *= ax; // = |x/c| ^ 2 return R1 + ax*(3*R2 + ax*(5*R3 + ax * 7*R4)); } else return 1; } double wgt_opt(double x, const double c[]) { /* * w(.) for optimal psi Function, thank you robust package */ double ac = x / (*c), ax = fabs(ac); if (ax > 3.) return 0.; else if (ax > 2.) { const double R1 = -1.944, R2 = 1.728, R3 = -0.312, R4 = 0.016; ax *= ax; // = |x/c| ^ 2 return fmax2(0., R1+ ax*(R2 + ax*(R3 + ax*R4))); } else return 1.; } double rho_hmpl(double x, const double k[]) { /* * rho() for Hampel's redescending psi function * constants (a, b, r) == k[0:2] s.t. slope of psi is 1 in the center * * This function is normalized s.t. rho(inf) = 1 */ double u = fabs(x), nc = k[0]*(k[1]+k[2]-k[0])/2; if (u <= k[0]) return( x*x/2 / nc ); else if (u <= k[1]) return( ( u - k[0]/2 ) * k[0] / nc ); else if (u <= k[2]) return( ( k[1] - k[0]/2 + (u - k[1]) * (1 - ( u - k[1] ) / ( k[2] - k[1] ) / 2 )) * k[0] / nc); else return( 1 ); } double psi_hmpl(double x, const double k[]) { /* * psi() for Hampel's redescending psi function * constants (a, b, r) == k[0:2] s.t. 
slope of psi is 1 in the center */ // double sx = sign(x), u = fabs(x); : double sx, u; if (x < 0) { sx = -1; u = -x; } else { sx = +1; u = x; } if (u <= k[0]) return( x ); else if (u <= k[1]) return sx * k[0]; else if (u <= k[2]) return sx * k[0] * (k[2] - u) / (k[2] - k[1]); else return 0.; } double psip_hmpl(double x, const double k[]) { /* * psi'() for Hampel's redescending psi function * constants (a, b, r) == k[0:2] s.t. slope of psi is 1 in the center */ double u = fabs(x); if (u <= k[0]) return( 1 ); else if (u <= k[1]) return( 0 ); else if (u <= k[2]) return( k[0] / ( k[1] - k[2] ) ); else return( 0 ); } double psi2_hmpl(double x, const double k[]) { /* * psi''() for Hampel's redescending psi function * constants (a, b, r) == k[0:2] s.t. slope of psi is 1 in the center */ return 0.; // even though psi'() is already discontinuous at k[j] } double wgt_hmpl(double x, const double k[]) { /* * w(x) = psi(x)/x for Hampel's redescending psi function * Hampel redescending psi function * constants (a, b, r) == k[0:2] s.t. slope of psi is 1 in the center */ double u = fabs(x); if (u <= k[0]) return( 1 ); else if (u <= k[1]) return( k[0] / u ); else if (u <= k[2]) return( k[0] * ( k[2] - u ) / ( k[2] - k[1] ) / u ); else return( 0 ); } //--- GGW := Generalized Gauss-Weight Koller and Stahel (2011) //--- --- // rho() & chi() need to be calculated by numerical integration -- apart from 6 pre-stored cases double rho_ggw(double x, const double k[]) { /* * Gauss Weight with constant center */ if (k[0] > 0) { // for hard-coded constants --- use a *polynomial* approximation const double C[6][20] = { // 0: b = 1, 95% efficiency {0.094164571656733, -0.168937372816728, 0.00427612218326869, 0.336876420549802, -0.166472338873754, 0.0436904383670537, -0.00732077121233756, 0.000792550423837942, -5.08385693557726e-05, 1.46908724988936e-06, -0.837547853001024, 0.876392734183528, -0.184600387321924, 0.0219985685280105, -0.00156403138825785, 6.16243137719362e-05, -7.478979895101e-07, -3.99563057938975e-08, 1.78125589532002e-09, -2.22317669250326e-11}, // 1: b = 1, 85% efficiency {0.174505224068561, -0.168853188892986, 0.00579250806463694, 0.624193375180937, -0.419882092234336, 0.150011303015251, -0.0342185249354937, 0.00504325944243195, -0.0004404209084091, 1.73268448820236e-05, -0.842160072154898, 1.19912623576069, -0.345595777445623, 0.0566407000764478, -0.00560501531439071, 0.000319084704541442, -7.4279004383686e-06, -2.02063746721802e-07, 1.65716101809839e-08, -2.97536178313245e-10}, // 2: b = 1, bp 0.5 {1.41117142330711, -0.168853741371095, 0.0164713906344165, 5.04767833986545, -9.65574752971554, 9.80999125035463, -6.36344090274658, 2.667031271863, -0.662324374141645, 0.0740982983873332, -0.84794906554363, 3.4315790970352, -2.82958670601597, 1.33442885893807, -0.384812004961396, 0.0661359078129487, -0.00557221619221031, -5.42574872792348e-05, 4.92564168111658e-05, -2.80432020951381e-06}, // 3: b = 1.5, 95% efficiency {0.104604570079252, 0.0626649856211545, -0.220058184826331, 0.403388189975896, -0.213020713708997, 0.102623342948069, -0.0392618698058543, 0.00937878752829234, -0.00122303709506374, 6.70669880352453e-05, 0.632651530179424, -1.14744323908043, 0.981941598165897, -0.341211275272191, 0.0671272892644464, -0.00826237596187364, 0.0006529134641922, -3.23468516804340e-05, 9.17904701930209e-07, -1.14119059405971e-08}, // 4: b = 1.5, 85% efficiency {0.205026436642222, 0.0627464477520301, -0.308483319391091, 0.791480474953874, -0.585521414631968, 0.394979618040607, -0.211512515412973, 
0.0707208739858416, -0.0129092527174621, 0.000990938134086886, 0.629919019245325, -1.60049136444912, 1.91903069049618, -0.933285960363159, 0.256861783311473, -0.0442133943831343, 0.00488402902512139, -0.000338084604725483, 1.33974565571893e-05, -2.32450916247553e-07}, // 5: b = 1.5, bp 0.5 {1.35010856132000, 0.0627465630782482, -0.791613168488525, 5.21196700244212, -9.89433796586115, 17.1277266427962, -23.5364159883776, 20.1943966645350, -9.4593988142692, 1.86332355622445, 0.62986381140768, -4.10676399816156, 12.6361433997327, -15.7697199271455, 11.1373468568838, -4.91933095295458, 1.39443093325178, -0.247689078940725, 0.0251861553415515, -0.00112130382664914}}; double end[6] = {18.5527638190955, 13.7587939698492, 4.89447236180905, 11.4974874371859, 8.15075376884422, 3.17587939698492}; int j = ((int)k[0]) - 1; double c; switch(j) { // c : identical numbers to those in SET_ABC_GGW below case 0: c = 1.694; break; case 1: c = 1.2442567; break; case 2: c = 0.4375470; break; case 3: c = 1.063; break; case 4: c = 0.7593544; break; case 5: c = 0.2959132; break; default: error(_("rho_ggw(): case (%i) not implemented."), j+1); } x = fabs(x); if (x <= c) return(C[j][0]*x*x); else if (x <= 3*c) return(C[j][1] + x*(C[j][2] + x*(C[j][3] + x*(C[j][4] + x*(C[j][5] + x*(C[j][6] + x*(C[j][7] + x*(C[j][8] + x*(C[j][9]))))))))); else if (x <= end[j]) return(C[j][10] + x*(C[j][11] + x*(C[j][12] + x*(C[j][13] + x*(C[j][14] + x*(C[j][15] + x*(C[j][16] + x*(C[j][17] + x*(C[j][18]+ x*(C[j][19])))))))))); else return(1.); } else { // k[0] == 0; k[1:4] = (a, b, c, rho(Inf)) = "general parameters" x = fabs(x); double a = 0., epsabs = R_pow(DBL_EPSILON, 0.25), result, abserr; int neval, ier, last, limit = 100, lenw = 4 * limit; int *iwork = (int *) R_alloc(limit, sizeof(int)); double *work = (double *) R_alloc(lenw, sizeof(double)); // --> calculate integral of psi(.); Rdqags() is from R's official API ("Writing R Extensions") Rdqags(psi_ggw_vec, (void *)k, &a, &x, &epsabs, &epsabs, &result, &abserr, &neval, &ier, &limit, &lenw, &last, iwork, work); if (ier >= 1) error(_("Error from Rdqags(psi_ggw*, k, ...): ier = %i"), ier); return(result/k[4]); } } void psi_ggw_vec(double *x, int n, void *k) { for (int i = 0; i piecewise constant psi''(): double psi2_lqq (double x, const double k[]) { // double sx = sign(x), ax = fabs(x); : double sx, ax; if (x < 0) { sx = -1; ax = -x; } else { sx = +1; ax = x; } // k[0:2] == (b, c, s) : if (ax <= k[1]) return 0.; else { double k01 = k[0] + k[1]; if (/*k[1] < ax && */ ax <= k01) return sx * (- k[2]/k[0]); else { double s5 = 1. - k[2], // = (1-s) a = (k[0] * k[2] - 2 * k01)/ s5; if (/* k01 < ax && */ ax < k01 + a) return sx * (- s5 / a); else return 0.; } } } double psi_lqq (double x, const double k[]) { double ax = fabs(x); if (ax <= k[1]) return(x); else { // k[0:2] == (b, c, s) : double k01 = k[0] + k[1]; if (ax <= k01) return((double) (x>0 ? 1 : (x<0 ? -1 : 0)) * (ax - k[2] * pow(ax - k[1], 2.) / k[0] / 2.)); else { double s5 = k[2] - 1., // s - 1 s6 = -2 * k01 + k[0] * k[2]; // numerator( -a ) ==> s6/s5 = -a if (/* k01 < ax && */ ax < k01 - s6 / s5) return((double) (x>0 ? 1 : -1) * (-s6/2. - pow(s5, 2.) / s6 * (pow(ax - k01, 2.) / 2. + s6 / s5 * (ax - k01)))); else return 0.; } } } double rho_lqq (double x, const double k[]) { double ax = fabs(x), k01 = k[0] + k[1]; if (ax <= k[1]) return((3. * k[2] - 3.) / (k[2] * k[1] * (3. * k[1] + 2. * k[0]) + pow(k01, 2.)) * x * x); else if (/* k[1] < ax && */ ax <= k01) { double s0 = ax - k[1]; return((6. * k[2] - 6.) / (k[2] * k[1] * (3. 
* k[1] + 2. * k[0]) + pow(k01, 2.)) * (x * x / 2. - k[2] / k[0] * pow(s0, 3.) / 6.)); } else { double s5 = k[2] - 1., s6 = -2 * k01 + k[0] * k[2]; if (/* k01 < ax && */ ax < k01 - s6 / s5) { double s7 = ax - k01, k01_2 = pow(k01, 2.); return((6. * s5) / (k[2] * k[1] * (3. * k[1] + 2. * k[0]) + k01_2) * (k01_2 / 2. - k[2] * k[0] * k[0] / 6. - s7/2. * (s6 + s7 * (s5 + s7 * s5 * s5 / 3. / s6)))); } else return 1.; } } double wgt_lqq (double x, const double k[]) { double ax = fabs(x); if (ax <= k[1]) return(1.); else { double k01 = k[0] + k[1]; if (ax <= k01) { double s0 = ax - k[1]; return(1. - k[2] * s0 * s0 / (2 * ax * k[0])); } else { double s5 = k[2] - 1., s6 = -2 * k01 + k[0] * k[2]; if (ax < k01 - s6 / s5) { double s7 = ax - k01; return(-(s6/2. + s5 * s5 / s6 * s7 * (s7/2. + s6 / s5)) / ax); } else return(0.); } } } /*============================================================================*/ /* This function finds the k-th place in the vector a[]; * in the process it permutes the elements of a */ double kthplace(double *a, int n, int k) { k--; // (0-indexing in C) int l=0, lr=n-1; while (l < lr) { double ak=a[k]; int jnc=l, j=lr; while (jnc <= j) { while (a[jnc] < ak) jnc++; while (a[j] > ak) j--; if (jnc <= j) { double w=a[jnc]; a[jnc]=a[j]; a[j]=w; jnc++; j--; } } if (j < k) l=jnc; if (k < jnc) lr=j; } return(a[k]); } /* This is from VR's bundle, MASS package VR/MASS/src/lqs.c : */ /* Sampling k from 0:(n-1) without replacement: x[0:(k-1)] <- k distinct values from {0, .., n-1} */ static void sample_noreplace(int *x, int n, int k, int *ind_space) { #define II ind_space for (int i = 0; i < n; i++) II[i] = i; int nn=n; for (int i = 0; i < k; i++) { // nn == n-i int j = (int)(nn * unif_rand()); // in {0, .., nn-1} x[i] = II[j]; II[j] = II[--nn]; } #undef II } void get_weights_rhop(const double r[], double s, int n, const double rrhoc[], int ipsi, /* --> */ double *w) { for(int i=0; i < n; i++) // work correctly for s == 0 (basically 0/0 = 0 in that case): w[i] = wgt((r[i] == 0.) ? 0. : (r[i] / s), rrhoc, ipsi); } /* RWLS iterations starting from i_estimate, * ---- the workhorse of the "lmrob_MM" algorithm, called only from R_lmrob_MM(), * which itself is called only from R's lmrob..M..fit(). 
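 * In formulas, with fixed scale s each pass computes weights
 *     w_i = psi(r_i / s) / (r_i / s)        (see get_weights_rhop() and wgt()),
 * solves the weighted least-squares problem  min_b sum_i w_i (y_i - x_i'b)^2
 * via FIT_WLS / LAPACK dgels, and stops as soon as
 *     ||b_new - b_old||_1 <= rel_tol * max(rel_tol, ||b_new||_1).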
* In itself, ``just'' an M-estimator : */ Rboolean rwls(const double X[], const double y[], int n, int p, double *estimate, double *i_estimate, double *resid, double* loss, double scale, double epsilon, int *max_it, /* on Input: maximal number of iterations; on Output: number of iterations */ const double rho_c[], const int ipsi, int trace_lev) { double d_beta = 0.; int j, iterations = 0; Rboolean converged = FALSE; double *wx = (double *) R_alloc(n*p, sizeof(double)), *wy = (double *) R_alloc(n, sizeof(double)), *beta0 = (double *) R_alloc(p, sizeof(double)); int info = 1; INIT_WLS(wx, wy, n, p); // -> work[] etc COPY(i_estimate, beta0, p); /* calculate residuals */ COPY(y, resid, n); get_Res_Xb(n,p, X, beta0, resid); /* main loop */ while(!converged && ++iterations < *max_it) { R_CheckUserInterrupt(); /* compute weights for WLS */ get_weights_rhop(resid, scale, n, rho_c, ipsi, weights); if(trace_lev >= 5) { Rprintf(" it %4d: scale=%g, resid = ", iterations, scale); disp_vec(resid, n); Rprintf(" new weights = "); disp_vec(weights, n); } /* solve weighted least squares problem */ COPY(y, wy, n); FIT_WLS(X, weights, wx, wy, n, p, /* -> */ estimate); /* calculate residuals */ if(trace_lev >= 5) { Rprintf(" FIT_WLS() => new estimate= "); disp_vec(estimate, p); } COPY(y, resid, n); get_Res_Xb(n,p, X, estimate, resid); d_beta = norm1_diff(beta0,estimate, p); if(trace_lev >= 3) { /* get the loss for the new estimate */ *loss = sum_rho_sc(resid,scale,n,0,rho_c,ipsi); Rprintf(" it %4d: L(b1) = %#.12g ", iterations, *loss); if(trace_lev >= 4) { Rprintf("\n b1 = ("); for(j=0; j < p; j++) Rprintf("%s%.11g", (j > 0)? ", " : "", estimate[j]); Rprintf(");"); } Rprintf(" ||b0 - b1||_1 = %g\n", d_beta); } /* check for convergence */ converged = d_beta <= epsilon * fmax2(epsilon, norm1(estimate, p)); COPY(estimate, beta0, p); } /* end while(!converged & iter <=...) */ if(trace_lev > 0) { if(trace_lev < 3) *loss = sum_rho_sc(resid,scale,n,0,rho_c,ipsi); Rprintf(" rwls() used %2d it.; last ||b0 - b1||_1 = %#g, L(b1) = %.12g; %sconvergence\n", iterations, d_beta, *loss, (converged ? "" : "NON-")); } *max_it = iterations; return converged; } /* rwls() */ /* sets the entries of a matrix to zero */ void zero_mat(double **a, int n, int m) { int i,j; for(i=0; i < n; i++) for(j=0; j < m; j++) a[i][j] = 0.; } /* * * 2004 / 5 -- Matias Salibian-Barrera & Victor Yohai * Department of Statistics, University of British Columbia * matias@stat.ubc.ca * Department of Mathematics, University of Buenos Aires * vyohai@uolsinectis.com.ar * * * Reference: A fast algorithm for S-regression estimates, * 2005, Salibian-Barrera and Yohai. */ /* This function implements the "large n" strategy */ void fast_s_large_n(double *X, double *y, double s_y, int n, int p, int nRes, int *max_it_scale, double *res, int groups, int n_group, int K, int *max_k, double rel_tol, double inv_tol, double scale_tol, double zero_tol, int *converged, int best_r, double bb, const double rrhoc[], int ipsi, double *bbeta, double *sscale, int trace_lev, int mts, Rboolean ss) { /* *X = the n x p design matrix (incl. 
intercept if appropriate), * in column order as sent by R) * *y = the ( n ) response vector * n = the length of y * p = the number of columns in X * nRes = number of re-sampling candidates to be used in each partition * groups = number of groups in which to split the * random subsample * n_group = size of each of the (groups) groups * to use in the random subsample * K = number of refining steps for each candidate (typically 1 or 2) * *max_k = [on Input:] number of refining steps for each candidate (typically 1 or 2, * used to be hard coded to MAX_ITER_REFINE_S = 50 ) * [on Output:] effectively used iterations * *rel_tol= convergence tolerance for iterative refinement iterations [used to be hard coded to EPS = 1e-7 ] * *converged: will become 0(FALSE) iff at least one of the best_r iterations * did not converge (in max_k steps to rel_tol precision) * best_r = no. of best candidates to be iterated further ("refined") * bb = right-hand side of S-equation (typically 1/2) * *rrhoc = tuning constant(s) for loss function * (this should be associated with bb) * ipsi = indicator for type of psi function to be used * *bbeta = final estimator 'best beta' * *sscale = associated scale estimator (or -1 when problem) */ double sc, best_sc, worst_sc, INF_sc = s_y * INFI; // may overflow to 'Inf' int sg = groups * n_group, // (integer overflow already checked in R code) k = groups * best_r; double *beta_ref = (double *) R_alloc(p, sizeof(double)), *final_best_scales = (double *) R_alloc(best_r, sizeof(double)), *best_scales = (double *) R_alloc( k, sizeof(double)), *xsamp = (double *) R_alloc(n_group*p, sizeof(double)), *ysamp = (double *) R_alloc(n_group, sizeof(double)); int *indices = (int *) R_alloc(sg, sizeof(int)), *ind_space = (int *) R_alloc(n, sizeof(int)); #define CALLOC_MAT(_M_, _n_, _d_) \ _M_ = (double **) Calloc(_n_, double *); \ for(int i=0; i < _n_; i++) \ _M_[i] = (double *) R_alloc(_d_, sizeof(double)) double **final_best_betas; CALLOC_MAT(final_best_betas, best_r, p); double ** best_betas; CALLOC_MAT( best_betas, k , p); /* assume that n > 2000 */ /* set the seed */ GetRNGstate(); /* get a sample of k indices */ sample_noreplace(indices, n, sg, ind_space); /* FIXME: define groups using nonsingular subsampling? */ /* would also need to allow observations to be part */ /* of multiple groups at the same time */ /* FIXME: Also look at lqs_setup(), * ----- and xr[.,.] "fortran-like" matrix can be used from there!*/ /* For each (of 'groups') group : get the best_r best betas : */ #define X(_k_, _j_) X[(_j_)*n + _k_] #define xsamp(_k_, _j_) xsamp[(_j_)*n_group + _k_] int S_code; for(int i=0; i < groups; i++) { /* populate "matrix" for group [i] */ int ij = i*n_group; for(int j = 0; j < n_group; j++, ij++) { // ij == i*n_group + j for (k = 0; k < p; k++) xsamp(j, k) = X(indices[ij], k); ysamp[j] = y[indices[ij]]; } if (trace_lev) Rprintf(" Subsampling to find candidate betas in group %d:\n", i); S_code = fast_s_with_memory(xsamp, ysamp, s_y, res, n_group, p, nRes, max_it_scale, K, max_k, rel_tol, inv_tol, scale_tol, zero_tol, trace_lev, best_r, bb, rrhoc, ipsi, mts, ss, /* --> */ best_betas + i* best_r, best_scales+ i* best_r); switch(S_code) { case 0: /* normal case: --> go on */ break; case 1: /* singular ! 
*/ *sscale = -1.; /* problem */ if(trace_lev) Rprintf(" 'singularity' from fast_s_with_memory() in group %d\n", i+1); goto cleanup_and_return; case 2: /* scale = 0 <==> exact fit in group [i] */ ; *sscale = 0.; /* and the corresponding "best beta": */ COPY(best_betas[i], bbeta, p); if(trace_lev) { Rprintf(" scale=0 from fast_s_with_memory() in group %d\n", i+1); if(trace_lev >= 3) { Rprintf(" and bbeta[]: "); disp_vec(bbeta, p); } } goto cleanup_and_return; default: error(_("fast_s_with_memory() returned invalid code %d"), S_code); } } #undef xsamp /* now iterate (refine) these "best_r * groups" * best betas in the (xsamp,ysamp) sample * with K C-steps and keep only the "best_r" best ones */ /* initialize new work matrices */ double *w_beta= (double *) R_alloc(p, sizeof(double)), *wx = (double *) R_alloc(n*p, sizeof(double)), // need only k here, *wy = (double *) R_alloc(n, sizeof(double)); // but n in the last step xsamp = (double *) R_alloc(sg*p, sizeof(double)); ysamp = (double *) R_alloc(sg, sizeof(double)); #define xsamp(_k_,_j_) xsamp[(_j_)*sg + _k_] for (int ij = 0; ij < sg; ij++) { for (k = 0; k < p; k++) xsamp(ij, k) = X(indices[ij],k); ysamp[ij] = y[indices[ij]]; } int info = 1; INIT_WLS(wx, wy, n, p); Rboolean conv = FALSE; int pos_worst_scale = 0; for(int i=0; i < best_r; i++) final_best_scales[i] = INF_sc; worst_sc = INF_sc; /* set the matrix to zero */ zero_mat(final_best_betas, best_r, p); for(int i=0; i < (best_r * groups); i++) { if(trace_lev >= 3) { Rprintf(" Sample[%3d]: before refine_(*, conv=FALSE):\n", i); if(i > 0) { Rprintf(" beta_cand : "); disp_vec(best_betas[i],p); Rprintf(" with scale %.15g\n", best_scales[i]); } } int ik = refine_fast_s(xsamp, wx, ysamp, s_y, wy, weights, sg, p, res, work, lwork, best_betas[i], w_beta, &conv/* = FALSE*/, K, rel_tol, zero_tol, trace_lev, bb, rrhoc, ipsi, best_scales[i], /* -> */ beta_ref, &sc); if(trace_lev >= 3) { Rprintf(" after refine: beta_ref : "); disp_vec(beta_ref,p); Rprintf(" with scale %.15g\n", sc); if(ik < 0) Rprintf("* exact fit! %d zero residuals", -ik); } if ( sum_rho_sc(res, worst_sc, sg, p, rrhoc, ipsi) < bb ) { int scale_iter = *max_it_scale; /* scale will be better */ sc = find_scale(res, bb, rrhoc, ipsi, sc, sg, p, &scale_iter, scale_tol, trace_lev >= 3); int k2 = pos_worst_scale; final_best_scales[ k2 ] = sc; COPY(beta_ref, final_best_betas[k2], p); pos_worst_scale = find_max(final_best_scales, best_r); worst_sc = final_best_scales[pos_worst_scale]; } } /* now iterate the best "best_r" * betas in the whole sample until convergence (max_k, rel_tol) */ best_sc = INF_sc; *converged = 1; k = 0; if(trace_lev) Rprintf(" Now refine() to convergence for %d very best ones:\n", best_r); for(int i=0; i < best_r; i++) { conv = TRUE; if(trace_lev >= 4) Rprintf(" i=%d:\n", i); int it_k = refine_fast_s(X, wx, y, s_y, wy, weights, n, p, res, work, lwork, final_best_betas[i], w_beta, &conv/* = TRUE */, *max_k, rel_tol, zero_tol, trace_lev, bb, rrhoc, ipsi, final_best_scales[i], /* -> */ beta_ref, &sc); if(trace_lev) { Rprintf(" Final best[%d]: %sconvergence ", i, conv ? "" : "NON "); if(it_k >= 0) Rprintf("(%d iter.)", it_k); else Rprintf("(Exact fit! %d zeroes; scale=0, sc=%g sc || sc == 0) { if(trace_lev && best_sc > sc) Rprintf(": -> improved scale to %.15g", sc); best_sc = sc; COPY(beta_ref, bbeta, p); if(best_sc == 0.) 
{ if(trace_lev) Rprintf(" = 0 ==> finish\n"); break; } } if (trace_lev) Rprintf("\n"); if (!conv && *converged) *converged = 0; if (it_k >= 0 && k < it_k) k = it_k; } *sscale = best_sc; *max_k = k; /* Done. Now clean-up. */ cleanup_and_return: PutRNGstate(); Free(best_betas); Free(final_best_betas); #undef X #undef xsamp } /* fast_s_large_n() */ // (called only in 1 place above) int fast_s_with_memory(double *X, double *y, double s_y, double *res, int n, int p, int nResample, int *max_it_scale, int K, int *max_k, double rel_tol, double inv_tol, double scale_tol, double zero_tol, int trace_lev, int best_r, double bb, const double rrhoc[], int ipsi, int mts, Rboolean ss, // ==> result double **best_betas, double *best_scales) { /* * Called from >>> fast_s_large_n() <<<, the adjustment for large "n", * same as fast_s, but it returns the best_r best betas, * and their associated scales --- unless there was an exact fit (scale = 0) case in one group. * * x : an n x p design matrix (including intercept if appropriate) * y : an n vector * res : an n vector of residuals * nRes = number of re-sampling candidates to be taken * K = number of refining steps for each candidate * best_r = number of (refined) to be retained for full iteration * bb = right-hand side of the S-equation (typically 1/2) * rrhoc[] = tuning constant(s) for loss function * (this should be associated with bb) * ipsi = indicator for type of loss function to be used * RETURN * *best_betas = the best ... coefficient vectors * *best_scales = their associated residual scales * * return(sing) : TRUE if have singularity * */ Rboolean conv = FALSE, sing = FALSE; // sing = TRUE|FALSE the final result double sc, INF_sc = s_y * INFI, /* may overflow to Inf */ worst_sc = INF_sc; int info = 1; // is set by *both* : SETUP_SUBSAMPLE(n, p, X, 1); INIT_WLS(X, y, n, p); double *wx = (double *) R_alloc(n*p, sizeof(double)), *wy = (double *) R_alloc(n, sizeof(double)), *w_beta = (double *) R_alloc(p, sizeof(double)), *beta_cand = (double *) R_alloc(p, sizeof(double)), *beta_ref = (double *) R_alloc(p, sizeof(double)); for(int i=0; i < best_r; i++) best_scales[i] = INF_sc; int pos_worst_scale = 0; /* resampling approximation */ for(int i=0; i < nResample; i++) { R_CheckUserInterrupt(); /* find a candidate */ sing = (Rboolean) // 0 |--> FALSE (= success); {1,2} |-> TRUE subsample(Xe, y, n, p, beta_cand, ind_space, idc, idr, lu, v, pivot, Dr, Dc, rowequ, colequ, 1, mts, ss, inv_tol, 1); if (sing) { for (int k=0; k < best_r; k++) best_scales[i] = -1.; return sing; } /* FIXME: is_ok ?? */ /* improve the re-sampling candidate */ /* conv = FALSE : do K refining steps */ int ik = refine_fast_s(X, wx, y, s_y, wy, weights, n, p, res, work, lwork, beta_cand, w_beta, &conv/* = FALSE*/, K, rel_tol, zero_tol, trace_lev, bb, rrhoc, ipsi, -1., /* -> */ beta_ref, &sc); if(ik < 0) { if(trace_lev) Rprintf(" * exact fit! %d zero residuals; scale = 0\n", -ik); /// YES: FIXME ---> return beta_cand and be done - as in fast_s(..) 
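/* Aside, an illustration and not part of the original lmrob.c: the code just below keeps
 * the 'best_r' candidates with the smallest scales seen so far.  The cheap pre-test
 * sum_rho_sc(res, worst_sc, ...) < bb works because the average rho(r_i/s) is nonincreasing
 * in s, so if it is already below bb at s = worst_sc, the M-scale of these residuals can be
 * no larger than worst_sc and the candidate is worth refining.  A minimal stand-alone sketch
 * of the "replace the current worst" bookkeeping (names such as keep_if_better() are
 * illustrative only):
 */
/* index of the largest element of a[0..n-1] (same idea as find_max() further below) */
static int idx_of_max(const double a[], int n) {
    int k = 0;
    for (int i = 1; i < n; i++) if (a[i] > a[k]) k = i;
    return k;
}
/* overwrite the currently worst stored scale when sc_new improves on it;
 * best_sc[] should be initialised to a very large value (cf. INF_sc above) */
static int keep_if_better(double best_sc[], int n_best, double sc_new) {
    int worst = idx_of_max(best_sc, n_best);
    if (sc_new < best_sc[worst]) { best_sc[worst] = sc_new; return 1; }
    return 0;
}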
} if ( sum_rho_sc(res, worst_sc, n, p, rrhoc, ipsi) < bb ) { int scale_iter = *max_it_scale; /* scale will be better */ sc = find_scale(res, bb, rrhoc, ipsi, sc, n, p, &scale_iter, scale_tol, trace_lev >= 3); int k = pos_worst_scale; best_scales[ k ] = sc; for(int j=0; j < p; j++) best_betas[k][j] = beta_ref[j]; pos_worst_scale = find_max(best_scales, best_r); worst_sc = best_scales[pos_worst_scale]; if (trace_lev >= 2) { Rprintf(" Sample[%3d]: found new candidate with scale %.7g in %d iter (worst sc=%.5g)\n", i, sc, scale_iter, worst_sc); } } } /* for(i ) */ return sing; } /* fast_s_with_memory() */ void fast_s(double *X, double *y, double s_y, int n, int p, int nResample, int *max_it_scale, double *res, int K, int *max_k, double rel_tol, double inv_tol, double scale_tol, double zero_tol, int *converged, int best_r, double bb, const double rrhoc[], int ipsi, double *bbeta, double *sscale, int trace_lev, int mts, Rboolean ss) { /* *X = the n x p design matrix (incl. intercept if appropriate), * in column order as sent by R) * *y = the ( n ) response vector * nn =: n = the length of y * pp =: p = the number of columns in X * nRes = number of re-sampling candidates to be taken * K = number of refining steps for each candidate * best_r = number of (refined) to be retained for full iteration * *converged: will become FALSE iff at least one of the best_r iterations * did not converge (in max_k steps to rel_tol precision) * bb = right-hand side of the S-equation (typically 1/2) * *rrhoc = tuning constant(s) for loss function * (this should be associated with bb) * iipsi = indicator for type of loss function to be used * *bbeta = "best beta" = final estimator * *sscale = associated scale estimator (or -1 when problem) */ double sc, best_sc, aux; double INF_sc = s_y * INFI; // may overflow to 'Inf' /* Rprintf("fast_s %d\n", ipsi); */ int info; SETUP_SUBSAMPLE(n, p, X, 0); // More arrays, allocated: double *wx = (double *) R_alloc(n*p, sizeof(double)), *wy = (double *) R_alloc(n, sizeof(double)), *w_beta = (double *) R_alloc(p, sizeof(double)), *beta_cand = (double *) R_alloc(p, sizeof(double)), *beta_ref = (double *) R_alloc(p, sizeof(double)), *best_scales = (double *) R_alloc(best_r, sizeof(double)), // matrix: **best_betas = (double **) Calloc(best_r, double *); for(int i=0; i < best_r; i++) { best_betas[i] = (double*) R_alloc(p, sizeof(double)); best_scales[i] = INF_sc; } INIT_WLS(wx, wy, n, p); // (re-setting 'info') /* disp_mat(x, n, p); */ int pos_worst_scale = 0; Rboolean conv = FALSE; double worst_sc = INF_sc; /* srand((long)*seed_rand); */ GetRNGstate(); /* resampling approximation */ if (trace_lev) Rprintf(" Subsampling %d times to find candidate betas:\n", nResample); for(int i=0; i < nResample; i++) { R_CheckUserInterrupt(); /* find a candidate */ Rboolean sing = (Rboolean) // 0 |--> FALSE (= success); {1,2} |-> TRUE subsample(Xe, y, n, p, beta_cand, ind_space, idc, idr, lu, v, pivot, Dr, Dc, rowequ, colequ, 1, mts, ss, inv_tol, 1); if (sing) { *sscale = -1.; if (trace_lev >= 1) Rprintf(" Sample[%3d]: singular subsample() - giving up!\n", i); goto cleanup_and_return; } Rboolean trace_sample = trace_lev >= 3 && (p <= 9 || trace_lev >= 5); if (trace_sample) { Rprintf(" Sample[%3d]: idc = ", i); disp_veci(idc, p); if(p <= 3 || trace_lev >= 5) { Rprintf(" b^[] = "); disp_vec(beta_cand, p); } } /* improve the re-sampling candidate */ /* conv = FALSE : do K refining steps */ int ik = refine_fast_s(X, wx, y, s_y, wy, weights, n, p, res, work, lwork, beta_cand, w_beta, &conv/* = FALSE*/, K, 
rel_tol, zero_tol, trace_lev, bb, rrhoc, ipsi, -1., /* -> */ beta_ref, &sc); if(trace_lev >= 3) { double del = norm_diff(beta_cand, beta_ref, p); if(!trace_sample) Rprintf(" Sample[%3d]:", i); Rprintf(" after refine_(*, conv=F):\n" " beta_ref : "); disp_vec(beta_ref,p); Rprintf(" with ||beta_ref - beta_cand|| = %.12g, --> sc = %.15g\n", del, sc); } if(fabs(sc) == 0.) { /* exact zero set by refine_*() */ if(trace_lev >= 1) Rprintf(" |s|=0: Have %d (too many) exact zeroes -> leaving refinement!\n", -ik); *sscale = sc; /* *converged = 1; -- important when used as init in lmrob() -- * --------------- but for that need at least valid residuals, fitted, * and possibly a *scale* sc > 0 ??? -------------- FIXME */ COPY(beta_cand, bbeta, p); goto cleanup_and_return; } if ( sum_rho_sc(res, worst_sc, n, p, rrhoc, ipsi) < bb ) { int scale_iter = *max_it_scale; /* scale will be better */ sc = find_scale(res, bb, rrhoc, ipsi, sc, n, p, &scale_iter, scale_tol, trace_lev >= 3); int k = pos_worst_scale; best_scales[ k ] = sc; COPY(beta_ref, best_betas[k], p); pos_worst_scale = find_max(best_scales, best_r); worst_sc = best_scales[pos_worst_scale]; if (trace_lev >= 2) { if(trace_lev < 3) /* not yet "Sample[..]" */ Rprintf(" Sample[%3d]:", i); Rprintf(" found new candidate with scale %.7g in %d iter (worst sc=%.5g)\n", sc, scale_iter, worst_sc); } } } /* for(i in 1..nResample) */ /* now look for the very best */ if(trace_lev) Rprintf(" Now refine() to convergence for %d very best ones:\n", best_r); best_sc = INF_sc; *converged = 1; int k = 0; for(int i=0; i < best_r; i++) { conv = TRUE; if(trace_lev >= 4) Rprintf(" i=%d:\n", i); int it_k = refine_fast_s(X, wx, y, s_y, wy, weights, n, p, res, work, lwork, best_betas[i], w_beta, &conv /* = TRUE */, *max_k, rel_tol, zero_tol, trace_lev, bb, rrhoc, ipsi, best_scales[i], /* -> */ beta_ref, &aux); if(trace_lev) { Rprintf(" Best[%d]: %sconvergence ", i, conv ? 
"" : "NON "); if(it_k >= 0) Rprintf("(%d iter.)", it_k); else Rprintf("(%d zeroes; scale=0)", -it_k); } if(aux < best_sc) { best_sc = aux; COPY(beta_ref, bbeta, p); if(trace_lev) { Rprintf(": -> improved scale to %.15g", best_sc); if(trace_lev >= 2) { Rprintf("; bbeta= "), disp_vec(bbeta,p); } } } if(trace_lev) Rprintf("\n"); if (!conv && *converged) *converged = 0; // will stay at 0, even when next best_betas[i] is good if (k < it_k) k = it_k; } *sscale = best_sc; *max_k = k; cleanup_and_return: PutRNGstate(); Free(best_betas); return; } /* fast_s() */ int refine_fast_s(const double X[], double *wx, const double y[], double s_y, double *wy, double *weights, int n, int p, double *res, double *work, int lwork, const double beta_cand[], double *beta_j, Rboolean *conv, int kk, double rel_tol, double zero_tol, int trace_lev, // here: only print when trace_lev >= 3 double b, const double rrhoc[], int ipsi, double initial_scale, double *beta_ref, double *scale) { /* * X = matrix (n x p) of explanatory variables * y = vector ( n ) of responses * weights = robustness weights wt[] * y[] (length n) Output * res = residuals y[] - x[,] * beta (length n) Output * conv: FALSE means do kk refining steps (and conv stays FALSE) * TRUE means refine until convergence(rel_tol, max{k} = kk) * and in this case, 'conv' *returns* TRUE if refinements converged * rel_tol = * zero_tol = small positive, determining "zero residuals" - was hardwired to EPS_SCALE := 1e-10 * beta_cand= candidate beta[] (of length p) Input * beta_j = internal "working" beta[] (of length p) * is = initial scale input * beta_ref = resulting beta[] (of length p) Output * scale = final scale Output * for FIT_WLS, DGELS: * wx = matrix (n x p) * wy = vector of length n {also used for 'beta' (<==> p <= n )} * work = vector of length lwork * lwork = length of vector work * return( *or* -z, where z = zeroes := #{i; |R~_i| <= e_z}; e_z := zero_tol */ Rboolean trace_beta, converged = FALSE;/* Wall */ if (trace_lev >= 3) { Rprintf(" refine_fast_s(s0=%g, convChk=%s): ", initial_scale, *conv ? "TRUE" : "FALSE"); trace_beta = (p <= 6 || trace_lev >= 5); if(trace_beta) { Rprintf("beta_cand= "), disp_vec(beta_cand,p); } } /* calculate residuals */ COPY(y, res, n); get_Res_Xb(n,p, X, beta_cand, /*--> */ res); if( initial_scale < 0. ) initial_scale = MAD(res, n, 0., wy, weights);// wy and weights used as work arrays double s0 = s_y * zero_tol; int zeroes = 0; for(int j=0; j < n; j++) { // ensuring "eps_zero" to be equivariant in y[] : if(fabs(res[j]) <= s0) zeroes++; } if (trace_lev >= 4) Rprintf(" |{i; |R_i| <= %.4g ~= 0}| = %d zeroes (zero_tol=%.3g, s_y=%g);\n", s0, zeroes, zero_tol, s_y); /* if "perfect fit", return it with a 0 assoc. scale */ if(initial_scale <= 0. || zeroes > ((double)n)/2.) { /* <<- FIXME: should depend on 'b' ! */ // if(zeroes > (((double)n + (double)p)/2.)) -- corresponding to MAD_p but have MAD=MAD_0 above COPY(beta_cand, beta_ref, p); if (trace_lev >= 3) Rprintf(" too many zeroes -> scale=0 & quit refinement\n"); *scale = 0.; return -zeroes; // (for diagnosis + emphasize special scale=0) } s0 = initial_scale; // > 0 int info, iS; if(trace_lev >= 4) Rprintf(" %s %d refinement iterations, starting with s0=%g\n", *conv ? 
"maximally" : "doing", kk, s0); COPY(beta_cand, beta_j, p); for(iS=0; iS < kk; iS++) { /* one step for the scale */ s0 = s0 * sqrt( sum_rho_sc(res, s0, n, p, rrhoc, ipsi) / b ); /* compute weights for WLS */ get_weights_rhop(res, s0, n, rrhoc, ipsi, weights); /* solve weighted least squares problem */ COPY(y, wy, n); // wy = y[1:n] on in input, and beta[1:p] on output FIT_WLS(X, weights, wx, wy, n, p, /* -> */ beta_ref); if(*conv) { /* check for convergence */ double del = norm_diff(beta_j, beta_ref, p); double nrmB= norm(beta_j, p); converged = (del <= rel_tol * fmax2(rel_tol, nrmB)); if(trace_lev >= 4) Rprintf(" it %4d, ||b[i]||= %#.12g, ||b[i] - b[i-1]||= %#.15g --> conv=%s\n", iS, nrmB, del, (converged ? "TRUE" : "FALSE")); if(converged) break; } /* calculate residuals */ COPY(y, res, n); get_Res_Xb(n,p, X, beta_ref, /*--> */ res); COPY(beta_ref, beta_j, p); } /* for(iS = 0; iS < kk ) */ if(*conv) { if(converged) { if(trace_lev >= 3) Rprintf("refine_() converged after %d iterations\n", iS); } else { // !converged *conv = FALSE; warning(_("S refinements did not converge (to refine.tol=%g) in %d (= k.max) steps"), rel_tol, iS); } } *scale = s0; return iS; /* number of refinement steps */ } /* refine_fast_s() */ /* Subsampling part for M-S algorithm, i.e. called only from R_lmrob_M_S() * Recreates RLFRSTML function found in robust/src/lmrobml.f of package 'robust' */ void m_s_subsample(double *X1, double *y, int n, int p1, int p2, int nResample, int max_it_scale, double rel_tol, double inv_tol, double scale_tol, double zero_tol, double bb, const double rrhoc[], int ipsi, double *sscale, int trace_lev, double *b1, double *b2, double *t1, double *t2, double *y_tilde, double *res, double *x1, double *x2, int *NIT, int *K, int *KODE, double *SIGMA, double BET0, double *SC1, double *SC2, double *SC3, double *SC4, int mts, Rboolean ss) { int i, p = p1 + p2, info; double sc = /* INF_sc := */ *SIGMA * INFI; if(sc > DBL_MAX) sc = DBL_MAX; // cannot use 'Inf' here *sscale = sc; if (trace_lev >= 2) Rprintf(" Starting M-S subsampling procedure(p1=%d, p2=%d; ini.sc=%g) .. ", p1,p2, sc); SETUP_SUBSAMPLE(n, p2, x2, 0); /* set the seed */ GetRNGstate(); if (trace_lev >= 2) Rprintf(" [setup Ok]\n"); for(i=0; i < nResample; i++) { R_CheckUserInterrupt(); /* STEP 1: Draw a subsample of size p2 from (X2, y) */ Rboolean sing = (Rboolean) // 0 |--> FALSE (= success); {1,2} |-> TRUE subsample(Xe, y, n, p2, t2, ind_space, idc, idr, lu, v, pivot, Dr, Dc, rowequ, colequ, /* sample= */ TRUE, mts, ss, inv_tol, /*solve = */ TRUE); if (sing) { *sscale = -1.; goto cleanup_and_return; } /* calculate partial residuals */ COPY(y, y_tilde, n); get_Res_Xb(n,p2, x2, t2, y_tilde); /* STEP 3: Obtain L1-estimate of b1 */ COPY(X1, x1, n*p1); F77_CALL(rllarsbi)(x1, y_tilde, &n, &p1, &n, &n, &rel_tol, NIT, K, KODE, SIGMA, t1, res, SC1, SC2, SC3, SC4, &BET0); if (*KODE > 1) { // KODE in {0, 1} is ok REprintf("m_s_subsample(): Problem in RLLARSBI (L1-regr). KODE=%d. 
Exiting.\n", *KODE); *sscale = -1.; goto cleanup_and_return; } /* STEP 4: Check if candidate looks promising */ if (sum_rho_sc(res, *sscale, n, p, rrhoc, ipsi) < bb) { int scale_iter = max_it_scale; /* scale will be better */ /* STEP 5: Solve for sc */ sc = find_scale(res, bb, rrhoc, ipsi, sc, n, p, &scale_iter, scale_tol, trace_lev >= 4); if(trace_lev >= 2) Rprintf(" Sample[%3d]: new candidate with sc =%#16.9g in %d iter\n", i, sc, scale_iter); /* STEP 6: Update best fit */ *sscale = sc; COPY(t1, b1, p1); COPY(t2, b2, p2); if (sc < zero_tol) { REprintf("\nScale too small\n" "Aborting m_s_subsample()\n\n"); *sscale = -1.; goto cleanup_and_return; } } } /* for(i ) */ /* STEP 7: Clean up and return */ if (trace_lev >= 1) { Rprintf(" Finished M-S subsampling with scale = %.5f\n",*sscale); #define maybe_SHOW_b1_b2 \ if (trace_lev >= 3) { \ Rprintf(" b1: "); disp_vec(b1,p1);\ Rprintf(" b2: "); disp_vec(b2,p2);\ } maybe_SHOW_b1_b2; } cleanup_and_return: PutRNGstate(); } /* m_s_subsample() */ /* Descent step for M-S algorithm * Return value: convergence; note that convergence is *not* guaranteed */ Rboolean m_s_descent(double *X1, double *X2, double *y, int n, int p1, int p2, int K_m_s, int max_k, int max_it_scale, double rel_tol, double scale_tol, double bb, const double rrhoc[], int ipsi, double *sscale, int trace_lev, double *b1, double *b2, double *t1, double *t2, double *y_tilde, double *res, double *res2, double *x1, double *x2, int *NIT, int *K, int *KODE, double *SIGMA, double BET0, double *SC1, double *SC2, double *SC3, double *SC4) { int nnoimpr = 0, nref = 0; int p = p1 + p2; Rboolean converged = FALSE; double sc = *sscale; COPY(b1, t1, p1); COPY(b2, t2, p2); COPY(res, res2, n); if (trace_lev >= 2) Rprintf(" Starting descent procedure...\n"); int info = 1; INIT_WLS(x2, y, n, p2); if (trace_lev >= 3) { Rprintf(" Scale: %.5f\n", *sscale); if (trace_lev >= 5) { Rprintf(" res2: "); disp_vec(res2,n); } } /* Do descent steps until there is no improvement for */ /* K_m_s steps or we are converged */ /* (convergence is not guaranteed) */ while ( (nref++ < max_k) & (!converged) & (nnoimpr < K_m_s) ) { R_CheckUserInterrupt(); /* STEP 1: update b2 (save it to t2) */ /* y_tilde = y - x1 %*% t1 */ COPY(y, y_tilde, n); COPY(X1, x1, n*p1); // FIXME(MM)? don't need x1, use X1 (which is unchanged!) get_Res_Xb(n,p1, x1, t1, y_tilde); /* compute weights for WLS */ get_weights_rhop(res2, sc, n, rrhoc, ipsi, weights); /* solve weighted least squares problem */ FIT_WLS(X2, weights, x2, y_tilde, n, p2, /* -> */ t2); /* get (intermediate) residuals */ COPY(y, res2, n); get_Res_Xb(n,p2, X2, t2, res2); /* STEP 2: Obtain L1-estimate of b1 */ COPY(res2, y_tilde, n); F77_CALL(rllarsbi)(x1, y_tilde, &n, &p1, &n, &n, &rel_tol, NIT, K, KODE, SIGMA, t1, res2, SC1, SC2, SC3, SC4, &BET0); if (*KODE > 1) { error(_("m_s_descent(): Problem in RLLARSBI (RILARS). KODE=%d. Exiting."), *KODE); } /* STEP 3: Compute the scale estimate */ int scale_iter = max_it_scale; sc = find_scale(res2, bb, rrhoc, ipsi, sc, n, p, &scale_iter, scale_tol, trace_lev >= 4); // <- here only if higher trace_lev /* STEP 4: Check for convergence */ /* FIXME: check convergence using scale ? 
*/ double del = sqrt(norm_diff2(b1, t1, p1) + norm_diff2(b2, t2, p2)); double nrmB = sqrt(norm2(t1, p1) + norm2(t2, p2)); converged = (del < rel_tol * fmax2(rel_tol, nrmB)); if (trace_lev >= 3) { if(converged) Rprintf(" -->> converged\n"); if (trace_lev >= 4) { Rprintf(" Ref.step %3d: #{no-improvements}=%3d; (del,dB)=(%12.7g,%12.7g)\n", nref, nnoimpr, del, rel_tol * fmax2(rel_tol, nrmB)); if (trace_lev >= 5) { Rprintf(" weights: "); disp_vec(weights,n); Rprintf(" t2: "); disp_vec(t2,p2); Rprintf(" t1: "); disp_vec(t1,p1); Rprintf(" res2: "); disp_vec(res2,n); } } } /* STEP 5: Update best fit */ if (sc < *sscale) { COPY(t1, b1, p1); COPY(t2, b2, p2); COPY(res2, res, n); *sscale = sc; if (trace_lev >= 2) Rprintf(" Refinement step %3d: better fit, scale: %#10.5g\n", nref, sc); nnoimpr = 0; } else { if (trace_lev >= 3) Rprintf(" Refinement step %3d: no improvement, scale: %#10.5g\n", nref, sc); nnoimpr++; } } // while(.) if ( (!converged) & (nref == max_k) ) warning(_(" M-S estimate: maximum number of refinement steps reached.")); if (trace_lev >= 1) { Rprintf(" Descent procedure: %sconverged (best scale: %.5g, last step: %.5g)\n", converged ? "" : "not ", *sscale, sc); if (nnoimpr == K_m_s) Rprintf(" The procedure stopped after %d steps because there was no improvement in the last %d steps.\n To increase this number, use the control parameter 'k.m_s'.\n", nref, nnoimpr); else if (trace_lev >= 2) Rprintf(" No improvements in %d out of %d steps.\n", nnoimpr, nref); maybe_SHOW_b1_b2; } return converged; } /* m_s_descent() */ /* draw a subsample of observations and calculate a candidate * * starting value for S estimates * * uses a custom LU decomposition, which acts on the transposed design * * matrix. In case of a singular subsample, the subsample is modified * * until it is non-singular (for ss == TRUE (== 1)). * * * * Parts of the algorithm are based on the Gaxpy version of the LU * * decomposition with partial pivoting by * * Golub G. H., Van Loan C. F. (1996) - MATRIX Computations */ int subsample(const double x[], const double y[], int n, int m, double *beta, int *ind_space, int *idc, int *idr, double *lu, double *v, int *pivot, double *Dr, double *Dc, int rowequ, int colequ, Rboolean sample, int mts, Rboolean ss, double tol_inv, Rboolean solve) { /* x: design matrix (n x m) y: response vector n: length of y, nrow of x m: ncol of x ( == p ) beta: [out] candidate parameters (length m) ind_space: (required in sample_noreplace, length n) holds the index permutation idc: (required in sample_noreplace, !! length n !!) [out] index of observations used in subsample idr: work array of length m lu: [out] LU decomposition of subsample of xt (m x m) Note: U has is not rescaled by 1 / *cf, as it should, this is done R_subsample(). v: work array of length m pivot: [out] pivoting table of LU decomposition (length m-1) Dr: row equilibration (as calculated in SETUP_EQUILIBRATION) Dc: column equilibration rowequ: whether rows were equilibrated coleq: whether cols were equilibrated sample: whether to sample or not mts: the number of singular samples allowed before giving up (Max Try Samples) ss: type of subsampling to be used: 0 (FALSE): simple subsampling 1 (TRUE): nonsingular subsampling tol_inv: tolerance for declaring a matrix singular solve: solve the least squares problem on the subsample? (0: no, 1: yes) return value ('condition'): 0: success 1: singular (matrix xt does not contain a m dim. 
full rank submatrix) 2: too many singular resamples (simple subsampling case) */ int j, k, l, mu = 0, i = 0, attempt = 0; Rboolean sing; #define xt(_k_, _j_) x[idr[_k_]*n+idc[_j_]] #define U(_k_, _j_) lu[_j_*m+_k_] #define u(_k_, _j_) lu + (_j_*m+_k_) #define L(_k_, _j_) lu[_j_*m+_k_] #define l(_k_, _j_) lu + (_j_*m+_k_) Start: /* STEP 1: Calculate permutation of 1:n */ if (sample) { sample_noreplace(ind_space, n, n, idc); } else // !sample --> "trivial permutation": for(k=0;k 51: %x\n", fabs(v[46]) > fabs(v[50])); */ /* Rprintf("47 < 51: %x\n", fabs(v[46]) < fabs(v[50])); */ /* } */ /* continue only if pivot is large enough */ if (tmpd >= tol_inv) { pivot[j] = mu; // swap j <--> mu : tmpd = v[j]; v[j] = v[mu]; v[mu] = tmpd; int itmp = idr[j]; idr[j] = idr[mu]; idr[mu] = itmp; for(k=j+1;k 0) { for(k=0;k= mts) { warning(_("Too many singular resamples. Aborting subsample().\n See parameter 'subsampling; in help of lmrob.config().")); return(2); } goto Start; } /* drop observation and try next one */ i++; } else { sing = FALSE; U(j, j) = v[j]; } } while(sing); } /* end for loop */ /* Rprintf("lu:"); disp_vec (lu, m*m); */ /* Rprintf("pivot:"); disp_veci(pivot, m-1); */ /* Rprintf("idc:"); disp_veci(idc, m); */ /* STEP 3: Solve for candidate parameters if requested */ if (solve == 0) { for(k=0;k beta[pivot[k]] */ for(k=m-2; k>=0; k--) { double tmp = beta[k]; beta[k] = beta[pivot[k]]; beta[pivot[k]] = tmp; } } return(0); #undef Xt #undef U #undef u #undef L #undef l } double find_scale(const double r[], double b, const double rrhoc[], int ipsi, double initial_scale, int n, int p, int* iter, // input: max_iter, output: #{iterations used} double scale_tol, Rboolean trace) { if(initial_scale <= 0.) { warning(_("find_scale(*, initial_scale = %g <= 0) -> final scale = 0"), initial_scale); return 0.; } // else double scale = initial_scale; if(trace) Rprintf("find_scale(*, ini.scale =%#13.11g, tol=%g):\n it | new scale\n", scale, scale_tol); for(int it = 0; it < iter[0]; it++) { scale *= sqrt( sum_rho_sc(r, scale, n, p, rrhoc, ipsi) / b ) ; if(trace) Rprintf(" %2d | %#13.10g\n", it, scale); if(fabs(scale - initial_scale) <= scale_tol*initial_scale) { // converged: *iter = it; return(scale); } initial_scale = scale; } warning(_("find_scale() did not converge in '%s' (= %d) iterations with tol=%g, last rel.diff=%g"), "maxit.scale", /* <- name from lmrob.control() */ *iter, scale_tol, (scale - initial_scale) / initial_scale); return(scale); } // As R's which.max(a), return()ing zero-based k in {0,1,...,n-1} int find_max(const double a[], int n) { int k = 0; if(n > 1) { double tt = a[0]; for(int i=1; i < n; i++) if(tt < a[i]) { tt = a[i]; k = i; } } return k; } double sum_rho_sc(const double r[], double scale, int n, int p, const double c[], int ipsi) { double s = 0; for(int i=0; i < n; i++) s += rho(r[i]/scale, c, ipsi); return(s / ((double) n - p)); } /* ||x||_2^2 */ double norm2(const double x[], int n) { double s = 0.; s = F77_CALL(dnrm2)(&n, x, &one); return( s*s ); } /* ||x||_2 */ double norm(const double x[], int n) { return(F77_CALL(dnrm2)(&n, x, &one)); } /* ||x||_1 */ double norm1(const double x[], int n) { return(F77_CALL(dasum)(&n, x, &one)); } /* ||x-y||_2^2 */ double norm_diff2(const double x[], const double y[], int n) { double s = 0; for(int i=0; i < n; i++) s += (x[i]-y[i])*(x[i]-y[i]); return( s ); } /* ||x-y||_2 */ double norm_diff(const double x[], const double y[], int n) { double s = 0; for(int i=0; i < n; i++) s += (x[i]-y[i])*(x[i]-y[i]); return( sqrt(s) ); } /* ||x-y||_1 */ double 
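/* Aside, an illustration and not part of the original source: find_scale() above iterates
 * the M-scale fixed point  s_{k+1} = s_k * sqrt( [ sum_i rho(r_i/s_k) / (n - p) ] / b )
 * until the relative change drops below scale_tol.  A minimal stand-alone version with a
 * toy bounded rho (rho(u) = min(u^2, 1); the package uses its Mpsi/Mchi families instead):
 */
#include <math.h>

static double rho_toy(double u) { u *= u; return (u < 1.) ? u : 1.; }  /* rho(Inf) = 1 */

/* crude M-scale iteration; assumes s0 > 0 (find_scale() above guards against s0 <= 0);
 * uses the plain mean here, whereas find_scale() divides by (n - p) */
static double m_scale_toy(const double r[], int n, double b, double s0,
                          int max_it, double tol)
{
    double s = s0;
    for (int it = 0; it < max_it; it++) {
        double sum = 0.;
        for (int i = 0; i < n; i++) sum += rho_toy(r[i] / s);
        double s_new = s * sqrt((sum / n) / b);
        if (fabs(s_new - s) <= tol * s) return s_new;
        s = s_new;
    }
    return s; /* not converged within max_it */
}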
norm1_diff(const double x[], const double y[], int n) { double s = 0; for(int i=0; i < n; i++) s += fabs(x[i]-y[i]); return(s); } double median(const double x[], int n, double *aux) { for(int i=0; i < n; i++) aux[i]=x[i]; return (n % 2) ? /* odd */ kthplace(aux,n, n/2+1) : /* even */ (kthplace(aux,n,n/2) + kthplace(aux,n,n/2+1)) / 2. ; } double median_abs(const double x[], int n, double *aux) { for(int i=0; i < n; i++) aux[i] = fabs(x[i]); return (n % 2) ? /* odd */ kthplace(aux,n, n/2+1) : /* even */ (kthplace(aux,n, n/2) + kthplace(aux,n, n/2+1)) / 2. ; } double MAD(const double a[], int n, double center, double *b, // -> the centered a[] - center double *tmp) { /* if center == 0 then do not center */ /* if( fabs(center) > 0.) { */ for(int i=0; i < n; i++) b[i] = a[i] - center; /* } */ return( median_abs(b,n,tmp) * 1.4826 ); } void disp_vec(const double a[], int n) { for(int i=0; i < n; i++) Rprintf("%g ", a[i]); Rprintf("\n"); } void disp_veci(const int a[], int n) { for(int i=0; i < n; i++) Rprintf("%d ", a[i]); Rprintf("\n"); } void disp_mat(const double **a, int n, int m) { for(int i=0; i < n; i++) { Rprintf("\n"); for(int j=0; j < m; j++) Rprintf("%10.8f ",a[i][j]); } Rprintf("\n"); } void R_find_D_scale(double *rr, double *kkappa, double *ttau, int *llength, double *sscale, double *cc, int *iipsi, int *ttype, double *rel_tol, int *max_k, int *converged) { /* compute D_scale using iterative algorithm type: 1: d1 2: d2 3: dt1 4: dt2 */ *converged = 0; for (int k=0; k < *max_k; k++) { double scale = *sscale, tsum1 = 0, tsum2 = 0; // calculate weights for(int i=0; i < *llength; i++) { double a, w = wgt(rr[i] / ttau[i] / scale, cc, *iipsi); switch(*ttype) { case 1: // d1 a = rr[i]/ttau[i]; tsum1 += a*a*w; tsum2 += w; break; case 2: // d2 a = rr[i]/ttau[i]*w; tsum1 += a*a; tsum2 += w*w; break; default: case 3: // dt1 tsum1 += rr[i]*rr[i]*w; tsum2 += w*ttau[i]*ttau[i]; break; case 4: // dt2 a = rr[i]*w; tsum1 += a*a; a = ttau[i]*w; tsum2 += a*a; break; }; } *sscale = sqrt(tsum1 / tsum2 / *kkappa); // Rprintf("\n type = %d, scale = %10.8f \n", *ttype, *sscale); if (fabs(scale - *sscale) < *rel_tol * fmax2(*rel_tol, scale)) { *converged = 1; break; } } } /* specialized function calc_fitted */ /* calculates fitted values from simulation output array. 
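Aside on the MAD() helper defined above (an illustration, not part of this file): the factor
1.4826 is approximately 1/qnorm(0.75), which makes the median absolute deviation a consistent
estimate of the standard deviation at the Normal distribution.  A stand-alone sketch using
qsort, where the code above uses kthplace() selection instead, could look like:

#include <stdlib.h>
#include <math.h>

static int cmp_dbl(const void *a, const void *b) {
    double d = *(const double *)a - *(const double *)b;
    return (d > 0) - (d < 0);
}

// MAD of x[] about 'center', scaled for consistency at the Normal; 'work' holds n doubles
static double mad_normal(const double x[], int n, double center, double *work) {
    for (int i = 0; i < n; i++) work[i] = fabs(x[i] - center);
    qsort(work, (size_t) n, sizeof(double), cmp_dbl);
    double med = (n % 2) ? work[n / 2] : (work[n / 2 - 1] + work[n / 2]) / 2.;
    return 1.4826 * med;   // 1.4826 ~= 1 / qnorm(0.75)
}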
*/ /* this is used to process simulation output in the */ /* lmrob_simulation vignette */ void R_calc_fitted(double *XX, double *bbeta, double *RR, int *nn, int *pp, int *nnrep, int *nnproc, int *nnerr) { unsigned long A, B, C, D, E; A = (unsigned long)*nnerr; B = (unsigned long)*nnproc; C = (unsigned long)*nnrep; D = (unsigned long)*nn; E = (unsigned long)*pp; // calculate fitted values over errstr, procstr and replicates for(unsigned long a = 0; a < A; a++) { // errstr for(unsigned long b = 0; b < B; b++) { // procstr for(unsigned long c = 0; c < C; c++) { // replicates // check for NAs if (!ISNA(bbeta[c + /* 0*C + */ b*C*E + a*B*E*C])) { for(unsigned long d = 0; d < D; d++) { // observations RR[d + c*D + b*C*D + a*B*C*D] = 0; // initialize result for(unsigned long e = 0; e < E; e++) { // predictors RR[d + c*D + b*C*D + a*B*C*D] += bbeta[c + e*C + b*C*E + a*B*E*C] * XX[d + e*D + c*E*D + a*C*E*D]; } } } } } } } robustbase/src/templates-types.h0000644000176200001440000000212612441664610016530 0ustar liggesusers/* * Sets type-specific macros */ #define CONCAT(x,y) x ##_## y #define CONCAT3(x,y,z) x ##_## y ##_## z #define CONCAT_MACROS(x,y) CONCAT(x,y) #define CONCAT_3MACROS(x,y,z) CONCAT3(x,y,z) /* Data type macros for argument 'x' */ #if X_TYPE == 'i' # ifndef METHOD_NAME # define METHOD_NAME CONCAT_MACROS(METHOD, Integer) # define C_METHOD_NAME CONCAT_3MACROS(C, METHOD, Integer) # endif #define X_C_TYPE int #define X_IN_C INTEGER #define X_ISNAN(x) (x == NA_INTEGER) #elif X_TYPE == 'r' # ifndef METHOD_NAME # define METHOD_NAME CONCAT_MACROS(METHOD, Real) # define C_METHOD_NAME CONCAT_3MACROS(C, METHOD, Real) # endif #define X_C_TYPE double #define X_IN_C REAL #define X_ISNAN(x) ISNAN(x) #endif /* Data type macros for result ('ans') */ #ifndef ANS_TYPE /* Default to same as 'x' */ #define ANS_TYPE X_TYPE #endif #if ANS_TYPE == 'i' #define ANS_SXP INTSXP #define ANS_NA NA_INTEGER #define ANS_C_TYPE int #define ANS_IN_C INTEGER #elif ANS_TYPE == 'r' #define ANS_SXP REALSXP #define ANS_NA NA_REAL #define ANS_C_TYPE double #define ANS_IN_C REAL #endif robustbase/src/rowMedians_TYPE-template.h0000644000176200001440000001437714100267120020152 0ustar liggesusers/* included 2 x from ./rowMedians.c ~~~~~~~~~~~~ *********************************************************************** TEMPLATE: SEXP rowMedians_(...) GENERATES: SEXP rowMedians_Integer(SEXP x, int nrow, int ncol, int narm, int hasna, int byrow) SEXP rowMedians_Real(SEXP x, int nrow, int ncol, int narm, int hasna, int byrow) Arguments: The following macros ("arguments") should be defined for the template to work as intended. - METHOD: the name of the resulting function - X_TYPE: 'i' or 'r' Authors: Adopted from rowQuantiles.c by R. Gentleman. Template by Henrik Bengtsson. Copyright: Martin Maechler 2014-2021; Henrik Bengtsson, 2007-2013. ***********************************************************************/ #include #include #include /* Expand arguments: X_TYPE => (X_C_TYPE, X_IN_C, X_ISNAN, [METHOD_NAME]) */ #include "templates-types.h" #if X_TYPE == 'i' #define PSORT iPsort #elif X_TYPE == 'r' #define PSORT rPsort #endif SEXP METHOD_NAME(SEXP x, int nrow, int ncol, int narm, int hasna, int byrow) { /* R allocate a double vector of length 'nrow' Note that 'nrow' means 'ncol' if byrow=FALSE. 
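An illustration (not part of the original template) of the median-by-partial-sort idea that
C_METHOD_NAME() below implements with R's rPsort()/iPsort(): it suffices to place the middle
element(s) in their sorted positions rather than sort the whole row.  The quadratic selection
used here is only for exposition; rPsort() does the same job far more efficiently:

// partial selection sort: afterwards a[0..m-1] are the m smallest values, in order
static void psel(double a[], int n, int m) {
    for (int i = 0; i < m; i++) {
        int k = i;
        for (int j = i + 1; j < n; j++) if (a[j] < a[k]) k = j;
        double t = a[i]; a[i] = a[k]; a[k] = t;
    }
}

// median of a[0..n-1]; reorders a[] (cf. rowData[] below)
static double median_by_selection(double a[], int n) {
    psel(a, n, n / 2 + 1);
    return (n % 2) ? a[n / 2] : (a[n / 2 - 1] + a[n / 2]) / 2.;
}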
*/ SEXP ans = PROTECT(allocVector(REALSXP, nrow)); C_METHOD_NAME(X_IN_C(x), REAL(ans), nrow, ncol, narm, hasna, byrow); UNPROTECT(1); return(ans); } // "C-only" method [SEXP free, can be called from "pure C" : void C_METHOD_NAME(X_C_TYPE *x, double *res, int nrow, int ncol, int narm, int hasna, int byrow) { Rboolean isOdd; int ii, jj, kk, qq; int *colOffset; X_C_TYPE value, /* R allocate memory for the 'rowData'. This will be taken care of by the R garbage collector later on. */ *rowData = (X_C_TYPE *) R_alloc(ncol, sizeof(X_C_TYPE)); if (!hasna) // If there are no missing values, don't try to remove them narm = FALSE; /* When narm is false, isOdd and qq are the same for all rows */ if (!narm) { isOdd = (ncol % 2 == 1); qq = (int)(ncol/2) - 1; } else { isOdd = FALSE; qq = 0; } value = 0; /* Pre-calculate the column offsets */ colOffset = (int *) R_alloc(ncol, sizeof(int)); // HJ begin if (byrow) { for(jj=0; jj < ncol; jj++) colOffset[jj] = (int)jj*nrow; } else { for(jj=0; jj < ncol; jj++) colOffset[jj] = jj; } // HJ end if (hasna) { for(ii=0; ii < nrow; ii++) { if(ii % 1000 == 0) R_CheckUserInterrupt(); int rowIdx = byrow ? ii : ncol*ii; //HJ kk = 0; /* The index of the last non-NA value detected */ for(jj=0; jj < ncol; jj++) { value = x[rowIdx+colOffset[jj]]; //HJ if (X_ISNAN(value)) { if (!narm) { kk = -1; break; } } else { rowData[kk] = value; kk = kk + 1; } } if (kk == 0) { res[ii] = R_NaN; } else if (kk == -1) { res[ii] = R_NaReal; } else { /* When narm is true, isOdd and qq may change with row */ if (narm) { isOdd = (kk % 2 == 1); qq = (int)(kk/2) - 1; } /* Permute x[0:kk-1] so that x[qq] is in the correct place with smaller values to the left, ... */ PSORT(rowData, kk, qq+1); value = rowData[qq+1]; if (isOdd) { res[ii] = (double)value; } else { if (narm || !X_ISNAN(value)) { /* Permute x[0:qq-2] so that x[qq-1] is in the correct place with smaller values to the left, ... */ PSORT(rowData, qq+1, qq); if (X_ISNAN(rowData[qq])) res[ii] = R_NaReal; else res[ii] = ((double)(rowData[qq] + value))/2; } else { res[ii] = (double)value; } } } } // for(..) } else { // no NAs for(ii=0; ii < nrow; ii++) { if(ii % 1000 == 0) R_CheckUserInterrupt(); int rowIdx = byrow ? ii : ncol*ii; //HJ for(jj=0; jj < ncol; jj++) rowData[jj] = x[rowIdx+colOffset[jj]]; //HJ /* Permute x[0:ncol-1] so that x[qq] is in the correct place with smaller values to the left, ... */ PSORT(rowData, ncol, qq+1); value = rowData[qq+1]; if (isOdd) { res[ii] = (double)value; } else { /* Permute x[0:qq-2] so that x[qq-1] is in the correct place with smaller values to the left, ... */ PSORT(rowData, qq+1, qq); res[ii] = (double)((rowData[qq] + value))/2; } } // for(..) } /* if (hasna ...) */ } /* Undo template macros */ #undef PSORT #include "templates-types_undef.h" /*************************************************************************** HISTORY: 2014-12-09 [MMaechler] o do not use '== TRUE' '== FALSE' -- as we have no NA here o resolve REAL(ans) outside for(ii ..) o add "SEXP-free" C routines, others can call: C_rowMedians_(Real|Integer) 2013-04-23 [HB] o BUG FIX: The integer template of rowMedians_() would not handle ties properly. This was because ties were calculated as '(double)((rowData[qq] + value)/2)' instead of '((double)(rowData[qq] + value))/2'. 2013-01-13 [HB] o Merged rowMedians_Integer() and rowMedians_Read() into template rowMedians_(). 2013-01-13 [HB] o Using internal arguments 'by_row' instead of 'by_column'. 2011-12-11 [HB] o BUG FIX: rowMediansReal(..., na.rm=TRUE) did not handle NaN:s, only NA:s. 
Note that NaN:s does not exist for integers. 2011-10-12 [HJ] o Added colMedians(). o Now rowMediansInteger/Real() can operate also by columns, cf. argument 'by_column'. 2007-08-14 [HB] o Added checks for user interrupts every 1000 line. o Added argument 'hasNA' to rowMedians(). 2005-12-07 [HB] o BUG FIX: When calculating the median of an even number (non-NA) values, the length of the second sort was one element too short, which made the method to freeze, i.e. rPsort(rowData, qq, qq) is now (...qq+1, qq). 2005-11-24 [HB] o By implementing a special version for integers, there is no need to coerce to double in R, which would take up twice the amount of memory. o rowMedians() now handles NAs too. o Adopted from rowQuantiles.c in Biobase of Bioconductor. **************************************************************************/ robustbase/src/rfltsreg.f0000644000176200001440000011702013635407354015224 0ustar liggesuserscccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc cc rrcov : Scalable Robust Estimators with High Breakdown Point cc cc This program is free software; you can redistribute it and/or modify cc it under the terms of the GNU General Public License as published by cc the Free Software Foundation; either version 2 of the License, or cc (at your option) any later version. cc cc This program is distributed in the hope that it will be useful, cc but WITHOUT ANY WARRANTY; without even the implied warranty of cc MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the cc GNU General Public License for more details. cc cc You should have received a copy of the GNU General Public License cc along with this program; if not, write to the Free Software cc Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA cc cc I would like to thank Peter Rousseeuw and Katrien van Driessen for cc providing the initial code of this function. cc cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc subroutine rfltsreg(dat,n,nvar, nhalff, krep, inbest, objfct, * intercept,intadjust, nvad, datt, weights, * temp, index1,index2, aw2,aw, residu, y, nmahad, ndist, * am,am2, slutn, * jmiss, xmed,xmad, a,da, h,hvec, * c, cstock, mstock, c1stock, m1stock, * dath, sd, means, bmeans, i_trace) c implicit none c integer n ! n = sample size * , nvad ! nvad = p+1 * , nvar ! nvar = p * , nhalff ! nhalff = 'quan' = quan.f = h.alpha.n(alpha, n, rk) which is ! = (n + p + 1) %/% 2 when alpha= 1/2 double precision dat(n, nvad) ! dat = cbind(x,y) hence n x (p+1) integer krep ! krep = nsamp (e.g. = 5000 for "best") c krep := the total number of trial subsamples to be drawn when n exceeds 2*nmini; c krep = 0 :<==> "exact" <==> all possible subsamples integer inbest(nhalff) double precision objfct integer intercept, intadjust double precision datt(n,nvad) double precision weights(n) integer temp(n) integer index1(n) integer index2(n) double precision aw2(n),aw(n) double precision residu(n) double precision y(n) double precision nmahad(n) double precision ndist(n) double precision am(n), am2(n), slutn(n) integer jmiss(nvad) double precision xmed(nvad) double precision xmad(nvad) double precision a(nvad), da(nvad) double precision h(nvar,nvad), hvec(nvar*nvad) double precision c(nvar,nvad) double precision cstock(10, nvar*nvar) double precision mstock(10, nvar) c parameters for array sizes : integer km10 ! := 10*kmini * , kmini * , nmini * , nmaxi ! 
:= nmini*kmini parameter (kmini = 5) parameter (km10 = 10*kmini) parameter (nmini = 300) parameter (nmaxi = nmini*kmini) double precision c1stock(km10, nvar*nvar) double precision m1stock(km10, nvar) double precision dath(nmaxi, nvad) double precision sd(nvar) double precision means(nvar) double precision bmeans(nvar) integer i_trace c --------------------- end {arguments} ---------------------- integer k1,k2,k3, maxmini integer i_aux(4) ! just for printing when i_trace > 0 c ccc parameter (nvmax=115) ccc parameter (nmax=57000) cc parameter (k1 = 2 ) parameter (k2 = 2 ) parameter (k3 = 100) ccc parameter (nvmax1=nvmax+1) ccc parameter (nvmax2=nvmax*nvmax) ccc parameter (nvm11=nvmax*(nvmax+1)) C-- VT parameter (maxmini=int((3*nmini-1)/2)+1) parameter (maxmini=450) cc integer matz ! , iseed, seed * , tottimes, step * , pnsel, replow integer i,ii,iii, j,jj,jjj, jndex, k,kk, lll, m,mm, nn integer jmin,jmax, jerd,jnc, jreg, kstep,kount c unused integer jdefaul, jbreak integer minigr integer nfac,nerr, ngroup, nhalf,nlen,nmax,nmore, nmore2, nquant integer nvmax1, nvm11, nvmax, nsel, nstop, nrep double precision bstd, dist2, eps, factor, object double precision fckw, fckwi, fckw1, percen double precision MADeps logical all,part,fine,final,rfodd,more1,more2 c unused integer rfnbreak integer rfncomb integer flag(km10) integer mini(kmini) integer subdat(2,nmaxi) integer subndex(maxmini) double precision faclts(11) double precision mcdndex(10,2,kmini) c Function double precision rffindq data faclts/2.6477,2.5092,2.3826,2.2662,2.1587, * 2.0589,1.9660,1.879,1.7973,1.7203,1.6473/ nrep = krep if(i_trace .ge. 2) then i_aux(1) = nrep call intpr('Entering rfltsreg() - krep: ',-1, i_aux, 1) endif call rndstart C -------- == GetRNGstate() in C CCCC 10.10.2005 - substitute the parameters nmax and nvmax nmax = n nvmax = nvar nvmax1 = nvmax+1 nvm11 = nvmax*(nvmax+1) if(nvar .lt.5 ) then eps=1.0D-12 else if(nvar .ge. 5 .and. nvar .le. 8) then eps=1.0D-14 else eps=1.0D-16 endif endif c Tolerance for rfstatis(): |MAD| < MADeps : <==> "problem" MADeps=1.0D-6 cc nhalff=int((n+nvar+1)/2) jmin=(n/2)+1 jmax = max((3*n/4)+(nvar+1)/4, nhalff) nquant=min(11, 1+ nint(40*(dble(nhalff)/n - 0.5))) factor=faclts(nquant) c unused jbreak=rfnbreak(nhalff,n,nvar) c unused jdefaul=(n+nvar+1)/2 percen = (1.D0*nhalff)/(1.D0*n) if(nvad.eq.1) goto 9000 cc CDDD CALL INTPR('>>> Enter RFLTSREG ... iseed=',-1,iseed,1) c seed=iseed matz=1 nsel=nvar ngroup=1 part=.false. fine=.false. final=.false. all=.true. do 21,i=1,nmaxi subdat(1,i)=1000000 subdat(2,i)=1000000 21 continue cc mini(1)=0 mini(2)=0 mini(3)=0 mini(4)=0 mini(5)=0 if(krep.gt.0 .and. n.gt.(2*nmini-1)) then kstep=k1 part=.true. ngroup=int(n/dble(nmini)) if(n.ge.(2*nmini) .and. n.le.(3*nmini-1)) then if(rfodd(n)) then mini(1)=int(n/2) mini(2)=int(n/2)+1 else mini(1)=n/2 mini(2)=n/2 endif else if(n.ge.(3*nmini) .and. n.le.(4*nmini-1)) then if(3*(n/3) .eq. n) then mini(1)=n/3 mini(2)=n/3 mini(3)=n/3 else mini(1)=int(n/3) mini(2)=int(n/3)+1 if(3*(n/3) .eq. n-1) then mini(3)=int(n/3) else mini(3)=int(n/3)+1 endif endif else if(n.ge.(4*nmini) .and. n.le.(5*nmini-1)) then if(4*(n/4) .eq. n) then mini(1)=n/4 mini(2)=n/4 mini(3)=n/4 mini(4)=n/4 else mini(1)=int(n/4) mini(2)=int(n/4)+1 if(4*(n/4) .eq. n-1) then mini(3)=int(n/4) mini(4)=int(n/4) else if(4*(n/4) .eq. 
n-2) then mini(3)=int(n/4)+1 mini(4)=int(n/4) else mini(3)=int(n/4)+1 mini(4)=int(n/4)+1 endif endif endif else mini(1)=nmini mini(2)=nmini mini(3)=nmini mini(4)=nmini mini(5)=nmini endif nhalf=int(mini(1)*percen) if(ngroup.gt.kmini) ngroup=kmini nrep=int(dble(krep)/ngroup) minigr=mini(1)+mini(2)+mini(3)+mini(4)+mini(5) if(i_trace .ge. 2) + call intpr(' rftls.... minigr=',-1,[minigr],1) call rfrdraw(subdat,n,minigr,mini,ngroup,kmini) else c krep == 0 or n <= 2*nmini-1 ( = 599 by default) minigr=n nhalf=nhalff kstep=k1 C VT::25.11.2010 - added krep==0 means "exact" (all combinations) if(krep.eq.0 .or. n.le.replow(nsel)) then c use all combinations; happens iff nsel = nvar = p <= 6 nrep=rfncomb(nsel,n) if(i_trace .ge. 2) call prallc(nrep) else nrep = krep all=.false. endif endif c seed=iseed cc CDDD CALL INTPR('>>> Start initialization ... nrep=',-1,nrep,1) do 31, j=1,nvmax do k=1,10 mstock(k,j)=1000000.D0 do kk=1,kmini m1stock((kk-1)*10+k,j)=1000000.D0 end do do i=1,nvmax do kk=1,kmini c1stock((kk-1)*10+k,(j-1)*nvmax+i)=1000000.D0 end do cstock(k,(j-1)*nvmax+i)=1000000.D0 end do end do means(j)=0.D0 bmeans(j)=0.D0 sd(j)=0.D0 do k=1,nvmax1 c(j,k)=0.D0 h(j,k)=0.D0 end do 31 continue do 41, j=1,nmax nmahad(j)=0.D0 ndist(j)=0.D0 index1(j)=1000000 index2(j)=1000000 temp(j)=1000000 weights(j)=0.D0 aw(j)=0.D0 aw2(j)=0.D0 residu(j)=0.D0 y(j)=0.D0 am(j)=0.D0 am2(j)=0.D0 slutn(j)=0.D0 41 continue do j=1,km10 flag(j)=1 end do do 45, j=1,nvmax1 jmiss(j)=0 xmed(j)=0.D0 xmad(j)=0.D0 a(j)=0.D0 da(j)=0.D0 do k=1,nmaxi dath(k,j)=0.D0 end do 45 continue do j=1,maxmini subndex(j)=0 end do do j=1,nvm11 hvec(j)=0.D0 end do if(i_trace .ge. 2) + call intpr(' rftls.... initialization ready',-1,[0],1) 9000 continue if(nvad.eq.1) then do jj=1,n ndist(jj)=dat(jj,1) end do call rfshsort(ndist,n) call rfmcduni(ndist,n,nhalff,slutn,bstd,am,am2,factor, * n-nhalff+1) goto 9999 endif cc if(.not.fine .and. .not.final) then call rfstatis(dat,xmed,xmad,aw2,intercept,nvad, nvmax1,nmax,n, * nstop,MADeps,weights,y,nvar,index2) if(nstop.eq.1) goto 9999 endif cc jreg=1 call rflsreg(nvmax1, nvmax,nvar,n,a,dat, weights, da, h, * fckw,hvec,nvm11,jmiss,nvad,n) cc nfac=nvad-1 nfac=nvar-1 call rfrtran(nvar,intercept,nfac,nvad,nvmax1,xmed, * xmad,a,nvad,fckw) call rftrc(h,da,nvmax,nvmax1,nvar,intercept,nfac,nvad, * xmed,xmad) jerd=0 tottimes=0 c---- - - - - - - - - Outermost loop - - - - - - - - - - - - - - - - - - - c---- 5555 object=10.D25 if(.not. part .or. final) nn=n if(part .and. fine .and. .not. final) nn=minigr if(fine.or.(.not.part.and.final)) then nrep=10 nsel=nhalf kstep=k2 if (final) then nhalf=nhalff ngroup=1 if (n*nvar .le.100000) then kstep=k3 else if (n*nvar .gt.100000 .and. n*nvar .le.200000) then kstep=10 else if (n*nvar .gt.200000 .and. n*nvar .le.300000) then kstep=9 else if (n*nvar .gt.300000 .and. n*nvar .le.400000) then kstep=8 else if (n*nvar .gt.400000 .and. n*nvar .le.500000) then kstep=7 else if (n*nvar .gt.500000 .and. n*nvar .le.600000) then kstep=6 else if (n*nvar .gt.600000 .and. n*nvar .le.700000) then kstep=5 else if (n*nvar .gt.700000 .and. n*nvar .le.800000) then kstep=4 else if (n*nvar .gt.800000 .and. n*nvar .le.900000) then kstep=3 else if (n*nvar .gt.900000 .and. n*nvar .le.1000000) then kstep=2 else kstep=1 endif if (n .gt. 5000) nrep=1 else nhalf= int(minigr*percen) endif endif if(i_trace .ge. 2) then i_aux(1) = nrep i_aux(2) = kstep i_aux(3) = nhalf call intpr('Main (number of trials nrep, kstep, nhalf):', + -1, i_aux, 3) endif do i=1,nsel-1 index1(i)=i end do index1(nsel)=nsel-1 cc if(.not. 
final) then do i=1,10 do j=1,ngroup mcdndex(i,1,j)=10.D25 mcdndex(i,2,j)=10.D25 end do end do endif if (fine .and. .not. final) then do j=1,minigr do k=1,nvad dath(j,k)=dat(subdat(1,j),k) end do end do endif kount=0 CDDD CALL INTPR('>>> MAIN LOOP BY GROUPS: NGROUP= ',-1,ngroup,1) do 1111 ii = 1,ngroup if(i_trace .ge. 3) + call intpr(' rftls.... looping by group ii=',-1,[ii],1) if(.not.fine) kount=0 if(part .and. .not. fine) nn=mini(ii) do i=1,nn index2(i)=i end do if(part .and. .not. fine) then jndex=0 do j=1,minigr if(subdat(2,j).eq.ii) then jndex=jndex+1 subndex(jndex)=subdat(1,j) endif end do do j=1,mini(ii) do k=1,nvad dath(j,k)=dat(subndex(j),k) end do end do endif do 1000 i=1,nrep if(i_trace .ge. 4) + call intpr(' rftls.... for(i = 1,nrep): i=',-1,[i],1) pnsel=nsel tottimes=tottimes+1 fckwi=0.D0 fckw1=0.D0 step=0 132 if((part.and..not.fine).or.(.not.part.and..not.final)) then if(part) then call rfrangen(mini(ii),nsel,index1) else if(all) then call rfgenpn(n,nsel,index1) else call rfrangen(n,nsel,index1) endif endif endif c 9550 continue if(.not.fine .and. part) then do j=1,pnsel do m=1,nvad c(j,m)=dath(index1(j),m) end do end do else if(.not.part .and. .not.final) then do j=1,pnsel do m=1,nvad c(j,m)=dat(index1(j),m) end do end do endif if((.not.part.and..not.final).or.(.not.fine.and.part)) then if(nvar.gt.1) then call rfequat(c,nvmax,nvmax1,hvec,nvm11,nvar,1,nerr) if(nerr.lt.0) then jerd=jerd+1 if(.not.all .and. i.gt.2) goto 132 goto 1000 endif else if(c(1,1).ne.0.D0) c(1,1)=c(1,2)/c(1,1) endif do jnc=1,nvar a(jnc)=c(jnc,1) end do endif if (final) then if(mstock(i,1).ne.1000000.D0) then do jj=1,nvar a(jj)=mstock(i,jj) end do else goto 1111 endif endif if (fine.and..not.final) then if(m1stock((ii-1)*10+i,1).ne.1000000.D0) then do jj=1,nvar a(jj)=m1stock((ii-1)*10+i,jj) end do else goto 1111 endif endif do jnc=1,nn residu(jnc)=0.D0 do j=1,nvar if(part.and..not.final) then residu(jnc)=residu(jnc)+a(j)*dath(jnc,j) else residu(jnc)=residu(jnc)+a(j)*dat(jnc,j) endif end do if(part.and..not.final) then residu(jnc)=dath(jnc,nvad)-residu(jnc) else residu(jnc)=dat(jnc,nvad)-residu(jnc) endif aw(jnc)=residu(jnc) end do more1=.false. more2=.false. nmore=200 nmore2=nmore/2 if(intadjust.eq.1) then CDDD CALL INTPR('>>> INTERCEPT ADJUSTMENT 1',-1,i,1) if(intercept.eq.1.and.((.not.fine.and.part).or. * .not.part.or.((nn-nhalf).le.nmore))) then call rfshsort(aw,nn) call rfmcduni(aw,nn,nhalf,slutn,bstd,am,am2, * factor,nn-nhalf+1) a(nvar)=a(nvar)+slutn(1) do jnc=1,nn residu(jnc)=residu(jnc)-slutn(1) end do else if(intercept.eq.1) then call rfshsort(aw,nn) do jj=1,nn am2(jj)=abs(aw(jj)) end do dist2=rffindq(am2,nn,nhalf,index1) do jj=1,nhalf aw2(jj)=aw(index1(jj)) end do dist2=rffindq(aw2,nhalf,1,index2) jnc=index1(index2(1)) if(jnc+nmore-nmore2+nhalf-1.gt.nn.or.jnc-nmore2.lt.1) * then call rfmcduni(aw,nn,nhalf,slutn,bstd,am,am2, * factor,nn-nhalf+1) a(nvar)=a(nvar)+slutn(1) do jnc=1,nn residu(jnc)=residu(jnc)-slutn(1) end do else 555 do jj=0,nhalf-1+nmore aw2(jj+1)=aw(jnc-nmore2+jj) end do nlen=nmore+1 call rfmcduni(aw2,nhalf+nmore,nhalf,slutn, * bstd,am,am2,factor,nlen) if(nlen.eq.1.and..not.more1) then if(.not.more2) then nmore=nmore2 nmore2=nmore2+nmore2 more1=.true. if(jnc-nmore2.ge.1) goto 555 endif else if(nlen.eq.(nmore+1).and..not.more2) then if(.not.more1) then nmore=nmore2 nmore2=-nmore2 more2=.true. 
if(jnc+nmore-nmore2+nhalf-1.le.nn) * goto 555 endif else if(nlen.eq.1.and.more1) then if(.not.more2) then nmore2=nmore2+100 if(jnc-nmore2.ge.1) goto 555 endif else if(nlen.eq.(nmore+1).and.more2) then if(.not.more1) then nmore2=nmore2+100 if(jnc+nmore-nmore2+nhalf-1.le.nn) goto 555 endif endif a(nvar)=a(nvar)+slutn(1) do jnc=1,nn residu(jnc)=residu(jnc)-slutn(1) end do endif endif endif do jnc=1,nn residu(jnc)=abs(residu(jnc)) end do dist2=rffindq(residu,nn,nhalf,index2) c 9555 do step=1,kstep tottimes=tottimes+1 do j=1,nhalf temp(j)=index2(j) end do call rfishsort(temp,nhalf) do j=1,nhalf if(.not.part.or.final) then do mm=1,nvad datt(j,mm)=dat(temp(j),mm) end do else do mm=1,nvad datt(j,mm)=dath(temp(j),mm) end do endif end do call rflsreg(nvmax1, nvmax,nvar,n,a,datt, weights, da, h, * fckw,hvec,nvm11,jmiss,nvad,nn) do jnc=1,nn residu(jnc)=0.D0 do j=1,nvar if(part.and..not.final) then residu(jnc)=residu(jnc)+a(j)*dath(jnc,j) else residu(jnc)=residu(jnc)+a(j)*dat(jnc,j) endif end do if(part.and..not.final) then residu(jnc)=dath(jnc,nvad)-residu(jnc) else residu(jnc)=dat(jnc,nvad)-residu(jnc) endif aw(jnc)=residu(jnc) end do more1=.false. more2=.false. nmore=200 nmore2=nmore/2 if(intadjust.eq.1) then CDDD CALL INTPR('>>> INTERCEPT ADJUSTMENT 2',-1,step,1) if(intercept .eq. 1 .and. ((.not.fine.and.part) .or. * .not.part.or.((nn-nhalf).le.nmore))) then call rfshsort(aw,nn) call rfmcduni(aw,nn,nhalf,slutn,bstd,am,am2, * factor,nn-nhalf+1) a(nvar)=a(nvar)+slutn(1) do jnc=1,nn residu(jnc)=residu(jnc)-slutn(1) end do else if(intercept.eq.1) then call rfshsort(aw,nn) do jj=1,nn am2(jj)=abs(aw(jj)) end do dist2=rffindq(am2,nn,nhalf,index1) do jj=1,nhalf aw2(jj)=aw(index1(jj)) end do dist2=rffindq(aw2,nhalf,1,index2) jnc=index1(index2(1)) if(jnc+nmore-nmore2+nhalf-1.gt.nn.or.jnc-nmore2.lt.1) * then call rfmcduni(aw,nn,nhalf,slutn,bstd,am,am2, * factor,nn-nhalf+1) a(nvar)=a(nvar)+slutn(1) do jnc=1,nn residu(jnc)=residu(jnc)-slutn(1) end do else c--- repeat { .... 666 continue do jj=0,nhalf-1+nmore aw2(jj+1)=aw(jnc-nmore2+jj) end do nlen=nmore+1 call rfmcduni(aw2,nhalf+nmore,nhalf,slutn,bstd, * am,am2,factor,nlen) if(nlen.eq.1.and..not.more1) then if(.not.more2) then nmore=nmore2 nmore2=nmore2+nmore2 more1=.true. if(jnc-nmore2.ge.1) goto 666 endif else if(nlen.eq.(nmore+1).and..not.more2) then if(.not.more1) then nmore=nmore2 nmore2=-nmore2 more2=.true. if(jnc+nmore-nmore2+nhalf-1.le.nn) goto 666 endif else if(nlen.eq.1.and.more1) then if(.not.more2) then nmore2=nmore2+100 if(jnc-nmore2.ge.1) goto 666 endif else if(nlen.eq.(nmore+1).and.more2) then if(.not.more1) then nmore2=nmore2+100 if(jnc+nmore-nmore2+nhalf-1.le.nn) goto 666 endif endif c } (end repeat) c update a[nvar] a(nvar)=a(nvar)+slutn(1) do jnc=1,nn residu(jnc)=residu(jnc)-slutn(1) end do endif endif endif do jnc=1,nn residu(jnc)=abs(residu(jnc)) end do dist2=rffindq(residu,nn,nhalf,index2) fckw=0.D0 do jnc=1,nhalf fckw=fckw+residu(jnc)**2 end do if(step.ge.2 .and. fckw.eq.fckw1) exit ! break {step loop} fckw1=fckwi fckwi=fckw if(final .and. ((i.eq.1.and.step.eq.1 .and. .not.fine) * .or.fckw.lt.object)) then if(i_trace .ge. 3) then call dblepr('Setting objfct= ', -1, fckw,1) endif object=fckw objfct=fckw do jjj=1,nhalf inbest(jjj)=index2(jjj) end do call rfcovcopy(a,bmeans,nvar,1) endif end do ! step in 1..kstep c 5000 if(.not. final) then if(part .and. .not. fine) then iii=ii else iii=1 cc At the end of the algorithm, only the ten cc best solutions need to be stored. 
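/* Aside, an illustration in C rather than Fortran, not part of rfltsreg.f: the quantity fckw
 * computed above is the LTS objective, the sum of the nhalf (the LTS coverage h) smallest
 * squared residuals of the current fit; rffindq() does the corresponding selection in the
 * Fortran code.  A stand-alone sketch (illustrative names, qsort instead of a partial
 * selection):
 */
#include <stdlib.h>
#include <math.h>

static int cmp_abs(const void *a, const void *b) {
    double d = fabs(*(const double *)a) - fabs(*(const double *)b);
    return (d > 0) - (d < 0);
}

/* LTS objective for given residuals: sum of the h smallest squared residuals */
static double lts_objective(double res[], int n, int h) {
    qsort(res, (size_t) n, sizeof(double), cmp_abs);   /* reorders res[] */
    double s = 0.;
    for (int i = 0; i < h; i++) s += res[i] * res[i];
    return s;
}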
endif if( flag((iii-1)*10+1).eq.1) then lll=1 else lll=2 endif do j = lll,10 if (fckw .le. mcdndex(j,2,iii)) then if(fckw.ne.mcdndex(j,2,iii)) then if(.not.fine.and.part) goto 203 goto 205 else do k=j,10 if(fckw.eq.mcdndex(k,2,iii)) then do jjj=1,nvar if(part.and..not.fine) then if(a(jjj).ne.m1stock((iii-1)*10+ * k,jjj)) then goto 203 endif else if(a(jjj).ne.mstock(k,jjj)) goto 205 endif end do endif end do endif exit ! j-loop c ---- cvvv using m1stock[,] 203 do k=10,j+1,-1 do kk=1,nvar m1stock((iii-1)*10+k,kk)= * m1stock((iii-1)*10+k-1,kk) end do mcdndex(k,1,iii)=mcdndex(k-1,1,iii) mcdndex(k,2,iii)=mcdndex(k-1,2,iii) end do do kk=1,nvar m1stock((iii-1)*10+j,kk)=a(kk) end do mcdndex(j,1,iii)=i mcdndex(j,2,iii)=fckw exit ! j-loop c ---- cvvv using mstock[,] 205 do k=10,j+1,-1 do kk=1,nvar mstock(k,kk)= mstock(k-1,kk) end do mcdndex(k,1,iii)=mcdndex(k-1,1,iii) mcdndex(k,2,iii)=mcdndex(k-1,2,iii) end do do kk=1,nvar mstock(j,kk)=a(kk) end do mcdndex(j,1,iii)=i mcdndex(j,2,iii)=fckw exit ! j-loop c ---- endif end do ! j-loop endif 1000 continue c..... end for( i = 1..nrep ) 1111 continue c---- -------- end for( ii = 1..ngroup ) cc if(part .and. .not. fine) then fine=.true. goto 5555 endif if(.not. final .and. (.not.part .or. fine)) then final=.true. goto 5555 endif 9999 continue call rndend C ------ == PutRNGstate() in C return end ccccc end {rfltsreg} =================================================== subroutine rfstatis(x, xmed,xmad * , aw2 * , intercept * , nvad * , nvmax1 * , nmax * , n * , nstop ! nstop=0: success; =1 : "problem": mad ~= 0 * , MADeps * , weights * , y * , nvar * , index2) cc implicit none integer intercept, nvad, nvmax1, nmax, n, nstop double precision x(n, nvad), xmed(nvmax1), xmad(nvmax1) double precision aw2(nmax) double precision MADeps, weights(nmax), y(nmax) integer nvar, index2(nmax) c Var integer j,jnc c Function double precision rfamdan cc nstop=0 ! == success if (intercept.eq.0) then c regression without intercept do 50 j=1,nvad xmed(j)=0.0 do jnc=1,n aw2(jnc) = abs(x(jnc,j)) end do xmad(j)=rfamdan(nmax,aw2,n,index2)*1.4826 if(abs(xmad(j)) .le. MADeps) then xmad(j)=0.0 do jnc=1,n xmad(j)=xmad(j)+aw2(jnc) end do xmad(j)=(xmad(j)/n)*1.2533 if(abs(xmad(j)) .le. MADeps) then nstop=1 return endif endif do jnc=1,n x(jnc,j)=x(jnc,j)/xmad(j) end do 50 continue else c regression with intercept xmed(nvar)=0.D0 xmad(nvar)=1.D0 do 120 j=1,nvad if(j.eq.nvar) goto 120 do jnc=1,n aw2(jnc)=x(jnc,j) end do xmed(j)=rfamdan(nmax,aw2,n,index2) do jnc=1,n aw2(jnc)=abs(aw2(jnc)-xmed(j)) end do xmad(j)=rfamdan(nmax,aw2,n,index2)*1.4826 if(abs(xmad(j)) .le. MADeps) then xmad(j)=0.0 do jnc=1,n xmad(j)=xmad(j)+aw2(jnc) end do xmad(j)=(xmad(j)/n)*1.2533 if(dabs(xmad(j)) .le. 
MADeps) then nstop=1 return endif endif do jnc=1,n x(jnc,j)=(x(jnc,j)-xmed(j))/xmad(j) end do 120 continue endif do jnc=1,n weights(jnc)=1.0 y(jnc)=x(jnc,nvad) end do return end cc function rfamdan(nmax, aa, n ,index2) cc implicit none integer nmax, n, index2(nmax) double precision aa(n) double precision rffindq, rfamdan integer jndl cc jndl=int(n/2.0) if(mod(n,2).eq.0) then rfamdan=(rffindq(aa,n,jndl, index2)+ * rffindq(aa,n,jndl+1,index2))/2.0 else rfamdan=rffindq(aa,n,jndl+1,index2) endif return end c --- subroutine rflsreg(nvmax1, nvmax, k, n, f, x, w, da, h, 1 fckw, hvec, nvm11,jmiss,nvad,nnn) implicit none cc Arguments integer nvmax1, nvmax, k, n, nvm11, nvad double precision f(k), x(n,nvad), w(n), da(k), h(nvmax,nvmax1) 1 , fckw, hvec(nvm11) integer jmiss(nvmax1), nnn cc External Functions double precision rfqlsrg cc Var double precision dfckw, dfact, dwjnc, dyj, dfka double precision ank, anul, hda integer j,l, jnc,ka,kplus, mm cc kplus=k+1 do jnc=1,k do j=1,kplus h(jnc,j)=0.D0 end do end do anul=0.0 do 30 jnc=1,nnn call rffcn(k,f,x,jnc,n,nvad) dwjnc=dble(w(jnc)) anul=anul+w(jnc) dyj=dble(x(jnc,kplus)) do ka=1,k dfka=dble(f(ka)) h(ka,k+1)=h(ka,k+1)+dwjnc*dfka*dyj do l=1,ka h(ka,l)=h(ka,l)+dwjnc*dfka*dble(f(l)) end do end do 30 continue do j=1,k do jnc=1,j h(jnc,j)=h(j,jnc) end do end do call rfmatnv(h,nvmax,nvmax1,hvec,nvm11,k,1,jmiss) mm=k+1 fckw = rfqlsrg(k,n,nvmax1,nvmax,f,x, w,h,mm,nvad,nnn) do jnc=1,k f(jnc)=h(jnc,k+1) end do dfckw=dble(fckw) ank=anul-k dfact=dble(ank) dfact=dfckw/dfact do jnc=1,k do j=1,k h(jnc,j)=h(jnc,j)*dfact end do end do do jnc=1,k hda=h(jnc,jnc) da(jnc)=sqrt(hda) end do return end ccccc ccccc subroutine rffcn(k, f,x, jnc,n,nvad) c implicit none integer k, jnc, n, nvad double precision f(k), x(n,nvad) c integer j do j=1,k f(j)=x(jnc,j) end do return end ccccc ccccc subroutine rfmatnv(an, nvmax,nvmax1, hvec, nvm11, na,nb, jmiss) c implicit none c integer nvmax,nvmax1 double precision an(nvmax,nvmax1) integer nvm11 double precision hvec(nvm11) integer na,nb, jmiss(nvmax1) c Var double precision deter, turn, swap integer j,n, nc, jcl,jdelc,jdla,jdlb,jdm,jhfd, * jnc,jncb,jncc,jncd,jnk,jpaal, nma,npnb, ldel deter=1.0D0 n=na npnb=n+nb jnk=0 do j=1,npnb jnk=(j-1)*nvmax do nc=1,nvmax jnk=jnk+1 hvec(jnk)=an(nc,j) end do end do ldel=0 jdm=nvmax nma=n-1 jdelc=1-jdm do 130 jhfd=1,n turn=0.0D0 jdelc=jdelc+jdm jdla=jdelc+jhfd-1 jdlb=jdelc+nma do jncb=jdla,jdlb if(dabs(hvec(jncb)) .gt. dabs(turn)) then turn=hvec(jncb) ldel=jncb endif end do if (turn .eq. 0) goto 180 jpaal=ldel-jdelc+1 jmiss(jhfd)=jpaal if(jpaal .gt. jhfd) then deter=-deter jpaal=jpaal-jdm jncd=jhfd-jdm do jnc=1,npnb jpaal=jpaal+jdm jncd=jncd+jdm swap=hvec(jncd) hvec(jncd)=hvec(jpaal) hvec(jpaal)=swap end do endif deter=deter*turn turn=1.0D0/turn jncd=jdelc+nma do jnc=jdelc,jncd hvec(jnc)=-hvec(jnc)*turn end do hvec(jdla)=turn jncb=jhfd-jdm jpaal=1-jdm do jnc=1,npnb jpaal=jpaal+jdm jncb=jncb+jdm if(jnc .ne. jhfd) then jcl=jpaal+nma swap=hvec(jncb) jncd=jdelc-1 do jncc=jpaal,jcl jncd=jncd+1 hvec(jncc)=hvec(jncc)+swap*hvec(jncd) end do hvec(jncb)=swap*turn endif end do 130 continue do 160 jncb=1,n jhfd=n+1-jncb ldel=jmiss(jhfd) if(ldel .ne. 
jhfd) then jpaal=(ldel-1)*jdm+1 jcl=jpaal+nma jdelc=(jhfd-1)*jdm+1-jpaal do jncc=jpaal,jcl jncd=jncc+jdelc swap=hvec(jncc) hvec(jncc)=hvec(jncd) hvec(jncd)=swap end do endif 160 continue c--- 180 jnk=0 do j=1,npnb do nc=1,nvmax jnk=jnk+1 an(nc,j)=hvec(jnk) end do end do return end ccccc ccccc double precision * function rfqlsrg(k,n,nvmax1,nvmax,f,x, w,h,mm,nvad,nnn) cc implicit none integer k,n,nvmax1,nvmax, mm,nvad,nnn double precision f(k), x(n,nvad), w(n), h(nvmax,nvmax1) double precision q,hsum integer jnc,jncb cc q=0.D0 do 30 jnc=1,nnn call rffcn(k,f,x,jnc,n,nvad) hsum=0.D0 do jncb=1,k hsum=h(jncb,mm)*f(jncb)+hsum enddo q=(hsum-x(jnc,mm))*(hsum-x(jnc,mm))*w(jnc)+q 30 continue rfqlsrg=q return end ccccc ccccc subroutine rfrtran(nvar,jcst,nfac,nvad,nvmax1, * xmed,xmad, aa,jal,fckw) cc implicit none integer nvar,jcst,nfac,nvad,nvmax1, jal double precision aa(jal), xmed(nvmax1), xmad(nvmax1), fckw c Var integer j if(nvar.le.1) then aa(1)=aa(1)*xmad(nvad)/xmad(1) else do j=1,nfac aa(j)=aa(j)*xmad(nvad)/xmad(j) end do if(jcst.eq.0) then aa(nvar)=aa(nvar)*xmad(nvad)/xmad(nvar) else aa(nvar)=aa(nvar)*xmad(nvad) do j=1,nfac aa(nvar)=aa(nvar)-aa(j)*xmed(j) end do aa(nvar)=aa(nvar)+xmed(nvad) endif endif fckw=fckw*(xmad(nvad)*xmad(nvad)) return end ccccc ccccc subroutine rftrc(h, da, nvmax,nvmax1, nvar,jcst,nfac,nvad, * xmed,xmad) c implicit none integer nvmax, nvmax1 double precision h(nvmax,nvmax1), da(nvmax) integer nvar,jcst,nfac,nvad double precision xmed(nvmax1), xmad(nvmax1) c Var double precision xmp2,hnn integer j,k, k2 cc xmp2=dble(xmad(nvad))*dble(xmad(nvad)) if(jcst.eq.0) then do 10 j=1,nvar do k=1,j h(j,k)=h(j,k)*(xmp2/(dble(xmad(j))*dble(xmad(k)))) end do da(j)=dsqrt(h(j,j)) 10 continue else do j=1,nvar h(j,nvad)=h(j,j) end do do 30, j=1,nvar do k=1,j h(j,k)=h(j,k)*xmp2/(dble(xmad(j))*dble(xmad(k))) end do da(j)=dsqrt(h(j,j)) 30 continue do 50 k=1,nfac h(nvar,k)=h(k,nvar)*xmp2/dble(xmad(k)) do 60 k2=1,nvar if(k.eq.k2) then h(nvar,k)=h(nvar,k)-dble(xmed(k))*xmp2/ * (dble(xmad(k2))*dble(xmad(k)))*h(k2,nvad) else if(k.lt.k2) then h(nvar,k)=h(nvar,k)-(dble(xmed(k2))*xmp2)/ * (dble(xmad(k2))*dble(xmad(k)))*h(k,k2) else ! k > k2 h(nvar,k)=h(nvar,k)-dble(xmed(k2))*xmp2/ * (dble(xmad(k2))*dble(xmad(k)))*h(k2,k) endif 60 continue 50 continue h(nvar,nvar)=h(nvar,nvad)*xmp2 do 70 k=1,nvar h(nvar,nvar)=h(nvar,nvar)+ * (dble(xmed(k))*dble(xmed(k)))*xmp2/ * (dble(xmad(k))*dble(xmad(k)))*h(k,nvad) 70 continue do 80 k=1,nvar if(k.ne.nvar) then h(nvar,nvar)=h(nvar,nvar)-2.0D0*xmp2*dble(xmed(k))/ * (dble(xmad(k)))*h(k,nvar) else h(nvar,nvar)=h(nvar,nvar)-2.0D0*xmp2*dble(xmed(k))/ * (dble(xmad(k)))*h(nvar,nvad) endif 80 continue do j=1,nfac do k=j+1,nvar hnn=2.0D0*dble(xmed(j))*dble(xmed(k))*xmp2 h(nvar,nvar)=h(nvar,nvar)+hnn/ * (dble(xmad(j))*dble(xmad(k)))*h(j,k) end do end do da(nvar)=dsqrt(h(nvar,nvar)) endif return end ccccc ccccc subroutine rfequat(am, nvmax,nvmax1, hvec,nvm11, na,nb, nerr) implicit none integer nvmax,nvmax1 double precision am(nvmax,nvmax1) integer nvm11 double precision hvec(nvm11) integer na,nb, nerr c Var double precision turn,swap,deter integer j,n, ldel, jbegc,jbegx,jdel,jdm,jendc,jendx,jhfd,jmat, * jnc,jncb,jncc,jncd,jnce,jncf,jnk,jrow, lclpl, nc,neqa,nznde ldel=0 jdm=nvmax deter=1.0D0 n=na jmat=n+nb jnk=0 do j=1,jmat jnk=(j-1)*nvmax do nc=1,nvmax jnk=jnk+1 hvec(jnk)=am(nc,j) end do end do nznde=n-1 lclpl=-jdm do 120 jhfd=1,n turn=0.D0 lclpl=lclpl+jdm+1 jdel=lclpl+n-jhfd do jncb=lclpl,jdel if(dabs(hvec(jncb)) .gt. 
dabs(turn)) then turn=hvec(jncb) ldel=jncb endif end do if(dabs(turn) .le. 1D-8) then nerr=-1 goto 180 endif if(ldel .ne. lclpl) then deter=-deter ldel=ldel-jdm jncb=lclpl-jdm do jncc=jhfd,jmat ldel=ldel+jdm jncb=jncb+jdm swap=hvec(jncb) hvec(jncb)=hvec(ldel) hvec(ldel)=swap end do end if deter=deter*turn if(jhfd.eq.n) goto 120 turn=1./turn jncb=lclpl+1 do jncc=jncb,jdel hvec(jncc)=hvec(jncc)*turn end do jncd=lclpl jrow=jhfd+1 do jncb=jrow,n jncd=jncd+1 jnce=lclpl jncf=jncd do jncc=jrow,jmat jnce=jnce+jdm jncf=jncf+jdm hvec(jncf)=hvec(jncf)-hvec(jnce)*hvec(jncd) end do end do 120 continue nerr=0 neqa=n+1 jbegx=nznde*jdm+1 do 150 jnc=neqa,jmat jbegx=jbegx+jdm jendx=jbegx+n jbegc=n*jdm+1 jendc=jbegc+nznde do 140 jncb=1,nznde jendx=jendx-1 jbegc=jbegc-jdm jendc=jendc-jdm-1 hvec(jendx)=hvec(jendx)/hvec(jendc+1) swap=hvec(jendx) jncd=jbegx-1 do jncc=jbegc,jendc jncd=jncd+1 hvec(jncd)=hvec(jncd)-hvec(jncc)*swap end do 140 continue hvec(jbegx)=hvec(jbegx)/hvec(1) 150 continue jnc=-jdm jbegx=nznde*jdm+1 jendx=jbegx+nznde do 160 jncb=neqa,jmat jbegx=jbegx+jdm jendx=jendx+jdm jnc=jnc+jdm jncd=jnc do jncc=jbegx,jendx jncd=jncd+1 hvec(jncd)=hvec(jncc) end do 160 continue 180 jnk=0 do j=1,jmat do nc=1,nvmax jnk=jnk+1 am(nc,j)=hvec(jnk) end do end do return end ccccc C-- VT-- The following functions were added C-- C-- MM: moved to ./rf-common.f - since they are used from ./rffastmcd.f too robustbase/src/robustbase.h0000644000176200001440000001437414440373076015555 0ustar liggesusers/* External and interal API of C and Fortran routines in robustbase */ #include // for SEXP #include /**< For internationalized messages */ #ifdef ENABLE_NLS #include #define _(String) dgettext ("Matrix", String) #else #define _(String) (String) #define dngettext(pkg, String, StringP, N) (N > 1 ? 
StringP : String) #endif /* --------- ./qn_sn.c : -------- */ #define Sint int void Qn0(double *x, Sint *n, double *k, Sint *len_k, double *res); void Sn0(double *x, Sint *n, Sint *is_sorted, double *res, double *a2); /* * void Qn (double *x, Sint *n, Sint *h, Sint *finite_corr, double *res); * void Sn (double *x, Sint *n, Sint *finite_corr, double *res); */ /* call via .C() from R : */ void wgt_himed_i(double *x, Sint *n, Sint *iw, double *res); void wgt_himed (double *x, Sint *n, double *w, double *res); /* call from C: */ double pull(double *a, int n, int k); double whimed_i(double *a, int *iw, int n, double *acand, double *a_srt, int *iw_cand); double whimed(double *a, double *w, int n, double *acand, double *a_srt, double *w_cand); /* --------- ./mc.c -------- */ /* call via .C() from R : */ void mc_C(double *z, int *in, double *eps, int *iter, double *out, int *scale); /* call from C: *iter is both input and output */ double mc_C_d(const double z[], int n, const double eps[], int *iter, int scale); /* --------- ./lmrob.c --------- */ static inline Rboolean is_redescender(int ipsi) {// a simple wrapper for readability // for now, fastest: if(ipsi == 0) return FALSE; return TRUE; /* if have many more, maybe switch(ipsi) { default: error(_("ipsi=%d not implemented."), ipsi); case 0: // huber and future other non-redescenders return FALSE; case 1: case 2: case 3: case 4: case 5: case 6: return TRUE; } */ } SEXP R_rho_inf(SEXP cc, SEXP ipsi); void R_lmrob_S(double *X, double *y, int *n, int *P, int *nRes, double *scale, double *beta_s, double *C, int *iipsi, double *bb, int *best_r, int *Groups, int *N_group, int *K_s, int *max_k, int *max_it_scale, double *rel_tol, double *inv_tol, double *scale_tol, // ^^^^^^^^^ = refine.tol in R double *zero_tol, int* converged, int *trace_lev, int *mts, int *ss, int *cutoff); void R_lmrob_M_S(double *X1, double *X2, double *y, double *res, int *n, int *p1, int *p2, int *nRes, int *max_it_scale, double *scale, double *b1, double *b2, double *rho_c, int *ipsi, double *bb, int *K_m_s, int *max_k, double *rel_tol, double *inv_tol, double *scale_tol, double *zero_tol, int *converged, int *trace_lev, int *orthogonalize, int *subsample, int *descent, int *mts, int *ss); void R_lmrob_MM(double *X, double *y, int *n, int *P, double *beta_initial, double *scale, double *beta_m, double *resid, int *max_it, double *rho_c, int *ipsi, double *loss, double *rel_tol, int *converged, int *trace_lev); void R_subsample(const double *x, const double *y, int *n, int *m, double *beta, int *ind_space, int *idc, int *idr, double *lu, double *v, int *p, double *_Dr, double *_Dc, int *_rowequ, int *_colequ, int *status, int *sample, int *mts, int *ss, double *tol_inv, int *solve); SEXP R_psifun(SEXP x_, SEXP c_, SEXP ipsi_, SEXP deriv_); SEXP R_chifun(SEXP x_, SEXP c_, SEXP ipsi_, SEXP deriv_); SEXP R_wgtfun(SEXP x_, SEXP c_, SEXP ipsi_); double rho(double x, const double c[], int ipsi); double psi(double x, const double c[], int ipsi); double psip(double x, const double c[], int ipsi);// psi' double psi2(double x, const double c[], int ipsi);// psi'' double wgt(double x, const double c[], int ipsi); double rho_inf (const double c[], int ipsi); // == \rho(\infty) double normcnst(const double c[], int ipsi); // == 1 / \rho(\infty) == 1 / rho_inf() void R_find_D_scale(double *rr, double *kkappa, double *ttau, int *llength, double *sscale, double *cc, int *iipsi, int *ttype, double *rel_tol, int *max_k, int *converged); void R_calc_fitted(double *XX, double *bbeta, double *RR, 
int *nn, int *pp, int *nnrep, int *nnproc, int *nnerr); // ------- ./rob-utils.c --------------- SEXP R_wgt_flex(SEXP x_, SEXP c_, SEXP h_); // ------- ./rowMedians.c --------------- SEXP R_rowMedians(SEXP x, SEXP naRm, SEXP hasNA, SEXP byRow, SEXP keepNms); /* ------- ./rffastmcd.f ------------ */ void F77_NAME(rffastmcd)( double *dat, int *n, int *nvar, int *nhalff, int *krep, // 5 int *nmini, int *kmini, // 7 double *initcov, double *initmean, int *inbest, double *det, // 11 int *weight, int *fit, double *coeff, int *kount, double *adcov, // 16 int *temp, int *index1, int *index2, int *indexx, double *nmahad,// 21 double *ndist, double *am, double *am2, // 24 double *slutn, double *med, double *mad, double *sd, // 28 double *means, double *bmeans, double *w, double *fv1, // 32 double *fv2, double *rec, double *sscp1, double *cova1, // 36 double *corr1, double *cinv1, double *cova2, // 39 double *cinv2, double *z__, double *cstock, double *mstock, // 43 double *c1stock, double *m1stock, double *dath, // 46 double *cutoff, double *chimed, int *i_trace); // 49 args /* ------- ./rfltsreg.f ------------ */ void F77_NAME(rfltsreg)( double *dat, int *n, int *nvar, int *nhalff, int *krep, int *inbest, double *objfct, int *intercept, int *intadjust, int *nvad, double *datt, double *weights, int *temp, int *index1, int *index2, double *aw2, double *aw, double *residu, double *y, double *nmahad, double *ndist, double *am, double *am2, double *slutn, int *jmiss, double *xmed, double *xmad, double *a, double *da, double *h__, double *hvec, double *c__, double *cstock, double *mstock, double *c1stock, double *m1stock, double *dath, double *sd, double *means, double *bmeans, int *i_trace); /* ------- ./rllarsbi.f -------------- */ void F77_NAME(rllarsbi)( double *X, double *Y, int *N, int *NP, int *MDX, int *MDT, double *TOL, int *NIT, int *K, int *KODE, double *SIGMA, double *THETA, double *RS, double *SC1, double *SC2, double *SC3, double *SC4, const double *BET0); robustbase/src/init.c0000644000176200001440000000772214437610457014344 0ustar liggesusers #include #include "robustbase.h" #define CDEF(name) {#name, (DL_FUNC) &name, sizeof(name ## _typ)/sizeof(name ## _typ[0]), name ##_typ} #define CALLDEF(name, n) {#name, (DL_FUNC) &name, n} static R_NativePrimitiveArgType Qn0_typ[] = { // ./qn_sn.c REALSXP, INTSXP, REALSXP, INTSXP, REALSXP }; static R_NativePrimitiveArgType Sn0_typ[] = { REALSXP, INTSXP, INTSXP, REALSXP, REALSXP }; static R_NativePrimitiveArgType mc_C_typ[] = { REALSXP, INTSXP, REALSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType wgt_himed_i_typ[] = { REALSXP, INTSXP, INTSXP, REALSXP }; static R_NativePrimitiveArgType wgt_himed_typ[] = { REALSXP, INTSXP, REALSXP, REALSXP }; static R_NativePrimitiveArgType R_lmrob_S_typ[] = { REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, /* rrhoc */ REALSXP, INTSXP, REALSXP, /* best_r */ INTSXP, INTSXP, INTSXP, /* K_s */ INTSXP, INTSXP, INTSXP, /* rel_tol*/ REALSXP, REALSXP, REALSXP, REALSXP, /* converged */ LGLSXP, INTSXP, INTSXP, INTSXP, INTSXP }; static R_NativePrimitiveArgType R_lmrob_MM_typ[] = { REALSXP, REALSXP, INTSXP, INTSXP, /* beta_initial */ REALSXP, REALSXP, /* beta_m */ REALSXP, REALSXP, /* max_it */ INTSXP, REALSXP, INTSXP, /* loss */ REALSXP, REALSXP, LGLSXP, INTSXP }; static R_NativePrimitiveArgType R_find_D_scale_typ[] = { REALSXP, REALSXP, REALSXP, INTSXP, REALSXP, /* c */ REALSXP, INTSXP, INTSXP, REALSXP, /* max_k */ INTSXP, LGLSXP }; static R_NativePrimitiveArgType R_calc_fitted_typ[] = { REALSXP, 
REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, INTSXP, INTSXP }; static R_NativePrimitiveArgType R_lmrob_M_S_typ[] = { REALSXP, REALSXP, REALSXP, REALSXP, /* nn */ INTSXP, INTSXP, INTSXP, INTSXP, INTSXP, /* scale */ REALSXP, REALSXP, REALSXP, /* rho_c */ REALSXP, INTSXP, REALSXP, /* K_m_s */ INTSXP, INTSXP, /* rel_tol */ REALSXP, REALSXP, REALSXP, REALSXP, /* converged */ LGLSXP, INTSXP, /* orthogonalize */ LGLSXP, LGLSXP, LGLSXP, INTSXP, INTSXP }; static R_NativePrimitiveArgType R_subsample_typ[] = { REALSXP, REALSXP, INTSXP, INTSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, LGLSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; static const R_CMethodDef CEntries[] = { CDEF(Qn0), CDEF(Sn0), CDEF(mc_C), CDEF(wgt_himed_i), CDEF(wgt_himed), CDEF(R_lmrob_S), CDEF(R_lmrob_MM), CDEF(R_find_D_scale), CDEF(R_calc_fitted), CDEF(R_lmrob_M_S), CDEF(R_subsample), {NULL, NULL, 0} }; static R_CallMethodDef CallEntries[] = { CALLDEF(R_rho_inf, 2), // -> lmrob.c CALLDEF(R_psifun, 4), CALLDEF(R_chifun, 4), CALLDEF(R_wgtfun, 3), CALLDEF(R_wgt_flex, 3), // -> rob-utils.c CALLDEF(R_rowMedians, 5),// -> rowMedians.c [Biobase also has rowQ for quantiles] {NULL, NULL, 0} }; static R_FortranMethodDef FortEntries[] = { {"rffastmcd", (DL_FUNC) &F77_SUB(rffastmcd), 49},/* ./rffastmcd.f */ {"rfltsreg", (DL_FUNC) &F77_SUB(rfltsreg), 41}, /* ./rfltsreg.f */ {"rllarsbi", (DL_FUNC) &F77_SUB(rllarsbi), 18}, /* ./rllarsbi.f */ {NULL, NULL, 0} }; void R_init_robustbase(DllInfo *dll) { R_registerRoutines(dll, CEntries, CallEntries, FortEntries, NULL); R_useDynamicSymbols(dll, FALSE); R_RegisterCCallable("robustbase", "R_psifun", (DL_FUNC) &R_psifun); R_RegisterCCallable("robustbase", "R_chifun", (DL_FUNC) &R_chifun); R_RegisterCCallable("robustbase", "R_wgtfun", (DL_FUNC) &R_wgtfun); R_RegisterCCallable("robustbase", "rho", (DL_FUNC) &rho); R_RegisterCCallable("robustbase", "psi", (DL_FUNC) &psi); R_RegisterCCallable("robustbase", "psip", (DL_FUNC) &psip); R_RegisterCCallable("robustbase", "psi2", (DL_FUNC) &psi2); R_RegisterCCallable("robustbase", "wgt", (DL_FUNC) &wgt); R_RegisterCCallable("robustbase", "rho_inf", (DL_FUNC) &rho_inf); R_RegisterCCallable("robustbase", "normcnst", (DL_FUNC) &normcnst); } robustbase/src/rf-common.f0000644000176200001440000002414614437440476015302 0ustar liggesusers c-- Routines common to c-- fastLTS ( ./rfltsreg.f ) and c-- fastMCD ( ./rffastmcd.f ) c c subroutine rfrangen(n, nsel, index) c c Randomly draws nsel cases out of n cases. c Here, index is the index set. c implicit none integer n, nsel, index(nsel) c unifrnd() == R C API's unif_rand() --> see ./R-rng4ftn.c double precision unifrnd integer i,j, num c do i=1,nsel cOLD 10 num=int(uniran(seed)*n)+1 10 num=int(unifrnd()*n)+1 C if(num .gt. n) then C call intpr('** rfrangen(): num > n; num=', -1, num, 1) C num=n C endif if(i.gt.1) then do j=1,i-1 if(index(j).eq.num) goto 10 end do endif index(i)=num end do return end c --------------------------------------------------------- cOLD function uniran(seed) cOLD cc cOLD cc Draws a random number from the uniform distribution on [0,1]. 
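c     Note: this old congruential generator is no longer used; all
c     random numbers now come from R's RNG through the wrappers
c     unifrnd(), rndstart() and rndend() defined in ./R-rng4ftn.c .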
cOLD cc cOLD real uniran cOLD integer seed cOLD integer quot cOLD cc cOLD seed=seed*5761+999 cOLD quot=seed/65536 cOLD seed=seed-quot*65536 cOLD uniran=float(seed)/65536.D0 cOLD return cOLD end c --------------------------------------------------------- subroutine rfgenpn(n,nsel,index) c c Constructs all subsets of nsel cases out of n cases: c given last index[1..nsel] produce (alphabetically) next one c c NB: The *caller* makes sure this called exactly choose(n, nsel) == rfncomb(nsel, n) times, c starting with (1, 2, .., p-1, p-1) -> return (1 2 .. p) after first call. implicit none integer n,nsel,index(nsel) cc integer k,i k=nsel index(k)=index(k)+1 c while 10 if(k.eq.1 .or. index(k).le.(n-(nsel-k))) goto 100 k=k-1 index(k)=index(k)+1 do i=k+1,nsel index(i)=index(i-1)+1 end do goto 10 c end{while} 100 return end c --------------------------------------------------------- subroutine rfshsort(a,n) cc cc Sorts the array a of length n. cc implicit none integer n double precision a(n) c double precision t integer gap, i,j, nextj gap=n c --- repeat 100 gap=gap/2 if(gap.eq.0) goto 200 do 180 i=1,n-gap j=i 120 if(j.lt.1) goto 180 nextj=j+gap if(a(j).gt.a(nextj)) then t=a(j) a(j)=a(nextj) a(nextj)=t else j=0 endif j=j-gap goto 120 180 continue goto 100 c ---- --- end repeat 200 return end c --------------------------------------------------------- subroutine rfishsort(a,n) cc cc Sorts the integer array a of length n. cc implicit none integer n, a(n) c integer t, gap, i,j, nextj gap=n c --- repeat 100 gap=gap/2 if(gap.eq.0) goto 200 do 180 i=1,n-gap j=i 120 if(j.lt.1) goto 180 nextj=j+gap if(a(j).gt.a(nextj)) then t=a(j) a(j)=a(nextj) a(nextj)=t else j=0 endif j=j-gap goto 120 180 continue goto 100 c ---- --- end repeat 200 return end c --------------------------------------------------------- integer function replow(k) cc cc Find out which combinations of n and p are cc small enough in order to perform exaustive search cc Returns the maximal n for a given p, for which cc exhaustive search is to be done cc cc k is the number of variables (p) cc implicit none integer k c integer irep(6) data irep/500,50,22,17,15,14/ c if(k .le. 6) then replow = irep(k) else replow = 0 endif return end c --------------------------------------------------------- integer function rfncomb(k,n) cc cc Computes the number of combinations of k out of n. cc (To avoid integer overflow during the computation, cc ratios of reals are multiplied sequentially.) cc For comb > 1E+009 the resulting 'comb' may be too large cc to be put in the integer 'rfncomb', but the main program cc only calls this function for small enough n and k. cc implicit none integer k,n c double precision comb,fact integer j c comb=dble(1.0) do j=1,k fact=(dble(n-j+1.0))/(dble(k-j+1.0)) comb=comb*fact end do c Should give error now instead of integer overflow! c Don't know how to get .Machine$integer.max in Fortran, portably if(comb .gt. 2147483647) then comb=2147483647. call + dblepr('** too many combinations; using max.integer instead:', + -1,comb,1) endif rfncomb=int(comb+0.5D0) return end c --------------------------------------------------------- subroutine rfcovcopy(a,b,n1,n2) cc cc Copies matrix a to matrix b. cc double precision a(n1,n2) double precision b(n1,n2) c do i=1,n1 do j=1,n2 b(i,j)=a(i,j) end do end do return end c --------------------------------------------------------- double precision function rffindq(aw, ncas, k, index) c c Finds the k-th order statistic of the array aw[1..ncas], c sorting the array aw[.] 
until aw[k] is sure to contain the k-th value c c MM{FIXME}: "rather" use R's C API rPsort (double* X, int N, int K) implicit none integer ncas,k,index(ncas) double precision aw(ncas) c double precision ax,wa integer i,j,l,lr,jnc c do j=1,ncas index(j)=j end do c lower (= l) and upper ( =lr ) bounds: l=1 lr=ncas c--- while(l < lr) 20 if(l .lt. lr) then ax=aw(k) jnc=l j=lr c--- while(jnc < j) 30 if(jnc .le. j) then 40 if(aw(jnc).ge.ax) goto 50 jnc=jnc+1 goto 40 50 if(aw(j).le.ax) goto 60 j=j-1 goto 50 60 if(jnc .le. j) then ! swap jnc <--> j i=index(jnc) index(jnc)=index(j) index(j)=i wa=aw(jnc) aw(jnc)=aw(j) aw(j)=wa jnc=jnc+1 j=j-1 endif goto 30 end if if(j.lt.k) l=jnc if(k.lt.jnc) lr=j goto 20 end if rffindq=aw(k) return end c --------------------------------------------------------- subroutine rfrdraw(a,n,ntot,mini,ngroup,kmini) cc cc Draws ngroup nonoverlapping subdatasets out of a dataset of size n, cc such that the selected case numbers are uniformly distributed from 1 to n. cc implicit none integer n, ntot, kmini, a(2,ntot), mini(kmini), ngroup c unifrnd() == R C API's unif_rand() --> see ./R-rng4ftn.c double precision unifrnd c integer jndex, nrand, k,m,i,j cc jndex=0 do k=1,ngroup do 20 m=1,mini(k) cOLD nrand=int(uniran(seed)*(n-jndex))+1 nrand=int(unifrnd()*(n-jndex))+1 C if(nrand .gt. n-jndex) then C call intpr( C 1 '** rfrdraw(): need to correct nrand > n-jndex; nrand=', C 2 -1, nrand, 1) C nrand=n-jndex C endif jndex=jndex+1 if(jndex.eq.1) then a(1,jndex)=nrand a(2,jndex)=k else a(1,jndex)=nrand+jndex-1 a(2,jndex)=k do i=1,jndex-1 if(a(1,i).gt.nrand+i-1) then do j=jndex,i+1,-1 a(1,j)=a(1,j-1) a(2,j)=a(2,j-1) end do a(1,i)=nrand+i-1 a(2,i)=k goto 20 c ------- break endif end do endif 20 continue end do return end c --------------------------------------------------------- logical function rfodd(n) rfodd=.true. if(2*(n/2).eq.n) rfodd=.false. return end c --------------------------------------------------------- c unused function rfnbreak(nhalf,n,nvar) c unused cc c unused cc Computes the breakdown value - in percent! - of the MCD estimator c unused cc c unused implicit none c unused integer rfnbreak, nhalf, n, nvar c unused c unused if (nhalf.le.(n+nvar+1)/2) then c unused rfnbreak=(nhalf-nvar)*100/n c unused else c unused rfnbreak=(n-nhalf+1)*100/n c unused endif c unused return c unused end c --------------------------------------------------------- subroutine rfmcduni(w,ncas,jqu,slutn,bstd,aw,aw2,factor,len) cc cc rfmcduni : calculates the MCD in the univariate case. 
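cc     i.e., for the sorted w(1) <= ... <= w(ncas) it locates the
cc     contiguous window of length jqu whose sum of squared deviations
cc     from its own mean is smallest; slutn(1) returns the mean of that
cc     window (for ties, of the middle tied window) and
cc     bstd = factor * sqrt(sqmin/jqu), where sqmin is that minimal sum.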
cc w contains the ordered observations cc c This version returns the index (jint) in 'len' c which is used in rfltreg.f implicit double precision (a-h,o-z), integer(i-n) integer ncas, jqu, len double precision w(ncas), aw(ncas), aw2(ncas) double precision slutn(len) cc sq=0.D0 sqmin=0.D0 ndup=1 do j=1,ncas-jqu+1 slutn(j)=0.D0 end do do jint=1,ncas-jqu+1 aw(jint)=0.D0 do j=1,jqu aw(jint)=aw(jint)+w(j+jint-1) if (jint.eq.1) sq=sq+w(j)*w(j) end do aw2(jint)=aw(jint)*aw(jint)/jqu if (jint.eq.1) then sq=sq-aw2(jint) sqmin=sq slutn(ndup)=aw(jint) len=jint else sq=sq - w(jint-1)*w(jint-1) + w(jint+jqu-1)*w(jint+jqu-1) * - aw2(jint) + aw2(jint-1) if(sq.lt.sqmin) then ndup=1 sqmin=sq slutn(ndup)=aw(jint) len=jint else if(sq.eq.sqmin) then ndup=ndup+1 slutn(ndup)=aw(jint) endif endif endif end do slutn(1)=slutn(int((ndup+1)/2))/jqu bstd=factor*sqrt(sqmin/jqu) return end c --------------------------------------------------------- robustbase/src/templates-types_undef.h0000644000176200001440000000035012441664610017706 0ustar liggesusers#undef CONCAT #undef CONCAT_MACROS #undef METHOD_NAME #undef C_METHOD_NAME #undef X_C_TYPE #undef X_IN_C #undef X_ISNAN #undef ANS_SXP #undef ANS_NA #undef ANS_C_TYPE #undef ANS_IN_C #undef X_TYPE #undef ANS_TYPE #undef MARGIN robustbase/src/R-rng4ftn.c0000644000176200001440000000033610441335044015137 0ustar liggesusers#include void F77_SUB(rndstart)(void) { GetRNGstate(); } void F77_SUB(rndend)(void) { PutRNGstate(); } double F77_SUB(normrnd)(void) { return norm_rand(); } double F77_SUB(unifrnd)(void) { return unif_rand(); } robustbase/src/monitor.c0000644000176200001440000000473514531132254015056 0ustar liggesusers// Flexible printing of informative messages from Fortran // ----------------- ------------ // other MM versions: ~/R/Pkgs/cobs99/src/monitor.c ~/R/Pkgs/lokern/src/monitor.c #include /* called for trace >= 2 : ----------------------------------------------- */ #ifdef _new_gfortran__fixme # define F_Logical int_least32_t * #else # define F_Logical int * #endif // but 'int *' gives warnings with LTO ? void F77_SUB(println)(void) { Rprintf("\n"); } void F77_SUB(prallc)(int *nrep) { Rprintf("will use *all* combinations: %d\n", *nrep); } void F77_SUB(pr1mcd)(int *i_trace, int *n, int *nvar, int *nhallf, int *krep, int *nmini, int *kmini) { Rprintf("rffastmcd(n=%d, nvar=%d, nhallf=%d, krep=%d, nmini=%d, kmini=%d, i_trace=%d)\n", *n, *nvar, *nhallf, *krep, *nmini, *kmini, *i_trace); } void F77_SUB(pr2mcd)(F_Logical part, F_Logical all, int *kstep, int *ngroup, int *minigr, int *nhalf, int *nrep) { Rprintf("pr[2]: (part=%d, all=%d); (kstep=%d, ngroup=%d, minigr=%d, nhalf=%d, nrep=%d)\n", *part, *all, *kstep, *ngroup, *minigr, *nhalf, *nrep); } void F77_SUB(pr3mcd)(F_Logical part, F_Logical fine, F_Logical final, int *nrep, int *nn, int *nsel, int *nhalf, int *kstep, int *nmini, int *kmini) { char* phase_kind = (*part) ? ((*fine && !*final) ? "fine (2 of 3)" : ((*final) ? "final (3 of 3)" : "first (of 3)")) : ((*final) ? 
"final" : "one"); Rprintf(" Main loop, phase[%s]:\n (nrep=%4d, nn=%4d, nsel=%4d, nhalf=%4d, kstep=%d, nmini=%d, kmini=%d)\n", phase_kind, *nrep, *nn, *nsel, *nhalf, *kstep, *nmini, *kmini); } void F77_SUB(prp1mcd)(int *n, int *ngroup, int *minigr, int *nhalf, int *nrep, int mini[]) { // int mini[*kmini]; Rprintf(" Partitioning n=%d into at most kmini groups: ngroup=%d, minigr=%d, nhalf=%d, nrep=%d;" "\n groups are of sizes (", *n, *ngroup, *minigr, *nhalf, *nrep); for(int j=0; j < *ngroup; j++) Rprintf(" %d", mini[j]); Rprintf(")\n"); } void F77_SUB(pr9mcd)(int *ntot) { Rprintf(" -- finishing: total times = %d\n", *ntot); } /* called for trace >= 3 : ----------------------------------------------- */ void F77_SUB(prgrmcd)(int *ii, int *nn, int *i_trace) { Rprintf(" group ii = %d (nn = %d)%s\n", *ii, *nn, (*i_trace >= 4) ? ": i=1..nrep loop: " : ""); } void F77_SUB(pr4mcd)(int *i) { Rprintf(" i = %d ", *i); } void F77_SUB(pr5mcd)(int *step, int *ntot) { Rprintf("(step %d, tot=%d)", *step, *ntot); } robustbase/src/Makevars0000644000176200001440000000021710477324431014713 0ustar liggesusers## Dear Emacs, make me -*- Makefile -*- ## we use the BLAS and now also the LAPACK library: PKG_LIBS= $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) robustbase/src/eigen.f0000644000176200001440000005117112432357143014461 0ustar liggesusersc--- EISPACK Eigen Value Computation --- c--- ================================ --- c--- This has been /src/appl/eigen.f from the 1990's upto 2013 c--- when it has become deprecated and unused in R's own sources ---------- c--- c--- "TODO": Use LAPACK's eigen routines instead ------------------------ c--- ====== -------------------------------------------------------------- c DOUBLE PRECISION FUNCTION EPSLON (X) DOUBLE PRECISION X C C ESTIMATE UNIT ROUNDOFF IN QUANTITIES OF SIZE X. C DOUBLE PRECISION A,B,C,EPS C C THIS PROGRAM SHOULD FUNCTION PROPERLY ON ALL SYSTEMS C SATISFYING THE FOLLOWING TWO ASSUMPTIONS, C 1. THE BASE USED IN REPRESENTING FLOATING POINT C NUMBERS IS NOT A POWER OF THREE. C 2. THE QUANTITY A IN STATEMENT 10 IS REPRESENTED TO C THE ACCURACY USED IN FLOATING POINT VARIABLES C THAT ARE STORED IN MEMORY. C THE STATEMENT NUMBER 10 AND THE GO TO 10 ARE INTENDED TO C FORCE OPTIMIZING COMPILERS TO GENERATE CODE SATISFYING C ASSUMPTION 2. C UNDER THESE ASSUMPTIONS, IT SHOULD BE TRUE THAT, C A IS NOT EXACTLY EQUAL TO FOUR-THIRDS, C B HAS A ZERO FOR ITS LAST BIT OR DIGIT, C C IS NOT EXACTLY EQUAL TO ONE, C EPS MEASURES THE SEPARATION OF 1.0 FROM C THE NEXT LARGER FLOATING POINT NUMBER. C THE DEVELOPERS OF EISPACK WOULD APPRECIATE BEING INFORMED C ABOUT ANY SYSTEMS WHERE THESE ASSUMPTIONS DO NOT HOLD. C C THIS VERSION DATED 4/6/83. C A = 4.0D0/3.0D0 10 B = A - 1.0D0 C = B + B + B EPS = DABS(C-1.0D0) IF (EPS .EQ. 0.0D0) GO TO 10 EPSLON = EPS*DABS(X) RETURN END DOUBLE PRECISION FUNCTION PYTHAG(A,B) DOUBLE PRECISION A,B C C FINDS DSQRT(A**2+B**2) WITHOUT OVERFLOW OR DESTRUCTIVE UNDERFLOW C DOUBLE PRECISION P,R,S,T,U P = DMAX1(DABS(A),DABS(B)) IF (P .EQ. 0.0D0) GO TO 20 R = (DMIN1(DABS(A),DABS(B))/P)**2 10 CONTINUE T = 4.0D0 + R IF (T .EQ. 4.0D0) GO TO 20 S = R/T U = 1.0D0 + 2.0D0*S P = U*P R = (S/U)**2 * R GO TO 10 20 PYTHAG = P RETURN END SUBROUTINE RS(NM,N,A,W,MATZ,Z,FV1,FV2,IERR) C INTEGER N,NM,IERR,MATZ DOUBLE PRECISION A(NM,N),W(N),Z(NM,N),FV1(N),FV2(N) C C THIS SUBROUTINE CALLS THE RECOMMENDED SEQUENCE OF C SUBROUTINES FROM THE EIGENSYSTEM SUBROUTINE PACKAGE (EISPACK) C TO FIND THE EIGENVALUES AND EIGENVECTORS (IF DESIRED) C OF A REAL SYMMETRIC MATRIX. 
C C ON INPUT C C NM MUST BE SET TO THE ROW DIMENSION OF THE TWO-DIMENSIONAL C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM C DIMENSION STATEMENT. C C N IS THE ORDER OF THE MATRIX A. C C A CONTAINS THE REAL SYMMETRIC MATRIX. C C MATZ IS AN INTEGER VARIABLE SET EQUAL TO ZERO IF C ONLY EIGENVALUES ARE DESIRED. OTHERWISE IT IS SET TO C ANY NON-ZERO INTEGER FOR BOTH EIGENVALUES AND EIGENVECTORS. C C ON OUTPUT C C W CONTAINS THE EIGENVALUES IN ASCENDING ORDER. C C Z CONTAINS THE EIGENVECTORS IF MATZ IS NOT ZERO. C C IERR IS AN INTEGER OUTPUT VARIABLE SET EQUAL TO AN ERROR C COMPLETION CODE DESCRIBED IN THE DOCUMENTATION FOR TQLRAT C AND TQL2. THE NORMAL COMPLETION CODE IS ZERO. C C FV1 AND FV2 ARE TEMPORARY STORAGE ARRAYS. C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ C IF (N .LE. NM) GO TO 10 IERR = 10 * N GO TO 50 C 10 IF (MATZ .NE. 0) GO TO 20 C .......... FIND EIGENVALUES ONLY .......... CALL TRED1(NM,N,A,W,FV1,FV2) CALL TQLRAT(N,W,FV2,IERR) GO TO 50 C .......... FIND BOTH EIGENVALUES AND EIGENVECTORS .......... 20 CALL TRED2(NM,N,A,W,FV1,Z) CALL TQL2(NM,N,W,FV1,Z,IERR) 50 RETURN END SUBROUTINE TQL2(NM,N,D,E,Z,IERR) C INTEGER I,J,K,L,M,N,II,L1,L2,NM,MML,IERR DOUBLE PRECISION D(N),E(N),Z(NM,N) DOUBLE PRECISION C,C2,C3,DL1,EL1,F,G,H,P,R,S,S2,TST1,TST2,PYTHAG C C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL2, C NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND C WILKINSON. C HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971). C C THIS SUBROUTINE FINDS THE EIGENVALUES AND EIGENVECTORS C OF A SYMMETRIC TRIDIAGONAL MATRIX BY THE QL METHOD. C THE EIGENVECTORS OF A FULL SYMMETRIC MATRIX CAN ALSO C BE FOUND IF TRED2 HAS BEEN USED TO REDUCE THIS C FULL MATRIX TO TRIDIAGONAL FORM. C C ON INPUT C C NM MUST BE SET TO THE ROW DIMENSION OF TWO-DIMENSIONAL C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM C DIMENSION STATEMENT. C C N IS THE ORDER OF THE MATRIX. C C D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. C C E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX C IN ITS LAST N-1 POSITIONS. E(1) IS ARBITRARY. C C Z CONTAINS THE TRANSFORMATION MATRIX PRODUCED IN THE C REDUCTION BY TRED2, IF PERFORMED. IF THE EIGENVECTORS C OF THE TRIDIAGONAL MATRIX ARE DESIRED, Z MUST CONTAIN C THE IDENTITY MATRIX. C C ON OUTPUT C C D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN C ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT BUT C UNORDERED FOR INDICES 1,2,...,IERR-1. C C E HAS BEEN DESTROYED. C C Z CONTAINS ORTHONORMAL EIGENVECTORS OF THE SYMMETRIC C TRIDIAGONAL (OR FULL) MATRIX. IF AN ERROR EXIT IS MADE, C Z CONTAINS THE EIGENVECTORS ASSOCIATED WITH THE STORED C EIGENVALUES. C C IERR IS SET TO C ZERO FOR NORMAL RETURN, C J IF THE J-TH EIGENVALUE HAS NOT BEEN C DETERMINED AFTER 30 ITERATIONS. C C CALLS PYTHAG FOR DSQRT(A*A + B*B) . C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ c c unnecessary initialization of C3 and S2 to keep g77 -Wall happy c C3 = 0.0D0 S2 = 0.0D0 C IERR = 0 IF (N .EQ. 1) GO TO 1001 C do I = 2, N E(I-1) = E(I) end do C F = 0.0D0 TST1 = 0.0D0 E(N) = 0.0D0 C DO 240 L = 1, N J = 0 H = DABS(D(L)) + DABS(E(L)) IF (TST1 .LT. H) TST1 = H C .......... 
LOOK FOR SMALL SUB-DIAGONAL ELEMENT .......... DO 110 M = L, N TST2 = TST1 + DABS(E(M)) IF (TST2 .EQ. TST1) GO TO 120 C .......... E(N) IS ALWAYS ZERO, SO THERE IS NO EXIT C THROUGH THE BOTTOM OF THE LOOP .......... 110 CONTINUE C 120 IF (M .EQ. L) GO TO 220 130 IF (J .EQ. 30) GO TO 1000 J = J + 1 C .......... FORM SHIFT .......... L1 = L + 1 L2 = L1 + 1 G = D(L) P = (D(L1) - G) / (2.0D0 * E(L)) R = PYTHAG(P,1.0D0) D(L) = E(L) / (P + DSIGN(R,P)) D(L1) = E(L) * (P + DSIGN(R,P)) DL1 = D(L1) H = G - D(L) IF (L2 .GT. N) GO TO 145 C DO I = L2, N D(I) = D(I) - H end do C 145 F = F + H C .......... QL TRANSFORMATION .......... P = D(M) C = 1.0D0 C2 = C EL1 = E(L1) S = 0.0D0 MML = M - L C .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... DO 200 II = 1, MML C3 = C2 C2 = C S2 = S I = M - II G = C * E(I) H = C * P R = PYTHAG(P,E(I)) E(I+1) = S * R S = E(I) / R C = P / R P = C * D(I) - S * G D(I+1) = H + S * (C * G + S * D(I)) C .......... FORM VECTOR .......... DO 180 K = 1, N H = Z(K,I+1) Z(K,I+1) = S * Z(K,I) + C * H Z(K,I) = C * Z(K,I) - S * H 180 CONTINUE C 200 CONTINUE C P = -S * S2 * C3 * EL1 * E(L) / DL1 E(L) = S * P D(L) = C * P TST2 = TST1 + DABS(E(L)) IF (TST2 .GT. TST1) GO TO 130 220 D(L) = D(L) + F 240 CONTINUE C .......... ORDER EIGENVALUES AND EIGENVECTORS .......... DO 300 II = 2, N I = II - 1 K = I P = D(I) C DO 260 J = II, N IF (D(J) .GE. P) GO TO 260 K = J P = D(J) 260 CONTINUE C IF (K .EQ. I) GO TO 300 D(K) = D(I) D(I) = P C DO 280 J = 1, N P = Z(J,I) Z(J,I) = Z(J,K) Z(J,K) = P 280 CONTINUE C 300 CONTINUE C GO TO 1001 C .......... SET ERROR -- NO CONVERGENCE TO AN C EIGENVALUE AFTER 30 ITERATIONS .......... 1000 IERR = L 1001 RETURN END SUBROUTINE TQLRAT(N,D,E2,IERR) C INTEGER I,J,L,M,N,II,L1,MML,IERR DOUBLE PRECISION D(N),E2(N) DOUBLE PRECISION B,C,F,G,H,P,R,S,T,EPSLON,PYTHAG C C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQLRAT, C ALGORITHM 464, COMM. ACM 16, 689(1973) BY REINSCH. C C THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC C TRIDIAGONAL MATRIX BY THE RATIONAL QL METHOD. C C ON INPUT C C N IS THE ORDER OF THE MATRIX. C C D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. C C E2 CONTAINS THE SQUARES OF THE SUBDIAGONAL ELEMENTS OF THE C INPUT MATRIX IN ITS LAST N-1 POSITIONS. E2(1) IS ARBITRARY. C C ON OUTPUT C C D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN C ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND C ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE C THE SMALLEST EIGENVALUES. C C E2 HAS BEEN DESTROYED. C C IERR IS SET TO C ZERO FOR NORMAL RETURN, C J IF THE J-TH EIGENVALUE HAS NOT BEEN C DETERMINED AFTER 30 ITERATIONS. C C CALLS PYTHAG FOR DSQRT(A*A + B*B) . C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ c c unnecessary initialization of B and C to keep g77 -Wall happy c B = 0.0D0 C = 0.0D0 C IERR = 0 IF (N .EQ. 1) GO TO 1001 C DO I = 2, N E2(I-1) = E2(I) end do C F = 0.0D0 T = 0.0D0 E2(N) = 0.0D0 C DO 290 L = 1, N J = 0 H = DABS(D(L)) + DSQRT(E2(L)) IF (T .GT. H) GO TO 105 T = H B = EPSLON(T) C = B * B C .......... LOOK FOR SMALL SQUARED SUB-DIAGONAL ELEMENT .......... 105 DO 110 M = L, N IF (E2(M) .LE. C) GO TO 120 C .......... E2(N) IS ALWAYS ZERO, SO THERE IS NO EXIT C THROUGH THE BOTTOM OF THE LOOP .......... 110 CONTINUE C 120 IF (M .EQ. L) GO TO 210 130 IF (J .EQ. 30) GO TO 1000 J = J + 1 C .......... FORM SHIFT .......... 
L1 = L + 1 S = DSQRT(E2(L)) G = D(L) P = (D(L1) - G) / (2.0D0 * S) R = PYTHAG(P,1.0D0) D(L) = S / (P + DSIGN(R,P)) H = G - D(L) C DO I = L1, N D(I) = D(I) - H end do C F = F + H C .......... RATIONAL QL TRANSFORMATION .......... G = D(M) IF (G .EQ. 0.0D0) G = B H = G S = 0.0D0 MML = M - L C .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... DO 200 II = 1, MML I = M - II P = G * H R = P + E2(I) E2(I+1) = S * R S = E2(I) / R D(I+1) = H + S * (H + D(I)) G = D(I) - E2(I) / G IF (G .EQ. 0.0D0) G = B H = G * P / R 200 CONTINUE C E2(L) = S * G D(L) = H C .......... GUARD AGAINST UNDERFLOW IN CONVERGENCE TEST .......... IF (H .EQ. 0.0D0) GO TO 210 IF (DABS(E2(L)) .LE. DABS(C/H)) GO TO 210 E2(L) = H * E2(L) IF (E2(L) .NE. 0.0D0) GO TO 130 210 P = D(L) + F C .......... ORDER EIGENVALUES .......... IF (L .EQ. 1) GO TO 250 C .......... FOR I=L STEP -1 UNTIL 2 DO -- .......... DO 230 II = 2, L I = L + 2 - II IF (P .GE. D(I-1)) GO TO 270 D(I) = D(I-1) 230 CONTINUE C 250 I = 1 270 D(I) = P 290 CONTINUE C GO TO 1001 C .......... SET ERROR -- NO CONVERGENCE TO AN C EIGENVALUE AFTER 30 ITERATIONS .......... 1000 IERR = L 1001 RETURN END SUBROUTINE TRED1(NM,N,A,D,E,E2) C INTEGER I,J,K,L,N,II,NM,JP1 DOUBLE PRECISION A(NM,N),D(N),E(N),E2(N) DOUBLE PRECISION F,G,H,SCALE C C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TRED1, C NUM. MATH. 11, 181-195(1968) BY MARTIN, REINSCH, AND WILKINSON. C HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 212-226(1971). C C THIS SUBROUTINE REDUCES A REAL SYMMETRIC MATRIX C TO A SYMMETRIC TRIDIAGONAL MATRIX USING C ORTHOGONAL SIMILARITY TRANSFORMATIONS. C C ON INPUT C C NM MUST BE SET TO THE ROW DIMENSION OF TWO-DIMENSIONAL C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM C DIMENSION STATEMENT. C C N IS THE ORDER OF THE MATRIX. C C A CONTAINS THE REAL SYMMETRIC INPUT MATRIX. ONLY THE C LOWER TRIANGLE OF THE MATRIX NEED BE SUPPLIED. C C ON OUTPUT C C A CONTAINS INFORMATION ABOUT THE ORTHOGONAL TRANS- C FORMATIONS USED IN THE REDUCTION IN ITS STRICT LOWER C TRIANGLE. THE FULL UPPER TRIANGLE OF A IS UNALTERED. C C D CONTAINS THE DIAGONAL ELEMENTS OF THE TRIDIAGONAL MATRIX. C C E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE TRIDIAGONAL C MATRIX IN ITS LAST N-1 POSITIONS. E(1) IS SET TO ZERO. C C E2 CONTAINS THE SQUARES OF THE CORRESPONDING ELEMENTS OF E. C E2 MAY COINCIDE WITH E IF THE SQUARES ARE NOT NEEDED. C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ C DO 100 I = 1, N D(I) = A(N,I) A(N,I) = A(I,I) 100 CONTINUE C .......... FOR I=N STEP -1 UNTIL 1 DO -- .......... DO 300 II = 1, N I = N + 1 - II L = I - 1 H = 0.0D0 SCALE = 0.0D0 IF (L .LT. 1) GO TO 130 C .......... SCALE ROW (ALGOL TOL THEN NOT NEEDED) .......... DO K = 1, L SCALE = SCALE + DABS(D(K)) end do C IF (SCALE .NE. 0.0D0) GO TO 140 C DO 125 J = 1, L D(J) = A(L,J) A(L,J) = A(I,J) A(I,J) = 0.0D0 125 CONTINUE C 130 E(I) = 0.0D0 E2(I) = 0.0D0 GO TO 300 C 140 continue DO K = 1, L D(K) = D(K) / SCALE H = H + D(K) * D(K) end do C E2(I) = SCALE * SCALE * H F = D(L) G = -DSIGN(DSQRT(H),F) E(I) = SCALE * G H = H - F * G D(L) = F - G IF (L .gt. 1) then C .......... FORM A*U .......... DO J = 1, L E(J) = 0.0D0 end do C DO 240 J = 1, L F = D(J) G = E(J) + A(J,J) * F JP1 = J + 1 IF (L .ge. JP1) then DO K = JP1, L G = G + A(K,J) * D(K) E(K) = E(K) + A(K,J) * F end do end if E(J) = G 240 CONTINUE C .......... FORM P .......... 
F = 0.0D0 C DO J = 1, L E(J) = E(J) / H F = F + E(J) * D(J) end do C H = F / (H + H) C .......... FORM Q .......... DO J = 1, L E(J) = E(J) - H * D(J) end do C .......... FORM REDUCED A .......... DO J = 1, L F = D(J) G = E(J) DO K = J, L A(K,J) = A(K,J) - F * E(K) - G * D(K) end do end do end if c 285 DO J = 1, L F = D(J) D(J) = A(L,J) A(L,J) = A(I,J) A(I,J) = F * SCALE end do C 300 CONTINUE RETURN END SUBROUTINE TRED2(NM,N,A,D,E,Z) C INTEGER I,J,K,L,N,II,NM,JP1 DOUBLE PRECISION A(NM,N),D(N),E(N),Z(NM,N) DOUBLE PRECISION F,G,H,HH,SCALE C C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TRED2, C NUM. MATH. 11, 181-195(1968) BY MARTIN, REINSCH, AND WILKINSON. C HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 212-226(1971). C C THIS SUBROUTINE REDUCES A REAL SYMMETRIC MATRIX TO A C SYMMETRIC TRIDIAGONAL MATRIX USING AND ACCUMULATING C ORTHOGONAL SIMILARITY TRANSFORMATIONS. C C ON INPUT C C NM MUST BE SET TO THE ROW DIMENSION OF TWO-DIMENSIONAL C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM C DIMENSION STATEMENT. C C N IS THE ORDER OF THE MATRIX. C C A CONTAINS THE REAL SYMMETRIC INPUT MATRIX. ONLY THE C LOWER TRIANGLE OF THE MATRIX NEED BE SUPPLIED. C C ON OUTPUT C C D CONTAINS THE DIAGONAL ELEMENTS OF THE TRIDIAGONAL MATRIX. C C E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE TRIDIAGONAL C MATRIX IN ITS LAST N-1 POSITIONS. E(1) IS SET TO ZERO. C C Z CONTAINS THE ORTHOGONAL TRANSFORMATION MATRIX C PRODUCED IN THE REDUCTION. C C A AND Z MAY COINCIDE. IF DISTINCT, A IS UNALTERED. C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ C DO I = 1, N DO J = I, N Z(J,I) = A(J,I) end do D(I) = A(N,I) end do C IF (N .EQ. 1) GO TO 510 C .......... FOR I=N STEP -1 UNTIL 2 DO -- .......... DO 300 II = 2, N I = N + 2 - II L = I - 1 H = 0.0D0 SCALE = 0.0D0 IF (L .LT. 2) GO TO 130 C .......... SCALE ROW (ALGOL TOL THEN NOT NEEDED) .......... DO K = 1, L SCALE = SCALE + DABS(D(K)) end do C IF (SCALE .NE. 0.0D0) GO TO 140 130 E(I) = D(L) C DO J = 1, L D(J) = Z(L,J) Z(I,J) = 0.0D0 Z(J,I) = 0.0D0 end do C GO TO 290 C 140 CONTINUE DO K = 1, L D(K) = D(K) / SCALE H = H + D(K) * D(K) end do C F = D(L) G = -DSIGN(DSQRT(H),F) E(I) = SCALE * G H = H - F * G D(L) = F - G C .......... FORM A*U .......... DO J = 1, L E(J) = 0.0D0 end do C DO 240 J = 1, L F = D(J) Z(J,I) = F G = E(J) + Z(J,J) * F JP1 = J + 1 IF (L .ge. JP1) then do K = JP1, L G = G + Z(K,J) * D(K) E(K) = E(K) + Z(K,J) * F end do end if E(J) = G 240 CONTINUE C .......... FORM P .......... F = 0.0D0 C do J = 1, L E(J) = E(J) / H F = F + E(J) * D(J) end do C HH = F / (H + H) C .......... FORM Q .......... do J = 1, L E(J) = E(J) - HH * D(J) end do C .......... FORM REDUCED A .......... DO J = 1, L F = D(J) G = E(J) C do K = J, L Z(K,J) = Z(K,J) - F * E(K) - G * D(K) end do C D(J) = Z(L,J) Z(I,J) = 0.0D0 end do C 290 D(I) = H 300 CONTINUE C .......... ACCUMULATION OF TRANSFORMATION MATRICES .......... do I = 2, N L = I - 1 Z(N,L) = Z(L,L) Z(L,L) = 1.0D0 H = D(I) IF (H .EQ. 
0.0D0) GO TO 380 C do K = 1, L D(K) = Z(K,I) / H end do C DO J = 1, L G = 0.0D0 C do K = 1, L G = G + Z(K,I) * Z(K,J) end do C do K = 1, L Z(K,J) = Z(K,J) - G * D(K) end do end do 380 continue do K = 1, L Z(K,I) = 0.0D0 end do end do C 510 CONTINUE DO I = 1, N D(I) = Z(N,I) Z(N,I) = 0.0D0 end do Z(N,N) = 1.0D0 E(1) = 0.0D0 RETURN END robustbase/src/wgt_himed.c0000644000176200001440000000442713774372203015344 0ustar liggesusers/* * R : A Computer Language for Statistical Data Analysis * Copyright (C) 2006--2007 the R Development Core Team * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Used to be part of ./qn_sn.c Note by MM: We have explicit permission from P.Rousseeuw to licence it under the GNU Public Licence. See also ../inst/Copyrights */ #include /* ^^^^^^^^^^ is supposedly more common and standard than * #include * or #include */ /* --> int64_t ; if people don't have the above, they can forget about it.. */ #include /* -> and much more */ #include "robustbase.h" // whimed() and whimed_i() function called from C : in ./mc.c , ./qn_sn.c : #define _i_whimed_ #include "wgt_himed_templ.h" #define _d_whimed_ #include "wgt_himed_templ.h" /* Interface routines to be called via .C() : */ void wgt_himed_i(double *x, Sint *n, Sint *iw, double *res) { double *a_srt, *acand; int *iw_cand, nn = (int)*n; char *vmax; vmax = vmaxget(); acand = (double *)R_alloc(nn, sizeof(double)); a_srt = (double *)R_alloc(nn, sizeof(double)); iw_cand= (int *) R_alloc(nn, sizeof(int)); *res = whimed_i(x, (int *)iw, nn, acand, a_srt, iw_cand); vmaxset(vmax); } void wgt_himed(double *x, Sint *n, double *w, double *res) { double *a_srt, *a_cand, *w_cand; int nn = (int)*n; char *vmax; vmax = vmaxget(); a_cand = (double *) R_alloc(nn, sizeof(double)); a_srt = (double *) R_alloc(nn, sizeof(double)); w_cand = (double *) R_alloc(nn, sizeof(double)); *res = whimed(x, w, nn, a_cand, a_srt, w_cand); vmaxset(vmax); } robustbase/src/wgt_himed_templ.h0000644000176200001440000000540413774372203016546 0ustar liggesusers/*------ Definition of a template for whimed(_i) : * * -------- ~~~~~~ * i.e., included several times from ./wgt_himed.c * ~~~~~~~~~~~~~ */ #ifdef _d_whimed_ # define _WHIMED_ whimed # define _WGT_TYPE_ double # define _WGT_SUM_TYPE_ double # undef _d_whimed_ #elif defined (_i_whimed_) # define _WHIMED_ whimed_i # define _WGT_TYPE_ int # define _WGT_SUM_TYPE_ int64_t # undef _i_whimed_ #else # error "must define correct whimed_ macro !" #endif double _WHIMED_(double *a, _WGT_TYPE_ *w, int n, double* a_cand, double *a_srt, _WGT_TYPE_* w_cand) { /* Algorithm to compute the weighted high median in O(n) time. The whimed is defined as the smallest a[j] such that the sum of the weights of all a[i] <= a[j] is strictly greater than half of the total weight. 
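    A small worked example of this definition:  for a = (1,2,3,4) with
    weights w = (1,1,1,3), the total weight is 6; the cumulative weights
    of {a[i] <= a[j]} are 1, 2, 3, 6 for a[j] = 1, 2, 3, 4, and the
    smallest a[j] whose cumulative weight exceeds 3 is 4, so whimed = 4.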
Arguments: a: double array containing the observations n: number of observations w: array of (int/double) weights of the observations. */ int i; /* sum of weights: `int' do overflow when n ~>= 1e5 */ _WGT_SUM_TYPE_ wleft, wmid, wright, w_tot, wrest; double trial; w_tot = wrest = 0; for (i = 0; i < n; ++i) w_tot += w[i]; #ifdef DEBUG_whimed REprintf("wgt_himed(a[], w[], n) -- on entry: n=%d, w_tot=%g\n", n, (double)w_tot); #endif if(n == 0) return NA_REAL; /* REPEAT : */ do { int n2 = n/2;/* =^= n/2 +1 with 0-indexing */ for (i = 0; i < n; ++i) a_srt[i] = a[i]; rPsort(a_srt, n, n2); trial = a_srt[n2]; wleft = 0; wmid = 0; wright= 0; for (i = 0; i < n; ++i) { if (a[i] < trial) wleft += w[i]; else if (a[i] > trial) wright += w[i]; else wmid += w[i]; } /* wleft = sum_{i; a[i] < trial} w[i] * wmid = sum_{i; a[i] == trial} w[i] at least one 'i' since trial is one a[]! * wright= sum_{i; a[i] > trial} w[i] */ #ifdef DEBUG_whimed REprintf(" trial=%-g; w(left|mid|right) = (%g,%g,%g); ", trial, (double)wleft, (double)wmid, (double)wright); #endif int kcand = 0; if (2 * (wrest + wleft) > w_tot) { for (i = 0; i < n; ++i) { if (a[i] < trial) { a_cand[kcand] = a[i]; w_cand[kcand] = w[i]; ++kcand; } } } else if (2 * (wrest + wleft + wmid) <= w_tot) { for (i = 0; i < n; ++i) { if (a[i] > trial) { a_cand[kcand] = a[i]; w_cand[kcand] = w[i]; ++kcand; } } wrest += wleft + wmid; #ifdef DEBUG_whimed REprintf(" new wrest = %g; ", (double)wrest); #endif } else { #ifdef DEBUG_whimed REprintf(" -> found! return trial\n"); #endif return trial; /*==========*/ } n = kcand; #ifdef DEBUG_whimed REprintf(" ... and try again with n:= kcand=%d\n", n); #endif for (i = 0; i < n; ++i) { a[i] = a_cand[i]; w[i] = w_cand[i]; } } while(1); } /* _WHIMED_ */ #undef _WHIMED_ #undef _WGT_TYPE_ #undef _WGT_SUM_TYPE_ robustbase/src/mc.c0000644000176200001440000002674114531407074013774 0ustar liggesusers/* Algorithm for the skewness estimator medcouple (MC) -------------------------------------------------- ( originally matlabmc.c and also mc/mcrsoft/spmc.c ) */ #include #include #include // -> int64_t #include /* -> fmax2(.,.) */ #include /* Interface routines to be called via .C() and those from API : */ #include "robustbase.h" /* including whimed_i(a,iw,n): the weighted high median of an array a of length n, using the positive integer weights iw[]. * which is in ./wgt_himed.c_templ * ~~~~~~~~~~~~~~~~~ */ /* Includes the auxiliary function h_kern(a,b, ai,bi,ab, eps): the values h(a,b) needed to compute the mc */ static double h_kern(double a, double b, int ai, int bi, int ab, double eps, Rboolean do_scale); // Called via .C() : void mc_C(double *z, int *in, double *eps, int *iter, double *out, int *scale) { *out = mc_C_d(z, *in, eps, iter, *scale); return; } /* MM: The tolerance 'eps1' and 'eps2' can now be passed from R; * the original code had only one 'eps' for both and hardcoded * eps = 0.0000000000001; (== 1e-13 ) * * MK: eps1: for (relative) "equality" checks * eps2: used to check for over- and underflow, respectively * therefore I suggest eps1 = DBL_EPS and eps2 = DBL_MIN */ double mc_C_d(const double z[], int n, const double eps[], int *iter, int scale) { /* NOTE: eps = c(eps1, eps2) iter := c(maxit, trace.lev) as input = c(it, converged) as output */ int trace_lev = iter[1], it = 0; Rboolean converged = TRUE, do_scale = (Rboolean) scale; static const double Large = DBL_MAX / 4.; double medc = 0; // "the" result if (n < 3) goto Finish; /* copy data before sort()ing in place, also reflecting it -- dealing with +-Inf. 
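       (Concretely: each z[i] is first truncated to +-Large = +-DBL_MAX/4
        if it is infinite, and then stored with flipped sign, x[i+1] = -z[i].)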
NOTE: x[0] "empty" so we can use 1-indexing below */ double *x = (double *) R_alloc(n+1, sizeof(double)); x[0] = 0; for (int i = 0; i < n; i++) { double zi = z[i]; x[i+1] = - ((zi == R_PosInf) ? Large : (zi == R_NegInf ? -Large : zi)); } R_rsort(&x[1], n); /* full sort */ double xmed; // := median( x[1:n] ) = - median( z[0:(n-1)] ): if (n%2) { // n odd xmed = x[(n/2)+1]; } else { // n even int ind = (n/2); xmed = (x[ind] + x[ind+1])/2; } double x_eps = eps[0] * (eps[0] + fabs(xmed)); // was x_eps = eps[0] * (do_scale ? eps[0] + fabs(xmed) : fabs(xmed)); if (fabs(x[1] - xmed) <= x_eps) { if(trace_lev) Rprintf("mc_C_d(z[1:%d],*): |x[1]-med| <= x_eps=%g extreme -> medc := -1\n", n, x_eps); medc = -1.; goto Finish; } else if (fabs(x[n] - xmed) <= x_eps) { if(trace_lev) Rprintf("mc_C_d(z[1:%d],*): |x[n]-med| <= x_eps=%g extreme -> medc := +1\n", n, x_eps); medc = 1.; goto Finish; } /* else : median is not at the border ------------------- */ if(trace_lev) Rprintf("mc_C_d(z[1:%d], trace_lev=%d, scale=%s): Median = %g (not at the border)\n", n, trace_lev, do_scale ? "T" : "F", -xmed); int i,j; /* center x[] wrt median --> such that then median( x[1:n] ) == 0 */ for (i = 1; i <= n; i++) x[i] -= xmed; if(do_scale) { /* MM: ==> This scaling is extremely outlier-dependent -- it *kills* equivariance when e.g. x[n] --> very large. e.g., below '(eps[0] + fabs(xmed))' depends on rescaling Should *NOT* be needed if everything else is *relative* instead of absolute Consider replacing 1) eps[0] * (eps[0] + fabs(xmed)) with eps[0]*fabs(xmed) 2) x[j] > x_eps with x[j] >= x_eps (>= : for 0) */ /* Now scale to inside [-0.5, 0.5] and flip sign such that afterwards * x[1] >= x[2] >= ... >= x[n] */ double xden = -2 * fmax2(-x[1], x[n]); for (i = 1; i <= n; i++) x[i] /= xden; xmed /= xden; x_eps = eps[0] * (eps[0] + fabs(xmed)); if(trace_lev >= 2) Rprintf(" x[] has been rescaled (* 1/s) with s = %g\n", -xden); } else { // no re-scaling; still flipping signs : for (i = 1; i <= n; i++) x[i] *= -1.; } j = 1; while (j <= n && x[j] >= x_eps) { /* test relative to xmed */ /* x1[j] = x[j]; */ j++; } if(trace_lev >= 2) Rprintf(" x1[] := {x | x_j >= x_eps = %g} has %d ('j-1') entries\n", x_eps, j-1); i = 1; double *x2 = x+j-1; /* pointer -- corresponding to x2[i] = x[j]; */ while (j <= n && x[j] >= -x_eps) { /* test relative to xmed */ /* x1[j] = x[j]; */ /* x2[i] = x[j]; */ j++; i++; } /* now x1[] := {x | x_j > -eps} also includes the median (0) */ if(trace_lev >= 2) Rprintf("'median-x' {x | -eps < x_i <= eps} has %d (= 'k') entries\n", i-1); int h1 = j-1, /* == size of x1[] == the sum of those two sizes above */ /* conceptually, x2[] := {x | x_j <= eps} (which includes the median 0) */ h2 = i + (n-j);// == size of x2[] == maximal size of whimed() arrays if(trace_lev) Rprintf(" now allocating 2+5 work arrays of size (1+) h2=%d each:\n", h2); /* work arrays for whimed_i() : allocate *once* only !! 
*/ double *acand = (double *) R_alloc(h2, sizeof(double)), *a_srt = (double *) R_alloc(h2, sizeof(double)); int *iw_cand= (int *) R_alloc(h2, sizeof(int)), /* work arrays for the fast-median-of-table algorithm: * currently still with 1-indexing */ *left = (int *) R_alloc((h2+1), sizeof(int)), *right = (int *) R_alloc((h2+1), sizeof(int)), *p = (int *) R_alloc((h2+1), sizeof(int)), *q = (int *) R_alloc((h2+1), sizeof(int)); for (i = 1; i <= h2; i++) { left [i] = 1; right[i] = h1; } int64_t nr = ((int64_t) h1) * ((int64_t) h2), // <-- careful to *NOT* overflow knew = nr/2 +1, nl = 0, neq = 0; if(trace_lev >= 2) Rprintf(" (h1,h2, nr, knew) = (%d,%d, %.0f, %.0f)\n", h1,h2, (double)nr, (double)knew); double trial = -2./* -Wall */; double *work= (double *) R_alloc(n, sizeof(double)); int *iwt = (int *) R_alloc(n, sizeof(int)); Rboolean IsFound = FALSE; /* MK: 'neq' counts the number of observations in the * inside the tolerance range, i.e., where left > right + 1, * since we would miss those when just using 'nl-nr'. * This is to prevent index overflow in work[] later on. * left might be larger than right + 1 since we are only * testing with accuracy eps_trial and therefore there might * be more than one observation in the `tolerance range` * between < and <=. */ while (!IsFound && (nr-nl+neq > n) && it < iter[0]) { int64_t sum_p, sum_q; it++; j = 0; for (i = 1; i <= h2; i++) if (left[i] <= right[i]) { iwt[j] = right[i] - left[i]+1; int k = left[i] + (iwt[j]/2); work[j] = h_kern(x[k], x2[i], k, i, h1+1, eps[1], do_scale); j++; } if(trace_lev >= 4) { Rprintf(" before whimed(): work and iwt, each [0:(%d-1)]:\n", j); if(j >= 100) { for(i=0; i < 90; i++) Rprintf(" %8g", work[i]); Rprintf("\n ... "); for(i=j-4; i < j; i++) Rprintf(" %8g", work[i]); Rprintf("\n"); for(i=0; i < 90; i++) Rprintf(" %8d", iwt [i]); Rprintf("\n ... "); for(i=j-4; i < j; i++) Rprintf(" %8d", iwt [i]); Rprintf("\n"); } else { // j <= 99 for(i=0; i < j; i++) Rprintf(" %8g", work[i]); Rprintf("\n"); for(i=0; i < j; i++) Rprintf(" %8d", iwt [i]); Rprintf("\n"); } } trial = whimed_i(work, iwt, j, acand, a_srt, iw_cand); double eps_trial = eps[0] * (do_scale ? eps[0] + fabs(trial) : fabs(trial)); if(trace_lev >= 3) Rprintf("%2s it=%2d, whimed(*, n=%6d)=%11.5g ", " ", it, j, trial); j = 1; for (i = h2; i >= 1; i--) { while (j <= h1 && h_kern(x[j],x2[i],j,i,h1+1,eps[1], do_scale) - trial > eps_trial) { // while (j <= h1 && h_kern(x[j],x2[i],j,i,h1+1,eps[1], do_scale) > trial) { if (trace_lev >= 5) Rprintf("\nj=%3d, i=%3d, x[j]=%g, x2[i]=%g, h=%g", j, i, x[j], x2[i], h_kern(x[j],x2[i],j,i,h1+1,eps[1], do_scale)); j++; } p[i] = j-1; } j = h1; for (i = 1, sum_p=0, sum_q=0; i <= h2; i++) { while (j >= 1 && trial - h_kern(x[j],x2[i],j,i,h1+1,eps[1], do_scale) > eps_trial) // while (j >= 1 && h_kern(x[j],x2[i],j,i,h1+1,eps[1], do_scale) < trial) j--; q[i] = j+1; sum_p += p[i]; sum_q += j;/* = q[i]-1 */ } if(trace_lev >= 3) { if (trace_lev == 3) Rprintf("sum_(p,q)= (%.0f,%.0f)", (double)sum_p, (double)sum_q); else { /* trace_lev >= 4 */ Rboolean lrg = h2 >= 100; int i_m = lrg ? 
95 : h2; Rprintf("\n%3s p[1:%d]:", "", h2); for(i = 1; i <= i_m; i++) Rprintf(" %2d", p[i]); if(lrg) Rprintf(" ..."); Rprintf(" sum=%4.0f\n%3s q[1:%d]:", (double)sum_p, "", h2); for(i = 1; i <= i_m; i++) Rprintf(" %2d", q[i]); if(lrg) Rprintf(" ..."); Rprintf(" sum=%4.0f\n", (double)sum_q); } } if (knew <= sum_p) { if(trace_lev >= 3) Rprintf("; sum_p >= kn\n"); for (i = 1, neq = 0; i <= h2; i++) { right[i] = p[i]; if (left[i] > right[i]+1) neq += left[i]-right[i]-1; } nr = sum_p; } else { /* knew > sum_p */ IsFound = (knew <= sum_q); /* i.e. sum_p < knew <= sum_q */; if(trace_lev >= 3) Rprintf("; s_p < kn ?<=? s_q: %s\n", IsFound ? "TRUE": "no"); if(IsFound) { medc = trial; } else { /* knew > sum_q */ for (i = 1; i <= h2; i++) { left[i] = q[i]; if (left[i] > right[i]+1) neq += left[i]-right[i]-1; } nl = sum_q; } } R_CheckUserInterrupt(); } /* end while loop */ converged = IsFound || (nr-nl+neq <= n); if(!converged) { warning("maximal number of iterations (%d =? %d) reached prematurely\n", iter[0], it); /* still: */ medc = trial; } if (converged && !IsFound) { /* e.g., for mc(1:4) : */ j = 0; for (i = 1; i <= h2; i++) { if (left[i] <= right[i]) { for (int k = left[i]; k <= right[i]; k++) { work[j] = -h_kern(x[k],x2[i],k,i,h1+1,eps[1], do_scale); j++; } } } neq = knew-nl-1; if(!(INT_MIN <= neq && neq <= INT_MAX)) warning("mc_C_d(): knew-nl-1 is outside int-range -- will fail in rPsort()"); if(trace_lev) Rprintf(" not found [it=%d, (nr,nl) = (%lld,%lld)]," " -> (knew-nl-1, j) = (%d, %d)\n", it, (long long)nr, (long long)nl, (int)neq, j); /* using rPsort(work, n,k), since we don't need work[] anymore:*/ rPsort(work, /* n = */ j, /* k = */ (int)neq); medc = - work[knew-nl-1]; } if(trace_lev >= 2) Rprintf(converged ? "converged in %d iterations\n" : "not converged in %d (maxit) iterations; try enlarging eps1, eps2 !?\n", it); Finish: iter[0] = it; /* to return */ iter[1] = converged; return medc; } /* end{ mc_C_d } */ /* h_kern() -- was called calwork() in original rmc.c code and did if (fabs(a-b) < 2.0*eps) { if (ai+bi == ab) { return 0; } else { return (ai+bi < ab) ? 1 : -1 ; } } else { return (a+b)/(a-b); } */ static double h_kern(double a, double b, int ai, int bi, int ab, double eps, Rboolean do_scale) { // eps := 'eps2' in R's mc() /* if (fabs(a-b) <= DBL_MIN) */ /* check for zero division and positive b */ // MK added a check '|| b > 0' ("or positive b"), but said "_and_ positive b" (r221) /* if (fabs(a-b) < 2.0*eps || b > 0) */ // MM: don't see why (but it seems needed); the check should be *relative* to |a+b| if (b > 0 || fabs(a-b) <= eps*(do_scale ? 2. : ldexp(fabs(a+b), 2))) // '<=' since RHS maybe 0 return sign((double)(ab - (ai+bi))); /* else */ return (a+b)/(a-b); } /* Local variables section * Local variables: * mode: c * kept-old-versions: 12 * kept-new-versions: 20 * End: */ robustbase/src/rffastmcd.f0000644000176200001440000017422614437440476015363 0ustar liggesuserscc -*- mode: fortran; kept-new-versions: 25; kept-old-versions: 20 -*- cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc cc rrcov : Scalable Robust Estimators with High Breakdown Point cc cc This program is free software; you can redistribute it and/or modify cc it under the terms of the GNU General Public License as published by cc the Free Software Foundation; either version 2 of the License, or cc (at your option) any later version. 
cc cc This program is distributed in the hope that it will be useful, cc but WITHOUT ANY WARRANTY; without even the implied warranty of cc MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the cc GNU General Public License for more details. cc cc You should have received a copy of the GNU General Public License cc along with this program; if not, write to the Free Software cc Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA cc cc I would like to thank Peter Rousseeuw and Katrien van Driessen for cc providing the initial code of this function. cc cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc cc cc Computes the MCD estimator of multivariate location and scatter. cc This estimator is given by the subset of h observations for which cc the determinant of their covariance matrix is minimal. The MCD cc location estimate is then the mean of those h points, and the MCD cc scatter estimate is their covariance matrix. This value of h may be cc chosen by the user; its default value is roughly n/2. cc cc The MCD estimator was first introduced in: cc cc Rousseeuw, P.J. (1984), "Least Median of Squares Regression," cc Journal of the American Statistical Association, Vol. 79, cc pp. 871-881. [See page 877.] cc cc The MCD is a robust estimator in the sense that the estimates are cc not unduly influenced by outliers in the data, even if there cc are many outliers. Its robustness was proved in: cc cc Rousseeuw, P.J. (1985), "Multivariate Estimation with High cc Breakdown Point," in Mathematical Statistics and Applications, cc edited by W. Grossmann, G. Pflug, I. Vincze, and W. Wertz. cc Dordrecht: Reidel Publishing Company, pp. 283-297. cc cc Rousseeuw, P.J. and Leroy, A.M. (1987), Robust Regression and cc Outlier Detection, Wiley-Interscience, New York. [Chapter 7] cc cc The program also computes the distance of each observation cc from the center (location) of the data, relative to the shape cc (scatter) of the data: cc cc * Using the classical estimates yields the Mahalanobis distance cc MD(i). Often, outlying points fail to have a large Mahalanobis cc distance because of the masking effect. cc cc * Using the MCD estimates yields a robust distance RD(i). cc These distances allow us to easily identify the outliers. cc cc For applications of robust distances in a regression context see: cc cc Rousseeuw, P.J. and van Zomeren, B.C. (1990), "Unmasking cc Multivariate Outliers and Leverage Points," Journal of the cc American Statistical Association, Vol. 85, 633-639. cc cc There also a diagnostic plot is given to distinguish between cc regular observations, vertical outliers, good leverage points, cc and bad leverage points. cc cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc cc cc The new FAST_MCD algorithm introduced here is due to cc cc Rousseeuw, P.J. and Van Driessen, K. (1997), "A Fast cc Algorithm for the Minimum Covariance Determinant cc Estimator," in preparation. cc cc The algorithm works as follows: cc cc The dataset contains n cases, and nvar variables are used. cc Let n_0 := 2 * nmini (== 600 by default). cc When n < n_0, the algorithm will analyze the dataset as a whole, cc when n >= n_0, the algorithm will use several subdatasets. cc cc 1. n < n_0 : When the dataset is analyzed as a whole, a trial cc subsample of nvar+1 cases is taken, of which the mean and cc covariance matrix is calculated. 
The h cases with smallest cc relative distances are used to calculate the next mean and cc covariance matrix, and this cycle is repeated k1 times. cc [For small n we can consider all subsets of nvar+1 out of n, else cc the algorithm draws 500 random subsets.] cc Afterwards, the best 10 solutions (covariance matrices and cc corresponding means) are used as starting values for the final cc iterations. These iterations stop when two subsequent determinants cc become equal. (At most k3 iteration steps are taken.) cc The solution with smallest determinant is retained. cc cc 2. n > n_0 --- more than n_0 = 2*nmini cases: The algorithm cc does part of the calculations on (at most) kmini nonoverlapping cc subdatasets, of (roughly) nmini cases. cc cc Stage 1: For each trial subsample in each subdataset, cc k1 iterations are carried out in that subdataset. cc For each subdataset, the 10 best solutions are stored. cc cc Stage 2 considers the union of the subdatasets, called the cc merged set. (If n is large, the merged set is a proper subset of cc the entire dataset.) In this merged set, each of the 'best cc solutions' of stage 1 are used as starting values for k2 cc iterations. Also here, the 10 best solutions are stored. cc cc Stage 3 depends on n, the total number of cases in the cc dataset. If n <= 5000, all 10 preliminary solutions are iterated cc k3 times. If n > 5000, only the best preliminary cc solution is iterated, and the number of iterations decreases to 1 cc according to n*nvar. (If n*nvar <= 100,000 we iterate k3 times, cc whereas for n*nvar > 1,000,000 we take only one iteration step.) cc cc An important advantage of the algorithm FAST_MCD is that it allows cc for exact fit situations, where more than h observations lie on cc a hyperplane. Then the program still yields the MCD location and cc scatter matrix, the latter being singular (as it should be), as cc well as the equation of the hyperplane. cc cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc subroutine rffastmcd(dat, n,nvar, nhalff, krep, nmini,kmini, ! 7 * initcov, initmean, inbest, det, ! 11 * weight, fit, coeff, kount, adcov, ! 16 * temp, index1, index2, indexx, nmahad, ndist, am, am2, ! 24 * slutn, med, mad, sd, means, bmeans, w, fv1, fv2, ! 33 * rec, sscp1, cova1, corr1, cinv1, cova2, cinv2, z, ! 41 * cstock, mstock, c1stock, m1stock, dath, ! 46 * cutoff, chimed, i_trace) ! 49 args cc VT::10.10.2005 - a DATA operator was used for computing the cc median and the 0.975 quantile of the chisq distribution cc with nvar degrees of freedom. Since now we have no cc restriction on the number of variables, these will be cc passed as parameters - cutoff and chimed implicit none cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc c c ALGORITHM PARAMETERS: c c The number of iteration steps in stages 1,2 and 3 can be changed c by adapting the parameters k1, k2, and k3. integer k1,k2,k3, int_max parameter (k1 = 2 ) parameter (k2 = 2 ) parameter (k3 = 100) c int_max: easily recognized, slightly smaller than 2147483647 = .Machine$integer.max parameter (int_max = 2146666666) c Arguments integer n,nvar ! (n, p) integer nhalff ! == quan := h(alpha) >= n/2 "n half" integer krep ! krep == nsamp c krep := the total number of trial subsamples c to be drawn when n exceeds 2*nmini; c krep = 0 :<==> "exact" <==> all possible subsamples c was hardcoded krep := 500; now an *argument* integer kmini ! 
the maximal number of subdatasets and integer nmini ! their minimal size double precision dat(n,nvar) double precision initcov(nvar*nvar), initmean(nvar) integer inbest(nhalff) double precision det integer weight(n), fit double precision coeff(kmini,nvar) integer kount double precision adcov(nvar*nvar) integer temp(n) integer index1(n), index2(n), indexx(n) double precision nmahad(n), ndist(n) double precision am(n), am2(n), slutn(n) double precision med(nvar), mad(nvar), sd(nvar), means(nvar), * bmeans(nvar), w(nvar), fv1(nvar), fv2(nvar) double precision rec(nvar+1), * sscp1((nvar+1)*(nvar+1)), corr1(nvar*nvar), * cova1(nvar*nvar), cinv1(nvar*nvar), * cova2(nvar*nvar), cinv2(nvar*nvar), * z(nvar*nvar) double precision cstock(10,nvar*nvar), mstock(10,nvar), * c1stock(10*kmini, nvar*nvar), * m1stock(10*kmini, nvar*nvar), * dath(nmini*kmini, nvar) double precision cutoff, chimed integer i_trace integer l2i c Functions from ./rf-common.f : integer replow integer rfncomb double precision rffindq c ------------------------------------------------------------------ c Variables integer i,ii,iii, ix, j,jj,jjj, k,kk,kkk,kstep, * l,lll, m,mm,minigr, * nn, ngroup,nhalf,nrep,nsel, nv_2 double precision bstd, deti,detimin1,dist,dist2, eps, * object, qorder, t c km10, nmaxi: now *variable* as nmini integer km10, nmaxi, * ierr,matz,pnsel, tottimes, step, * flag(10*kmini), mini(kmini), * subdat(2, nmini*kmini) double precision mcdndex(10,2,kmini) c subndex: vector of indices; c length(subndex) = maximal value of n_j := mini(j) {j in 1:ngroup} below; n0 := nmini c mini(j) = n1 or n1+1, where n0 <= n1 < n_max := max_j n_j <= n1+1 <= 1+ (3 n0 - 1)/2 = (3 n0 + 1)/2 c ==> see vignette ../vignettes/fastMcd-kmini.Rnw integer subndex((3*nmini + 1)/ 2) double precision med1,med2, percen, pivot,rfmahad,medi2 logical all,part,fine,final,class c -Wall (false alarm): all = .true. part= .false. c Consistency correction now happens in R if(i_trace .ge. 2) then call pr1mcd(i_trace, n, nvar, nhalff, krep, nmini, kmini) endif call rndstart C -------- == GetRNGstate() in C nrep = krep kstep = k1 medi2 = 0 cc From here on, the sample size n is known. cc Some initializations can be made. First of all, h (= the number of cc observations on which the MCD is based) is given by the integer variable cc nhalff. cc If nhalff equals n, the MCD is the classical covariance matrix. cc The logical value class indicates this situation. cc The variable jbreak is the breakdown point of the MCD estimator cc based on nhalff observations, whereas jdefaul = (n+nvar+1)/2 cc would be the optimal value of nhalff, with maximal breakdown point. cc The variable percen is the corresponding percentage (MM: rather "fraction"). cc c unused jbreak=rfnbreak(nhalff,n,nvar) percen = dble(nhalff) / n ! the fraction, also called 'alpha' if(nvar.lt.5) then eps=1.0D-12 else if(nvar.ge.5.and.nvar.le.8) then eps=1.0D-14 else eps=1.0D-16 endif endif class = (nhalff .ge. n) if(class) goto 9500 ! compute *only* the classical estimate if(nvar.eq.1) then do jj=1,n ndist(jj)=dat(jj,1) end do call rfshsort(ndist,n) cc. consistency correction now happens in R code cc. nquant=min(int(real(((nhalff*1.D0/n)-0.5D0)*40))+1,11) cc. factor=faclts(nquant) cc. 
call rfmcduni(ndist,n,nhalff,slutn,bstd,am,am2, factor, call rfmcduni(ndist,n,nhalff,slutn,bstd,am,am2, 1.d0, * n-nhalff+1) initmean(1)=slutn(1) adcov(1)=bstd initcov(1)=bstd goto 9999 endif cc p >= 2 in the following cc ------ c These are "constants" given the arguments: nmaxi = nmini*kmini km10 = 10*kmini nv_2 = nvar*nvar cc Some initializations: cc matz = auxiliary variable for the subroutine rs, indicating whether cc or not eigenvectors are calculated cc nsel = number of variables + 1 cc ngroup = number of subdatasets, is in {1,2,.., kmini} cc part = logical value, true if the dataset is split up cc fine = logical value, becomes true when the subsets are merged cc final = logical value, to indicate the final stage of the algorithm cc all = logical value, true if all (p+1)-subsets out n of should be drawn; cc always true for (very) small n, but also when krep=0 (special value) cc subdat = matrix with a first row containing indices of observations cc and a second row indicating the corresponding subdataset cc matz=1 nsel=nvar+1 ngroup=1 fine=.false. final=.false. do i=1,nmaxi subdat(1,i)=int_max subdat(2,i)=int_max end do cc Determine whether the dataset needs to be divided into subdatasets cc or can be treated as a whole. The subroutine rfrdraw constructs cc nonoverlapping subdatasets, with uniform distribution of the case numbers. cc For small n, the number of trial subsamples is determined. c part := Shall we partition the data into sub-datasets / "groups"? part = (krep.gt.0 .and. n .ge. (2*nmini)) all = .not. part if(part) then do i=1,kmini mini(i)=0 end do kstep=k1 ngroup = n / nmini ! =: k = n % nmini (integer division) if(ngroup .lt. kmini) then c we distribute n evenly into ngroup subdatasets, of size mm = n / ngroup ! =: n_0 = n % k ==> rest r = n - k*N = n-k*n_0 c The rest r in {0,..,k-1} gives one extra obs. in the last r groups, i.e., c group numbers j > jj := k - r : ii = n - ngroup*mm ! =: r jj = ngroup - ii ! = k - r do j = 1,jj mini(j) = mm end do do j = jj+1,ngroup mini(j) = mm +1 end do minigr = ngroup*mm + ii else ! ngroup = k := floor(n/nmini) >= kmini =: k_0 : ngroup = kmini do j=1,kmini mini(j)=nmini end do minigr = kmini*nmini end if nhalf = int(mini(1)*percen) nrep = krep / ngroup ! integer division if(i_trace .ge. 2) + call prp1mcd (n,ngroup,minigr,nhalf,nrep, mini) call rfrdraw(subdat,n,minigr,mini,ngroup,kmini) else ! all=.true. : .not.part : not partitioning; either krep == 0 or n <= 2*nmini-1 ( = 599 by default) minigr=n nhalf=nhalff kstep=k1 if(krep.eq.0 .or. n.le.replow(nsel)) then c use all combinations; happens also when nsel = nvar+1 = p+1 <= 6 nrep = rfncomb(nsel,n) else nrep=krep all = .false. endif endif c seed=iseed c above: prp1mcd (n,ngroup, minigr, nhalf,nrep, mini) if(i_trace .ge. 2) 1 call pr2mcd(l2i(part), l2i(all), 2 kstep, ngroup, minigr, nhalf, nrep) cc cc Some more initializations: cc m1stock = matrix containing the means of the ngroup*10 best estimates cc obtained in the subdatasets. cc c1stock = matrix containing the covariance matrices of the ngroup*10 cc best estimates obtained in the subdatasets. cc mstock = matrix containing the means of the ten best estimates cc obtained after merging the subdatasets and iterating from cc their best estimates. cc cstock = matrix containing the covariance matrices of the ten best cc estimates obtained after merging the subdatasets cc and iterating from their best estimates. 
cc means = mean vector cc bmeans = initial MCD location estimate cc sd = standard deviation vector cc nmahad = vector of mahalanobis distances cc ndist = vector of general (possibly robust) distances cc inbest = best solution vector cc index1 = index vector of subsample observations cc index2 = index vector of ordered mahalanobis distances cc indexx = temporary index vector, parallel to index1, used when cc generating all possible subsamples cc temp = auxiliary vector cc flag = vector with components indicating the occurrence of a cc singular intermediate MCD estimate. cc do j=1,nvar do k=1,10 mstock(k,j)=1234567.D0 do kk=1,kmini m1stock((kk-1)*10+k,j)=1234567.D0 end do do i=1,nvar do kk=1,kmini c1stock((kk-1)*10+k,(j-1)*nvar+i)=1234567.D0 end do cstock(k,(j-1)*nvar+i)=1234567.D0 end do end do means(j)=0.D0 bmeans(j)=0.D0 sd(j)=0.D0 end do do j=1,n nmahad(j)=0.D0 ndist(j)=0.D0 index1(j)=int_max index2(j)=int_max indexx(j)=int_max temp(j)=int_max end do do j=1,km10 flag(j)=1 end do 9500 continue c==== ********* Compute the classical estimates ************** c call rfcovinit(sscp1,nvar+1,nvar+1) do i=1,n do j=1,nvar rec(j)=dat(i,j) end do call rfadmit(rec,nvar,sscp1) end do call rfcovar(n,nvar,sscp1,cova1,means,sd) do j=1,nvar if(sd(j).eq.0.D0) goto 5001 end do call rfcovcopy(cova1,cinv1,nvar,nvar) det= 0. do j=1,nvar pivot=cinv1((j-1)*nvar+j) det=det + log(pivot) if(pivot.lt.eps) goto 5001 call rfcovsweep(cinv1,nvar,j) end do call rfcorrel(nvar,cova1,corr1,sd) c if just classical estimate, we are done if(class) goto 9999 goto 5002 c singularity '1' (exact fit := 1) : 5001 continue call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,n,nvar,means) call rfexact(kount,n,ndist, nvar, * sscp1,rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do j=1,nvar coeff(1,j)=z(j) end do fit=1 goto 9999 5002 continue cc cc Compute and store classical Mahalanobis distances. cc do j=1,n do i=1,nvar rec(i)=dat(j,i) end do nmahad(j)=rfmahad(rec,nvar,means,cinv1) end do cc ******* Compute the MCD estimates ************** ---------------------------- cc Main loop: inspects the subsamples. cc Every time the sscp of the subsample is placed in sscp1, cc its covariance matrix in cova1, and its inverse in cinv1 . cc The minimum covariance determinant matrix is placed in cova2, cc and its inverse in cinv2. cc The robust distances are placed in ndist. cc c tottimes := counting the total number of iteration steps in the main loop cc cc The algorithm returns here twice when the dataset is divided cc at the beginning of the program. According to the situation, cc new initializations are made. c fine == TRUE : <==> We are in the second stage, where the subdatasets are merged, c final == TRUE : <==> We are in the last stage, when the whole dataset is considered c In the last stage, the number of iterations 'nrep' c is determined according to the total number of observations and the dimension. tottimes=0 5555 object=10.D25 if(.not. part .or. final) then nn=n else if (fine) then !-> part & fine & .not. final nn=minigr else !-> part - "phase 1" (.not. fine & .not. final) nn=-1 endif if(i_trace .ge. 2) ! " Main loop, phase[%s]: ... " 1 call pr3mcd(l2i(part), l2i(fine), l2i(final), 2 nrep, nn, nsel, nhalf, kstep, nmini, kmini) if(fine .or.(.not.part.and.final)) then nrep = 10 c ---- == hardcoded nsel = nhalf kstep = k2 if (final) then ! 
"final": stage 3 -- nhalf=nhalff ngroup=1 c ksteps := k3 (= 100) unless n*p is "large" where c ksteps jumps down to at most 10 <<- "discontinuous!" FIXME if (n*nvar .le.100000) then kstep=k3 ! = 100 ("hardcoded default") else if (n*nvar .gt.100000 .and. n*nvar .le.200000) then kstep=10 else if (n*nvar .gt.200000 .and. n*nvar .le.300000) then kstep=9 else if (n*nvar .gt.300000 .and. n*nvar .le.400000) then kstep=8 else if (n*nvar .gt.400000 .and. n*nvar .le.500000) then kstep=7 else if (n*nvar .gt.500000 .and. n*nvar .le.600000) then kstep=6 else if (n*nvar .gt.600000 .and. n*nvar .le.700000) then kstep=5 else if (n*nvar .gt.700000 .and. n*nvar .le.800000) then kstep=4 else if (n*nvar .gt.800000 .and. n*nvar .le.900000) then kstep=3 else if (n*nvar .gt.900000 .and. n*nvar .le.1000000) then kstep=2 else ! n*p > 1e6 kstep=1 endif if (n.gt.5000) then nrep=1 endif else nhalf=int(minigr*percen) endif endif do i=1,nsel-1 index1(i)=i indexx(i)=i end do c i[p] := p-1 such that "next index" will be (1, 2, ..., p): index1(nsel)=nsel-1 indexx(nsel)=nsel-1 cc cc Initialization of the matrices to store partial results. For the cc first stage of the algorithm, the currently best covariance matrices and cc means are stored in the matrices c1stock and m1stock initialized earlier. cc The corresponding objective values and the number of the trial subset cc are stored in the matrix mcdndex. cc For the second stage of the algorithm or for small datasets, only the cc currently best objective values are stored in the same matrix mcdndex cc and the corresponding covariance matrices and mean vectors are stored in cc the matrices cstock and mstock initialized earlier. cc if(.not. final) then do i=1,10 do j=1,ngroup mcdndex(i,1,j)=10.D25 mcdndex(i,2,j)=10.D25 end do end do endif if(.not.fine .and. .not.final) then !-- first phase do j=1,nvar do i=1,n am (i)=dat(i,j) am2(i)=dat(i,j) end do if(2*n/2 .eq. n) then med1=rffindq(am, n, n/2, index2) med2=rffindq(am2,n,(n+2)/2,index2) med(j)=(med1+med2)/2 else med(j)=rffindq(am,n,(n+1)/2,index2) endif do i=1,n ndist(i)=dabs(dat(i,j)-med(j)) end do mad(j)=rffindq(ndist,n,nhalff,index2) if(mad(j)-0.D0 .lt. eps) then do k=1,j-1 do i=1,n dat(i,k)=dat(i,k)*mad(k)+med(k) end do end do call rfcovinit(sscp1,nvar+1,nvar+1) do k=1,nsel do m=1,nvar rec(m)=dat(index2(k),m) end do call rfadmit(rec,nvar,sscp1) end do call rfcovar(nsel,nvar,sscp1,cova1,means,sd) call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) C VT::15.11.2014, fixing array overrun, found by MM C The following code expects that z (the plane coefficients) C are all zeros with 1 in the position of the variable with MAD=0 C If not, tries to find it. C if(.FALSE.) then if(z(j).ne.1) then do kk=1,nvar if(z(kk*nvar+j).eq.1) then do l=1,nvar z(l)=z(kk*nvar+l) end do goto 76 ! break endif end do endif 76 continue else C Instead of this, we set all coefficients to 0 and the one of C variable j to 1. The exactfit code will be set 3 and will be C handled respectively by the R code. do kk=1,nvar z(kk) = 0 end do z(j) = 1 end if call rfdis(dat,z,ndist,n,nvar,n,nvar,means) call rfexact(kount,n,ndist, nvar, * sscp1,rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do jjj=1,nvar coeff(1,jjj)=z(jjj) end do fit=3 goto 9999 endif do i=1,n dat(i,j)=(dat(i,j)-med(j))/mad(j) end do end do endif cc cc The matrix dath contains the observations to be used in the cc algorithm. 
In the first stage of the split-up procedure dath contains cc nmini objects, corresponding to the original observations, with the index cc of the processed group in the array subdat. For the second stage, the cc data points of all the subdatasets are merged in dath. cc The variable kount indicates the occurrence of a singular subsample leading cc to the corresponding plane. In some situations the variable kount counts cc the number of observations on that plane. cc if (fine .and. .not. final) then do j=1,minigr do k=1,nvar dath(j,k)=dat(subdat(1,j),k) end do end do endif kount=0 c---- For-Loop over groups - - - - - - - - - - - - - - - - - - - - - do 1111 ii= 1,ngroup if(.not.fine) kount=0 if(part .and. .not. fine) then nn=mini(ii) kk=0 do j=1,minigr if(subdat(2,j).eq.ii) then kk=kk+1 subndex(kk)=subdat(1,j) endif end do do j=1,mini(ii) do k=1,nvar dath(j,k)=dat(subndex(j),k) end do end do endif if(i_trace .ge. 3) call prgrmcd(ii, nn, i_trace) do i=1,nn index2(i)=i end do cc The number of trial subsamples is represented by nrep, which depends cc on the data situation. cc When all (p+1)-subsets out of n can be drawn, the subroutine rfgenpn cc is used. Otherwise, random subsamples are drawn by the routine cc rfrangen. The trial subsamples are put in the array index1. The cc same thing happens for large datasets, except that the number of cc observations is nmini instead of n. cc cc When a trial subsample is singular, the algorithm counts the number of cc observations that lie on the hyperplane corresponding to this sample. cc If, for small datasets, this number is larger than nhalff, the program cc stops (exact fit) and gives the mean and the covariance matrix cc of the observations on the hyperplane, together with the equation cc of the hyperplane. cc For large datasets, the algorithm first checks whether there are more cc than nhalff observations on the hyperplane. If this is the case, the cc program stops for the same reason of exact fit and gives the covariance cc matrix and mean of the observations on the hyperplane. If not, the cc algorithm counts the number of observations that lie on the hyperplane. cc When this number is smaller than the current nhalf in the subdataset, these cc observations are extended to nhalf observations by adding those cc observations that have smallest orthogonal distances to the hyperplane cc and the algorithm continues. cc When larger, the coefficients of the hyperplane are stored in the matrix cc m1stock for use as starting value in the next stage, and the flag of this cc estimate gets the value zero. cc cc In the second stage of the algorithm, when the subdatasets are merged, cc the array index2 contains the indices of the observations cc corresponding to the nhalf observations with minimal relative distances cc with respect to the best estimates of the first stage. cc When the estimate of the first stage is a hyperplane, the algorithm cc investigates whether there are more than the current nhalf observations of cc the merged subdataset on that hyperplane. If so, the coefficients of the cc hyperplane are again stored, now in the matrix mstock, for the final cc stage of the algorithm. cc If not, the observations on the hyperplane are extended to nhalf cc observations by adding the observations in the merged dataset with cc smallest orthogonal distances to that hyperplane. cc For small datasets or for larger datasets with n <= nmaxi := nmini*kmini, cc the algorithm already stops when one solution becomes singular, cc since we then have an exact fit. 
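cc
cc     Illustration (R level; a minimal, hedged sketch using only the
cc     exported covMcd() interface): such an exact-fit configuration can
cc     be provoked directly, e.g.
cc         x  <- cbind(1:100, 2*(1:100) + 3)   # all n points on one line (p = 2)
cc         cm <- robustbase::covMcd(x)         # FAST-MCD stops with an exact fit;
cc                                             # the reported scatter matrix is singular
cc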
cc cc In the third stage, the covariance matrices and means of the best cc solutions of the second stage are used as starting values. cc Again, when a solution becomes singular, the subroutine 'exact' cc determines the hyperplane through at least nhalff observations and stops cc because of the exact fit. cc cc When the program stops because of an exact fit, the covariance matrix and cc mean of the observations on the hyperplane will always be given. cc C VT::27.10.2014 - an issue with nsamp="exact" fixed: do ix=1,n indexx(ix)=index1(ix) end do do 1000 i=1,nrep pnsel=nsel tottimes=tottimes+1 if(i_trace .ge. 4) call pr4mcd(i) call rchkusr() ! <- allow user interrupt deti= -1.d300 detimin1=deti step=0 call rfcovinit(sscp1,nvar+1,nvar+1) if((part.and..not.fine).or.(.not.part.and..not.final)) then if(part) then call rfrangen(mini(ii),nsel,index1) else if(all) then call rfgenpn(n,nsel,indexx) do ix=1,n index1(ix)=indexx(ix) end do else call rfrangen(n,nsel,index1) endif endif cc cc The covariance matrix and mean of the initial subsamples are cc calculated with the subroutine covar and represented by cc the variables cova1 and means. cc cc In the following stages of the algorithm, the covariance matrices and means cc used as starting values are already stored in the matrices c1stock cc and m1stock (for the second stage), and in the matrices cstock and mstock cc (for the third stage). cc cc The inverse cinv1 of the covariance matrix is calculated by the cc subroutine rfcovsweep, together with its determinant det. c c Repeat 9550 call rfcovinit(sscp1,nvar+1,nvar+1) if(.not.fine.and.part) then do j=1,pnsel do m=1,nvar rec(m)=dath(index1(j),m) end do call rfadmit(rec,nvar,sscp1) end do call rfcovar(pnsel,nvar,sscp1,cova1,means,sd) endif if(.not.part.and..not.final) then do j=1,pnsel do m=1,nvar rec(m)=dat(index1(j),m) end do call rfadmit(rec,nvar,sscp1) end do call rfcovar(pnsel,nvar,sscp1,cova1,means,sd) endif if (final) then if(mstock(i,1) .ne. 1234567.D0) then do jj=1,nvar means(jj)=mstock(i,jj) do kk=1,nvar cova1((jj-1)*nvar+kk)=cstock(i,(jj-1)*nvar+kk) end do end do else goto 1111 endif if(flag(i).eq.0) then qorder=1.D0 do jjj=1,nvar z(jjj)=coeff(1,jjj) end do call rfdis(dat,z,ndist,n,nvar,nn,nvar, means) dist2=rffindq(ndist,nn,nhalf,index2) goto 9555 endif endif if (fine .and. .not.final) then if(m1stock((ii-1)*10+i,1) .ne. 1234567.D0) then do jj=1,nvar means(jj)=m1stock((ii-1)*10+i,jj) do kk=1,nvar cova1((jj-1)*nvar+kk)=c1stock((ii-1)*10+i, * (jj-1)*nvar+kk) end do end do else goto 1111 endif if(flag((ii-1)*10+i).eq.0) then qorder=1.D0 do jjj=1,nvar z(jjj)=coeff(ii,jjj) end do call rfdis(dath,z,ndist,nmaxi,nvar,nn,nvar, means) call rfshsort(ndist,nn) qorder=ndist(nhalf) if(dabs(qorder-0.D0).lt.10.D-8 .and. kount.eq.0 * .and. n.gt.nmaxi) then kount=nhalf do kkk=nhalf+1,nn if(dabs(ndist(kkk)-0.D0).lt.10.D-8) then kount=kount+1 endif end do flag(1)=0 do kkk=1,nvar coeff(1,kkk)=z(kkk) end do call rfstore2(nvar,cstock,mstock,nv_2, * kmini,cova1,means,i,mcdndex,kount) kount=1 goto 1000 else if(dabs(qorder-0.D0).lt.10.D-8 .and. * kount.ne.0 .and. n.gt.nmaxi) then goto 1000 else flag(1)=1 dist2=rffindq(ndist,nn,nhalf,index2) goto 9555 endif endif endif call rfcovcopy(cova1,cinv1,nvar,nvar) det=0. 
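c     The loop below sweeps cinv1 (a copy of cova1) pivot by pivot;
c     'det' accumulates log(pivot), i.e. the log-determinant of the
c     subsample covariance matrix, and a pivot smaller than eps flags a
c     (numerically) singular subsample, handled in the branch that follows.
c     (In R terms, for a nonsingular S, this sum of log pivots equals
c      determinant(S, logarithm = TRUE)$modulus -- an illustrative check only.)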
do 200 j=1,nvar pivot=cinv1((j-1)*nvar+j) det=det + log(pivot) if(pivot.lt.eps) then call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) qorder=1.D0 if(.not.part.or.final) then call rfdis(dat,z,ndist,n,nvar,nn,nvar,means) else call rfdis(dath,z,ndist,nmaxi,nvar,nn,nvar,means) endif call rfshsort(ndist,nn) qorder=ndist(nhalf) if(dabs(qorder-0.D0).lt. 10.D-8 .and. .not.part) then call transfo(cova1,means,dat,med,mad,nvar,n) call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,nn,nvar,means) call rfexact(kount,n,ndist, nvar, * sscp1,rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do jjj=1,nvar coeff(1,jjj)=z(jjj) end do fit=2 goto 9999 else if(dabs(qorder-0.D0).lt. 10.D-8 .and. part .and. * kount.eq.0) then call rfdis(dat,z,ndist,n,nvar,n,nvar, means) call rfshsort(ndist,n) if(dabs(ndist(nhalff)-0.D0).lt.10.D-8) then call transfo(cova1,means,dat,med,mad,nvar,n) call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,nn,nvar,means) call rfexact(kount,n,ndist, nvar,sscp1, * rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do jjj=1,nvar coeff(1,jjj)=z(jjj) end do fit=2 goto 9999 endif call rfdis(dath,z,ndist,nmaxi,nvar,nn,nvar, means) call rfshsort(ndist,nn) kount=nhalf do kkk=nhalf+1,nn if(dabs(ndist(kkk)-0.D0) .lt. 10.D-8) then kount=kount+1 endif end do flag((ii-1)*10+1)=0 do kkk=1,nvar coeff(ii,kkk)=z(kkk) end do call rfstore1(nvar,c1stock,m1stock,nv_2, * kmini,cova1,means,i,km10,ii,mcdndex, kount) kount=1 goto 1000 else if(dabs(qorder-0.D0).lt. 10.D-8 .and. part .and. * kount.ne.0) then goto 1000 else C C VT::27.10.2014 - an issue with nsamp="exact" fixed: C C Add one more observation and return to recompute the C covariance. In case of complete enumeration, when all C p+1 subsamples are generated, the array 'index1' must C be preserved around label 9550). C if(i_trace .ge. 2) * call intpr('Singularity -> extended subsample: ', * -1,index1,nsel) call rfishsort(index1,pnsel) call prdraw(index1,pnsel, nn) pnsel=pnsel+1 goto 9550 c --------- until endif endif call rfcovsweep(cinv1,nvar,j) 200 continue cc cc Mahalanobis distances are computed with the subroutine rfmahad cc and stored in the array ndist. cc The k-th order statistic of the mahalanobis distances is stored cc in dist2. The array index2 containes the indices of the cc corresponding observations. cc do j=1,nn if(.not.part.or.final) then do mm=1,nvar rec(mm)=dat(j,mm) end do else do mm=1,nvar rec(mm)=dath(j,mm) end do endif t=rfmahad(rec,nvar,means,cinv1) ndist(j)=t end do dist2=rffindq(ndist,nn,nhalf,index2) cc cc The variable kstep represents the number of iterations of the current stage (1,2, or 3), cc i.e., the situation of the program, kstep = k1, k2, or k3. Within each cc iteration the mean and covariance matrix of nhalf observations are cc calculated. The nhalf smallest corresponding mahalanobis distances cc determine the subset for the next iteration. cc The best subset for the whole data is stored in the array inbest. cc The iteration stops when two subsequent determinants become equal. cc 9555 do 400 step=1,kstep tottimes=tottimes+1 if(i_trace .ge. 4) call pr5mcd(step, tottimes) call rchkusr() ! 
<- allow user interrupt call rfcovinit(sscp1,nvar+1,nvar+1) do j=1,nhalf temp(j)=index2(j) end do call rfishsort(temp,nhalf) do j=1,nhalf if(.not.part.or.final) then do mm=1,nvar rec(mm)=dat(temp(j),mm) end do else do mm=1,nvar rec(mm)=dath(temp(j),mm) end do endif call rfadmit(rec,nvar,sscp1) end do call rfcovar(nhalf,nvar,sscp1,cova1,means,sd) call rfcovcopy(cova1,cinv1,nvar,nvar) det= 0. do 600 j=1,nvar pivot=cinv1((j-1)*nvar+j) det=det + log(pivot) if(pivot.lt.eps) then if(final .or. .not.part .or. * (fine.and. .not.final .and. n .le. nmaxi)) then call transfo(cova1,means,dat,med,mad,nvar,n) call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) if(final.or..not.part) then call rfdis(dath,z,ndist,n, nvar,nn, * nvar,means) else call rfdis(dath,z,ndist,nmaxi,nvar,nn, * nvar,means) endif call rfexact(kount,n,ndist,nvar,sscp1, * rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do jjj=1,nvar coeff(1,jjj)=z(jjj) end do fit=2 goto 9999 endif if(part.and..not.fine.and.kount.eq.0) then call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,n,nvar, means) call rfshsort(ndist,n) if(dabs(ndist(nhalff)-0.D0).lt.10.D-8) then call transfo(cova1,means,dat,med,mad,nvar,n) call rs(nvar,nvar,cova1,w,matz,z, * fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,n,nvar,means) call rfexact(kount,n,ndist,nvar,sscp1, * rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do jjj=1,nvar coeff(1,jjj)=z(jjj) end do fit=2 goto 9999 endif call rfdis(dath,z,ndist,nmaxi,nvar,nn, * nvar,means) call rfshsort(ndist,nn) kount=nhalf do,kkk=nhalf+1,nn if(dabs(ndist(kkk)-0.D0).lt.10.D-8) then kount=kount+1 endif end do flag((ii-1)*10+1)=0 do kkk=1,nvar coeff(ii,kkk)=z(kkk) end do call rfstore1(nvar,c1stock,m1stock,nv_2, * kmini,cova1,means,i,km10,ii,mcdndex, kount) kount=1 goto 1000 else if(part.and..not.fine.and.kount.ne.0) then goto 1000 endif endif if(fine.and..not.final.and.kount.eq.0) then call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,n,nvar, means) call rfshsort(ndist,n) if(dabs(ndist(nhalff)-0.D0).lt.10.D-8) then call transfo(cova1,means,dat,med,mad,nvar,n) call rs(nvar,nvar,cova1,w,matz,z, * fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,n,nvar,means) call rfexact(kount,n,ndist,nvar,sscp1, * rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do jjj=1,nvar coeff(1,jjj)=z(jjj) end do fit=2 goto 9999 endif call rfdis(dath,z,ndist,nmaxi,nvar,nn, * nvar,means) call rfshsort(ndist,nn) kount=nhalf do kkk=nhalf+1,nn if(dabs(ndist(kkk)-0.D0).lt.10.D-8) then kount=kount+1 endif end do flag(1)=0 do kkk=1,nvar coeff(1,kkk)=z(kkk) end do call rfstore2(nvar,cstock,mstock,nv_2, * kmini,cova1,means,i,mcdndex,kount) kount=1 goto 1000 else if(fine.and..not.final.and.kount.ne.0) then goto 1000 endif endif endif call rfcovsweep(cinv1,nvar,j) 600 continue if(step.ge.2 .and. det.eq.detimin1) then goto 5000 endif detimin1=deti deti=det do j=1,nn if(.not.part.or.final) then do mm=1,nvar rec(mm)=dat(j,mm) end do else do mm=1,nvar rec(mm)=dath(j,mm) end do endif t=rfmahad(rec,nvar,means,cinv1) ndist(j)=t end do dist2=rffindq(ndist,nn,nhalf,index2) dist=dsqrt(dist2) if(final .and. ((i.eq.1 .and. step.eq.1 .and. .not.fine) * .or. det .lt. 
object)) then medi2=rffindq(ndist,nn,int(n/2),index1) object=det do jjj=1,nhalf inbest(jjj)=index2(jjj) end do call rfcovcopy(cova1,cova2,nvar,nvar) call rfcovcopy(cinv1,cinv2,nvar,nvar) call rfcovcopy(means,bmeans,nvar,1) endif 400 continue if(i_trace .ge. 4) call println() cc After each iteration, it has to be checked whether the new solution cc is better than some previous one and therefore needs to be stored. This cc isn't necessary in the third stage of the algorithm, where only the best cc solution is kept. 5000 if(.not. final) then if(part .and. .not. fine) then iii=ii else iii=1 endif c At the end of the algorithm, only the ten c best solutions need to be stored. cc For each data group : cc If the objective function is lower than the largest value in the cc matrix mcdndex : cc A distinction is made between different stages of the algorithm: cc * At the first stage of the split-up situation: cc -If the new objective value did not yet occur in mcdndex cc its value and corresponding covariance matrix and mean are cc stored at the right place in the matrices mcdndex, c1stock and cc m1stock, and other values are shifted to their new position cc in these arrays. cc -If the new objective value already occurs in mcdndex, a cc comparison is made between the new mean vector and covariance matrix cc and those estimates with the same determinant. cc When for an equal determinant, the mean vector or covariance matrix cc do not correspond, both of them are kept in the matrices mcdndex cc and nbest. cc * In the second stage of the algorithm, the covariances and means cc are stored : cc - If the new objective value did not yet occur cc in the matrix mcdndex, it is inserted by shifting the greater cc determinants upwards and doing the same in the arrays mstock cc and cstock. cc - If the new objective value already occurs in the array mcdndex, cc it is compared with all solutions with the same determinant. cc In the case of an equality, the means and covariances cc are compared to determine whether or not to insert the cc new solution. cc Otherwise nothing happens. When a singularity occurs, cc the determinant in the matrix mcdndex is zero and the cc corresponding flag is zero too, so the search in the arrays mcdndex, cc m1stock, c1stock, mstock and cstock is done on the rows with flag one. cc if( flag((iii-1)*10+1).eq.1) then lll=1 else lll=2 endif do j=lll,10 if (det .le. mcdndex(j,2,iii)) then if(det.ne.mcdndex(j,2,iii)) then if(.not.fine.and.part) goto 203 goto 205 else do kkk=j,10 if(det.eq.mcdndex(kkk,2,iii)) then do jjj=1,nvar if(part.and..not.fine) then if(means(jjj) .ne. * m1stock((iii-1)*10+ kkk,jjj)) * goto 203 else if(means(jjj).ne.mstock(kkk,jjj)) * goto 205 endif end do do jjj=1,nvar*nvar if(part.and..not.fine) then if(cova1(jjj) .ne. * c1stock((iii-1)*10+ kkk,jjj)) * goto 203 else if(cova1(jjj).ne.cstock(kkk,jjj)) * goto 205 endif end do endif end do ! 
kkk endif goto 1000 c--- 203 do k=10,j+1,-1 do kk=1,nvar*nvar c1stock((iii-1)*10+k,kk)= * c1stock((iii-1)*10+k-1,kk) end do do kk=1,nvar m1stock((iii-1)*10+k,kk)= * m1stock((iii-1)*10+k-1,kk) end do mcdndex(k,1,iii)=mcdndex(k-1,1,iii) mcdndex(k,2,iii)=mcdndex(k-1,2,iii) end do do kk=1,nvar do kkk=1,nvar c1stock((iii-1)*10+j,(kk-1)*nvar+kkk)= * cova1((kk-1)*nvar+kkk) m1stock((iii-1)*10+j,kk)=means(kk) end do end do mcdndex(j,1,iii)=i mcdndex(j,2,iii)=det goto 1000 c--- 205 do k=10,j+1,-1 do kk=1,nvar*nvar cstock(k,kk)= cstock(k-1,kk) end do do kk=1,nvar mstock(k,kk)= mstock(k-1,kk) end do mcdndex(k,1,iii)=mcdndex(k-1,1,iii) mcdndex(k,2,iii)=mcdndex(k-1,2,iii) end do do kk=1,nvar do kkk=1,nvar cstock(j,(kk-1)*nvar+kkk)= * cova1((kk-1)*nvar+kkk) mstock(j,kk)=means(kk) end do end do mcdndex(j,1,iii)=i mcdndex(j,2,iii)=det goto 1000 endif end do ! j endif c (not final) 1000 continue !end{ i = 1..nrep } 1111 continue c---- - - - - - end [ For (ii = 1 .. ngroup) ] - - - - - - - - - cc Determine whether the algorithm needs to be run again or not. cc if(part .and. .not. fine) then fine= .true. goto 5555 else if(.not. final .and. ((part.and.fine).or. .not.part)) then final= .true. goto 5555 endif cc******** end { Main Loop } ************** -------------------------------- c MM: 'temp' is thrown away in calling R code: c do j=1,nhalf c temp(j)=inbest(j) c end do c call rfishsort(temp,nhalf) do j=1,nvar means(j)=bmeans(j)*mad(j)+med(j) end do call rfcovcopy(means,initmean,nvar,1) do i=1,nvar do j=1,nvar cova1((i-1)*nvar+j)=cova2((i-1)*nvar+j)*mad(i)*mad(j) end do end do call rfcovcopy(cova1,initcov,nvar,nvar) det=object do j=1,nvar det=det + 2*log(mad(j)) end do cc VT::chimed is passed now as a parameter cc call rfcovmult(cova1,nvar,nvar,medi2/chimed(nvar)) cc call rfcovmult(cova2,nvar,nvar,medi2/chimed(nvar)) cc call rfcovmult(cinv2,nvar,nvar,1.D0/(medi2/chimed(nvar))) medi2 = medi2/chimed call rfcovmult(cova1, nvar,nvar, medi2) call rfcovmult(cova2, nvar,nvar, medi2) call rfcovmult(cinv2, nvar,nvar, 1.D0/medi2) call rfcovcopy(cova1, adcov,nvar,nvar) cc cc The MCD location is in bmeans. cc The MCD scatter matrix is in cova2, cc and its inverse in cinv2. cc cc For every observation we compute its MCD distance cc and compare it to a cutoff value. cc call rfcovinit(sscp1,nvar+1,nvar+1) do i=1,n do mm=1,nvar rec(mm)=dat(i,mm) end do dist2=rfmahad(rec,nvar,bmeans,cinv2) if(dist2.le.cutoff) then weight(i)=1 else weight(i)=0 endif end do call transfo(cova2,bmeans,dat,med,mad,nvar,n) goto 9999 cc ****************************************************************** 9999 continue if(i_trace .ge. 2) call pr9mcd(tottimes) call rndend C ------ == PutRNGstate() in C return end ccccc end {rffastmcd} ccccc ccccc c --- Auxiliary just to pass Fortran 'logical' as 'integer' to C; int(.) does *not* work integer function l2i(logi) implicit none logical logi if(logi) then l2i = 1 else l2i = 0 endif return end ccccc ccccc subroutine rfexact(kount,nn,ndist, nvar,sscp1, * rec,dat, cova1,means,sd,weight) cc cc Determines how many objects lie on the hyperplane with equation cc z(1,1)*(x_i1 - means_1)+ ... + z(p,1)* (x_ip - means_p) = 0 cc and computes their mean and their covariance matrix. 
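cc
cc     Rough R-level sketch of what this subroutine computes (illustrative
cc     only; 'dat' and 'ndist' as supplied by the caller, with tolerance
cc     1e-7 mirroring the 10.D-8 used below):
cc         on     <- abs(ndist) < 1e-7              # points (numerically) on the plane
cc         kount  <- sum(on)
cc         weight <- as.integer(on)                 # 0/1 weights as set below
cc         means  <- colMeans(dat[on, , drop = FALSE])
cc         cova1  <-      cov(dat[on, , drop = FALSE])  # same (n-1) denominator as rfcovar()
cc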
cc double precision ndist(nn) double precision sscp1(nvar+1,nvar+1) double precision rec(nvar+1) double precision dat(nn,nvar) double precision cova1(nvar,nvar) double precision means(nvar), sd(nvar) integer weight(nn) call rfcovinit(sscp1,nvar+1,nvar+1) kount=0 do kk=1,nn if(dabs(ndist(kk)-0.D0).lt.10.D-8) then kount=kount+1 weight(kk)=1 do j=1,nvar rec(j)=dat(kk,j) end do call rfadmit(rec,nvar,sscp1) else weight(kk)=0 endif end do call rfcovar(kount,nvar,sscp1,cova1,means,sd) return end ccccc ccccc subroutine transfo(cova,means,dat,med,mad,nvar,n) cc implicit none integer n, nvar double precision dat(n,nvar), cova(nvar,nvar) double precision means(nvar), med(nvar), mad(nvar) integer i,j,k do j=1,nvar means(j)=means(j)*mad(j)+med(j) do k=1,nvar cova(j,k)=cova(j,k)*mad(j)*mad(k) end do do i=1,n dat(i,j)=dat(i,j)*mad(j)+med(j) end do end do return end ccccc ccccc subroutine rfcovmult(a,n1,n2,fac) cc cc Multiplies the matrix a by the real factor fac. cc double precision a(n1,n2) double precision fac cc do i=1,n1 do j=1,n2 a(i,j)=a(i,j)*fac end do end do return end ccccc ccccc subroutine rfadmit(rec,nvar,sscp) cc cc Updates the sscp matrix with the additional case rec. cc double precision rec(nvar) double precision sscp(nvar+1,nvar+1) cc sscp(1,1)=sscp(1,1)+1.D0 do j=1,nvar sscp(1,j+1)=sscp(1,j+1)+rec(j) sscp(j+1,1)=sscp(1,j+1) end do do i=1,nvar do j=1,nvar sscp(i+1,j+1)=sscp(i+1,j+1)+rec(i)*rec(j) end do end do return end ccccc ccccc subroutine rfcovar(n,nvar, sscp,cova, means,sd) cc cc Computes the classical mean and covariance matrix. cc implicit none integer n,nvar, i,j double precision sscp(nvar+1,nvar+1), cova(nvar,nvar) double precision means(nvar), sd(nvar), f do i=1,nvar means(i)=sscp(1,i+1) sd(i)=sscp(i+1,i+1) f=(sd(i)-means(i)*means(i)/n)/(n-1) if(f.gt.0.D0) then sd(i)=dsqrt(f) else sd(i)=0.D0 endif means(i)=means(i)/n end do do i=1,nvar do j=1,nvar cova(i,j)=sscp(i+1,j+1) end do end do do i=1,nvar do j=1,nvar cova(i,j)=cova(i,j)-n*means(i)*means(j) cova(i,j)=cova(i,j)/(n-1) end do end do return end ccccc ccccc subroutine rfcorrel(nvar,a,b,sd) cc cc Transforms the scatter matrix a to the correlation matrix b: <==> R's cov2cor(.) cc implicit none integer nvar double precision a(nvar,nvar), b(nvar,nvar), sd(nvar) integer j,i do j=1,nvar sd(j)=1/sqrt(a(j,j)) end do do i=1,nvar do j=1,nvar if(i.eq.j) then b(i,j)=1.0 else b(i,j)=a(i,j)*sd(i)*sd(j) endif end do end do return end subroutine prdraw(a,pnsel, nn) implicit none integer nn, a(nn), pnsel c double precision unifrnd integer jndex, nrand, i,j jndex=pnsel c OLD nrand=int(uniran(seed)*(nn-jndex))+1 nrand=int(unifrnd() * (nn-jndex))+1 C if(nrand .gt. nn-jndex) then C call intpr( C 1 '** prdraw(): correcting nrand > nn-jndex; nrand=', C 2 -1, nrand, 1) C nrand=nn-jndex C endif jndex=jndex+1 a(jndex)=nrand+jndex-1 do i=1,jndex-1 if(a(i).gt.nrand+i-1) then do j=jndex,i+1,-1 a(j)=a(j-1) end do a(i)=nrand+i-1 goto 10 c ------- break endif end do 10 continue return end ccccc ccccc double precision function rfmahad(rec,nvar,means,sigma) cc cc Computes a Mahalanobis-type distance. cc double precision rec(nvar), means(nvar), sigma(nvar,nvar), t t = 0. do j=1,nvar do k=1,nvar t = t + (rec(j)-means(j))*(rec(k)-means(k))*sigma(j,k) end do end do rfmahad=t return end ccccc ccccc subroutine rfdis(da,z,ndist,nm,nv,nn,nvar, means) cc cc Computes the distance between the objects of da and a hyperplane with cc equation z(1,1)*(x_i1 - means_1) + ... 
+ z(p,1)*(x_ip - means_p) = 0 cc double precision da(nm,nv) double precision z(nvar,nvar) double precision ndist(nn) double precision means(nvar) do i=1,nn ndist(i)=0 do j=1,nvar ndist(i)=z(j,1)*(da(i,j)-means(j))+ndist(i) end do ndist(i)=dabs(ndist(i)) end do return end ccccc ccccc subroutine rfstore2(nvar,cstock,mstock,nv_2, * kmini,cova1,means,i,mcdndex,kount) cc cc Stores the coefficients of a hyperplane cc z(1,1)*(x_i1 - means_1) + ... + z(p,1)*(x_ip - means_p) = 0 cc into the first row of the matrix mstock, and shifts the other cc elements of the arrays mstock and cstock. cc double precision cstock(10, nv_2), mstock(10, nvar) double precision mcdndex(10, 2, kmini) double precision cova1(nvar,nvar), means(nvar) do k=10,2,-1 do kk=1,nvar*nvar cstock(k,kk)= cstock(k-1,kk) end do do kk=1,nvar mstock(k,kk)= mstock(k-1,kk) end do mcdndex(k,1,1)=mcdndex(k-1,1,1) mcdndex(k,2,1)=mcdndex(k-1,2,1) end do do kk=1,nvar mstock(1,kk)=means(kk) do jj=1,nvar cstock(1,(kk-1)*nvar+jj)=cova1(kk,jj) end do end do mcdndex(1,1,1)=i mcdndex(1,2,1)=kount return end ccccc ccccc subroutine rfstore1(nvar,c1stock,m1stock,nv_2, * kmini,cova1,means,i,km10,ii,mcdndex,kount) double precision c1stock(km10, nv_2), m1stock(km10, nvar) double precision mcdndex(10,2,kmini) double precision cova1(nvar,nvar), means(nvar) do k=10,2,-1 do kk=1,nvar*nvar c1stock((ii-1)*10+k,kk)= * c1stock((ii-1)*10+k-1,kk) end do do kk=1,nvar m1stock((ii-1)*10+k,kk)= * m1stock((ii-1)*10+k-1,kk) end do mcdndex(k,1,ii)=mcdndex(k-1,1,ii) mcdndex(k,2,ii)=mcdndex(k-1,2,ii) end do do kk=1,nvar m1stock((ii-1)*10+1,kk)=means(kk) do jj=1,nvar c1stock((ii-1)*10+1,(kk-1)*nvar+jj)= * cova1(kk,jj) end do end do mcdndex(1,1,ii)=i mcdndex(1,2,ii)=kount return end CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC ccccc ccccc subroutine rfcovinit(a,n1,n2) cc cc Initializes the matrix a by filling it with zeroes. cc double precision a(n1,n2) cc do i=1,n1 do j=1,n2 a(i,j)=0.D0 end do end do return end ccccc ccccc subroutine rfcovsweep(a,nvar,k) cc double precision a(nvar,nvar) double precision b, d cc d=a(k,k) do j=1,nvar a(k,j)=a(k,j)/d end do do i=1,nvar if(i.ne.k) then b=a(i,k) do j=1,nvar a(i,j)=a(i,j)-b*a(k,j) end do a(i,k) = -b/d endif end do a(k,k)=1/d return end ccccc robustbase/src/rowMedians.c0000644000176200001440000000774614123440375015507 0ustar liggesusers/*************************************************************************** Authors: Adopted from rowQuantiles.c by R. Gentleman. 
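 Usage note (illustrative sketch): the C entry point R_rowMedians() below is
 reached from R via the exported wrappers, e.g.
     robustbase::rowMedians(x, na.rm = TRUE)   # row-wise medians of a numeric matrix
     robustbase::colMedians(x)                 # column-wise (byRow = FALSE in C)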
Copyright Henrik Bengtsson, 2007; Martin Maechler, 2014-2021; History --> EOF **************************************************************************/ #include // was #include #include "robustbase.h" // Public methods: SEXP rowMedians_Real (SEXP x, int nrow, int ncol, int narm, int hasna, int byrow); SEXP rowMedians_Integer(SEXP x, int nrow, int ncol, int narm, int hasna, int byrow); void C_rowMedians_Real (double* x, double* res, int nrow, int ncol, int narm, int hasna, int byrow); void C_rowMedians_Integer(int* x, double* res, int nrow, int ncol, int narm, int hasna, int byrow); /* TEMPLATE rowMedians_(...): - SEXP rowMedians_Real(...); - SEXP rowMedians_Integer(...); */ #define METHOD rowMedians #define X_TYPE 'i' #include "rowMedians_TYPE-template.h" #define X_TYPE 'r' #include "rowMedians_TYPE-template.h" #undef METHOD /* TODO: implement: hasNA in {NA,TRUE,FALSE}; and = NA <==> code should *check* R code {for error message}: ../R/comedian.R */ SEXP R_rowMedians(SEXP x, SEXP naRm, SEXP hasNA, SEXP byRow, SEXP keepNms) { // Argument checking and "C type coercion": if (!isMatrix(x)) error(_("Argument 'x' must be a matrix.")); int narm = asLogical(naRm); // error if it ain't if (narm != TRUE && narm != FALSE) error(_("Argument 'naRm' must be either TRUE or FALSE.")); int hasna = asLogical(hasNA); // error if it ain't if (hasna == NA_INTEGER) hasna = TRUE;// <- for now; TODO ? become smarter and check int byrow = INTEGER(byRow)[0]; int keepnms = asLogical(keepNms); /* Get dimensions of 'x'. */ SEXP ans = PROTECT(getAttrib(x, R_DimSymbol)); int nrow, ncol; if (byrow) { // rowMedians nrow = INTEGER(ans)[0]; ncol = INTEGER(ans)[1]; } else { // colMedians nrow = INTEGER(ans)[1]; ncol = INTEGER(ans)[0]; } UNPROTECT(1); // and reprotect : if (isReal(x)) { ans = PROTECT(rowMedians_Real (x, nrow, ncol, narm, hasna, byrow)); } else if (isInteger(x)) { ans = PROTECT(rowMedians_Integer(x, nrow, ncol, narm, hasna, byrow)); } else { error(_("Argument 'x' must be numeric (integer or double).")); } if(keepnms) { SEXP xDnms = getAttrib(x, R_DimNamesSymbol); if(xDnms != R_NilValue) { PROTECT(xDnms); setAttrib(ans, R_NamesSymbol, duplicate(VECTOR_ELT(xDnms, byrow ? 0 : 1))); UNPROTECT(1); } } UNPROTECT(1); return(ans); } /* R_rowMedians() */ /*************************************************************************** HISTORY: 2014-12-09 [M.Maechler] o Copied to 'robustbase' CRAN package - to replace many apply(*., 2, median) NB: 'Biobase' also contains rowQ = general row/col Quantiles o argument checking all in C o add 'keepNms' argument {and do keep names by default!} 2013-01-13 [HB] o Added argument 'byRow' to rowMedians() and dropped colMedians(). o Using internal arguments 'by_row' instead of 'by_column'. 2011-12-11 [HB] o BUG FIX: rowMediansReal(..., na.rm=TRUE) did not handle NaN:s, only NA:s. Note that NaN:s does not exist for integers. 2011-10-12 [HJ] o Added colMedians(). o Now rowMediansInteger/Real() can operate also by columns, cf. argument 'by_column'. 2007-08-14 [HB] o Added checks for user interrupts every 1000 line. o Added argument 'hasNA' to rowMedians(). 2005-12-07 [HB] o BUG FIX: When calculating the median of an even number (non-NA) values, the length of the second sort was one element too short, which made the method to freeze, i.e. rPsort(rowData, qq, qq) is now (...qq+1, qq). 2005-11-24 [HB] o By implementing a special version for integers, there is no need to coerce to double in R, which would take up twice the amount of memory. o rowMedians() now handles NAs too. 
o Adopted from rowQuantiles.c in Biobase of Bioconductor. **************************************************************************/ robustbase/src/rllarsbi.f0000644000176200001440000002450414440373076015210 0ustar liggesusersc--- For lmrob.lar() in ../R/lmrob.M.S.R c--- ~~~~~~~~~~~ C======================================================================= SUBROUTINE rlSTORm2(Y,N, J, YJ) C....................................................................... IMPLICIT DOUBLE PRECISION (A-H,O-Z) DIMENSION Y(N) C----------------------------------------------------------------------- C rlSTORm2() searches the J-th value in order of magnitude in a vector Y() of length N, C ---------- functionally, YJ := rlSTORm2(Y(1..N), J); YJ = Y_(j) {the j-th order statist.} C is only called in one place with J = N/2+1 i.e., to compute the {high-}Median(Y[]). C----------------------------------------------------------------------- C--- copied from robust package: src/lmrobmm.f ------------------------- L=1 LR=N 20 IF (L.GE.LR) GOTO 90 AX=Y(J) JNC=L JJ=LR 30 IF(JNC.GT.JJ) GOTO 80 40 IF (Y(JNC).GE.AX) GOTO 50 JNC=JNC+1 GOTO 40 50 IF(Y(JJ).LE.AX) GOTO 60 JJ=JJ-1 GOTO 50 60 IF(JNC.GT.JJ) GOTO 70 WA=Y(JNC) Y(JNC)=Y(JJ) Y(JJ)=WA JNC=JNC+1 JJ=JJ-1 70 GOTO 30 80 IF(JJ.LT.J) L=JNC IF(J.LT.JNC) LR=JJ GOTO 20 90 YJ=Y(J) RETURN END C======================================================================= SUBROUTINE rlCOLbi(V1,V2,MLT,M,IOUT) C....................................................................... DOUBLE PRECISION V1(M),V2(M),MLT C----------------------------------------------------------------------- C V1 := V1 - m*V2 *apart* from index [IOUT] where V1[] remains unchanged C for vectors V1[], V2[]; scalar m=MLT C Auxiliary for rlLARSbi C----------------------------------------------------------------------- C--- copied from robust package: src/lmrobbi.f ------------------------- DO 220 I=1,M IF (I .EQ. IOUT) GOTO 220 V1(I)=V1(I)-V2(I)*MLT 220 CONTINUE RETURN END C======================================================================= SUBROUTINE rlSWAP(A,B) ! was rlICHGbi C....................................................................... C Swap A <--> B -- Auxiliary routine for rlLARSbi() C----------------------------------------------------------------------- C--- copied from robust package: src/lmrobbi.f ------------------------- DOUBLE PRECISION A,B,C C=A A=B B=C RETURN END C======================================================================= SUBROUTINE rlLARSbi(X, Y, N, NP, MDX,MDT, + TOL, + NIT, ! --> final #{pivoting steps} + K, ! --> ("scratch"?? ; maybe interesting for debugging) + KODE, ! --> return code in {0, 1, 2} + SIGMA, ! --> high-median(rs[i]) / bet0 + THETA, ! --> theta[1:nP] = the \hat{\beta} vector + RS, ! --> residuals + SC1,SC2,SC3,SC4, + BET0) C....................................................................... 
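C     (Illustrative R-level equivalent: the value returned in YJ equals
C      sort(Y, partial = J)[J], i.e. the J-th order statistic of Y[1:N];
C      with J = N/2+1 this is the "high median" used for SIGMA in rlLARSbi.)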
IMPLICIT DOUBLE PRECISION (A-H,O-Z) DIMENSION X(MDX,NP), Y(N), THETA(MDT), RS(N), + SC1(N), SC2(NP), SC3(NP), SC4(NP) INTEGER OUT LOGICAL STAGE,TEST DATA ZERO,TWO,EPS,BIG/0.D0,2.D0,1.0D-10,3.401D38/ cMM would think rather this: double precision --- but it breaks our checks C DATA ZERO,TWO,EPS,BIG/0.D0,2.D0,2.22D-16,1.796D308/ C----------------------------------------------------------------------- C LEAST ABSOLUTE RESIDUALS -- aka L_1 - Regression C --> Result in THETA[1:NP] C----------------------------------------------------------------------- C--- copied from robust package: src/lmrobbi.f ------------------------- DO J=1,NP SC4(J)=DBLE(J) SC2(J)=ZERO end do SUM=ZERO DO I=1,N SC1(I)=DBLE(NP+I) THETA(I)=Y(I) IF (Y(I) .lt. ZERO) then DO J=1,NP X(I,J)=-X(I,J) end do THETA(I)=-THETA(I) SC1(I)=-SC1(I) endif SUM=SUM+THETA(I) end do C----------------------------------------------------------------------- C COMPUTE THE MARGINAL COSTS. C----------------------------------------------------------------------- SUMIN=SUM DO J=1,NP SUM=ZERO DO I=1,N SUM=SUM+X(I,J) end do SC3(J)=SUM end do C----------------------------------------------------------------------- C STAGE I. DETERMINE THE VECTOR TO ENTER THE BASIS. C----------------------------------------------------------------------- TEST=.FALSE. ! -Wall STAGE=.TRUE. KOUNT=0 KR=1 KL=1 IN=1 ! -Wall c-- ---------------- LOOP (Stage I) ------------------------------------ 70 VMAX=-1.D0 DNP=DBLE(NP) DO J=KR,NP IF (DABS(SC4(J)) .GT. DNP) cycle ! = continue D=DABS(SC3(J)) IF (D-VMAX .LE. ZERO) cycle IF (D-VMAX .LE. EPS) cycle VMAX=D IN=J end do IF (SC3(IN) .lt. ZERO) then ! swap signs do I=1,N X(I,IN)=-X(I,IN) end do SC3(IN)=-SC3(IN) SC4(IN)=-SC4(IN) endif C----------------------------------------------------------------------- C DETERMINE THE VECTOR TO LEAVE THE BASIS. C----------------------------------------------------------------------- cvvv ------------ 2nd-level loop --------------------------------- 100 K=0 DO I=KL,N D=X(I,IN) IF (D .LE. TOL) cycle K=K+1 Y(K)=THETA(I)/D RS(K)=DBLE(I) TEST=.TRUE. end do C--- -------------- 3rd-level loop ------------------ 120 IF (K .le. 0) then TEST=.FALSE. ! and GOTO 150 else ! 130 VMIN=BIG DO I=1,K IF (Y(I)-VMIN .GE. ZERO) cycle IF (VMIN-Y(I) .LE. EPS) cycle J=I VMIN=Y(I) OUT=INT(RS(I)) end do Y(J)=Y(K) RS(J)=RS(K) K=K-1 endif C----------------------------------------------------------------------- C CHECK FOR LINEAR DEPENDENCE IN STAGE I. C----------------------------------------------------------------------- c 150 IF (.not.TEST .and. STAGE) then DO I=1,N CALL rlSWAP(X(I,KR),X(I,IN)) end do CALL rlSWAP(SC3(KR),SC3(IN)) CALL rlSWAP(SC4(KR),SC4(IN)) KR=KR+1 c GOTO 260 else c 170 IF (.not. TEST) then KODE=2 GOTO 350 endif c 180 PIVOT=X(OUT,IN) IF (SC3(IN)-PIVOT-PIVOT .gt. TOL) then ! not converged DO J=KR,NP D=X(OUT,J) SC3(J)=SC3(J)-D-D X(OUT,J)=-D end do D=THETA(OUT) SUMIN=SUMIN-D-D THETA(OUT)=-D SC1(OUT)=-SC1(OUT) GOTO 120 c -----------end{ 3rd-level loop } ----------------- endif C----------------------------------------------------------------------- C 200 PIVOT ON X(OUT,IN). C----------------------------------------------------------------------- DO J=KR,NP IF (J.EQ.IN) cycle ! = continue X(OUT,J)=X(OUT,J)/PIVOT end do THETA(OUT)=THETA(OUT)/PIVOT DO J=KR,NP IF (J .EQ. IN) cycle D=X(OUT,J) SC3(J)=SC3(J)-D*SC3(IN) CALL rlCOLbi(X(1,J),X(1,IN),D,N,OUT) end do SUMIN=SUMIN-SC3(IN)*THETA(OUT) DO I=1,N IF (I .EQ. 
OUT) cycle D=X(I,IN) THETA(I)=THETA(I)-D*THETA(OUT) X(I,IN)=-D/PIVOT end do SC3(IN)=-SC3(IN)/PIVOT X(OUT,IN)=1.D0/PIVOT CALL rlSWAP(SC1(OUT),SC4(IN)) KOUNT=KOUNT+1 IF (.NOT. STAGE) GOTO 270 C----------------------------------------------------------------------- C INTERCHANGE ROWS IN STAGE I. C----------------------------------------------------------------------- KL=KL+1 DO J=KR,NP CALL rlSWAP(X(OUT,J),X(KOUNT,J)) enddo CALL rlSWAP(THETA(OUT),THETA(KOUNT)) CALL rlSWAP(SC1(OUT),SC1(KOUNT)) endif IF (KOUNT+KR .NE. NP+1) GOTO 70 c ======= C----------------------------------------------------------------------- C STAGE II. DETERMINE THE VECTOR TO ENTER THE BASIS. C----------------------------------------------------------------------- STAGE=.FALSE. cvvv 270 VMAX=-BIG DO J=KR,NP D=SC3(J) IF (D .lt. ZERO) then IF (D+TWO .GT. ZERO) cycle D=-D-TWO endif IF (D-VMAX .LE. ZERO) cycle IF (D-VMAX .LE. EPS) cycle VMAX=D IN=J end do IF (VMAX .gt. TOL) then ! not converged IF (SC3(IN) .le. ZERO) then DO I=1,N X(I,IN)=-X(I,IN) end do SC3(IN)=-SC3(IN)-2.D0 SC4(IN)=-SC4(IN) endif GOTO 100 c ======== endif C----------------------------------------------------------------------- C 310 PREPARE OUTPUT C----------------------------------------------------------------------- L=KL-1 DO I=1,N RS(I)=ZERO IF (I .GT. L .OR. THETA(I) .GE. ZERO) cycle do J=KR,NP X(I,J)=-X(I,J) end do THETA(I)=-THETA(I) SC1(I)=-SC1(I) end do KODE=0 IF (KR .eq. 1) then ! first time only do J=1,NP D=DABS(SC3(J)) IF (D .LE. TOL .OR. TWO-D .LE. TOL) GOTO 350 end do KODE=1 endif c--- 350 DO I=1,N K=INT(SC1(I)) D=THETA(I) IF (K .le. 0) then K=-K D=-D endif IF (I .lt. KL) then SC2(K)=D else K=K-NP RS(K)=D endif end do c K=NP+1-KR c SUM=ZERO c DO I=KL,N c SUM=SUM+THETA(I) c end do c SUMIN=SUM NIT=KOUNT ! 
final #{pivoting steps} DO J=1,NP THETA(J)=SC2(J) end do C Y := |resid| DO I=1,N Y(I)=DABS(RS(I)) end do N2=N/2+1 C SIGMA := high-median( Y[1:N] ) == high-median( |res[i]| ) CALL rlSTORm2(Y,N,N2,SIGMA) c BET0 = 0.773372647623 = pnorm(0.75) always SIGMA=SIGMA/BET0 RETURN END robustbase/vignettes/0000755000176200001440000000000014555212627014444 5ustar liggesusersrobustbase/vignettes/lmrob_simulation.Rnw0000644000176200001440000016447413774561564020544 0ustar liggesusers\documentclass[11pt, a4paper]{article} \usepackage[a4paper, text={16cm,25cm}]{geometry} %\VignetteIndexEntry{Simulations for Robust Regression Inference in Small Samples} %\VignettePackage{robustbase} %\VignetteDepends{xtable,ggplot2,GGally,RColorBrewer,grid,reshape2} \usepackage{amsmath} \usepackage{natbib} \usepackage[utf8]{inputenc} \newcommand{\makeright}[2]{\ifx#1\left\right#2\else#1#2\fi} \newcommand{\Norm}[2][\left]{\mathcal N #1( #2 \makeright{#1}{)}} \newcommand{\norm}[1] {\| #1 \|} \newcommand{\bld}[1]{\boldsymbol{#1}} % shortcut for bold symbol \newcommand{\T}[1] {\texttt{#1}} \DeclareMathOperator{\wgt}{w} \DeclareMathOperator{\var}{var} \DeclareMathOperator{\diag}{diag} \DeclareMathOperator{\median}{median} \DeclareMathOperator{\mad}{mad} \DeclareMathOperator{\Erw}{\mathbf{E}} \SweaveOpts{prefix.string=plot, eps = FALSE, pdf = TRUE, strip.white=true} \SweaveOpts{width=6, height=4} \usepackage[noae]{Sweave} \begin{document} \setkeys{Gin}{width=\textwidth} \setlength{\abovecaptionskip}{-5pt} <>= ## set options options(width=60, warn=1) # see warnings where they happen (should eliminate) ## number of workers to start if(FALSE) {## good for pkg developers options(cores= max(1, parallel::detectCores() - 2)) } else { ## CRAN allows maximum of 2: options(cores= min(2, parallel::detectCores())) } ## Number of Repetitions: N <- 1000 ## get path (= ../inst/doc/ in source pkg) robustDoc <- system.file('doc', package='robustbase') robustDta <- robustDoc ## initialize (packages, data, ...): source(file.path(robustDoc, 'simulation.init.R')) # 'xtable' ## set the amount of trimming used in calculation of average results trim <- 0.1 <>= ## load required packages for graphics stopifnot(require(ggplot2), require(GGally),# for ggpairs() which replaces ggplot2::plotmatrix() require(grid), require(reshape2)) source(file.path(robustDoc, 'graphics.functions.R')) `%||%` <- function (x, orElse) if (!is.null(x)) x else orElse ## set ggplot theme theme <- theme_bw(base_size = 10) theme$legend.key.size <- unit(1, "lines")# was 0.9 in pre-v.3 ggplot2 theme$plot.margin <- unit(c(1/2, 1/8, 1/8, 1/8), "lines")# was (1/2, 0,0,0) theme_set(theme) ## old and new ggplot2: stopifnot(is.list(theme_G <- theme$panel.grid.major %||% theme$panel.grid)) ## set default sizes for lines and points update_geom_defaults("point", list(size = 4/3)) update_geom_defaults("line", list(size = 1/4)) update_geom_defaults("hline", list(size = 1/4)) update_geom_defaults("smooth", list(size = 1/4)) ## alpha value for plots with many points alpha.error <- 0.3 alpha.n <- 0.4 ## set truncation limits used by f.truncate() & g.truncate.*: trunc <- c(0.02, 0.14) trunc.plot <- c(0.0185, 0.155) f.truncate <- function(x, up = trunc.plot[2], low = trunc.plot[1]) { x[x > up] <- up x[x < low] <- low x } g.truncate.lines <- geom_hline(yintercept = trunc, color = theme$panel.border$colour) g.truncate.line <- geom_hline(yintercept = trunc[2], color = theme$panel.border$colour) g.truncate.areas <- annotate("rect", xmin=rep(-Inf,2), xmax=rep(Inf,2), ymin=c(0,Inf), ymax=trunc, fill = 
theme_G$colour) g.truncate.area <- annotate("rect", xmin=-Inf, xmax=Inf, ymin=trunc[2], ymax=Inf, fill = theme_G$colour) legend.mod <- list(`SMD.Wtau` = quote('SMD.W'~tau), `SMDM.Wtau` = quote('SMDM.W'~tau), `MM.Avar1` = quote('MM.'~Avar[1]), `MMqT` = quote('MM'~~q[T]), `MMqT.Wssc` = quote('MM'~~q[T]*'.Wssc'), `MMqE` = quote('MM'~~q[E]), `MMqE.Wssc` = quote('MM'~~q[E]*'.Wssc'), `sigma_S` = quote(hat(sigma)[S]), `sigma_D` = quote(hat(sigma)[D]), `sigma_S*qE` = quote(q[E]*hat(sigma)[S]), `sigma_S*qT` = quote(q[T]*hat(sigma)[S]), `sigma_robust` = quote(hat(sigma)[robust]), `sigma_OLS` = quote(hat(sigma)[OLS]), `t1` = quote(t[1]), `t3` = quote(t[3]), `t5` = quote(t[5]), `cskt(Inf,2)` = quote(cskt(infinity,2)) ) @% end{graphics-setup} \title{Simulations for Sharpening Wald-type Inference in Robust Regression for Small Samples} \author{Manuel Koller} \maketitle \tableofcontents \section{Introduction} In this vignette, we recreate the simulation study of \citet{KS2011}. This vignette is supposed to complement the results presented in the above cited reference and render its results reproducible. Another goal is to provide simulation functions, that, with small changes, could also be used for other simulation studies. Additionally, in Section~\ref{sec:maximum-asymptotic-bias}, we calculate the maximum asymptotic bias curves of the $\psi$-functions used in the simulation. \section{Setting} The simulation setting used here is similar to the one in \citet{maronna2009correcting}. We simulate $N = \Sexpr{N}$ repetitions. To repeat the simulation, we recommend using a small value of $N$ here, since for large $n$ and $p$, computing all the replicates will take days. \subsection{Methods} We compare the methods \begin{itemize} \item MM, SMD, SMDM as described in \citet{KS2011}. These methods are available in the package \T{robustbase} (\T{lmrob}). \item MM as implemented in the package \T{robust} (\T{lmRob}). This method will be denoted as \emph{MMrobust} later on. \item MM using S-scale correction by $q_{\rm T}$ and $q_{\rm E}$ as proposed by \citet{maronna2009correcting}. $q_{\rm T}$ and $q_{\rm E}$ are defined as follows. \begin{equation*} q_{\rm E} = \frac{1}{1 - (1.29 - 6.02/n)p/n}, \end{equation*} \begin{equation*} \hat q_{\rm T} = 1 + \frac{p}{2n}\frac{\hat a}{\hat b\hat c}, \end{equation*} where \begin{equation*} \hat a = \frac{1}{n}\sum_{i=1}^n \psi\left(\frac{r_i}{\hat\sigma_{\rm S}}\right)^2, \hat b = \frac{1}{n} \sum_{i=1}^n\psi'\left(\frac{r_i}{\hat\sigma_{\rm S}}\right),%' \hat c = \frac{1}{n}\sum_{i=1}^n \psi\left(\frac{r_i}{\hat\sigma_{\rm S}}\right) \frac{r_i}{\hat\sigma_{\rm S}}, \end{equation*} with $\psi = \rho'$,%' $n$ the number of observations, $p$ the number of predictor variables, $\hat\sigma_{\rm S}$ is the S-scale estimate and $r_i$ is the residual of the $i$-th observation. When using $q_{\rm E}$ it is necessary to adjust the tuning constants of $\chi$ to account for the dependence of $\kappa$ on $p$. For $q_{\rm T}$ no change is required. This method is implemented as \T{lmrob.mar()} in the source file \T{estimating.functions.R}. \end{itemize} \subsection{$\psi$-functions} We compare \emph{bisquare}, \emph{optimal}, \emph{lqq} and \emph{Hampel} $\psi$-functions. They are illustrated in Fig.~\ref{fig:psi.functions}. The tuning constants used in the simulation are compiled in Table~\ref{tab:psi-functions}. Note that the \emph{Hampel} $\psi$-function is tuned to have a downward slope of $-1/3$ instead of the originally proposed $-1/2$. 
This was set to allow for a comparison to an even slower descending $\psi$-function. %% generate table of tuning constants used for \psi functions \begin{table}[ht] \begin{center} <>= ## get list of psi functions lst <- lapply(estlist$procedures, function(x) { if (is.null(x$args)) return(list(NULL, NULL, NULL)) if (!is.null(x$args$weight)) return(list(x$args$weight[2], round(f.psi2c.chi(x$args$weight[1]),3), round(f.eff2c.psi(x$args$efficiency, x$args$weight[2]),3))) return(list(x$args$psi, round(if (is.null(x$args$tuning.chi)) lmrob.control(psi=x$args$psi)$tuning.chi else x$args$tuning.chi,3), round(if (is.null(x$args$tuning.psi)) lmrob.control(psi=x$args$psi)$tuning.psi else x$args$tuning.psi,3))) }) lst <- unique(lst) ## because of rounding, down from 21 to 5 ! lst <- lst[sapply(lst, function(x) !is.null(x[[1]]))] # 5 --> 4 ## convert to table tbl <- do.call(rbind, lst) tbl[,2:3] <- apply(tbl[,2:3], 1:2, function(x) { gsub('\\$NA\\$', '\\\\texttt{NA}', paste('$', unlist(x), collapse=', ', '$', sep='')) }) tbl[,1] <- paste('\\texttt{', tbl[,1], '}', sep='') colnames(tbl) <- paste('\\texttt{', c('psi', 'tuning.chi', 'tuning.psi'), '}', sep='') require("xtable") # need also print() method: print(xtable(tbl), sanitize.text.function=identity, include.rownames = FALSE, floating=FALSE) @ %def \vspace{15pt} \caption{Tuning constants of $\psi$-functions used in the simulation.} \label{tab:psi-functions} \end{center} \end{table} \begin{figure} \begin{center} <>= d.x_psi <- function(x, psi) { cc <- lmrob.control(psi = psi)$tuning.psi data.frame(x=x, value=Mpsi(x, cc, psi), psi = psi) } x <- seq(0, 10, length.out = 1000) tmp <- rbind(d.x_psi(x, 'optimal'), d.x_psi(x, 'bisquare'), d.x_psi(x, 'lqq'), d.x_psi(x, 'hampel')) print( ggplot(tmp, aes(x, value, color = psi)) + geom_line(lwd=1.25) + ylab(quote(psi(x))) + scale_color_discrete(name = quote(psi ~ '-function'))) @ \end{center} \caption{$\psi$-functions used in the simulation.} \label{fig:psi.functions} \end{figure} \subsection{Designs} Two types of designs are used in the simulation: fixed and random designs. One design with $n=20$ observations, $p=1+3$ predictors and strong leverage points. This design also includes an intercept column. It is shown in Fig.~\ref{fig:design-predict}. The other designs are random, i.e., regenerated for every repetition, and the models are fitted without an intercept. We use the same distribution to generate the designs as for the errors. The number of observations simulated are $n = 25, 50, 100, 400$ and the ratio to the number of parameters are $p/n = 1/20, 1/10, 1/5, 1/3, 1/2$. We round $p$ to the nearest smaller integer if necessary. The random datasets are generated using the following code. <>= f.gen <- function(n, p, rep, err) { ## get function name and parameters lerrfun <- f.errname(err$err) lerrpar <- err$args ## generate random predictors ret <- replicate(rep, matrix(do.call(lerrfun, c(n = n*p, lerrpar)), n, p), simplify=FALSE) attr(ret[[1]], 'gen') <- f.gen ret } ratios <- c(1/20, 1/10, 1/5, 1/3, 1/2)## p/n lsit <- expand.grid(n = c(25, 50, 100, 400), p = ratios) lsit <- within(lsit, p <- as.integer(n*p)) .errs.normal.1 <- list(err = 'normal', args = list(mean = 0, sd = 1)) for (i in 1:NROW(lsit)) assign(paste('rand',lsit[i,1],lsit[i,2],sep='_'), f.gen(lsit[i,1], lsit[i,2], rep = 1, err = .errs.normal.1)[[1]]) @ An example design is shown in Fig.~\ref{fig:example.design}. 
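As a small added illustration (not part of the original simulation code; the
variable \T{rr} is ours), the following chunk shows the predictor counts $p$
obtained for $n = 25$ under the rounding rule above:
<<illustrate-p-rounding, eval=FALSE>>=
## Added sketch (not run): p = floor(n * ratio), i.e. "round p to the
## nearest smaller integer", shown here for n = 25.
rr <- c(1/20, 1/10, 1/5, 1/3, 1/2)   # the simulated ratios p/n
rbind(`p/n` = rr, p = floor(25 * rr))
## gives p = 1, 2, 5, 8, 12; the figure below shows the (n = 25, p = 5) case
@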
\begin{figure} \begin{center} <>= require(GGally) colnames(rand_25_5) <- paste0("X", 1:5) # workaround new (2014-12) change in GGally ## and the 2016-11-* change needs data frames: df.r_25_5 <- as.data.frame(rand_25_5) try( ## fails with old GGally and new packageVersion("ggplot2") >= "2.2.1.9000" print(ggpairs(df.r_25_5, axisLabels="show", title = "rand_25_5: n=25, p=5")) ) @ \end{center} \caption{Example random design.} \label{fig:example.design} \end{figure} \subsection{Error Distributions} We simulate the following error distributions \begin{itemize} \item standard normal distribution, \item $t_5$, $t_3$, $t_1$, \item centered skewed t with $df = \infty, 5$ and $\gamma = 2$ (denoted by \emph{cskt$(\infty,2)$} and \emph{cskt}$(5,2)$, respectively); as introduced by \citet{fernandez1998bayesian} using the \T{R} package \T{skewt}, \item contaminated normal, $\Norm{0,1}$ contaminated with $10\%$ $\Norm{0, 10}$ (symmetric, \emph{cnorm}$(0.1,0,3.16)$) or $\Norm{4, 1}$ (asymmetric, \emph{cnorm}$(0.1,4,1)$). \end{itemize} \subsection{Covariance Matrix Estimators} For the standard MM estimator, we compare ${\rm Avar}_1$ of \citet{croux03} and the empirical weighted covariance matrix estimate corrected by Huber's small sample correction as described in \citet{HubPR09} (denoted by \emph{Wssc}). The latter is also used for the variation of the MM estimate proposed by \citet{maronna2009correcting}. For the SMD and SMDM variants we use the covariance matrix estimate as described in \citet{KS2011} (\emph{W$\tau$}). The covariance matrix estimate consists of three parts: \begin{equation*} {\rm cov}(\hat\beta) = \sigma^2\gamma\bld V_{\bld X}^{-1}. \end{equation*} The SMD and SMDM methods of \T{lmrob} use the following defaults. \begin{equation} \label{eq:gammatau} \hat\gamma = \frac{\frac{1}{n}\sum_{i=1}^n\tau_i^2 \psi\left(\frac{r_i}{\tau_i\hat\sigma}\right)^2} {\frac{1}{n}\sum_{i=1}^n\psi'\left(\frac{r_i}{\tau_i\hat\sigma}\right)} \end{equation} where $\tau_i$ is the rescaling factor used for the D-scale estimate (see \citet{KS2011}). \noindent\textbf{Remark: } Equation \eqref{eq:gammatau} is a corrected version of $\gamma$. It was changed in \texttt{robustbase} version \texttt{0.91} (April 2014) to ensure that the equation reduces to $1$ in the classical case ($\psi(x) = x$). If the former (incorrect) version is needed for compatibility reasons, it can be obtained by adding the argument \texttt{cov.corrfact = "tauold"}. \begin{equation*} \bld{\widehat V}_{\bld X} = \frac{1}{\frac{1}{n}\sum_{i=1}^n\wgt_{ii}}\bld X^T\bld W\bld X \end{equation*} where $\bld W = \diag\left(\wgt\left(\frac{r_1}{\hat\sigma}\right), \dots, \wgt\left(\frac{r_n}{\hat\sigma}\right)\right)$. The function $\wgt(r) = \psi(r)/r$ produces the robustness weights. \section{Simulation} The main loop of the simulation is fairly simple. (This code is only run if there are no aggregate results available.) %% set eval to TRUE for chunks simulation-run and simulation-aggr %% if you really want to run the simulations again. 
%% (better fail with an error than run for weeks) <>= aggrResultsFile <- file.path(robustDta, "aggr_results.Rdata") <>= if (!file.exists(aggrResultsFile)) { ## load packages required only for simulation stopifnot(require(robust), require(skewt), require(foreach)) if (!is.null(getOption("cores"))) { if (getOption("cores") == 1) registerDoSEQ() ## no not use parallel processing else { stopifnot(require(doParallel)) if (.Platform$OS.type == "windows") { cl <- makeCluster(getOption("cores")) clusterExport(cl, c("N", "robustDoc")) clusterEvalQ(cl, slave <- TRUE) clusterEvalQ(cl, source(file.path(robustDoc, 'simulation.init.R'))) registerDoParallel(cl) } else registerDoParallel() } } else registerDoSEQ() ## no not use parallel processing for (design in c("dd", ls(pattern = 'rand_\\d+_\\d+'))) { print(design) ## set design estlist$design <- get(design) estlist$use.intercept <- !grepl('^rand', design) ## add design.predict: pc estlist$design.predict <- if (is.null(attr(estlist$design, 'gen'))) f.prediction.points(estlist$design) else f.prediction.points(estlist$design, max.pc = 2) filename <- file.path(robustDta, sprintf('r.test.final.%s.Rdata',design)) if (!file.exists(filename)) { ## run print(system.time(r.test <- f.sim(estlist, silent = TRUE))) ## save save(r.test, file=filename) ## delete output rm(r.test) ## run garbage collection gc() } } } @ The variable \T{estlist} is a list containing all the necessary settings required to run the simulation as outlined above. Most of its elements are self-explanatory. <>= str(estlist, 1) @ \T{errs} is a list containing all the error distributions to be simulated. The entry for the standard normal looks as follows. <>= estlist$errs[[1]] @ \T{err} is translated internally to the corresponding random generation or quantile function, e.g., in this case \T{rnorm} or \T{qnorm}. \T{args} is a list containing all the required arguments to call the function. The errors are then generated internally with the following call. <>= set.seed(estlist$seed) errs <- c(sapply(1:nrep, function(x) do.call(fun, c(n = nobs, args)))) @ All required random numbers are generated at once instead of during the simulation. Like this, it is certain, that all the compared methods run on exactly the same data. The entry \T{procedures} follows a similar convention. \T{design.predict} contains the design used for the prediction of observations and calculation of confidence or prediction intervals. The objects returned by the procedures are processed by the functions contained in the \T{estlist\$output} list. <<>>= str(estlist$output[1:3], 2) @ The results are stored in a 4-dimensional array. The dimensions are: repetition number, type of value, procedure id, error id. Using \T{apply} it is very easy and fast to generate summary statistics. The raw results are stored on the hard disk, because typically it takes much longer to execute all the procedures than to calculate the summary statistics. The variables saved take up a lot of space quite quickly, so only the necessary data is stored. These are $\sigma$, $\bld\beta$ as well as the corresponding standard errors. To speed up the simulation routine \T{f.sim}, the simulations are carried out in parallel, as long as this is possible. This is accomplished with the help of the \T{R}-package \T{foreach}. This is most easily done on a machine with multiple processors or cores. The \T{multicore} package provides the methods to do so easily. The worker processes are just forked from the main \T{R} process. 
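Referring back to the four-dimensional result array described above, the
following sketch (added here and not run; the array name \T{r.test} and the
value name \T{sigma} are taken from the code in this vignette, the rest is
illustrative) indicates how \T{apply} condenses the raw results into summary
statistics:
<<apply-summary-sketch, eval=FALSE>>=
## Sketch only (not run): 'r.test' is the 4-dimensional result array with
## dimensions (repetition, value type, procedure, error distribution).
## Selecting the value "sigma" drops the second dimension; averaging over
## the repetitions (dimension 1) then yields one trimmed mean per
## (procedure, error distribution) combination:
apply(r.test[, "sigma", , ], 2:3, mean, trim = 0.1, na.rm = TRUE)
@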
After all the methods have been simulated, the simulation output is processed. The code is quite lengthy and thus not displayed here (check the Sweave source file \T{lmrob\_simulation.Rnw}). The residuals, robustness weights, leverages and $\tau$ values have to be recalculated. Using vectorized operations and some specialized \T{C} code, this is quite cheap. The summary statistics generated are discussed in the next section. <>= if (!file.exists(aggrResultsFile)) { files <- list.files(robustDta, pattern = 'r.test.final\\.') res <- foreach(file = files) %dopar% { ## get design, load r.test, initialize other stuff design <- substr(basename(file), 14, nchar(basename(file)) - 6) cat(design, ' ') load(file.path(robustDta, file)) estlist <- attr(r.test, 'estlist') use.intercept <- if (!is.null(estlist$use.intercept)) estlist$use.intercept else TRUE sel <- dimnames(r.test)[[3]] ## [dimnames(r.test)[[3]] != "estname=lm"] n.betas <- paste('beta',1:(NCOL(estlist$design)+use.intercept),sep='_') ## get design lX <- if (use.intercept) as.matrix(cbind(1, get(design))) else as.matrix(get(design)) n <- NROW(lX) p <- NCOL(lX) ## prepare arrays for variable designs and leverages if (is.function(attr(estlist$design, 'gen'))) { lXs <- array(NA, c(n, NCOL(lX), dim(r.test)[c(1, 4)]), list(Obs = NULL, Pred = colnames(lX), Data = NULL, Errstr = dimnames(r.test)[[4]])) } ## generate errors lerrs <- array(NA, c(n, dim(r.test)[c(1,4)]) , list(Obs = NULL, Data = NULL, Errstr = dimnames(r.test)[[4]])) for (i in 1:dim(lerrs)[3]) { lerrstr <- f.list2str(estlist$errs[[i]]) lerr <- f.errs(estlist, estlist$errs[[i]], gen = attr(estlist$design, 'gen'), nobs = n, npar = NCOL(lX)) lerrs[,,lerrstr] <- lerr if (!is.null(attr(lerr, 'designs'))) { ## retrieve generated designs: this returns a list of designs lXs[,,,i] <- unlist(attr(lerr, 'designs')) if (use.intercept) stop('intercept not implemented for random desings') } rm(lerr) } if (is.function(attr(estlist$design, 'gen'))) { ## calculate leverages lXlevs <- apply(lXs, 3:4, .lmrob.hat) } ## calculate fitted values from betas if (!is.function(attr(estlist$design, 'gen'))) { ## fixed design case lfitted <- apply(r.test[,n.betas,sel,,drop=FALSE],c(3:4), function(bhat) { lX %*% t(bhat) } ) } else { ## variable design case lfitted <- array(NA, n*prod(dim(r.test)[c(1,4)])*length(sel)) lfitted <- .C('R_calc_fitted', as.double(lXs), ## designs as.double(r.test[,n.betas,sel,,drop=FALSE]), ## betas as.double(lfitted), ## result as.integer(n), ## n as.integer(p), ## p as.integer(dim(r.test)[1]), ## nrep as.integer(length(sel)), ## n procstr as.integer(dim(r.test)[4]), ## n errstr DUP=FALSE, NAOK=TRUE, PACKAGE="robustbase")[[3]] } tdim <- dim(lfitted) <- c(n, dim(r.test)[1], length(sel),dim(r.test)[4]) lfitted <- aperm(lfitted, c(1,2,4,3)) ## calculate residuals = y - fitted.values lfitted <- as.vector(lerrs) - as.vector(lfitted) dim(lfitted) <- tdim[c(1,2,4,3)] lfitted <- aperm(lfitted, c(1,2,4,3)) dimnames(lfitted) <- c(list(Obs = NULL), dimnames(r.test[,,sel,,drop=FALSE])[c(1,3,4)]) lresids <- lfitted rm(lfitted) ## calculate lm MSE and trim trimmed MSE of betas tf.MSE <- function(lbetas) { lnrm <- rowSums(lbetas^2) c(MSE=mean(lnrm,na.rm=TRUE),MSE.1=mean(lnrm,trim=trim,na.rm=TRUE)) } MSEs <- apply(r.test[,n.betas,,,drop=FALSE],3:4,tf.MSE) li <- 1 ## so we can reconstruct where we are lres <- apply(lresids,3:4,f.aggregate.results <- { function(lresid) { ## the counter li tells us, where we are ## we walk dimensions from left to right lcdn <- f.get.current.dimnames(li, 
dimnames(lresids), 3:4) lr <- r.test[,,lcdn[1],lcdn[2]] ## update counter li <<- li + 1 ## transpose and normalize residuals with sigma lresid <- t(lresid) / lr[,'sigma'] if (lcdn[1] != 'estname=lm') { ## convert procstr to proclst and get control list largs <- f.str2list(lcdn[1])[[1]]$args if (grepl('lm.robust', lcdn[1])) { lctrl <- list() lctrl$psi <- toupper(largs$weight2) lctrl$tuning.psi <- f.eff2c.psi(largs$efficiency, lctrl$psi) lctrl$method <- 'MM' } else { lctrl <- do.call('lmrob.control',largs) } ## calculate correction factors ## A lsp2 <- rowSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi)^2) ## B lspp <- rowSums(lpp <- Mpsi(lresid,lctrl$tuning.psi, lctrl$psi,1)) ## calculate Huber\'s small sample correction factor lK <- 1 + rowSums((lpp - lspp/n)^2)*NCOL(lX)/lspp^2 ## 1/n cancels } else { lK <- lspp <- lsp2 <- NA } ## only calculate tau variants if possible if (grepl('args.method=\\w*(D|T)\\w*\\b', lcdn[1])) { ## SMD or SMDM ## calculate robustness weights lwgts <- Mwgt(lresid, lctrl$tuning.psi, lctrl$psi) ## function to calculate robustified leverages tfun <- if (is.function(attr(estlist$design, 'gen'))) function(i) { if (all(is.na(wi <- lwgts[i,]))) wi else .lmrob.hat(lXs[,,i,lcdn[2]],wi) } else function(i) { if (all(is.na(wi <- lwgts[i,]))) wi else .lmrob.hat(lX, wi) } llev <- sapply(1:dim(r.test)[1], tfun) ## calculate unique leverages lt <- robustbase:::lmrob.tau(list(),h=llev,control=lctrl) ## normalize residuals with tau (transpose lresid) lresid <- t(lresid) / lt ## A lsp2t <- colSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi)^2) ## B lsppt <- colSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi,1)) } else { lsp2t <- lsppt <- NA } ## calculate raw scales based on the errors lproc <- f.str2list(lcdn[1])[[1]] q <- NA M <- NA if (lproc$estname == 'lmrob.mar' && lproc$args$type == 'qE') { ## for lmrob_mar, qE variant lctrl <- lmrob.control(psi = 'bisquare', tuning.chi=uniroot(function(c) robustbase:::lmrob.bp('bisquare', c) - (1-p/n)/2, c(1, 3))$root) se <- apply(lerrs[,,lcdn[2]],2,lmrob.mscale,control=lctrl,p=p) ltmp <- se/lr[,'sigma'] q <- median(ltmp, na.rm = TRUE) M <- mad(ltmp, na.rm = TRUE) } else if (!is.null(lproc$args$method) && lproc$args$method == 'SMD') { ## for D-scales se <- apply(lerrs[,,lcdn[2]],2,lmrob.dscale,control=lctrl, kappa=robustbase:::lmrob.kappa(control=lctrl)) ltmp <- se/lr[,'sigma'] q <- median(ltmp, na.rm = TRUE) M <- mad(ltmp, na.rm = TRUE) } ## calculate empirical correct test value (to yield 5% level) t.val_2 <- t.val_1 <- quantile(abs(lr[,'beta_1']/lr[,'se_1']), 0.95, na.rm = TRUE) if (p > 1) t.val_2 <- quantile(abs(lr[,'beta_2']/lr[,'se_2']), 0.95, na.rm = TRUE) ## return output: summary statistics: c(## gamma AdB2.1 = mean(lsp2/lspp^2,trim=trim,na.rm=TRUE)*n, K2AdB2.1 = mean(lK^2*lsp2/lspp^2,trim=trim,na.rm=TRUE)*n, AdB2t.1 = mean(lsp2t/lsppt^2,trim=trim,na.rm=TRUE)*n, sdAdB2.1 = sd.trim(lsp2/lspp^2*n,trim=trim,na.rm=TRUE), sdK2AdB2.1 = sd.trim(lK^2*lsp2/lspp^2*n,trim=trim,na.rm=TRUE), sdAdB2t.1 = sd.trim(lsp2t/lsppt^2*n,trim=trim,na.rm=TRUE), ## sigma medsigma = median(lr[,'sigma'],na.rm=TRUE), madsigma = mad(lr[,'sigma'],na.rm=TRUE), meansigma.1 = mean(lr[,'sigma'],trim=trim,na.rm=TRUE), sdsigma.1 = sd.trim(lr[,'sigma'],trim=trim,na.rm=TRUE), meanlogsigma = mean(log(lr[,'sigma']),na.rm=TRUE), meanlogsigma.1 = mean(log(lr[,'sigma']),trim=trim,na.rm=TRUE), sdlogsigma = sd(log(lr[,'sigma']),na.rm=TRUE), sdlogsigma.1 = sd.trim(log(lr[,'sigma']),trim=trim,na.rm=TRUE), q = q, M = M, ## beta efficiency.1 = MSEs['MSE.1','estname=lm',lcdn[2]] / 
MSEs['MSE.1',lcdn[1],lcdn[2]], ## t-value: level emplev_1 = mean(abs(lr[,'beta_1']/lr[,'se_1']) > qt(0.975, n - p), na.rm = TRUE), emplev_2 = if (p>1) { mean(abs(lr[,'beta_2']/lr[,'se_2']) > qt(0.975, n - p), na.rm = TRUE) } else NA, ## t-value: power power_1_0.2 = mean(abs(lr[,'beta_1']-0.2)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.2 = if (p>1) { mean(abs(lr[,'beta_2']-0.2)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.4 = mean(abs(lr[,'beta_1']-0.4)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.4 = if (p>1) { mean(abs(lr[,'beta_2']-0.4)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.6 = mean(abs(lr[,'beta_1']-0.6)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.6 = if (p>1) { mean(abs(lr[,'beta_2']-0.6)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.8 = mean(abs(lr[,'beta_1']-0.8)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.8 = if (p>1) { mean(abs(lr[,'beta_2']-0.8)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_1 = mean(abs(lr[,'beta_1']-1)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_1 = if (p>1) { mean(abs(lr[,'beta_2']-1)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, ## coverage probability: calculate empirically ## the evaluation points are constant, but the designs change ## therefore this makes only sense for fixed designs cpr_1 = mean(lr[,'upr_1'] < 0 | lr[,'lwr_1'] > 0, na.rm = TRUE), cpr_2 = mean(lr[,'upr_2'] < 0 | lr[,'lwr_2'] > 0, na.rm = TRUE), cpr_3 = mean(lr[,'upr_3'] < 0 | lr[,'lwr_3'] > 0, na.rm = TRUE), cpr_4 = mean(lr[,'upr_4'] < 0 | lr[,'lwr_4'] > 0, na.rm = TRUE), cpr_5 = if (any(colnames(lr) == 'upr_5')) { mean(lr[,'upr_5'] < 0 | lr[,'lwr_5'] > 0, na.rm = TRUE) } else NA, cpr_6 = if (any(colnames(lr) == 'upr_6')) { mean(lr[,'upr_6'] < 0 | lr[,'lwr_6'] > 0, na.rm = TRUE) } else NA, cpr_7 = if (any(colnames(lr) == 'upr_7')) { mean(lr[,'upr_7'] < 0 | lr[,'lwr_7'] > 0, na.rm = TRUE) } else NA ) }}) ## convert to data.frame lres <- f.a2df.2(lres, split = '___NO___') ## add additional info lres$n <- NROW(lX) lres$p <- NCOL(lX) lres$nmpdn <- with(lres, (n-p)/n) lres$Design <- design ## clean up rm(r.test, lXs, lXlevs, lresids, lerrs) gc() ## return lres lres } save(res, trim, file = aggrResultsFile) ## stop cluster if (exists("cl")) stopCluster(cl) } <>= load(aggrResultsFile) ## this will fail if the file is not found (for a reason) ## set eval to TRUE for chunks simulation-run and simulation-aggr ## if you really want to run the simulations again. 
## (better fail with an error than run for weeks) ## combine list elements to data.frame test.1 <- do.call('rbind', res) test.1 <- within(test.1, { Method[Method == "SM"] <- "MM" Method <- Method[, drop = TRUE] Estimator <- interaction(Method, D.type, drop = TRUE) Estimator <- f.rename.level(Estimator, 'MM.S', 'MM') Estimator <- f.rename.level(Estimator, 'SMD.D', 'SMD') Estimator <- f.rename.level(Estimator, 'SMDM.D', 'SMDM') Estimator <- f.rename.level(Estimator, 'MM.qT', 'MMqT') Estimator <- f.rename.level(Estimator, 'MM.qE', 'MMqE') Estimator <- f.rename.level(Estimator, 'MM.rob', 'MMrobust') Estimator <- f.rename.level(Estimator, 'lsq.lm', 'OLS') Est.Scale <- f.rename.level(Estimator, 'MM', 'sigma_S') Est.Scale <- f.rename.level(Est.Scale, 'MMrobust', 'sigma_robust') Est.Scale <- f.rename.level(Est.Scale, 'MMqE', 'sigma_S*qE') Est.Scale <- f.rename.level(Est.Scale, 'MMqT', 'sigma_S*qT') Est.Scale <- f.rename.level(Est.Scale, 'SMDM', 'sigma_D') Est.Scale <- f.rename.level(Est.Scale, 'SMD', 'sigma_D') Est.Scale <- f.rename.level(Est.Scale, 'OLS', 'sigma_OLS') Psi <- f.rename.level(Psi, 'hampel', 'Hampel') }) ## add interaction of Method and Cov test.1 <- within(test.1, { method.cov <- interaction(Estimator, Cov, drop=TRUE) levels(method.cov) <- sub('\\.+vcov\\.(a?)[wacrv1]*', '\\1', levels(method.cov)) method.cov <- f.rename.level(method.cov, "MMa", "MM.Avar1") method.cov <- f.rename.level(method.cov, "MMrobust.Default", "MMrobust.Wssc") method.cov <- f.rename.level(method.cov, "MM", "MM.Wssc") method.cov <- f.rename.level(method.cov, "SMD", "SMD.Wtau") method.cov <- f.rename.level(method.cov, "SMDM", "SMDM.Wtau") method.cov <- f.rename.level(method.cov, "MMqT", "MMqT.Wssc") method.cov <- f.rename.level(method.cov, "MMqE", "MMqE.Wssc") method.cov <- f.rename.level(method.cov, "OLS.Default", "OLS") ## ratio: the closest 'desired ratios' instead of exact p/n; ## needed in plots only for stat_*(): median over "close" p/n's: ratio <- ratios[apply(abs(as.matrix(1/ratios) %*% t(as.matrix(p / n)) - 1), 2, which.min)] }) ## calculate expected values of psi^2 and psi' test.1$Ep2 <- test.1$Epp <- NA for(Procstr in levels(test.1$Procstr)) { args <- f.str2list(Procstr)[[1]]$args if (is.null(args)) next lctrl <- do.call('lmrob.control',args) test.1$Ep2[test.1$Procstr == Procstr] <- robustbase:::lmrob.E(psi(r)^2, lctrl, use.integrate = TRUE) test.1$Epp[test.1$Procstr == Procstr] <- robustbase:::lmrob.E(psi(r,1), lctrl, use.integrate = TRUE) } ## drop some observations, separate fixed and random designs test.fixed <- droplevels(subset(test.1, n == 20)) ## n = 20 -- fixed design test.1 <- droplevels(subset(test.1, n != 20)) ## n !=20 -- random designs test.lm <- droplevels(subset(test.1, Function == 'lm')) # lm = OLS test.1 <- droplevels(subset(test.1, Function != 'lm')) # Rob := all "robust" test.lm$Psi <- NULL test.lm.2 <- droplevels(subset(test.lm, Error == 'N(0,1)')) # OLS for N(*) test.2 <- droplevels(subset(test.1, Error == 'N(0,1)' & Function != 'lm'))# Rob for N(*) ## subsets test.3 <- droplevels(subset(test.2, Method != 'SMDM'))# Rob, not SMDM for N(*) test.4 <- droplevels(subset(test.1, Method != 'SMDM'))# Rob, not SMDM for all @ \section{Simulation Results} \subsection{Criteria} The simulated methods are compared using the following criteria. \textbf{Scale estimates.} The criteria for scale estimates are all calculated on the log-scale. The bias of the estimators is measured by the $\Sexpr{trim*100}\%$ trimmed mean. To recover a meaningful scale, the results are exponentiated before plotting. 
It is easy to see that this is equivalent to calculating geometric means. Since the methods are all tuned at the central model, ${\mathcal N}(0,1)$, a meaningful comparison of biases can only be made for ${\mathcal N}(0,1)$ distributed errors. The variability of the estimators, on the other hand, can be compared over all simulated error distributions. It is measured by the $\Sexpr{trim*100}\%$ trimmed standard deviation, rescaled by the square root of the number of observations. For completeness, the statistics used to compare scale estimates in \citet{maronna2009correcting} are also calculated. They are defined as \begin{equation} \label{eq:def.q.and.M} q = \median\left(\frac{S(\bld e)}{\hat\sigma_S}\right), \quad M = \mad\left(\frac{S(\bld e)}{\hat\sigma_S}\right), \end{equation} where $S(e)$ stands for the S-scale estimate evaluated for the actual errors $\bld e$. For the D-scale estimate, the definition is analogue. Since there is no design to correct for, we set $\tau_i = 1\ \forall i$. \textbf{Coefficients.} The efficiency of estimated regression coefficients $\bld{\hat\beta}$ is characterized by their mean squared error (\emph{MSE}). Since we simulate under $H_0: \bld\beta = 0$, this is determined by the covariance matrix of $\bld{\hat\beta}$. We use $\Erw\left[\norm{\bld{\hat\beta}}_2^2\right] = \sum_{j=1}^p \var(\hat\beta_j)$ as a summary. When comparing to the MSE of the ordinary least squares estimate (\emph{OLS}), this gives the efficiency, which, by the choice of tuning constants of $\psi$, should yield \begin{equation*} \frac{{\rm MSE}(\bld{\hat\beta}_{\rm OLS})}{{\rm MSE}(\bld{\hat\beta})} \approx 0.95 \end{equation*} for standard normally distributed errors. The simulation mean of $\sum_{j=1}^p \var(\hat\beta_j)$ is calculated with $\Sexpr{trim*100}\%$ trimming. For other error distributions, this ratio should be larger than $1$, since by using robust procedures we expect to gain efficiency at other error distributions (relative to the least squares estimate). $\bld\gamma$\textbf{.} We compare the behavior of the various estimators of $\gamma$ by calculating the trimmed mean and the trimmed standard deviation for standard normal distributed errors. \textbf{Covariance matrix estimate.} The covariance matrix estimates are compared indirectly over the performance of the resulting test statistics. We compare the empirical level of the hypothesis tests $H_0: \beta_j = 0$ for some $j \in \{1,\dots, p\}$. The power of the tests is compared by testing for $H_0: \beta_j = b$ for several values of $b>0$. The formal power of a more liberal test is generally higher. Therefore, in order for this comparison to be meaningful, the critical value for each test statistic was corrected such that all tests have the same simulated level of $5\%$. The simple hypothesis tests give only limited insights. To investigate the effects of other error distributions, e.g., asymmetric error distributions, we compare the confidence intervals for the prediction of some fixed points. Since it was not clear how to assess the quality prediction intervals, either at the central or the simulated model, we do not calculate them here. A small number of prediction points is already enough, if they are chosen properly. We chose to use seven points lying on the first two principal components, spaced evenly from the center of the design used to the extended range of the design. 
The principal components were calculated robustly (using \T{covMcd} of the \T{robustbase} package) and the range was extended by a fraction of $0.5$. An example is shown in Figure~\ref{fig:design-predict}. \subsection{Results} The results are given here as plots (Fig.~\ref{fig:meanscale-1} to Fig.~\ref{fig:cpr}). For a complete discussion of the results, we refer to \citet{KS2011}. The different $\psi$-functions are each plotted in a different facet, except for Fig.~\ref{fig:qscale-all}, Fig.~\ref{fig:Mscale-all} and Fig.~\ref{fig:lqq-level}, where the facets show the results for various error distributions. The plots are augmented with auxiliary lines to ease the comparison of the methods. The lines connect the median values over the values of $n$ for each simulated ratio $p/n$. In many plots the y-axis has been truncated. Points in the grey shaded area represent truncated values using a different scale. \begin{figure} \begin{center} <>= ## ## exp(mean(log(sigma))): this looks almost identical to mean(sigma) print(ggplot(test.3, aes(p/n, exp(meanlogsigma.1), color = Est.Scale)) + stat_summary(aes(x=ratio), # <- "rounded p/n": --> median over "neighborhood" fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Psi) + ylab(quote('geometric ' ~ mean(hat(sigma)))) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(test.3$Est.Scale))) @ \end{center} \caption{Mean of scale estimates for normal errors. The mean is calculated with $\Sexpr{trim*100}\%$ trimming. The lines connect the median values for each simulated ratio $p/n$. Results for random designs only. } \label{fig:meanscale-1} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(test.3, aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + ylab(quote(sd(log(hat(sigma)))*sqrt(n))) + facet_wrap(~ Psi) + geom_point (data=test.lm.2, alpha=alpha.n, aes(color = Est.Scale)) + stat_summary(data=test.lm.2, aes(x=ratio, color = Est.Scale), fun.y=median, geom='line') + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels= lab(test.3 $Est.Scale, test.lm.2$Est.Scale))) @ \end{center} \caption{Variability of the scale estimates for normal errors. The standard deviation is calculated with $\Sexpr{trim*100}\%$ trimming. } \label{fig:sdscale-1} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(test.4, aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) + ylim(with(test.4, range(sdlogsigma.1*sqrt(n)))) + ylab(quote(sd(log(hat(sigma)))*sqrt(n))) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = Error), alpha = alpha.error) + facet_wrap(~ Psi) + ## "FIXME" (?): the next 'test.lm' one give warnings geom_point (data=test.lm, aes(color = Est.Scale), alpha=alpha.n) + ##-> Warning: Removed 108 rows containing missing values (geom_point). stat_summary(data=test.lm, aes(x = ratio, color = Est.Scale), fun.y=median, geom='line') + ##-> Warning: Removed 108 rows containing non-finite values (stat_summary). 
g.scale_shape(labels=lab(test.4$Error)) + scale_colour_discrete("Scale Est.", labels=lab(test.4 $Est.Scale, test.lm$Est.Scale))) @ \end{center} \caption{Variability of the scale estimates for all simulated error distributions.} \label{fig:sdscale-all} \end{figure} \begin{figure} \begin{center} <>= t3est2 <- droplevels(subset(test.3, Estimator %in% c("SMD", "MMqE"))) print(ggplot(t3est2, aes(p/n, q, color = Est.Scale)) + ylab(quote(q)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Psi) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale))) @ \end{center} \caption{$q$ statistic for normal errors. $q$ is defined in \eqref{eq:def.q.and.M}.} \label{fig:qscale-1} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t3est2, aes(p/n, M/q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + g.scale_y_log10_0.05() + facet_wrap(~ Psi) + ylab(quote(M/q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale))) @ \end{center} \caption{$M/q$ statistic for normal errors. $M$ and $q$ are defined in \eqref{eq:def.q.and.M}.} \label{fig:Mscale-1} \end{figure} \begin{figure} \begin{center} <>= t1.bi <- droplevels(subset(test.1, Estimator %in% c("SMD", "MMqE") & Psi == 'bisquare')) print(ggplot(t1.bi, aes(p/n, q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Error) + ## labeller missing! ylab(quote(q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(tmp$Est.Scale)), legend.mod = legend.mod) @ \end{center} \caption{$q$ statistic for \emph{bisquare} $\psi$. } \label{fig:qscale-all} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t1.bi, aes(p/n, M/q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + g.scale_y_log10_0.05() + facet_wrap(~ Error) + ylab(quote(M/q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(tmp$Est.Scale)), legend.mod = legend.mod) @ \end{center} \caption{$M/q$ statistic for \emph{bisquare} $\psi$. } \label{fig:Mscale-all} \end{figure} \clearpage% not nice, but needed against LaTeX Error: Too many unprocessed floats. \begin{figure} \begin{center} <>= print(ggplot(test.2, aes(p/n, efficiency.1, color = Estimator)) + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 0.95) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + facet_wrap(~ Psi) + ylab(quote('efficiency of' ~~ hat(beta))) + g.scale_shape(quote(n)) + scale_colour_discrete(name = "Estimator", labels = lab(test.2$Estimator))) @ \end{center} \caption{Efficiency for normal errors. The efficiency is calculated by comparing to an OLS estimate and averaging with $\Sexpr{trim*100}\%$ trimming. 
} \label{fig:efficiency} \end{figure} \begin{figure} \begin{center} <>= t.1xt1 <- droplevels(subset(test.1, Error != 't1')) print(ggplot(t.1xt1, aes(p/n, efficiency.1, color = Estimator)) + ylab(quote('efficiency of '~hat(beta))) + geom_point(aes(shape = Error), alpha = alpha.error) + geom_hline(yintercept = 0.95) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + g.scale_shape(values=c(16,17,15,3,7,8,9,1,2,4)[-4], labels=lab(t.1xt1$Error)) + facet_wrap(~ Psi) + scale_colour_discrete(name = "Estimator", labels = lab(t.1xt1$Estimator))) @ \end{center} \caption{Efficiency for all simulated error distributions except $t_1$. } \label{fig:efficiency-all} \end{figure} \begin{figure} \begin{center} <>= t.2o. <- droplevels(subset(test.2, !is.na(AdB2t.1))) print(ggplot(t.2o., aes(p/n, AdB2.1/(1-p/n), color = Estimator)) + geom_point(aes(shape=factor(n)), alpha = alpha.n) + geom_point(aes(y=K2AdB2.1/(1-p/n)), alpha = alpha.n) + geom_point(aes(y=AdB2t.1), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + stat_summary(aes(x=ratio, y=K2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype=2) + stat_summary(aes(x=ratio, y=AdB2t.1), fun.y=median, geom='line', linetype=3) + geom_hline(yintercept = 1/0.95) + g.scale_y_log10_1() + scale_shape_discrete(quote(n)) + scale_colour_discrete(name = "Estimator", labels = lab(t.2o.$Estimator)) + ylab(quote(mean(hat(gamma)))) + facet_wrap(~ Psi)) @ \end{center} \caption{Comparing the estimates of $\gamma$. The solid line connects the uncorrected estimate, dotted the $\tau$ corrected estimate and dashed Huber's small sample correction. } \label{fig:AdB2-1} \end{figure} \begin{figure} \begin{center} <>= t.2ok <- droplevels(subset(test.2, !is.na(sdAdB2t.1))) print(ggplot(t.2ok, aes(p/n, sdAdB2.1/(1-p/n), color = Estimator)) + geom_point(aes(shape=factor(n)), alpha = alpha.n) + geom_point(aes(y=sdK2AdB2.1/(1-p/n)), alpha = alpha.n) + geom_point(aes(y=sdAdB2t.1), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + stat_summary(aes(x=ratio, y=sdK2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype= 2) + stat_summary(aes(x=ratio, y=sdAdB2t.1), fun.y=median, geom='line', linetype= 3) + g.scale_y_log10_0.05() + scale_shape_discrete(quote(n)) + scale_colour_discrete(name = "Estimator", labels=lab(t.2ok$Estimator)) + ylab(quote(sd(hat(gamma)))) + facet_wrap(~ Psi)) @ \end{center} \caption{Comparing the estimates of $\gamma$. The solid line connects the uncorrected estimate, dotted the $\tau$ corrected estimate and dashed Huber's small sample correction. } \label{fig:sdAdB2-1} \end{figure} \begin{figure} \begin{center} <>= t.2en0 <- droplevels(subset(test.2, emplev_1 != 0)) print(ggplot(t.2en0, aes(p/n, f.truncate(emplev_1), color = method.cov)) + g.truncate.lines + g.truncate.areas + geom_point(aes(shape = factor(n)), alpha = alpha.n) + scale_shape_discrete(quote(n)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + scale_colour_discrete(name = "Estimator", labels=lab(t.2en0$method.cov)) + ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) + facet_wrap(~ Psi)) @ \end{center} \caption{Empirical levels of test $H_0: \beta_1 = 0$ for normal errors. The y-values are truncated at $\Sexpr{trunc[1]}$ and $\Sexpr{trunc[2]}$. 
} \label{fig:emp-level} \end{figure} \begin{figure} \begin{center} <>= tmp <- droplevels(subset(test.1, Psi == 'lqq' & emplev_1 != 0)) print(ggplot(tmp, aes(p/n, f.truncate(emplev_1), color = method.cov)) + ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) + g.truncate.line + g.truncate.area + geom_point(aes(shape = factor(n)), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + g.scale_shape(quote(n)) + scale_colour_discrete(name = "Estimator", labels=lab(tmp$method.cov)) + facet_wrap(~ Error) , legend.mod = legend.mod ) @ \end{center} \caption{Empirical levels of test $H_0: \beta_1 = 0$ for \emph{lqq} $\psi$-function and different error distributions. } \label{fig:lqq-level} \end{figure} \begin{figure} \begin{center} <>= t2.25 <- droplevels(subset(test.2, n == 25))# <-- fixed n ==> no need for 'ratio' tL2.25 <- droplevels(subset(test.lm.2, n == 25)) scale_col_D2.25 <- scale_colour_discrete(name = "Estimator (Cov. Est.)", labels=lab(t2.25 $method.cov, tL2.25$method.cov)) print(ggplot(t2.25, aes(p/n, power_1_0.2, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.2) )) + geom_point(# aes(shape = Error), alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.2$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-0_2} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_0.4, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.4) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.4$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-0_4} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_0.6, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.6) )) + geom_point(# aes(shape = Error), alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.6$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-0_6} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_0.8, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.8) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.8$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. 
} \label{fig:power-1-0_8} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_1, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 1) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 1$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-1} \end{figure} %\clearpage \begin{figure} \begin{center} %% now (2016-11 GGally) works --- but fails with new 2018-05 ggplot2: <>= pp <- f.prediction.points(dd)[1:7,] ## Worked in older ggplot2 -- now plotmatrix() is gone, to be replaced by GGally::ggpairs): ## tmp <- plotmatrix(pp)$data ## tmp$label <- as.character(1:7) ## print(plotmatrix(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5)) if(FALSE) { tmp <- ggpairs(pp)$data tmp$label <- as.character(1:7) # and now? } ## ggpairs() + geom_text() does *NOT* work {ggpairs has own class} ## print(ggpairs(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5)) try( ## fails with old GGally and new packageVersion("ggplot2") >= "2.2.1.9000" print( ggpairs(dd) )## now (2016-11) fine ) @ \end{center} \caption{Prediction points for fixed design. The black points are the points of the original design. The red digits indicate the numbers and locations of the points where predictions are taken.} \label{fig:design-predict} \end{figure} \begin{figure} \begin{center} <>= n.cprs <- names(test.fixed)[grep('cpr', names(test.fixed))] # test.fixed: n=20 => no 'x=ratio' test.5 <- melt(test.fixed[,c('method.cov', 'Error', 'Psi', n.cprs)]) test.5 <- within(test.5, { Point <- as.numeric(do.call('rbind', strsplit(levels(variable), '_'))[,2])[variable] }) print(ggplot(test.5, aes(Point, f.truncate(value), color = method.cov)) + geom_point(aes(shape = Error), alpha = alpha.error) + g.truncate.line + g.truncate.area + stat_summary(fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + g.scale_shape(labels=lab(test.5$Error)) + scale_colour_discrete(name = "Estimator (Cov. Est.)", labels=lab(test.5$method.cov)) + ylab("empirical level of confidence intervals") + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical coverage probabilities. Results for fixed design. The y-values are truncated at $\Sexpr{trunc[2]}$. } \label{fig:cpr} \end{figure} \clearpage \section{Maximum Asymptotic Bias} \label{sec:maximum-asymptotic-bias} The slower redescending $\psi$-functions come with higher asymptotic bias as illustrated in Fig.~\ref{fig:max-asymptotic-bias}. We calculate the asymptotic bias as in \citet{berrendero2007maximum}. <>= ## Henning (1994) eq 33: g <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) rho <- function(x) Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0) integrate(function(x) rho(((1 + theta^2)/s^2*x)^2)*dchisq(x, 1, mu^2/(1 + theta^2)), -Inf, Inf)$value }) ## Martin et al 1989 Section 3.2: for mu = 0 g.2 <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) lctrl$tuning.psi <- lctrl$tuning.chi robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)}) g.2.MM <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) 
robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)}) ## Henning (1994) eq 30, one parameter case g.3 <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) rho <- function(x) Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0) int.x <- Vectorize(function(y) { integrate(function(x) rho((y - x*theta - mu)/s)*dnorm(x)*dnorm(y),-Inf, Inf)$value }) integrate(int.x,-Inf, Inf)$value }) inv.g1 <- function(value, theta, mu, ...) { g <- if (mu == 0) g.2 else g.3 uniroot(function(s) g(s, theta, mu, ...) - value, c(0.1, 100))$root } inv.g1.MM <- function(value, theta, mu, ...) { g <- if (mu == 0) g.2.MM else g.3.MM ret <- tryCatch(uniroot(function(s) g(s, theta, mu, ...) - value, c(0.01, 100)), error = function(e)e) if (inherits(ret, 'error')) { warning('inv.g1.MM: ', value, ' ', theta, ' ', mu,' -> Error: ', ret$message) NA } else { ret$root } } s.min <- function(epsilon, ...) inv.g1(0.5/(1 - epsilon), 0, 0, ...) s.max <- function(epsilon, ...) inv.g1((0.5-epsilon)/(1-epsilon), 0, 0, ...) BS <- Vectorize(function(epsilon, ...) { sqrt(s.max(epsilon, ...)/s.min(epsilon, ...)^2 - 1) }) l <- Vectorize(function(epsilon, ...) { sigma_be <- s.max(epsilon, ...) sqrt((sigma_be/inv.g1.MM(g.2.MM(sigma_be,0,0,...) + epsilon/(1-epsilon),0,0,...))^2 - 1) }) u <- Vectorize(function(epsilon, ...) { gamma_be <- s.min(epsilon, ...) max(l(epsilon, ...), sqrt((gamma_be/inv.g1.MM(g.2.MM(gamma_be,0,0,...) + epsilon/(1-epsilon),0,0,...))^2 - 1)) }) @ \begin{figure}[h!] \begin{center} <>= asymptMBFile <- file.path(robustDta, 'asymptotic.max.bias.Rdata') if (!file.exists(asymptMBFile)) { x <- seq(0, 0.35, length.out = 100) rmb <- rbind(data.frame(l=l(x, psi = 'hampel'), u=u(x, psi = 'hampel'), psi = 'Hampel'), data.frame(l=l(x, psi = 'lqq'), u=u(x, psi = 'lqq'), psi = 'lqq'), data.frame(l=l(x, psi = 'bisquare'), u=u(x, psi = 'bisquare'), psi = 'bisquare'), data.frame(l=l(x, psi = 'optimal'), u=u(x, psi = 'optimal'), psi = 'optimal')) rmb$x <- x save(rmb, file=asymptMBFile) } else load(asymptMBFile) <>= print(ggplot(rmb, aes(x, l, color=psi)) + geom_line() + geom_line(aes(x, u, color=psi), linetype = 2) + xlab(quote("amount of contamination" ~~ epsilon)) + ylab("maximum asymptotic bias bounds") + coord_cartesian(ylim = c(0,10)) + scale_y_continuous(breaks = 1:10) + scale_colour_hue(quote(psi ~ '-function'))) @ \end{center} \caption{Maximum asymptotic bias bound for the $\psi$-functions used in the simulation. Solid line: lower bound. 
Dashed line: upper bound.} \label{fig:max-asymptotic-bias} \end{figure} \bibliographystyle{chicago} \bibliography{robustbase} \end{document} robustbase/vignettes/fastMcd-kmini.Rnw0000644000176200001440000003113213012615634017612 0ustar liggesusers\documentclass[11pt, a4paper]{article} \usepackage[a4paper, text={16cm,25cm}]{geometry} %\VignetteIndexEntry{covMcd() -- Generalizing the FastMCD} %\VignetteDepends{robustbase} \SweaveOpts{prefix.string=mcd, eps=FALSE, pdf=TRUE, strip.white=true} \SweaveOpts{width=6, height=4.1} \usepackage{amsmath} \usepackage{amsfonts}% \mathbb \usepackage{mathtools}% -> \floor, \ceil \usepackage[utf8]{inputenc} %% The following is partly R's share/texmf/Rd.sty \usepackage{color} \usepackage{hyperref} \definecolor{Blue}{rgb}{0,0,0.8} \definecolor{Red}{rgb}{0.7,0,0} \hypersetup{% hyperindex,% colorlinks={true},% pagebackref,% linktocpage,% plainpages={false},% linkcolor={Blue},% citecolor={Blue},% urlcolor={Red},% pdfstartview={Fit},% pdfview={XYZ null null null}% } \usepackage{natbib} \usepackage[noae]{Sweave} %---------------------------------------------------- \DeclarePairedDelimiter{\ceil}{\lceil}{\rceil} \DeclarePairedDelimiter{\floor}{\lfloor}{\rfloor} \DeclareMathOperator{\sign}{sign} \newcommand{\abs}[1]{\left| #1 \right|} \newtheorem{definition}{Definition} \newcommand{\byDef}{\mathrm{by\ default}} \newcommand{\R}{{\normalfont\textsf{R}}{}} \newcommand{\code}[1]{\texttt{#1}} \newcommand*{\pkg}[1]{\texttt{#1}} \newcommand*{\CRANpkg}[1]{\href{https://CRAN.R-project.org/package=#1}{\pkg{#1}}} %---------------------------------------------------- \begin{document} \setkeys{Gin}{width=0.9\textwidth} \setlength{\abovecaptionskip}{-5pt} \title{covMcd() -- Considerations about Generalizing the FastMCD} \author{Martin M\"achler} \maketitle %\tableofcontents %% %% Pison, G., Van Aelst, S., and Willems, G. (2002) %% Small Sample Corrections for LTS and MCD. %% Metrika % ~/save/papers/robust-diverse/Pison_VanAelst_Willems.pdf %% <>= # set margins for plots options(SweaveHooks=list(fig=function() par(mar=c(3,3,1.4,0.7), mgp=c(1.5, 0.5, 0))), width = 75) @ \section{Introduction} The context is robust multivariate ``location and scatter'' estimation, which corresponds to estimating the first two moments in cases they exist. We assume data and a model \begin{align} \label{eq:data-model} x_i & \in \mathbb{R}^p, \ \ i=1,2,\dots,n \\ x_i & \sim \mathcal{F}(\mu, \Sigma), \ \ \mathrm{i.i.d.};\ \ \mu \in \mathbb{R}^p, \ \ \Sigma \in \mathbb{R}^{p \times p}, \ \textrm{positive definite}, \end{align} where a conceptual null model is the $p$-dimensional normal distribution. One typical assumption is that $\mathcal{F}$ is a mixture with the majority component (``good data'') being $\mathcal{N}_p(\mu, \Sigma)$ and other components modeling ``the outliers''. In other words, we want estimates $\bigl(\hat{\mu}, \hat{\Sigma}\bigr)$ which should be close to the true ``good data'' $(\mu, \Sigma)$ --- and do not say more here. \section{MCD and ``the Fast'' MCD (= \textsc{fastmcd}) Algorithm} The \CRANpkg{robustbase} \R{} package has featured a function \code{covMcd()} since early on (Feb.~2006) and that has been an interface to the Fortran routine provided by the original authors and (partly) described in \citet{RouPvD99}. %% Rousseeuw, P. J. and van Driessen, K. (1999) %% A fast algorithm for the minimum covariance determinant estimator. %% Technometrics {41}, 212--223. 
%% >> ~/save/papers/robust-diverse/Rousseeuw_VanD-FastMCD_1999.pdf % ------------------------------------------------------------ We describe shortly how the algorithm works, partly building on the documentation provided in the source (R, S, and Fortran) codes: %% R CMD Rdconv --type=latex ../../man/covMcd.Rd > covMcd.tex The minimum covariance determinant estimator of location and scatter (MCD) implemented in \code{covMcd()} is similar to \R{} function \code{cov.mcd()} in \CRANpkg{MASS}. The (``theoretical'') MCD looks for the $h = h_\alpha (> 1/2)$ out of $n$ observations whose classical covariance matrix has the lowest possible determinant. In more detail, we will use $h = h_\alpha = h(\alpha,n,p) \approx \alpha \cdot (n+p+1)$, where as \citet{RouPvD99} mainly use (the default) $\alpha = \frac 1 2$, where $h = h(1/2, n, p) = \floor[\Big]{\frac{n+p+1}{2}}$. For general $\alpha \ge \frac 1 2$, the \R{} implementation (derived from their original S code) uses $h = h(\alpha,n,p) =$ \code{h.alpha.n(alpha,n,p)} (function in \pkg{robustbase}), which is \begin{eqnarray} \label{eq:def-h} h = h_\alpha = h(\alpha,n,p) := \floor{2 n_2 - n + 2 \alpha (n - n_2)}, \ \mathrm{where\ } n_2 := \floor[\Big]{\frac{n+p+1}{2}}% %= (n+p+1)/2 \ \ (\mathrm{\ where ``/'' denotes \emph{integer} division}) . \end{eqnarray} The fraction $\alpha \ge \frac 1 2$ can be chosen by the user, where $\alpha = \frac 1 2$ is the most robust, and indeed, $h_{1/2} = n_2 = \floor[\Big]{\frac{n+p+1}{2}}$. Even in general, as long as $n \gg p$, $\alpha$ is approximately the \emph{proportion} of the subsample size $h$ in the full sample (size $n$): \begin{equation} \label{eq:h.approx} h \approx \alpha \cdot n \iff \alpha \approx \frac{h}{n}, \end{equation} <>= require(robustbase) n <- c(5, 10, 20, 30, 50, 100, 200, 500) hmat <- function(alpha, p) cbind(n, h.alpha = h.alpha.n (alpha, n,p), h. = floor(alpha * (n + p + 1)), alpha.n = round(alpha * n)) hmat(alpha = 1/2, p = 3) hmat(alpha = 3/4, p = 4) @ The breakdown point (for $h > \frac{n}{2}$) then is \begin{eqnarray} \label{eq:breakdown} \epsilon_{*} = \frac{n-h+1}{n}, \end{eqnarray} which is less than but close to $\frac 1 2$ for $\alpha = \frac 1 2$, and in general, $h/n \approx \alpha$, the breakdown point is approximately, \begin{eqnarray} \label{eq:eps-approx} \epsilon_{*} = \frac{n-h+1}{n} \approx \frac{n-h}{n} = 1 - \frac{h}{n} \approx 1 - \alpha. \end{eqnarray} The raw MCD estimate of location, say $\hat{\mu}_0$, is then the average of these $h$ points, whereas the raw MCD estimate of scatter, $\hat{\Sigma}_0$, is their covariance matrix, multiplied by a consistency factor \code{.MCDcons(p, h/n)}) and (by default) a finite sample correction factor \code{.MCDcnp2(p, n, alpha)}, to make it consistent at the normal model and unbiased at small samples. %% Both rescaling factors (consistency and finite sample) are returned in the length-2 vector %% \code{raw.cnp2}. In practice, for reasonably sized $n$, $p$ and hence $h$, it is not feasible to search the full space of all $n \choose h$ $h$-subsets of $n$ observations. Rather, the implementation of \code{covMcd} uses the Fast MCD algorithm of \citet{RouPvD99} to approximate the minimum covariance determinant estimator, see Section~\ref{sec:fastMCD}. 
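As a small added illustration (not part of the original text), the following shows how quickly the number of $h$-subsets, ${n \choose h}$, grows with $n$, together with the finite sample breakdown point (\ref{eq:breakdown}), here for $\alpha = \frac 1 2$ and $p = 3$:
<<>>=
nn <- c(20, 50, 100, 400) ; p <- 3
hh <- h.alpha.n(1/2, nn, p)   ## h for the default  alpha = 1/2
cbind(n = nn, h = hh, n.subsets = choose(nn, hh),
      eps.star = (nn - hh + 1)/nn)
@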
Based on these raw MCD estimates, $\bigl(\hat{\mu}_0, \hat{\Sigma}_0\bigr)$, % (unless argument \code{raw.only} is true), a reweighting step is performed, i.e., \code{V <- cov.wt(x,w)}, where \code{w} are weights determined by ``outlyingness'' with respect to the scaled raw MCD, using the ``Mahalanobis''-like, robust distances $d_i\bigl(\hat{\mu}_0, \hat{\Sigma}_0\bigr)$, see (\ref{eq:Maha}). Again, a consistency factor and %(if \code{use.correction} is true) a finite sample correction factor %(\code{.MCDcnp2.rew(p, n, alpha)}) are applied. The reweighted covariance is typically considerably more efficient than the raw one, see \citet{PisGvAW02}. The two rescaling factors for the reweighted estimates are returned in \code{cnp2}. Details for the computation of the finite sample correction factors can be found in \citet{PisGvAW02}. \section{Fast MCD Algorithm -- General notation}\label{sec:fastMCD} \paragraph{Note:} In the following, apart from the mathematical notation, we also use variable names, e.g., \code{kmini}, used in the Fortran and sometimes \R{} function code, in \R{} package \CRANpkg{robustbase}. Instead of directly searching for $h$-subsets (among ${n \choose h} \approx {n \choose n/2}$) the basic idea is to start with small subsets of size $p+1$, their center $\mu$ and covariance matrix $\Sigma$, and a corresponding $h$-subset of the $h$ observations with smallest (squared) (``Mahalanobis''-like) distances \begin{align} \label{eq:Maha} d_i = d_i(\mu,\Sigma) := (x_i - \mu)' \Sigma^{-1} (x_i - \mu), \ \ i=1,2,\dots,n, \end{align} and then use concentration steps (``C~steps'') to (locally) improve the chosen set by iteratively computing $\mu$, $\Sigma$, new distances $d_i$ and a new set of size $h$ with smallest distances $d_i(\mu,\Sigma)$. Each C~step is proven to decrease the determinant $\det(\Sigma)$ if $\mu$ and $\Sigma$ did change at all. Consequently, convergence to a local minimum is sure, as the number of $h$-subsets is finite. To make the algorithm \emph{fast} for non small sample size $n$ the data set is split into ``groups'' or ``sub-datasets'' as soon as \begin{eqnarray} \label{eq:nmini} n \ge 2 n_0, \ \mathrm{ where}\ \ n_0 := \mathtt{nmini} \ ( = 300, \byDef). \end{eqnarray} i.e., the default cutoff for ``non small'' is at $n = 600$. %% The \emph{number} of such subsets in the original algorithm is maximally 5, and we now use \begin{eqnarray} \label{eq:kmini} k_M = \code{kmini} \ (= 5, \byDef), \end{eqnarray} as upper limit. As above, we assume from now on that $n \ge 2 n_0$, and let \begin{eqnarray} \label{eq:k-def} k := \floor[\Big]{\frac{n}{n_0}} \ge 2 \end{eqnarray} and now distinguish the two cases, \begin{eqnarray} \label{eq:cases} \begin{cases} A. & k < k_M \iff n < k_M \cdot n_0 \\ B. & k \ge k_M \iff n \ge k_M \cdot n_0 \end{cases} \end{eqnarray} \begin{description} \item[In case A] $k$ (\code{= ngroup}) subsets aka ``groups'' or ``sub datasets'' are used, $k \in\{2,3,\dots,k_M-1\}$, of group sizes $n_j$, $j=1,\dots,k$ (see below). Note that case~A may be empty because of $2 \le k < k_M$, namely if $k_M=2$. Hence, in case~A, we have $k_M \ge 3$. \item[in case B] $k_M$ (\code{= ngroup}) groups each of size $n_0$ are built and in the first stage, only a \emph{subset} of $k_M \cdot n_0 \le n$ observations is used. \end{description} In both cases, the disjoint groups (``sub datasets'') are chosen at random from the $n$ observations. 
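As a small added illustration (not part of the original text), the resulting number of groups (\code{ngroup}) for a few sample sizes, using the default values $n_0 = 300$ and $k_M = 5$:
<<>>=
n0 <- 300 ; kM <- 5                 ## the defaults 'nmini' and 'kmini'
nn <- c(600, 900, 1499, 1500, 5000)
k  <- nn %/% n0                     ## k = floor(n / n0)
data.frame(n = nn, k = k, ngroup = pmin(k, kM),
           case = ifelse(k < kM, "A", "B"))
@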
%% For the group sizes for case~A, $n_j$, $j=1,\dots,k$, we have \begin{align} n_1 = \; & \floor[\Big]{\frac n k} = \floor[\bigg]{\frac{n}{\floor[\big]{\frac{n}{n_0}}}} \ \ (\; \ge n_0 \label{eq:n1})\\ n_j = \; & n_1,\hspace*{2.8em} j = 2,\dots,j_* \\ n_j = \; & n_1 + 1, \ \ \ j = j_* +1,\dots,k, \label{n1-plus-1}\\ & \qquad \mathrm{where}\ \ j_* := k - r \ \in \{1,\dots,k\}, \label{jstar}\\ & \qquad \mathrm{and}\ \ r := n - k n_1 = \label{r-rest} n - k\floor[\big]{\frac n k} \in \{0,1,\dots,k-1\}, \end{align} where the range of $j_*$, $1,\dots,k$ in (\ref{jstar}) is a consequence of the range of the integer division remainder $r \in \{0,1,\dots,k-1\}$ in (\ref{r-rest}). Consequently, (\ref{n1-plus-1}) maybe empty, namely iff $r=0$ ($\iff n = k \cdot n_1$ is a multiple of $k$): $j_* = k$, and all $n_j \equiv n_1$. Considering the range of $n_j$ in case~A, the minimum $n_1 \ge n_0$ in (\ref{eq:n1}) is easy to verify. What is the maximal value of $n_j$ , i.e., an upper bound for $n_{\max} := n_1+1 \ge \max_j n_j$? \ %% %% This is all correct but not useful: %% From (\ref{eq:n1}), $ n/k - 1 < n_1 \le n/k $, and %% from (\ref{eq:k-def}), $n/n_0 - 1 < k \le n/n_0$. %% Putting these two together, we get %% \begin{eqnarray} %% \label{eq:n1-ineq} %% \frac{n^2}{n_0} - 1 \le n/k - 1 < n_1 \le n/k < \frac{n n_0}{n - n_0}, %% \end{eqnarray} %% (the first $\le$ from $\frac{1}{k} \ge \frac{n_0}{n}$; the last $<$ from %% $\frac{1}{k} < \frac 1{n/n_0 -1} = \frac{n_0}{n-n_0}$.) Also, %% from (\ref{eq:k-def}), $n \ge k n_0$ and $n-n_0 \ge (k-1)n_0$ and since we %% are in case~A, $n < n_0 k_M$, which combines to %% \begin{eqnarray} %% \label{eq:nn0} %% \frac{n n_0}{n - n_0} < \frac{(n_0 k_M) n_0}{(k-1)n_0} = \frac{n_0 k_M}{k-1}. %% \end{eqnarray} Consider $n_{1,\max}(k) = \max_{n, \mathrm{given\ } k} n_1 = \max_{n, \mathrm{given\ } k} \floor{\frac n k}$. Given $k$, the maximal $n$ still fulfilling $\floor[\big]{\frac{n}{n_0}} = k$ is $n = (k+1)n_0 - 1$ where $\floor[\big]{\frac{n}{n_0}} = k + \floor[\big]{1 - \frac{1}{n_0}} = k$. Hence, $n_{1,\max}(k) =\floor[\big]{\frac{(k+1)n_0 - 1}{k}} = n_0 + \floor[\big]{\frac{n_0 - 1}{k}}$, and as $k \ge 2$, the maximum is at $k=2$, $\max n_1 = \max_k n_{1,\max}(k) = n_0 + \floor[\big]{\frac{n_0 - 1}{2}} = \floor[\big]{\frac{3 n_0 - 1}{2}}$. Taken together, as $n_j = n_1+1$ is possible, we have \begin{align} \label{eq:nj-range} n_0 \le & n_1 \le \floor[\Big]{\frac{3 n_0 - 1}{2}} \nonumber\\ n_0 \le & n_j \le \floor[\Big]{\frac{3 n_0 + 1}{2}}, \ \ j \ge 2. \end{align} Note that indeed, $\floor[\big]{\frac{3 n_0 + 1}{2}}$ is the length of the auxiliary vector \code{subndex} in the Fortran code. \bibliographystyle{chicago} \bibliography{robustbase} \end{document} robustbase/vignettes/psi_functions.Rnw0000644000176200001440000004152413012615634020015 0ustar liggesusers\documentclass[11pt, a4paper]{article} \usepackage[a4paper, text={16cm,25cm}]{geometry} %\VignetteIndexEntry{Definitions of Psi-Functions Available in Robustbase} %\VignetteDepends{robustbase} \SweaveOpts{prefix.string=psi, eps=FALSE, pdf=TRUE, strip.white=true} \SweaveOpts{width=6, height=4.1, echo=FALSE, fig=TRUE} %% --------------------- !! 
\usepackage{amsmath} \usepackage{amsfonts}% \mathbb \usepackage{natbib} \usepackage[utf8]{inputenc} \newcommand{\abs}[1]{\left| #1 \right|} \DeclareMathOperator{\sign}{sign} \newcommand{\R}{\mathbb{R}} \newcommand{\code}[1]{\texttt{#1}} \newcommand*{\pkg}[1]{\texttt{#1}} \newtheorem{definition}{Definition} %% The following is R's share/texmf/Rd.sty \usepackage{color} \usepackage{hyperref} \definecolor{Blue}{rgb}{0,0,0.8} \definecolor{Red}{rgb}{0.7,0,0} \hypersetup{% hyperindex,% colorlinks={true},% pagebackref,% linktocpage,% plainpages={false},% linkcolor={Blue},% citecolor={Blue},% urlcolor={Red},% pdfstartview={Fit},% pdfview={XYZ null null null}% } <>= # set margins for plots options(SweaveHooks=list(fig=function() par(mar=c(3,3,1.4,0.7), mgp=c(1.5, 0.5, 0)))) ## x axis for plots: x. <- seq(-5, 10, length=1501) require(robustbase) <>= source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) @% = ../inst/xtraR/plot-psiFun.R --> p.psiFun() --> robustbase:::matPlotPsi() {for nice legends; lines ..} \begin{document} \setkeys{Gin}{width=0.9\textwidth} \setlength{\abovecaptionskip}{-5pt} \title{Definitions of $\psi$-Functions Available in Robustbase} \author{Manuel Koller and Martin M\"achler} \maketitle \tableofcontents \section*{Preamble} Unless otherwise stated, the following definitions of functions are given by \citet[p. 31]{MarRMY06}, however our definitions differ sometimes slightly from theirs, as we prefer a different way of \emph{standardizing} the functions. To avoid confusion, we first define $\psi$- and $\rho$-functions. \begin{definition}\label{def.psi} A \emph{$\psi$-function} is a piecewise continuous function $\psi: \R \to \R$ such that \begin{enumerate} \item $\psi$ is odd, i.e., \ $\psi(-x) = -\psi(x) \: \forall x$, \item $\psi(x) \ge 0$ for $x \ge 0$, and $\psi(x) > 0$ for $0 < x < x_r := \sup\{\tilde x : \psi(\tilde x) > 0\}$ \ \ ($x_r > 0$, possibly $x_r = \infty$). \item[3*] Its slope is $1$ at $0$, i.e., $\displaystyle \psi'(0) = 1$. \end{enumerate} Note that `3*' is not strictly required mathematically, but we use it for standardization in those cases where $\psi$ is continuous at 0. Then, it also follows (from 1.) that $\psi(0) = 0$, and we require $\psi(0)=0$ also for the case where $\psi$ is discontinuous in 0, as it is, e.g., for the M-estimator defining the median. \end{definition} \begin{definition} A \emph{$\rho$-function} can be represented by the following % definite integral of a $\psi$-function, \begin{equation}\label{def.rho} \rho(x) = \int_0^x \psi(u) du\;, \end{equation} which entails that $\rho(0) = 0$ and $\rho$ is an even function. \end{definition} A $\psi$-function is called \emph{redescending} if $\psi(x) = 0$ for all $x \ge x_r$ for $x_r < \infty$, and $x_r$ is often called \emph{rejection point}. Corresponding to a redescending $\psi$-function, we define the function $\tilde\rho$, a version of $\rho$ standardized such as to attain maximum value one. Formally, \begin{equation} \label{eq:tilde-rho} \tilde\rho(x) = \rho(x)/\rho(\infty). \end{equation} Note that $\rho(\infty) = \rho(x_r) \equiv \rho(x) \ \forall \abs{x} >= x_r$. $\tilde\rho$ is a $\rho$-function as defined in \citet{MarRMY06} and has been called $\chi$ function in other contexts. For example, in package \pkg{robustbase}, \code{Mchi(x, *)} computes $\tilde\rho(x)$, whereas \code{Mpsi(x, *, deriv=-1)} (``(-1)-st derivative'' is the primitive or antiderivative)) computes $\rho(x)$, both according to the above definitions. 
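As a small added numerical check (not part of the original text), the relation $\tilde\rho(x) = \rho(x)/\rho(\infty)$ of (\ref{eq:tilde-rho}) can be verified, e.g., for the bisquare family, where \code{MrhoInf(cc, psi)} computes $\rho(\infty)$:
<<echo=TRUE, fig=FALSE>>=
cc <- 4.685                       ## bisquare tuning constant, see below
x0 <- c(0.5, 1, 2, 4.685, 10)
all.equal(Mchi(x0, cc, "bisquare"),
          Mpsi(x0, cc, "bisquare", deriv = -1) / MrhoInf(cc, "bisquare"))
@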
\textbf{Note:} An alternative slightly more general definition of \emph{redescending} would only require $\rho(\infty) := \lim_{x\to\infty}\rho(x)$ to be finite. E.g., \texttt{"Welsh"} does \emph{not} have a finite rejection point, but \emph{does} have bounded $\rho$, and hence well defined $\rho(\infty)$, and we \emph{can} use it in \texttt{lmrob()}.\footnote{E-mail Oct.~18, 2014 to Manuel and Werner, proposing to change the definition of ``redescending''.} %% \section{Weak Redescenders} %% \subsection{t_nu score functions} %% t_1 (=Cauchy) has been propagated as "Lorentzian merit function" %% regression for outlier detection \paragraph{Weakly redescending $\psi$ functions.}\ Note that the above definition does require a finite rejection point $x_r$. Consequently, e.g., the score function $s(x) = -f'(x)/f(x)$ for the Cauchy ($= t_1$) distribution, which is $s(x) = 2x/(1+x^2)$ and hence non-monotone and ``re descends'' to 0 for $x\to \pm\infty$, and $\psi_C(x) := s(x)/2$ also fulfills ${\psi_C}'(0) = 1$, but it has $x_r=\infty$ and hence $\psi_C()$ is \emph{not} a redescending $\psi$-function in our sense. As they appear e.g. in the MLE for $t_\nu$, we call $\psi$-functions fulfilling $\lim_{x\to\infty}\psi(x) = 0$ \emph{weakly redescending}. Note that they'd naturally fall into two subcategories, namely the one with a \emph{finite} $\rho$-limit, i.e. $\rho(\infty) := \lim_{x\to\infty}\rho(x)$, and those, as e.g., the $t_\nu$ score functions above, for which $\rho(x)$ is unbounded even though $\rho' = \psi$ tends to zero. %% --> ../../TODO section 'Psi/Rho/Chi/Wgt Functions' %% ~~~~~~~~~~ %% %% FIXME: where?? MM: can no longer find it in Hampel et al(1986) \citet{hamfrrs86}. %% FIXME: 0) Mention our psi_func class // and the C interface for "the other" functions %% ----- i.e., we currently have *both* and in addition there is all %% the (to be *deprecated* !) ../R/biweight-funs.R (& ../man/tukeyChi.Rd & ../man/tukeyPsi1.Rd) %% %% FIXME: 1) explain plot() {the plot method of psi_func} %% FIXME: 2) Show how to compute asymptotic efficiency and breakdown point: %% ------- %% a) end of ../../tests/psi-rho-etc.R has aeff.P() and bp.P() and chkP() %% which now uses the psi_func class to compute these *analytically* %% b) Of course, Manuel had used the numeric integration only, %% in ../../R/lmrob.MM.R, lmrob.efficiency(psi, cc, ...) and lmrob.bp(psi, cc, ...) %% ~~~~~~~~~~~~~~~~~~ %% c) *REALLY* nice general solution is via PhiI() in ../../R/psi-rho-funs.R %% for all piecewise polynomial psi()/rho() ~~~~~~~~~~~~~~~~~~~~~~ %%\clearpage \section{Monotone $\psi$-Functions} Monotone $\psi$-functions lead to convex $\rho$-functions such that the corresponding M-estimators are defined uniquely. Historically, the ``Huber function'' was the first $\psi$-function, proposed by Peter Huber in \citet{HubP64}. \clearpage \subsection{Huber} The family of Huber functions is defined as, \begin{align*} \rho_k(x) = {}& \left\{ \begin{array}{ll} \frac{1}{2} x^2 & \mbox{ if } \abs{x} \leq k \\ k(\abs{x} - \frac{k}{2})& \mbox{ if } \abs{x} > k \end{array} \right. \;,\\ \psi_k(x) = {} & \left\{ \begin{array}{ll} x & \mbox{ if } \abs{x} \leq k \\ k \ \sign(x)& \mbox{ if } \abs{x} > k %% -k & \mbox{ if } x < -k \\ %% k & \mbox{ if } x > k \end{array} \right. \;. \end{align*} The constant $k$ for $95\%$ efficiency of the regression estimator is $1.345$.
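As a small added check (assuming that, as for the redescending families below, this $\psi$-function is available through \code{Mpsi()} under the name \code{"huber"}), the formula for $\psi_k$ is easily verified numerically:
<<echo=TRUE, fig=FALSE>>=
k  <- 1.345
x0 <- c(-3, -1, 0, 0.5, 2)
all.equal(Mpsi(x0, k, "huber"),       ## psi_k(x)
          pmin(pmax(x0, -k), k))      ## = x, truncated at +- k
@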
\begin{figure}[h] \centering <>= plot(huberPsi, x., ylim=c(-1.4, 5), leg.loc="topright", main=FALSE) @ \caption{Huber family of functions using tuning parameter $k = 1.345$.} \end{figure} \bigskip \section{Redescenders} For the MM-estimators and their generalizations available via \texttt{lmrob()} (and for some methods of \texttt{nlrob()}), the $\psi$-functions are all redescending, i.e., with finite ``rejection point'' $x_r = \sup\{t; \psi(t) > 0\} < \infty$. From \texttt{lmrob}, the psi functions are available via \texttt{lmrob.control}, or more directly, \texttt{.Mpsi.tuning.defaults}, <>= names(.Mpsi.tuning.defaults) @ %$ and their $\psi$, $\rho$, $\psi'$, and weight function $w(x) := \psi(x)/x$, are all computed efficiently via C code, and are defined and visualized in the following subsections. \clearpage \subsection{Bisquare} Tukey's bisquare (aka ``biweight'') family of functions is defined as, \begin{equation*} \tilde\rho_k(x) = \left\{ \begin{array}{cl} 1 - \bigl(1 - (x/k)^2 \bigr)^3 & \mbox{ if } \abs{x} \leq k \\ 1 & \mbox{ if } \abs{x} > k \end{array} \right.\;, \end{equation*} with derivative ${\tilde\rho_k}'(x) = 6\psi_k(x) / k^2$ where, \begin{equation*} \psi_k(x) = x \left( 1 - \left(\frac{x}{k}\right)^2\right)^2 \cdot I_{\{\abs{x} \leq k\}}\;. \end{equation*} The constant $k$ for $95\%$ efficiency of the regression estimator is $4.685$ and the constant for a breakdown point of $0.5$ of the S-estimator is $1.548$. Note that the \emph{exact} default tuning constants for M- and MM- estimation in \pkg{robustbase} are available via \code{.Mpsi.tuning.default()} and \code{.Mchi.tuning.default()}, respectively, e.g., here, % \begin{small} <>= print(c(k.M = .Mpsi.tuning.default("bisquare"), k.S = .Mchi.tuning.default("bisquare")), digits = 10) @ % \end{small} and that the \code{p.psiFun(.)} utility is available via %\begin{small} <>= <> @ %\end{small} %\enlargethispage{3ex} \begin{figure}[h] \centering <>= p.psiFun(x., "biweight", par = 4.685) @ \caption{Bisquare family functions using tuning parameter $k = 4.685$.} \end{figure} \clearpage \subsection{Hampel} The Hampel family of functions \citep{hamfrrs86} is defined as, \begin{align*} \tilde\rho_{a, b, r}(x) ={}& \left\{ \begin{array}{ll} \frac{1}{2} x^2 / C & \abs{x} \leq a \\ \left( \frac{1}{2}a^2 + a(\abs{x}-a)\right) / C & a < \abs{x} \leq b \\ \frac{a}{2}\left( 2b - a + (\abs{x} - b) \left(1 + \frac{r - \abs{x}}{r-b}\right) \right) / C & b < \abs{x} \leq r \\ 1 & r < \abs{x} \end{array} \right. \;, \\ \psi_{a, b, r}(x) ={}& \left\{ \begin{array}{ll} x & \abs{x} \leq a \\ a \ \sign(x) & a < \abs{x} \leq b \\ a \ \sign(x) \frac{r - \abs{x}}{r - b}& b < \abs{x} \leq r \\ 0 & r < \abs{x} \end{array} \right.\;, \end{align*} where $ C := \rho(\infty) = \rho(r) = \frac{a}{2}\left( 2b - a + (r - b) \right) = \frac{a}{2}(b-a + r)$. As per our standardization, $\psi$ has slope $1$ in the center. The slope of the redescending part ($x\in[b,r]$) is $-a/(r-b)$. If it is set to $-\frac 1 2$, as recommended sometimes, one has \begin{equation*} r = 2a + b\;. \end{equation*} Here however, we restrict ourselves to $a = 1.5 k$, $b = 3.5 k$, and $r = 8k$, hence a redescending slope of $-\frac 1 3$, and vary $k$ to get the desired efficiency or breakdown point. The constant $k$ for $95\%$ efficiency of the regression estimator is $0.902$ (0.9016085, to be exact) and the one for a breakdown point of $0.5$ of the S-estimator is $0.212$ (i.e., 0.2119163). 
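Analogously to the bisquare case above (this chunk is an added illustration), the exact default constants $(a, b, r)$ for M- and S-estimation are available via
<<echo=TRUE, fig=FALSE>>=
rbind(M = .Mpsi.tuning.default("hampel"),   ## = 0.9016085 * c(1.5, 3.5, 8)
      S = .Mchi.tuning.default("hampel"))   ## = 0.2119163 * c(1.5, 3.5, 8)
@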
%% --> ../R/lmrob.MM.R, .Mpsi.tuning.defaults .Mchi.tuning.defaults \begin{figure}[h] \centering <>= ## see also hampelPsi p.psiFun(x., "Hampel", par = ## Default, but rounded: round(c(1.5, 3.5, 8) * 0.9016085, 1)) @ \caption{Hampel family of functions using tuning parameters $0.902 \cdot (1.5, 3.5, 8)$.} \end{figure} \clearpage \subsection{GGW}\label{ssec:ggw} The Generalized Gauss-Weight function, or \emph{ggw} for short, is a generalization of the Welsh $\psi$-function (subsection \ref{ssec:Welsh}). In \citet{ks2011} it is defined as, \begin{equation*} %% \label{eq:ggw} \psi_{a, b, c}(x) = \left\{ \begin{array}{ll} x & \abs{x} \leq c \\ \exp\left(-\frac{1}{2}\frac{(\abs{x} - c)^b}{a}\right)x & \abs{x} > c \end{array} \right. \;. \end{equation*} Our constants, fixing $b=1.5$, and minimial slope at $- \frac 1 2$, for $95\%$ efficiency of the regression estimator are $a = 1.387$, $b = 1.5$ and $c = 1.063$, and those for a breakdown point of $0.5$ of the S-estimator are $a = 0.204$, $b = 1.5$ and $c = 0.296$: <>= cT <- rbind(cc1 = .psi.ggw.findc(ms = -0.5, b = 1.5, eff = 0.95 ), cc2 = .psi.ggw.findc(ms = -0.5, b = 1.5, bp = 0.50)); cT @ Note that above, \code{cc*[1]}$= 0$, \code{cc*[5]}$ = \rho(\infty)$, and \code{cc*[2:4]}$ = (a, b, c)$. To get this from $(a,b,c)$, you could use <>= ipsi.ggw <- .psi2ipsi("GGW") # = 5 ccc <- c(0, cT[1, 2:4], 1) integrate(.Mpsi, 0, Inf, ccc=ccc, ipsi=ipsi.ggw)$value # = rho(Inf) @ \begin{figure}[h] \centering <>= p.psiFun(x., "GGW", par = c(-.5, 1, .95, NA)) @ \caption{GGW family of functions using tuning parameters $a=1.387$, $b=1.5$ and $c=1.063$.} \end{figure} \clearpage \subsection{LQQ} The ``linear quadratic quadratic'' $\psi$-function, or \emph{lqq} for short, was proposed by \citet{ks2011}. It is defined as, \begin{equation*} \psi_{b,c,s}(x) = \left\{ \begin{array}{ll} x & \abs{x} \leq c \\ \sign(x)\left(\abs{x} - \frac{s}{2b}\left(\abs{x} - c\right)^2 \right) & c < \abs{x} \leq b + c \\ \sign(x)\left(c+b-\frac{bs}{2} + \frac{s-1}{a} \left(\frac{1}{2}\tilde x^2 - a\tilde x\right) \right) & b + c < \abs{x} \leq a + b + c \\ 0 & \mbox{otherwise,} \end{array} \right. \end{equation*} where \begin{equation} \tilde x := \abs{x} - b - c \ \ \mathrm{and}\ \ a := (2c + 2b - bs)/(s-1).\label{lqq.a} \end{equation} The parameter $c$ determines the width of the central identity part. The sharpness of the bend is adjusted by $b$ while the maximal rate of descent is controlled by $s$ ($s = 1 - \min_x\psi'(x) > 1$). From (\ref{lqq.a}), the length $a$ of the final descent to $0$ is a function of $b$, $c$ and $s$. <>= cT <- rbind(cc1 = .psi.lqq.findc(ms= -0.5, b.c = 1.5, eff=0.95, bp=NA ), cc2 = .psi.lqq.findc(ms= -0.5, b.c = 1.5, eff=NA , bp=0.50)) colnames(cT) <- c("b", "c", "s"); cT @ If the minimal slope is set to $-\frac 1 2$, i.e., $s = 1.5$, and $b/c = 3/2 = 1.5$, the constants for $95\%$ efficiency of the regression estimator are $b=1.473$, $c=0.982$ and $s=1.5$, and those for a breakdown point of $0.5$ of the S-estimator are $b=0.402$, $c=0.268$ and $s=1.5$. 
\begin{figure}[h] \centering <>= p.psiFun(x., "LQQ", par = c(-.5,1.5,.95,NA)) @ \caption{LQQ family of functions using tuning parameters $b=1.473$, $c=0.982$ and $s=1.5$.} \end{figure} \clearpage \subsection{Optimal} The optimal $\psi$ function as given by \citet[Section~5.9.1]{MarRMY06}, \begin{equation*} \psi_c(x) = \sign(x)\left(-\frac{\varphi'(\abs{x}) + c} {\varphi(\abs{x})}\right)_+\;, \end{equation*} where $\varphi$ is the standard normal density, $c$ is a constant and $t_+ := \max(t, 0)$ denotes the positive part of $t$. Note that the \pkg{robustbase} implementation uses rational approximations originating from the \pkg{robust} package's implementation. That approximation also avoids an anomaly for small $x$ and has a very different meaning of $c$. The constant for $95\%$ efficiency of the regression estimator is $1.060$ and the constant for a breakdown point of $0.5$ of the S-estimator is $0.405$. \begin{figure}[h] \centering <>= p.psiFun(x., "optimal", par = 1.06, leg.loc="bottomright") @ \caption{`Optimal' family of functions using tuning parameter $c = 1.06$.} \end{figure} \clearpage \subsection{Welsh}\label{ssec:Welsh} The Welsh $\psi$ function is defined as, %% FIXME: REFERENCE MISSING %\def\xk{\frac{x}{k}} \def\xk{x/k} %\def\xkdt{-\frac{1}{2}\left(\xk\right)^2} \def\xkdt{- \left(\xk\right)^2 / 2} \begin{align*} \tilde\rho_k(x) ={}& 1 - \exp\bigl(\xkdt\bigr) \\ \psi_k(x) ={}& k^2\tilde\rho'_k(x) = x\exp\bigl(\xkdt\bigr) \\ \psi'_k(x) ={}& \bigl(1 - \bigl(\xk\bigr)^2\bigr) \exp\bigl(\xkdt\bigr) \end{align*} The constant $k$ for $95\%$ efficiency of the regression estimator is $2.11$ and the constant for a breakdown point of $0.5$ of the S-estimator is $0.577$. Note that GGW (subsection \ref{ssec:ggw}) is a 3-parameter generalization of Welsh, matching for $ b = 2 $, $ c = 0 $, and $ a = k^2$ (see R code there): <>= ccc <- c(0, a = 2.11^2, b = 2, c = 0, 1) (ccc[5] <- integrate(.Mpsi, 0, Inf, ccc=ccc, ipsi = 5)$value) # = rho(Inf) stopifnot(all.equal(Mpsi(x., ccc, "GGW"), ## psi[ GGW ](x; a=k^2, b=2, c=0) == Mpsi(x., 2.11, "Welsh")))## psi[Welsh](x; k) @ \begin{figure}[h] \centering <>= p.psiFun(x., "Welsh", par = 2.11) @ \caption{Welsh family of functions using tuning parameter $k = 2.11$.} \end{figure} \bibliographystyle{chicago} \bibliography{robustbase} \end{document} robustbase/vignettes/robustbase.bib0000644000176200001440000001361012737467727017311 0ustar liggesusers@PREAMBLE{ " " # "\providecommand{\noop}[1]{} " # " \ifx\undefined\SfSbibINI\else\SfSbibINI\fi" } @PREAMBLE{ " " # "\providecommand{\noop}[1]{} " # " \ifx\undefined\SfSbibINI\else\SfSbibINI\fi" } @Manual{robustbase-pkg, title = {robustbase: Basic Robust Statistics}, author = {Martin Maechler and Valentin Todorov and Andreas Ruckstuhl and Matias Salibian-Barrera and Manuel Koller and Eduardo L. T. Conceicao}, year = 2016, note = {R package version 0.92-6}, url = {http://CRAN.R-project.org/package=robustbase}, url = {http://robustbase.r-forge.r-project.org/}, } @article{RouPvD99, author = {Peter J. 
Rousseeuw and Katrien van~Driessen}, title = {A Fast Algorithm for the Minimum Covariance Determinant Estimator}, year = 1999, journal = {Technometrics}, volume = 41, pages = {212--223}, number = 3, month = aug, issn = {0040-1706}, numpages = 12, url = {http://dx.doi.org/10.2307/1270566}, doi = {10.2307/1270566}, publisher = {American Society for Quality Control and American Statistical Association}, address = {Alexandria, Va, USA}, keywords = {breakdown value, multivariate location and scatter, outlier detection, regression, robust estimation}, } @article{PisGvAW02, author = {Pison, G. and Van Aelst, S. and Willems, G.}, year = 2002, title = {Small sample corrections for LTS and MCD}, journal = {Metrika}, volume = 55, pages = {111--123}, number = {1-2}, doi = {10.1007/s001840200191}, issn = {0026-1335}, url = {http://dx.doi.org/10.1007/s001840200191}, publisher = {Springer-Verlag Berlin Heidelberg}, keywords = {Key words: Robustness; Least Trimmed Squares estimator; Minimum Covariance Determinant estimator; Bias}, language = {English} } %% ------ was ./lmrob_simulation.bib ------------------------ %% ~~~~~~~~~~~~~~~~~~~~ @Article{berrendero2007maximum, title = {{On the maximum bias functions of MM-estimates and constrained M-estimates of regression}}, author = {Berrendero, J.R. and Mendes, B.V.M. and Tyler, D.E.}, journal = {Annals of statistics}, volume = 35, number = 1, pages = 13, year = 2007, publisher = {IMS INSTITUTE OF MATHEMATICAL STATISTICS} } @TechReport{croux03, author = {Croux, C. and Dhaene, G. and Hoorelbeke, D.}, title = {Robust standard errors for robust estimators}, institution = {Dept. of Applied Economics, K.U. Leuven}, year = 2003 } @Article{fernandez1998bayesian, title = {On Bayesian Modeling of Fat Tails and Skewness}, author = {Fern{\'a}ndez, C. and Steel, M.F.J.}, journal = {Journal of the American Statistical Association}, volume = 93, number = 441, pages = {359--371}, year = 1998, publisher = {American Statistical Association} } @article{HubP64, author = {Peter J. Huber}, title = "Robust estimation of a location parameter", year = 1964, journal = {Ann. Math. Statist.}, volume = 35, pages = {73--101}, } @Book{hubpr09, author = {Peter J. Huber and Elvezio M. Ronchetti}, title = {Robust Statistics, Second Edition}, publisher = {Wiley and Sons Inc.}, address = {NY}, year = 2009 } @Article{ks2011, title = "Sharpening Wald-type inference in robust regression for small samples", journal = "Computational Statistics \& Data Analysis", volume = 55, number = 8, pages = "2504--2515", year = 2011, issn = "0167-9473", doi = "DOI: 10.1016/j.csda.2011.02.014", url = "http://www.sciencedirect.com/science/article/pii/S0167947311000739", author = "Manuel Koller and Werner A. Stahel", keywords = "MM-estimator", keywords1 = "Robust regression", keywords2 = "Robust inference" } @Misc{ks2014, author = "Manuel Koller and Werner A. 
Stahel", year = 2014, title = {Nonsingular subsampling for regression {S}~estimators with categorical predictors}, journal = {under review (2012 version from arXiv)}, keywords = "MM-estimator", keywords1 = "Robust regression", } @Comment url = {http://arxiv.org/abs/1208.5595}, @Misc{kolm2012, author = {Manuel Koller}, year = 2012, title = {Nonsingular subsampling for S-estimators with categorical predictors}, url = {http://arxiv.org/abs/1208.5595}, keywords = "MM-estimator", keywords1 = "Robust regression", } @Article{maronna2009correcting, title = {Correcting {MM} estimates for "fat" data sets}, volume = 54, number = 12, issn = "0167-9473", risfield_0_m3 = "doi: DOI: 10.1016/j.csda.2009.09.015", url = "http://www.sciencedirect.com/science/article/B6V8V-4X6VMB1-5/2/3a9a08575ea5e5e69ad06d720c627ec9" , author = "Ricardo A. Maronna and Victor J. Yohai", journal = "Computational Statistics \& Data Analysis", pages = "3168--3173", year = 2010 } @Book{MarRMY06, author = {Ricardo A. Maronna and R. Douglas Martin and Victor J. Yohai}, title = {Robust Statistics, Theory and Methods}, year = 2006, publisher = {John Wiley \& Sons, Ltd}, pages = 408, ISBN = {0-470-01092-4}, series = {Wiley Series in Probility and Statistics}, } @Book{hamfrrs86, author = {Frank Hampel and Elvezio Ronchetti and Peter Rousseeuw and Werner Stahel}, title = {Robust Statistics: The Approach Based on Influence Functions}, year = 1986, address = {N.Y.}, publisher = {Wiley} } @Manual{R-Lang, title = {R: A Language and Environment for Statistical Computing}, author = {{R Core Team}}, organization = {R Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2016}, url = {https://www.R-project.org/}, } @Article{RenOVicM2010, author = {Olivier Renaud and Maria-Pia Victoria-Feser}, title = {A robust coefficient of determination for regression}, journal = {Journal of Statistical Planning and Inference}, year = 2010, volume = 140, pages = {1852--1862}, annote = {robust $R^2$} } robustbase/R/0000755000176200001440000000000014555212560012631 5ustar liggesusersrobustbase/R/lmrob.M.S.R0000644000176200001440000001646314440373076014500 0ustar liggesuserslmrob.lar <- function(x, y, control = lmrob.control(), ...) { ## LAR : Least Absolute Residuals -- i.e. L_1 M-estimate ## this function is identical to lmRob.lar of the robust package ## '...': to be called as 'init(**, mf)' from lmrob() x <- as.matrix(x) p <- ncol(x) n <- nrow(x) stopifnot(p > 0, n >= p, length(y) == n, is.numeric(control$rel.tol)) storage.mode(x) <- "double" storage.mode(y) <- "double" bet0 <- 0.773372647623 ## bet0 = pnorm(0.75); only for normalizing scale=SIGMA tmpn <- double(n) tmpp <- double(p) z1 <- .Fortran(rllarsbi, ##-> ../src/rllarsbi.f x, y, as.integer(n), as.integer(p), as.integer(n), as.integer(n), as.double(control$rel.tol), NIT=integer(1), K=integer(1), KODE=integer(1), SIGMA=double(1), THETA=tmpn, RS=tmpn, SC1=tmpn, SC2=tmpp, SC3=tmpp, SC4=tmpp, BET0=as.double(bet0))[c("THETA","SIGMA","RS","NIT","KODE")] if (z1[5] > 1) stop("calculations stopped prematurely in rllarsbi\n", "(probably because of rounding errors).") names(z1) <- c("coefficients", "scale", "residuals", "iter", "status") ## c("THETA", "SIGMA", "RS", "NIT", "KODE") z1$converged <- TRUE length(z1$coefficients) <- p z1 } splitFrame <- function(mf, x = model.matrix(mt, mf), type = c("f","fi", "fii")) { mt <- attr(mf, "terms") type <- match.arg(type) x <- as.matrix(x) p <- ncol(x) ## --- split categorical and interactions of categorical vars. 
## from continuous variables factors <- attr(mt, "factors") factor.idx <- attr(mt, "dataClasses") %in% c("factor", "character") if (!any(factor.idx)) ## There are no factors return(list(x1.idx = rep.int(FALSE, p), x1=matrix(NA_real_,nrow(x),0L), x2=x)) switch(type, ## --- include interactions cat * cont in x1: fi = { factor.asgn <- which(factor.idx %*% factors > 0) }, ## --- include also continuous variables that interact with factors in x1: ## make sure to include interactions of continuous variables ## interacting with categorical variables, too fii = { factor.asgn <- numeric(0) factors.cat <- factors factors.cat[factors.cat > 0] <- 1L ## fix triple+ interactions factors.cat[, factor.idx %*% factors == 0] <- 0L for (i in 1:ncol(factors)) { comp <- factors[,i] > 0 ## if any of the components is a factor: include in x1 and continue if (any(factor.idx[comp])) { factor.asgn <- c(factor.asgn, i) } else { ## if there is an interaction of this term with a categorical var. tmp <- colSums(factors.cat[comp,,drop=FALSE]) >= sum(comp) if (any(tmp)) { ## if no other continuous variables are involved ## include in x1 and continue ## if (identical(factors[!comp, tmp], factors.cat[!comp, tmp])) if (!all(colSums(factors[!factor.idx & !comp, tmp, drop=FALSE]) > 0)) factor.asgn <- c(factor.asgn, i) } } } }, ## --- do not include interactions cat * cont in x1: f = { factor.asgn <- which(factor.idx %*% factors & !(!factor.idx) %*% factors) }, stop("unknown split type")) x1.idx <- attr(x, "assign") %in% c(0, factor.asgn) ## also include intercept names(x1.idx) <- colnames(x) ## x1: factors and (depending on type) interactions of / with factors ## x2: continuous variables list(x1 = x[, x1.idx, drop=FALSE], x1.idx = x1.idx, x2 = x[, !x1.idx, drop=FALSE]) } ##' Compute M-S-estimator for linear regression ---> ../man/lmrob.M.S.Rd lmrob.M.S <- function(x, y, control, mf, split = splitFrame(mf, x, control$split.type)) { if (ncol(split$x1) == 0) { warning("No categorical variables found in model. Reverting to S-estimator.") return(lmrob.S(x, y, control)) } if (ncol(split$x2) == 0) { warning("No continuous variables found in model. Reverting to L1-estimator.") return(lmrob.lar(x, y, control)) } ## this is the same as in lmrob.S(): if (length(seed <- control$seed) > 0) { # not by default if(length(seed) < 3L || seed[1L] < 100L) stop("invalid 'seed'. Must be compatible with .Random.seed !") if(!is.null(seed.keep <- get0(".Random.seed", envir = .GlobalEnv, inherits = FALSE))) on.exit(assign(".Random.seed", seed.keep, envir = .GlobalEnv)) assign(".Random.seed", seed, envir = .GlobalEnv) } x1 <- split$x1 x2 <- split$x2 storage.mode(x1) <- "double" storage.mode(x2) <- "double" storage.mode(y) <- "double" c.chi <- .psi.conv.cc(control$psi, control$tuning.chi) traceLev <- as.integer(control$trace.lev) z <- .C(R_lmrob_M_S, ## NB: If you change this, adapt ../inst/xtraR/m-s_fns.R x1, x2, y, res = double(length(y)), n = length(y), p1 = ncol(x1), p2 = ncol(x2), nResample = as.integer(control$nResample), max_it_scale= as.integer(control$maxit.scale), scale = double(1), b1 = double(ncol(x1)), b2 = double(ncol(x2)), tuning_chi = as.double(c.chi), ipsi = .psi2ipsi(control$psi), bb = as.double(control$bb), K_m_s = as.integer(control$k.m_s), max_k = as.integer(control$k.max), rel_tol = as.double(control$rel.tol), inv_tol = as.double(control$solve.tol), scale_tol = as.double(control$scale.tol), zero.tol = as.double(control$zero.tol), converged = logical(1), trace_lev = traceLev, ## well, these 3 are for the experts ... 
still why not arguments? orthogonalize=TRUE, subsample=TRUE, descent=TRUE, mts = as.integer(control$mts), ss = .convSs(control$subsampling) )[c("b1","b2", "res","scale", "converged")] conv <- z$converged ## FIXME? warning in any case if 'conv' is not ok ?? if(!conv && traceLev) warning("M-S estimator did *not* converge") ## coefficients : idx <- split$x1.idx cf <- numeric(length(idx)) cf[ idx] <- z$b1 cf[!idx] <- z$b2 ## set method argument in control control$method <- 'M-S' obj <- list(coefficients = cf, scale = z$scale, residuals = z$res, rweights = lmrob.rweights(z$res, z$scale, control$tuning.chi, control$psi), ## ../src/lmrob.c : m_s_descent() notes that convergence is *not* guaranteed converged = TRUE, descent.conv = conv, # the real truth .. control = control) if (control$method %in% control$compute.outlier.stats) obj$ostats <- outlierStats(obj, x, control) obj } robustbase/R/biweight-funs.R0000644000176200001440000000446012420464132015524 0ustar liggesusers#### These Chi() and Psi() used to be called by lmrob() functions #### but no longer --> Have interface via .psi2ipsi() and .C(..) #### FIXME: integrate these with the psi-rho objects --> ./psi-rho-funs.R ## In the vignette ../vignettes/psi_functions.Rnw, we call this ## scaled \rho "\tilde{\rho}" ##- Maronna et al (2006) define their rho to be standardized ##- (only if possible <==> only if redescending psi !) ##- {TODO: *Where* in the Hampel_et_al book ??? } ## Hampel et al (1986): \chi(x) := \rho(x) / \rho(\infty) ## ====== ## <==> chi() is a scaled version of rho(.) such that ## \chi(\infty) = \max_x \chi(x) = 1 ## ==> Chi'() is just a scaled version of psi() : ## with current scale (new for psi()): ## i) Chi'(x, c) == (6/c^2) Psi(x,c) ## ==> ii) Chi''(x,c) == (6/c^2) Psi'(x,c) ## and Chi (x, c) == (6/c^2) Rho(x,c), where Psi(.) = Rho'(.) tukeyChi <- function(x, cc, deriv = 0) { .Deprecated("Mchi") x <- x / cc x2 <- x*x out <- x2 > 1 switch(deriv + 1, { ## deriv = 0 r <- x2*(3 + x2*(-3 + x2)) r[out] <- 1 }, { ## deriv = 1 r <- 6/cc * x * (1-x2)^2 r[out] <- 0 }, { ## deriv = 2 r <- 6/(cc^2) * (1 - x2) * (1 - 5*x2) r[out] <- 0 }, stop("deriv must be in {0,1,2}")) r } ## we call this '*Psi1' such as to not be confounded with ## the (future!) S4 object tukeyPsi() ! tukeyPsi1 <- function(x, cc, deriv = 0) { .Deprecated("Mpsi") ## This version of psi() is scaled such that psi'(0) = 1 x2 <- (x / cc)^2 if(deriv < 0) out <- x2 > 1 else in. <- x2 < 1 switch(deriv + 2, { ## deriv = -1 c. <- cc^2/6 r <- c.*(1 - (1- x2)^3) r[out] <- c. r }, { ## deriv = 0 in. * x * (1-x2)^2 }, { ## deriv = 1 in. * (1 - x2) * (1 - 5*x2) }, { ## deriv = 2 in. 
* 4*x/cc^2 * (5*x2 - 3) }, stop("deriv must be in {-1,0,1,2}")) } if(FALSE) tukeyPsi1Ex <- function (x, cc, deriv = 0) ## tukeyPsi1Ex <- function (x, cc = 4.685, deriv = 0) ## ^^^^^^^^^ { ## This version of psi() is scaled such that psi'(0) = 1 u <- pmin((x/cc)^2, 1) if(deriv < 0) return((1 - (1-u)^3)*cc^2/6) if(deriv == 0) return(x * (1 - u)^2) return((1 - u) * (1 - 5 * u)) } robustbase/R/adjoutlyingness.R0000644000176200001440000002147514052244671016207 0ustar liggesusers#### -*- mode: R; kept-new-versions: 30; kept-old-versions: 20 -*- #### MC-adjusted Outlyingness #### ------------------------ ### ### Original code from the web site from the Antwerpen Statistics Group : ### http://www.agoras.ua.ac.be/Robustn.htm ### which has a "MC" section and for the software links to ### ftp://ftp.win.ua.ac.be/pub/software/agoras/newfiles/mc.tar.gz ### and that contains mcrsoft/adjoutlyingness.R ##_NEW_ (2014): moved from Antwerpen to Leuwen, ## ===> http://wis.kuleuven.be/stat/robust/software ## has several links to 'robustbase', and S-plus code ## http://wis.kuleuven.be/stat/robust/Programs/adjusted-boxplot/adjusted-boxplot.ssc ## (copy in ../misc/Adjusted-boxplot.ssc ## MM [ FIXME ]: ## ----------- ## 1) Use *transposed* B[] and A[] (now called 'E') matrices -- DONE ## 2) use IQR() instead of quantile(., .75) - quantile(., .25) ##--> but only *after* testing original code ## ^^^^^^^^^^^^^^^^^^^^^^^^ adjOutlyingness <- function(x, ndir = 250, p.samp = p, clower=4, cupper=3, IQRtype = 7, alpha.cutoff = 0.75, coef = 1.5, qr.tol = 1e-12, keep.tol = 1e-12, only.outlyingness = FALSE, maxit.mult = max(100, p), trace.lev = 0, ## these are all passed to mc() {when applied to the projected data} mcReflect = n <= 100, mcScale = TRUE, mcMaxit = 2*maxit.mult, mcEps1 = 1e-12, mcEps2 = 1e-15, mcTrace = max(0, trace.lev-1)) ## Skewness-Adjusted Outlyingness { x <- data.matrix(x) n <- nrow(x) p <- ncol(x) stopifnot(n >= 1, p >= 1, p.samp >= p, is.numeric(x)) if (p <= n) { B <- matrix(0, p, ndir) E <- matrix(1, p.samp, 1) x. <- unname(x) # for speed in subsequent subsetting and solve maxit <- as.integer(maxit.mult * ndir) ## ^^ original code had 'Inf', i.e. no iter.count check; ## often, maxit == ndir would suffice if(trace.lev >= 2) p10 <- 10 ^ max(0, min(6 - trace.lev, floor(log10(maxit)))) i <- 1L it <- 0L while (i <= ndir && (it <- it+1L) < maxit) { ## we sort to get *identical* projections instead of "almost" P <- x.[sort(sample.int(n, p.samp)), , drop=FALSE] if ((qrP <- qr(P, tol = qr.tol))$rank == p) { B[,i] <- solve(qrP, E, tol = qr.tol) ## if(trace.lev >= 2) cat(" it=",it,"; found direction # ", i,"\n", sep="") i <- i+1L } else if(trace.lev >= 2) { if(it %% p10 == 0) cat(" it=",it,": rank(qr(P ..)) = ", qrP$rank, " < p = ",p,"\n", sep="") } } if(it == maxit) { rnk.x <- qr(x, tol = qr.tol)$rank if(rnk.x < p) stop("Matrix 'x' is not of full rank: rankM(x) = ",rnk.x," < p = ",p, "\n Use fullRank(x) instead") ## else stop("** direction sampling iterations were not sufficient. 
Maybe try increasing 'maxit.mult'") } Bnorm <- sqrt(colSums(B^2)) Nx <- mean(abs(x.)) ## so the comparison is scale-equivariant: keep <- Bnorm*Nx > keep.tol if(!all(keep)) { if(trace.lev) cat("keeping ", sum(keep), "(out of", length(keep),") normalized directions\n") Bnorm <- Bnorm[ keep ] B <- B[, keep , drop=FALSE] } else if(trace.lev) cat("keeping *all* ",length(keep)," normalized directions\n") B <- B / rep(Bnorm, each = nrow(B)) # normalized B {called 'A' in orig.code} } else { stop('More dimensions than observations: not yet implemented') ## MM: In LIBRA(matlab) they have it implemented: ## seed=0; ## nrich1=n*(n-1)/2; ## ndirect=min(250,nrich1); ## true = (ndirect == nrich1); ## B=extradir(x,ndir,seed,true); % n*ri ## ======== % Calculates ndirect directions through ## % two random choosen data points from data ## for i=1:size(B,1) ## Bnorm(i)=norm(B(i,:),2); ## end ## Bnorm=Bnorm(Bnorm > 1.e-12); %ndirect*1 ## B=B(Bnorm > 1.e-12,:); %ndirect*n ## A=diag(1./Bnorm)*B; %ndirect*n } ## NB: colSums( B^2 ) == 1 Y <- x %*% B # (n x p) %*% (p, nd') == (n x nd'); ## nd' = ndir.final := ndir - {those not in 'keep'} ## Compute and sweep out the median med <- colMedians(Y) if(trace.lev) { cat("med <- colMedians(Y): ", length(med), " values; summary():\n") print(summary(med)) } Y <- Y - rep(med, each=n) ## central :<==> non-adjusted <==> "classical" outlyingness central <- clower == 0 && cupper == 0 if(!central) { ## MM: mc() could be made faster if we could tell it that med(..) = 0 ## vv tmc <- apply(Y, 2L, mc, doReflect=mcReflect, doScale=mcScale, maxit=mcMaxit, eps1=mcEps1, eps2=mcEps2, trace.lev = mcTrace) ## original Antwerpen *wrongly*: tmc <- mc(Y) if(trace.lev) { cat("Columnwise mc() got ", length(tmc), " values; summary():\n") print(summary(tmc)) } } Q13 <- apply(Y, 2, quantile, c(.25, .75), names=FALSE, type = IQRtype) Q1 <- Q13[1L,]; Q3 <- Q13[2L,] IQR <- Q3 - Q1 ## NOTA BENE(MM): simplified definition of tup/tlo here and below ## 2014-10-18: "flipped" sign (which Pieter Setaert (c/o Mia H) proposed, Jul.30,2014: tup <- Q3 + coef* (if(central) IQR else IQR*exp( cupper*tmc*(tmc >= 0) + clower*tmc*(tmc < 0))) tlo <- Q1 - coef* (if(central) IQR else IQR*exp(-clower*tmc*(tmc >= 0) - cupper*tmc*(tmc < 0))) ## Note: all(tlo < med & med < tup) # where med = 0 if(trace.lev >= 3) { print( cbind(tlo, Q1, Q3, tup) ) } ## Instead of the loop: ## for (i in 1:ndir) { ## tup[i] <- max(Y[Y[,i] < tup[i], i]) ## tlo[i] <- -min(Y[Y[,i] > tlo[i], i]) ## ## MM: FIXED typo-bug : ^^^ this was missing! ## ## But after the fix, the function stops "working" for longley.. ## ## because tlo[] becomes 0 too often, YZ[.,.] = c / 0 = Inf ! ## } Yup <- Ylo <- Y Yup[Y >= rep(tup, each=n)] <- -Inf Ylo[Y <= rep(tlo, each=n)] <- Inf y.up <- apply(Yup, 2, max) # = max{ Y[i,] ; Y[i,] < tup[i] } y.lo <- -apply(Ylo, 2, min) # = -min{ Y[i,] ; Y[i,] > tlo[i] } if(trace.lev) { cat(length(y.up), "lower & upper Y (:= X - med(.)) values:\n") print(summary(y.lo)) print(summary(y.up)) } tY <- t(Y) ## Note: column-wise medians are all 0 : "x_i > m" <==> y > 0 ## Note: this loop is pretty fast for (j in 1:n) { # when y = (X-med) = 0 ==> adjout = 0 rather than ## 0 / 0 --> NaN; e.g, in set.seed(3); adjOutlyingness(longley) non0 <- 0 != (y <- tY[,j]); y <- y[non0]; I <- (y > 0) D <- I*y.up[non0] + (1 - I)*y.lo[non0] if(trace.lev >= 3) { cat(sprintf("j=%2d: #{non0}= %2d; quantile(D)=\n", j, sum(non0))) print(quantile(D), digits=3) } tY[non0, j] <- abs(y) / D } ## We get +Inf above for "small n"; e.g. 
set.seed(11); adjOutlyingness(longley) if(trace.lev) { cat("outlyingnesses for all directions (of which max(.) will be chosen:\n") print(quantile(tY, digits=3)) } adjout <- apply(tY, 2, function(x) max(x[is.finite(x)])) ##---- --- if(abs(trace.lev %% 1 - 0.7) < 1e-3) { ## really not for the end user .. cat("Plotting outlyingnesses vs. observation i:\n") matplot(t(tY), log="y"); axis(2, at=1); abline(h=1, lty=3) Sys.sleep(2) ## somewhat revealing: 3 groups: very large | medium | very small (incl. 0 which are *not* plotted) matplot(t(tY), log="y", type="b"); axis(2, at=1); abline(h=1, lty=3) browser() ## <<<<<<<<<<<<<<<<<<< } if(only.outlyingness) adjout else { Qadj <- quantile(adjout, probs = c(1 - alpha.cutoff, alpha.cutoff)) mcadjout <- if(cupper != 0) mc(adjout, doScale=mcScale, eps1=mcEps1, eps2=mcEps2) else 0 ## === cutoff <- Qadj[2] + coef * (Qadj[2] - Qadj[1]) * (if(mcadjout > 0) exp(cupper*mcadjout) else 1) list(adjout = adjout, iter = it, ndir.final = sum(keep), MCadjout = mcadjout, Qalph.adjout = Qadj, cutoff = cutoff, nonOut = (adjout <= cutoff)) } } ##' Compute a "full rank" version of matrix x, ##' by removing columns (or rows when nrow(x) < ncol(x)), using qr() and it's pivots fullRank <- function(x, tol = 1e-7, qrx = qr(x, tol=tol)) { d <- dim(x) n <- d[[1L]]; p <- d[[2L]] if(n < p) return( t(fullRank(t(x), tol=tol)) ) ## else n >= p >= rank(.) rnk <- qrx$rank if(rnk == p) x else x[, qrx$pivot[seq_len(rnk)], drop=FALSE] } robustbase/R/anova.lmrob.R0000644000176200001440000001245212737461447015210 0ustar liggesusersanova.lmrob <- function(object, ..., test = c("Wald", "Deviance"), verbose=getOption("verbose")) { dotargs <- list(...) named <- if (is.null(names(dotargs))) logical(length(dotargs))# FALSE else (names(dotargs) != "") if (any(named)) warning("the following arguments to 'anova.lmrob' are invalid and \n", "dropped: ", pasteK(deparse(dotargs[named]))) dotargs <- dotargs[!named] test <- match.arg(test) ## method argument has to end with 'M' (req. 
for refitting) if (test == "Deviance" && !grepl('M$', object$control$method)) stop("For test = 'Deviance', the estimator chain has to end with 'M'") if (length(dotargs) > 0) { length.tl <- function(x) length(attr(terms(x),"term.labels")) isFormula <- vapply(dotargs, inherits, NA, what = "formula") h <- vapply(dotargs, length.tl, 0L) if(all(isFormula)) { if(any(h >= length.tl(object))) stop("The first object does not contain the largest model") modform <- dotargs } else { if(verbose) message("All models are refitted except the largest one") if(any(h >= length.tl(object))) { h <- c(length.tl(object),h) dotargs <- c(list(object), dotargs)[order(h, decreasing = TRUE)] object <- dotargs[[1]] if(!inherits(object, "lmrob")) stop("anova.lmrob() only works for 'lmrob' objects") dotargs <- dotargs[-1] } modform <- lapply(dotargs, formula) } initCoef <- lapply(dotargs, coef) return(anovaLmrobList(object, modform, initCoef, test = test)) } ## ## "'Anova Table' for a single model object stop("'Anova Table' for a single model not yet implemented") } anovaLmrobList <- function (object, modform, initCoef, test) { responses <- as.character(lapply(modform, function(x) deparse(x[[2]]))) if (!all(responses == deparse(formula(object)[[2]]))) stop("Not the same response used in the fitted models") ## nobs <- length(object$residuals) nmodels <- length(modform) + 1 tbl <- matrix(rep(NA, nmodels*4), ncol = 4) tbl[1,1] <- nobs[1] - length(coef(object)) obj0 <- object for(k in 2:nmodels) { obj0 <- anovaLmrobPair(obj0, modform[[k-1]], initCoef[[k-1]], test = test) tbl[k,] <- obj0$anova obj0$scale <- object$scale } ## return dimnames(tbl) <- list(1:nmodels, c("pseudoDf", "Test.Stat", "Df", "Pr(>chisq)")) title <- switch(test, Wald = "Robust Wald Test Table", Deviance = "Robust Deviance Table", stop("invalid 'test'")) variables <- c(list(formula(terms(object))), modform) topnote <- paste("Model ", format(1:nmodels), ": ", variables, sep = "", collapse = "\n") note <- paste("Largest model fitted by lmrob(), i.e.", object$control$method) ## paste("Models fitted by method '", methods[1], "'", sep="") structure(as.data.frame(tbl), heading = c(title, "", topnote, note,""), class = c("anova", "data.frame")) } anovaLmrobPair <- function(FMfit, reduced.model, initCoef, test) { ## 'FM': full model; 'RM' : reduced model X <- model.matrix(FMfit, data = FMfit$model) FMod <- FMfit$qr$pivot[1:FMfit$rank] asgn <- attr(X, "assign") FMt <- terms(FMfit) RMt <- terms(reduced.model) FMtl <- attr(FMt, "term.labels") RMtl <- attr(RMt, "term.labels") RMnumtl <- match(RMtl , FMtl, nomatch = -1) if(attr(RMt, "intercept") == 1) RMnumtl <- c(0, RMnumtl) if(any(is.na(match(RMnumtl, unique(asgn))))) stop("Models are not nested!") RMod0 <- seq(along = asgn)[!is.na(match(asgn, RMnumtl))] RMod <- intersect(RMod0, FMod) if (length(FMod) == length(RMod)) stop("Models are not strictly nested") H0ind <- which(!FMod %in% RMod) H0coef <- coef(FMfit)[H0ind] df <- length(H0coef) pp <- FMfit$rank switch(test, "Wald" = { t.cov <- FMfit$cov t.chisq <- sum(H0coef * solve(t.cov[H0ind, H0ind], H0coef)) ## return c(FMfit, list(anova = c(nrow(X)-pp+df, t.chisq, df, pchisq(as.vector(t.chisq), df = df, lower.tail = FALSE)))) }, "Deviance" = { y <- FMfit$residuals + FMfit$fitted.values s0 <- FMfit$scale fCtrl <- FMfit$control psi <- function(u, deriv = 0) Mpsi(u, cc = fCtrl$tuning.psi, psi = fCtrl$psi, deriv) iC <- if(is.null(initCoef)) { res <- as.vector(y - X[,RMod] %*% FMfit$coef[RMod]) psiRes <- psi(res/s0) if(sum(abs(psiRes) < 1e-08) > 0.6*nrow(X)) stop("Please 
fit the nested models by lmrob") FMfit$coef[RMod] } else { idx <- !is.na(initCoef) if (any(idx != RMod0 %in% RMod)) stop("NA coefs in full and reduced model do not match") initCoef[idx] } RMfit <- lmrob..M..fit(x = X[,RMod, drop=FALSE], y = y, beta.initial = iC, scale = s0, control = fCtrl, method = fCtrl$method) FMres <- as.vector(y - X[,FMod] %*% FMfit$coef[FMod]) RMres <- RMfit$resid ## as.vector(y - X[,RMod] %*% RMfit$coef) FM_sRho <- sum(psi(FMres/s0, deriv = -1)) RM_sRho <- sum(psi(RMres/s0, deriv = -1)) tauStar <- mean(psi(FMres/s0, deriv = 1)) / mean(psi(FMres/s0)^2, deriv = 0) t.chisq <- 2*tauStar*(RM_sRho - FM_sRho) ## return c(RMfit, list(anova = c(nrow(X)-pp+df, t.chisq, df, pchisq(as.vector(t.chisq), df = df, lower.tail = FALSE)))) }, stop("test ", test, " not yet implemented")) } ## anovaLmrobPair robustbase/R/glmrobMqle.R0000644000176200001440000004264513325654420015067 0ustar liggesusers#### Mallows quasi-likelihood estimator of E. Cantoni and E. Ronchetti (2001) #### based originally on Eva Cantoni's S-plus code "robGLM" ## FIXME{MM}: All these expression()s and eval()s -- once were really slick and fast. ## ----- Nowadays, with 'codetools' and the byte-compiler, they "just don't fit anymore" ## including those globalVariables() {also in other places!}: globalVariables(c("residP", "residPS", "dmu.deta", "snu"), add=TRUE) ##' @title ##' @param wts a character string \dQuote{weights.on.x} specifying how weights should be computed ##' *or* a numeric vector of final weights in which case nothing is computed. ##' @param X n x p design matrix aka model.matrix() ##' @param intercept logical, if true, X[,] has an intercept column which should ##' not be used for rob.wts ##' @return n-vector of non-negative weights ##' @author Martin Maechler robXweights <- function(wts, X, intercept=TRUE) { stopifnot(length(d <- dim(X)) == 2, is.logical(intercept)) nobs <- d[1] if(d[2]) { ## X has >= 1 column, and hence there *are* coefficients in the end if(is.character(wts)){ switch(wts, "none" = rep.int(1, nobs), "hat" = wts_HiiDist(X)^2, # = (1 - Hii)^2 "robCov" = wts_RobDist(X, intercept, covFun = MASS::cov.rob), ## MCD is currently problematic: many singular subsamples "covMcd" = wts_RobDist(X, intercept, covFun = covMcd), stop("Weighting method", sQuote(wts), " is not implemented")) } ## (new; 2013-07-05; -> robustbase 0.9-9) else if(is.list(wts)) { if(length(wts) == 1 && is.function(covF <- wts[[1]])) wts_RobDist(X, intercept, covFun = covF) else stop("if a list, weights.on.x must contain a covariance function such as covMcd()") } else if(is.function(wts)) { wts(X, intercept) } else { if(!is.numeric(wts) || length(wts) != nobs) ## FIXME: "when not a string, a list, or a function, then ..." stop(gettextf("weights.on.x needs %d none-negative values", nobs), domain=NA) if(any(wts) < 0) stop("All weights.on.x must be none negative") } } else ## p = ncoef == 0 {maybe intercept, but that's not relevant here} rep.int(1,nobs) } ##' @param intercept logical, if true, X[,] has an intercept column which should ##' not be used for rob.wts glmrobMqle <- function(X, y, weights = NULL, start = NULL, offset = NULL, family, weights.on.x = "none", control = glmrobMqle.control(), intercept = TRUE, trace = FALSE) { ## To DO: ## o weights are not really implemented as *extra* user weights; rather as "glm-weights" ## o offset is not fully implemented (really? -- should have test case!) 
if(!is.matrix(X)) X <- as.matrix(X) ## never used: ## xnames <- dimnames(X)[[2]] ## ynames <- if (is.matrix(y)) rownames(y) else names(y) nobs <- NROW(y) stopifnot(nobs == nrow(X)) if (is.null(weights)) weights <- rep.int(1, nobs) else if(any(weights <= 0)) stop("All weights must be positive") if (is.null(offset)) offset <- rep.int(0, nobs) else if(!all(offset==0)) warning("'offset' not fully implemented") variance <- family$variance linkinv <- family$linkinv if (!is.function(variance) || !is.function(linkinv)) stop("illegal 'family' argument") mu.eta <- family$mu.eta if (is.null(valideta <- family$valideta)) valideta <- function(eta) TRUE if (is.null(validmu <- family$validmu)) validmu <- function(mu) TRUE ncoef <- ncol(X) w.x <- robXweights(weights.on.x, X=X, intercept=intercept) ### Initializations stopifnot(control$maxit >= 1, (tcc <- control$tcc) >= 0) ## note that etastart and mustart are used to make 'family$initialize' run etastart <- NULL; mustart <- NULL ## note that 'weights' are used and set by binomial()$initialize ! eval(family$initialize) ## --> n, mustart, y and weights (=ni) ni <- as.vector(weights)# dropping attributes for computation ## if(is.null(start)) start <- glm.fit(x = X, y = y, weights = weights, offset = offset, family = family)$coefficients if(any(ina <- is.na(start))) { cat("initial start 'theta' has NA's; eliminating columns X[, j];", "j = ", pasteK(which(ina)),"\n") theta.na <- start X <- X[, !ina, drop = FALSE] start <- glm.fit(x = X, y = y, weights = weights, offset = offset, family = family)$coefficients if(any(is.na(start))) stop("start 'theta' has still NA's .. badly singular x\n") ## FIXME ncoef <- length(start) } thetaOld <- theta <- as.vector(start) # as.v*(): dropping attributes eta <- as.vector(X %*% theta) mu <- linkinv(eta) # mu estimates pi (in [0,1]) at the binomial model if (!(validmu(mu) && valideta(eta))) stop("Cannot find valid starting values: You need help") ## switch(family$family, "binomial" = { Epsi.init <- EpsiBin.init Epsi <- EpsiBin EpsiS <- EpsiSBin Epsi2 <- Epsi2Bin phiEst <- phiEst.cl <- 1 }, "poisson" = { Epsi.init <- EpsiPois.init Epsi <- EpsiPois EpsiS <- EpsiSPois Epsi2 <- Epsi2Pois phiEst <- phiEst.cl <- expression({1}) }, "gaussian" = { Epsi.init <- EpsiGaussian.init Epsi <- EpsiGaussian EpsiS <- EpsiSGaussian Epsi2 <- Epsi2Gaussian phiEst.cl <- phiGaussianEst.cl phiEst <- phiGaussianEst }, "Gamma" = { ## added by ARu Epsi.init <- EpsiGamma.init Epsi <- EpsiGamma EpsiS <- EpsiSGamma Epsi2 <- Epsi2Gamma phiEst.cl <- phiGammaEst.cl phiEst <- phiGammaEst }, ## else stop(gettextf("family '%s' not yet implemented", family$family), domain=NA) ) sV <- NULL # FIXME workaround for codetools comp.V.resid <- expression({ Vmu <- variance(mu) if (any(is.na(Vmu))) stop("NAs in V(mu)") if (any(Vmu == 0)) stop("0s in V(mu)") sVF <- sqrt(Vmu) # square root of variance function residP <- (y - mu)* sni/sVF # Pearson residuals }) comp.scaling <- expression({ sV <- sVF * sqrt(phi) residPS <- residP/sqrt(phi) # scaled Pearson residuals }) comp.Epsi.init <- expression({ ## d mu / d eta : dmu.deta <- mu.eta(eta) if (any(is.na(dmu.deta))) stop("NAs in d(mu)/d(eta)") ## "Epsi init" : H <- floor(mu*ni - tcc* sni*sV) K <- floor(mu*ni + tcc* sni*sV) eval(Epsi.init) }) ### Iterations if(trace && ncoef) { cat("Initial theta: \n") local({names(theta) <- names(start); print(theta) }) digits <- max(1, getOption("digits") - 5) w.th.1 <- 6+digits # width of one number; need 8 for 2 digits: "-4.8e-11" width.th <- ncoef*(w.th.1 + 1) - 1 cat(sprintf("%3s | %*s | 
%12s\n", "it", width.th, "d{theta}", "rel.change")) mFormat <- function(x, wid) { r <- formatC(x, digits=digits, width=wid) sprintf("%*s", wid, sub("e([+-])0","e\\1", r)) } } sni <- sqrt(ni) eval(comp.V.resid) #-> (Vmu, sVF, residP) phi <- eval(phiEst.cl) ## Determine the range of phi values based on the distribution of |residP| Rphi <- c(1e-12, 3*median(abs(residP)))^2 conv <- FALSE if(ncoef) for (nit in 1:control$maxit) { eval(comp.scaling) #-> (sV, residPS) eval(comp.Epsi.init) ## Computation of alpha and (7) using matrix column means: cpsi <- pmax.int(-tcc, pmin.int(residPS,tcc)) - eval(Epsi) EEq <- colMeans(cpsi * w.x * sni/sV * dmu.deta * X) ## ## Solve 1/n (t(X) %*% B %*% X) %*% delta.coef = EEq DiagB <- eval(EpsiS) /(sni*sV) * w.x * (ni*dmu.deta)^2 if(any(n0 <- ni == 0)) DiagB[n0] <- 0 # instead of NaN Dtheta <- solve(crossprod(X, DiagB*X)/nobs, EEq) if (any(!is.finite(Dtheta))) { warning("Non-finite coefficients at iteration ", nit) break } theta <- thetaOld + Dtheta eta <- as.vector(X %*% theta) + offset mu <- linkinv(eta) ## estimation of the dispersion parameter eval(comp.V.resid) phi <- eval(phiEst) ## Check convergence: relative error < tolerance relE <- sqrt(sum(Dtheta^2)/max(1e-20, sum(thetaOld^2))) conv <- relE <= control$acc if(trace) { cat(sprintf("%3d | %*s | %12g\n", nit, width.th, paste(mFormat(Dtheta, w.th.1), collapse=" "), relE)) } if(conv) break thetaOld <- theta } ## end of iteration else { ## ncoef == 0 conv <- TRUE nit <- 0 } if (!conv) warning("Algorithm did not converge") eps <- 10 * .Machine$double.eps switch(family$family, "binomial" = { if (any(mu/weights > 1 - eps) || any(mu/weights < eps)) warning("fitted probabilities numerically 0 or 1 occurred") }, "poisson" = { if (any(mu < eps)) warning("fitted rates numerically 0 occurred") }) eval(comp.V.resid) #-> (Vmu, sVF, residP) eval(comp.scaling) #-> (sV, residPS) ## Estimated asymptotic covariance of the robust estimator if(ncoef) { eval(comp.Epsi.init) alpha <- colMeans(eval(Epsi) * w.x * sni/sV * dmu.deta * X) DiagA <- eval(Epsi2) / (ni*sV^2)* w.x^2* (ni*dmu.deta)^2 matQ <- crossprod(X, DiagA*X)/nobs - tcrossprod(alpha, alpha) DiagB <- eval(EpsiS) / (sni*sV)* w.x * (ni*dmu.deta)^2 if(any(n0 <- ni == 0)) DiagB[n0] <- 0 # instead of NaN matM <- crossprod(X, DiagB*X)/nobs matMinv <- solve(matM) asCov <- matMinv %*% matQ %*% matMinv / nobs } else { ## ncoef == 0 matM <- matQ <- asCov <- matrix(NA_real_, 0,0) } if(any(ina)) {# put NA's back, extending theta[] to "original length" ok <- !ina theta.na[ok] <- theta ; theta <- theta.na ## also extend the "p x p" matrices with NA's -- ##No : lm() and glm() also do *not* do this ##No p <- length(theta) ##No nm <- names(theta) ##No M <- matrix(NA_real_, p, p, dimnames = list(nm,nm)) ##No Mn <- M; Mn[ok, ok] <- asCov ; asCov <- Mn ##No Mn <- M; Mn[ok, ok] <- matM ; matM <- Mn ##No Mn <- M; Mn[ok, ok] <- matQ ; matQ <- Mn } w.r <- pmin(1, tcc/abs(residPS)) names(mu) <- names(eta) <- names(residPS) # re-add after computation list(coefficients = theta, residuals = residP, # s.resid = residPS, fitted.values = mu, w.r = w.r, w.x = w.x, ni = ni, dispersion = phi, cov = asCov, matM = matM, matQ = matQ, tcc = tcc, family = family, linear.predictors = eta, deviance = NULL, iter = nit, y = y, converged = conv) } ## NB: X is model.matrix() aka design matrix used; typically including an intercept wts_HiiDist <- function(X) { ## Hii := diag( tcrossprod( qr.Q(qr(X)) ) ) == rowSums( qr.Q(qr(X)) ^2 ) : x <- qr(X) Hii <- rowSums(qr.qy(x, diag(1, nrow = NROW(X), ncol = x$rank))^2) 
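## Value computed below: 1 - Hii, i.e. 1 minus the leverage (the hatvalues) of each row of X.
## (illustration only:  Hii  should equal  stats::hat(X, intercept = FALSE)  -- same QR-based computation)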
(1-Hii) } ##' Compute robustness weights depending on the design 'X' only, ##' using robust(ified) Mahalanobis distances. ##' This is an auxiliary function for robXweights() activated typically by ##' weights.on.x = "..." from regression functions ##' @title Compute Robust Weights based on Robustified Mahalanobis - Distances ##' @param X n x p numeric matrix ##' @param intercept logical; should be true iff X[,1] is a column with the intercept ##' @param covFun function for computing a \bold{robust} covariance matrix; ##' e.g., MASS::cov.rob(), or covMcd(). ##' @return n-vector of non-negative weights. ##' @author Martin Maechler wts_RobDist <- function(X, intercept, covFun) { D2 <- if(intercept) { ## X[,] has intercept column which should not be used for rob.wts X <- X[, -1, drop=FALSE] Xrc <- covFun(X) mahalanobis(X, center = Xrc$center, cov = Xrc$cov) } else { ## X[,] can be used directly if(!is.matrix(X)) X <- as.matrix(X) Xrc <- covFun(X) S <- Xrc$cov + tcrossprod(Xrc$center) mahalanobis(X, center = FALSE, cov = S) } p <- ncol(X) ## E[chi^2_p] = p 1/sqrt(1+ pmax.int(0, 8*(D2 - p)/sqrt(2*p))) } ## MM: 'acc' seems a misnomer to me, but it's inherited from MASS::rlm glmrobMqle.control <- function(acc = 1e-04, test.acc = "coef", maxit = 50, tcc = 1.345) { if (!is.numeric(acc) || acc <= 0) stop("value of acc must be > 0") if (test.acc != "coef") stop("Only 'test.acc = \"coef\"' is currently implemented") ## if (!(any(test.vec == c("coef", "resid")))) ## stop("invalid argument for test.acc") if (!is.numeric(maxit) || maxit <= 0) stop("maximum number of iterations must be > 0") if (!is.numeric(tcc) || tcc <= 0) stop("value of the tuning constant c (tcc) must be > 0") list(acc = acc, test.acc = test.acc, maxit = maxit, tcc = tcc) } ### ----------------- E[ f(psi ( X ) ) ] ------------------------------- ## MM: These are now expressions instead of functions ## since 'Epsi*' and 'Epsi2*' are *always* called together ## and 'EpsiS*' when called is always after the other two ## ==> do common computations only once in Epsi*.init ==> more efficient! ## ## FIXME(2): Some of these fail when Huber's "c", 'tcc' is = +Inf ## ----- --> ../../robGLM1/R/rglm.R ## FIXME: Do use a "robFamily", a *list* of functions ## ------ which all have the same environment ## ===> can get same efficiency as expressions, but better OOP ### --- Poisson -- family --- EpsiPois.init <- expression( { dpH <- dpois(H, mu); dpH1 <- dpois(H-1, mu) dpK <- dpois(K, mu); dpK1 <- dpois(K-1, mu) pHm1 <- ppois(H-1, mu) ; pH <- pHm1 + dpH # = ppois(H,*) pKm1 <- ppois(K-1, mu) ; pK <- pKm1 + dpK # = ppois(K,*) E2f <- mu*(dpH1 - dpH - dpK1 + dpK) + pKm1 - pHm1 }) EpsiPois <- expression( { tcc*(1 - pK - pH) + mu*(dpH - dpK)/sV }) Epsi2Pois <- expression( { ## Calculation of E(psi^2) for the diagonal elements of A in matrix Q: tcc^2 * (pH + 1 - pK) + E2f }) EpsiSPois <- expression( { ## Calculation of E(psi*s) for the diagonal elements of B in the ## expression matrix M = 1/n t(X) %*% B %*% X: tcc*(dpH + dpK) + E2f / sV }) ### --- Binomial -- family --- EpsiBin.init <- expression({ pK <- pbinom(K, ni, mu) pH <- pbinom(H, ni, mu) pKm1 <- pbinom(K-1, pmax.int(0, ni-1), mu) pHm1 <- pbinom(H-1, pmax.int(0, ni-1), mu) pKm2 <- pbinom(K-2, pmax.int(0, ni-2), mu) pHm2 <- pbinom(H-2, pmax.int(0, ni-2), mu) ## QlV = Q / V, where Q = Sum_j (j - mu_i)^2 * P[Y_i = j] ## i.e. Q = Sum_j j(j-1)* P[.] + ## (1- 2*mu_i) Sum_j j * P[.] + ## mu_i^2 Sum_j P[.] 
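## The ifelse()'s below handle the boundary cases ni == 1 and ni == 2 explicitly,
## where the Binomial(ni - 1, mu) resp. Binomial(ni - 2, mu) terms (pKm1, pHm1 / pKm2, pHm2)
## would be degenerate.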
QlV <- mu/Vmu*(mu*ni*(pK-pH) + (1 - 2*mu*ni) * ifelse(ni == 1, (H <= 0)*(K >= 1), pKm1 - pHm1) + (ni - 1) * mu * ifelse(ni == 2, (H <= 1)*(K >= 2), pKm2 - pHm2)) }) EpsiBin <- expression( { tcc*(1 - pK - pH) + ifelse(ni == 1, (- (H < 0) + (K >= 1) ) * sV, (pKm1 - pHm1 - pK + pH) * mu * sni/sV) }) Epsi2Bin <- expression( { ## Calculation of E(psi^2) for the diagonal elements of A in matrix Q: tcc^2*(pH + 1 - pK) + QlV }) EpsiSBin <- expression( { ## Calculation of E(psi*s) for the diagonal elements of B in the ## expression matrix M = (X' B X)/n mu/Vmu*(tcc*(pH - ifelse(ni == 1, H >= 1, pHm1)) + tcc*(pK - ifelse(ni == 1, K > 0, pKm1))) + ifelse(ni == 0, 0, QlV / (sni*sV)) }) ### --- Gaussian -- family --- EpsiGaussian.init <- expression({ dc <- dnorm(tcc) pc <- pnorm(tcc) }) EpsiGaussian <- expression( 0 ) EpsiSGaussian <- expression( 2*pc-1 ) Epsi2Gaussian <- expression( 2*tcc^2*(1-pc)-2*tcc*dc+2*pc-1 ) phiGaussianEst.cl <- expression( { ## Classical estimation of the dispersion paramter phi = sigma^2 sum(((y - mu)/mu)^2)/(nobs - ncoef) }) phiGaussianEst <- expression( { sphi <- mad(residP, center=0)^2 }) ### --- Gamma -- family --- Gmn <- function(t, nu) { ## Gm corrresponds to G * nu^((nu-1)/2) / Gamma(nu) snu <- sqrt(nu) snut <- snu+t r <- numeric(length(snut)) ok <- snut > 0 r[ok] <- { nu <- nu[ok]; snu <- snu[ok]; snut <- snut[ok] exp((nu-1)/2*log(nu) - lgamma(nu) - snu*snut + nu*log(snut)) } r } EpsiGamma.init <- expression({ nu <- 1/phi ## form parameter nu snu <- 1/sqrt(phi) ## == sqrt (nu) pPtc <- pgamma(snu + c(-tcc,tcc), shape=nu, rate=snu) pMtc <- pPtc[1] pPtc <- pPtc[2] aux2 <- tcc*snu GLtcc <- Gmn(-tcc,nu) GUtcc <- Gmn( tcc,nu) }) EpsiGamma <- expression( tcc*(1-pPtc-pMtc) + GLtcc - GUtcc ) EpsiSGamma <- expression( ((GLtcc - GUtcc) + snu*(pPtc-pMtc))/mu ) Epsi2Gamma <- expression({ (tcc^2*(pMtc+1-pPtc) + (pPtc-pMtc) + (GLtcc*(1-aux2) - GUtcc*(1+aux2))/snu ) }) phiGammaEst.cl <- expression( { ## Classical moment estimation of the dispersion parameter phi sum(((y - mu)/mu)^2)/(nobs-ncoef) }) phiGammaEst <- expression( { ## robust estimation of the dispersion parameter by ## Huber's proposal 2 sphi <- uniroot(Huberprop2, interval=Rphi, ns.resid=residP, mu=mu, Vmu=Vmu, tcc=tcc)$root }) Huberprop2 <- function(phi, ns.resid, mu, Vmu, tcc) { eval(EpsiGamma.init) compEpsi2 <- eval(Epsi2Gamma) nobs <- length(mu) ## return h := sum(pmax.int(-tcc, pmin.int(ns.resid*snu, tcc))^2) - nobs*compEpsi2 } if(FALSE) ## no-eval version Huberprop2 <- function(phi, ns.resid, mu, Vmu, tcc) { nobs <- length(mu) nu <- 1/phi ## form parameter nu snu <- 1/sqrt(phi) ## sqrt (nu) pPtc <- pgamma(snu + c(-tcc,tcc), shape=nu, rate=snu) pMtc <- pPtc[1] pPtc <- pPtc[2] ts <- tcc*snu GLtcc <- Gmn(-tcc,nu) *(1-ts)/snu GUtcc <- Gmn( tcc,nu) *(1+ts)/snu ## compEpsi2 <- tcc^2 + (pPtc - pMtc)*(1-tcc^2) + GLtcc - GUtcc ## return h := sum(pmax.int(-tcc, pmin.int(ns.resid*snu, tcc))^2) - nobs*compEpsi2 } robustbase/R/qnsn.R0000644000176200001440000001305114220350260013720 0ustar liggesusers### Note: till 2010, a slightly wrong constant = 2.2219 was in use. 
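### (the constant is chosen so that Qn() is consistent for the standard deviation at Gaussian data)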
### Error reported by Peter Ruckdeschel, U.Bayreuth, 15.Juli 2010 ### correct constant == 1 / (sqrt(2) * qnorm(5/8)) == 2.219144 ### -- but wrong constant, 2.2219, is already in the the original Fortran qn.f Qn.corr <- 2.2219 / 2.21914 ##' Qn finite sample correction factor (not exported, but "available") ##' Version 1 Qn.finite.c <- function(n) (if (n %% 2) 1.6069 +(-2.333 - 3.1/n)/n # n odd else 3.6667 +( 2.591 - 5.5/n)/n # n even )/n + 1 ## Version built on res <- cbind(Res.sml, Res.mid) ## and the models there Qn.finite.c <- function(n) (if (n %% 2) 1.60188 +(-2.1284 - 5.172/n)/n # n odd else 3.67561 +( 1.9654 +(6.987 - 77/n)/n)/n # n even )/n + 1 Qn <- function(x, constant = NULL, finite.corr = is.null(constant) && missing(k), na.rm = FALSE, k = choose(n %/% 2 + 1, 2), warn.finite.corr = TRUE) { ## Purpose: Rousseeuw and Croux's Q_n() robust scale estimator ## Author: Martin Maechler, Date: 14 Mar 2002, 10:43 if (na.rm) x <- x[!is.na(x)] else if(anyNA(x)) return(NA) n <- length(x) if(n == 0) return(NA) else if(n == 1) return(0.) else if(!is.integer(n)) stop("not yet implemented for large vectors") nn2 <- choose(n, 2) # = n*(n-1)/2 (double!) dflt.k <- if(dflt.c <- is.null(constant)) missing(k) else FALSE stopifnot(is.numeric(k <- as.double(k)), k == trunc(k), 1 <= k, k <= nn2, ## but k *may* be vector is.integer(l_k <- length(k))) if(missing(finite.corr)) # smarter than "visual default" finite.corr <- dflt.c && (dflt.k <- dflt.k || (l_k == 1 && k == choose(n %/% 2 + 1, 2))) if(dflt.c) # define constant constant <- if(dflt.k) 2.21914 # == old default ("true value" rounded to 6 significant digits) else 1/(sqrt(2) * qnorm(((k-1/2)/nn2 + 1)/2)) ## instead of passing +/- Inf to .C() {"hack"}: ## if(any(nFin <- is.infinite(x))) x[nFin] <- sign(x[nFin]) * .Machine$double.xmax r <- constant * .C(Qn0, as.double(x), n, k, l_k, res = double(l_k), NAOK=TRUE)$res if (finite.corr) { if(!dflt.k && warn.finite.corr) warning("finite sample corrections are not corrected for non-default 'k'") ## FIXME: MM--- using the above qnorm((k-1/2)/nn2 + 1)/2) should need *less* finite correction !! ## ----- ~~ >>> simulation needed if (n <= 12) ## n in 2:12 --> n-1 in 1:11 ## n=2: E[Q_2] = E|X - Y| = sqrt(pi)/2, fc = sqrt(pi)/2/2.21914 r* c(.399356, # ~= fc = 0.3993560233 ## These are from MM's simulation("Res3"), Nsim = 2^27 ~= 134 mio: ## ~/R/MM/Pkg-ex/robustbase/Qn-simulation.R .99365, .51321, .84401, .61220, .85877, .66993, .87344, .72014, .88906, .75743)[n - 1L] else r / Qn.finite.c(n) } else r } ## This is the old version -- available for back "compatibility": Qn.old <- function(x, constant = 2.2219, finite.corr = missing(constant)) { ## Purpose: Rousseeuw and Croux's Q_n() robust scale estimator ## Author: Martin Maechler, Date: 14 Mar 2002, 10:43 n <- length(x) if(n == 0) return(NA) else if(n == 1) return(0.) else if(!is.integer(n)) stop("not yet implemented for large vectors") h <- n %/% 2L + 1L k <- h*(h-1)/2 r <- constant * .C(Qn0, as.double(x), n, k, 1L, res = double(1))$res if (finite.corr) (if (n <= 9) { # n in 2:9 --> n-1 in 1:8 c(.399,.994, .512,.844, .611,.857, .669,.872)[n - 1] } else { if (n %% 2) ## n odd n / (n + 1.4) else ## n even n / (n + 3.8) } ) * r else r } Sn <- function(x, constant = 1.1926, finite.corr = missing(constant), na.rm = FALSE) { ## Purpose: Rousseeuw and Croux's S_n() robust scale estimator ## Author: Martin Maechler, Date: 14 Mar 2002, 10:43 if (na.rm) x <- x[!is.na(x)] else if(anyNA(x)) return(NA) n <- length(x) if(n == 0) return(NA) else if(n == 1) return(0.) 
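    ## Sn(x) = constant * lowmed_i { highmed_j |x_i - x_j| }   (Rousseeuw and Croux (1993));
    ## the .C(Sn0, ..) call below computes the unscaled "low median of high medians" part.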
## instead of passing +/- Inf to .C() {"hack"}: ## if(any(nFin <- is.infinite(x))) x[nFin] <- sign(x[nFin]) * .Machine$double.xmax r <- constant * .C(Sn0, as.double(x), n, as.integer(!is.unsorted(x)),# is.sorted NAOK = TRUE, res = double(1), a2 = double(n))$res ## NB: a2[] could be used for confidence intervals and other estimates! if (finite.corr) ( if (n <= 9) { c(0.743, # n = 2 1.851, 0.954,# 3 & 4 1.351, 0.993,# 5 & 6 1.198, 1.005,# 7 & 8 1.131)[n - 1] } else if (n %% 2) # n odd, >= 11 n / (n - 0.9) else # n even, >= 10 1 ) * r else r } wgt.himedian <- function(x, weights = rep(1,n)) { ## Purpose: weighted hiMedian of x ## Author: Martin Maechler, Date: 14 Mar 2002 n <- length(x <- as.double(x)) stopifnot(storage.mode(weights) %in% c("integer", "double")) if(n != length(weights)) stop("'weights' must have same length as 'x'") ## if(is.integer(weights)) message("using integer weights") .C(if(is.integer(weights)) wgt_himed_i else wgt_himed, x, n, weights, res = double(1))$res } ## To be used directly as 'scaleFun' in 'covOGK()' : s_Qn <- function(x, mu.too = FALSE, ...) c(if(mu.too) median(x), Qn(x, ...)) s_Sn <- function(x, mu.too = FALSE, ...) c(if(mu.too) median(x), Sn(x, ...)) robustbase/R/covPlot.R0000644000176200001440000002667113434014060014404 0ustar liggesusers#### This is from the R package #### #### rrcov : Scalable Robust Estimators with High Breakdown Point #### #### by Valentin Todorov ### This program is free software; you can redistribute it and/or modify ### it under the terms of the GNU General Public License as published by ### the Free Software Foundation; either version 2 of the License, or ### (at your option) any later version. ### ### This program is distributed in the hope that it will be useful, ### but WITHOUT ANY WARRANTY; without even the implied warranty of ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ### GNU General Public License for more details. ### ### You should have received a copy of the GNU General Public License ### along with this program; if not, a copy is available at ## http://www.r-project.org/Licenses/ ## I would like to thank Peter Filzmoser for providing the initial code of ## this function. plot.mcd <- function(x, which=c("all", "dd","distance","qqchi2","tolEllipsePlot","screeplot"), classic= FALSE, ask = (which[1] == "all" && dev.interactive()), cutoff = NULL, id.n, labels.id = rownames(x$X), cex.id = 0.75, label.pos = c(4,2), tol = 1e-7, ...) { if (!inherits(x, "mcd")) stop("Use only with 'mcd' objects") covPlot(x$X, which= which, classic= classic, ask= ask, m.cov = x, cutoff= cutoff, id.n = id.n, labels.id, cex.id = cex.id, label.pos = label.pos, tol = tol, ...) } covPlot <- function(x, which = c("all", "dd", "distance", "qqchi2", "tolEllipsePlot", "screeplot"), classic = FALSE, ask = (which[1] == "all" && dev.interactive()), m.cov = covMcd(x), cutoff = NULL, id.n, labels.id = rownames(x), cex.id = 0.75, label.pos = c(4,2), tol = 1e-7, ...) { ##@bdescr ## Make plots based on the covariance structure of a data set: ## dd - distance-distance plot: Robust distances versus ## Mahalanobis distances ## distance - a plot of the robust distances ## qqchi2 - a qq-plot of the robust distances versus the ## quantiles of the chi-squared distribution ## tolEllipsePlot- a tolerance ellipse ## screeplot- a screeplot of the eigenvalues ov the covariance matrix ## ## Distance Plot: ## Draw a Distance-Distance Plot: Plots the robust distances ## versus the classical Mahalanobis distances as introduced by ## Rousseeuw, P. 
J., and van Zomeren, B. C. (1990). Unmasking ## Multivariate Outliers and Leverage Points. Journal of the American ## Statistical Association, 85, 633-639. ## ## The dashed line is the set of points where the robust distance is ## equal to the classical distance. ## The horizontal and vertical dotted lines are drawn at values equal cutoff ## which defaults to square root of the 97.5% quantile of a chi-squared ## distribution with p degrees of freedom. Points beyond these lines can ## be considered outliers. ## ##@edescr ## ##@in x : [matrix] A data.frame or matrix, n > 2*p ##@in which : [character] A plot option, one of: ## classic: index plot of the classical mahalanobis distances ## robust : index plot of the robust mahalanobis distances ## dd : distance-distance plot ## index : parallel index plot of classical and robust distances ## all : all three plots --- this is the default ## ##@in classic : [logical] If true the classical plot will be displayed too ## default is classic = FALSE ##@in m.cov : [list] An object like class "mcd" - only its attributes ## center and cov will be used ##@in cutoff : [number] The cutoff value for the distances ##@in id.n : [number] number of observations to be identified with a label. ## Defaults to the number of observations with ## distance larger than cutoff -- missing is propagated ##@in tol : [number] tolerance to be used for computing the inverse ## - see 'solve'. defaults to 1e-7 ## NOTE: The default tolerance 1e-7, will not work for some example ## data sets, like milk or aircraft myscreeplot <- function(x, m.cov = covMcd(x)) { erob <- eigen(m.cov$cov,symmetric = TRUE, only.values = TRUE)$values eclass <- eigen(var(x), symmetric = TRUE, only.values = TRUE)$values leg.txt <- c("Robust", "Classical") leg.col <- c("green", "red") leg.pch <- c(1,24) leg.lty <- c("solid", "dotted") eall <- c(erob,eclass) ylim <- c( min(eall), max(eall)) plot(erob, ylim=ylim, ylab="Eigenvalues", xlab="Index", type="n") legend("topright", leg.txt, pch = leg.pch, lty = leg.lty, col = leg.col) lines(erob, type="o", pch= leg.pch[1], lty= leg.lty[1], col=leg.col[1]) lines(eclass, type="o", pch= leg.pch[2], lty= leg.lty[2], col=leg.col[2]) title(main = "Scree plot") } mydistplot <- function(x, cutoff, classic = FALSE, id.n) { ## Index Plot: ## Plot the vector x (robust or mahalanobis distances) against ## the observation indexes. Identify by a label the id.n ## observations with largest value of x. If id.n is not supplied, ## calculate it as the number of observations larger than cutoff. ## Use cutoff to draw a horisontal line. ## Use classic = FALSE/TRUE to choose the label of the vertical axes n <- length(x) if(missing(id.n)) # maybe propagated id.n <- length(which(x > cutoff)) ylab <- paste("Square Root of", if(classic) "Mahalanobis" else "Robust", "distance") plot(x, type = "p", ylab = ylab, xlab = "Index", main = "Distance Plot") label(1:n, x, id.n) abline(h = cutoff) } myddplot <- function(md, rd, cutoff, id.n) { ## Distance-Distance Plot: ## Plot the vector y = rd (robust distances) against ## x = md (mahalanobis distances). Identify by a label the id.n ## observations with largest rd. If id.n is not supplied, calculate ## it as the number of observations larger than cutoff. Use cutoff ## to draw a horisontal and a vertical line. Draw also a dotted line ## with a slope 1. 
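        ## md, rd : classical (Mahalanobis) and robust distances of the same observations;
        ## 'cutoff' defaults to sqrt(qchisq(0.975, p)) in the enclosing covPlot() body (below)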
n <- length(md) if(missing(id.n)) # maybe propagated id.n <- length(which(rd > cutoff)) xlab <- "Mahalanobis distance" ylab <- "Robust distance" plot(md, rd, type = "p", xlab = xlab, ylab = ylab, main = "Distance-Distance Plot") label(md, rd, id.n) abline(0, 1, lty = 2) abline(v = cutoff, h = cutoff) } qqplot <- function(x, p, cutoff = sqrt(qchisq(0.975, p)), classic = FALSE, id.n) { ## Chisquare QQ-Plot: ## Plot the vector x (robust or mahalanobis distances) against ## the square root of the quantiles of the chi-squared distribution ## with p degrees of freedom. ## Identify by a label the id.n observations with largest value of x. ## If id.n is not supplied, calculate it as the number of observations ## larger than cutoff. ## Use classic = FALSE/TRUE to choose the label of the vertical axes ## parameters and preconditions n <- length(x) if(missing(id.n)) # maybe propagated id.n <- length(which(x > cutoff)) qq <- sqrt(qchisq(((1:n)-1/3)/(n+1/3), p)) x <- sort(x, index.return = TRUE) ix <- x$ix x <- x$x ylab <- paste(if(classic) "Mahalanobis" else "Robust", "distance") xlab <- "Square root of the quantiles of the chi-squared distribution" plot(qq, x, xlab = xlab, ylab = ylab, main = "Chisquare QQ-Plot") label(qq, x, id.n, ind = (n-id.n+1):n, labs = ix) abline(0, 1, lty = 2) } ## end{qqplot} label <- function(x, y, id.n, ind = sort.list(y, decreasing = TRUE)[1:id.n], labs = labels.id, adj.x = TRUE) { if(id.n > 0) { ## label the largest 'id.n' y-values labpos <- if(adj.x) label.pos[1+ as.numeric(x > mean(range(x)))] else 3 text(x[ind], y[ind], labs[ind], cex = cex.id, xpd = TRUE, pos = labpos, offset = 0.25) } } ## Begin{covPlot} -- arguments checking of preconditions if(is.data.frame(x)) x <- data.matrix(x) if(!is.matrix(x) || !is.numeric(x)) stop("x is not a numeric dataframe or matrix.") n <- dim(x)[1] p <- dim(x)[2] if(!is.numeric(m.cov$center) || !is.numeric(m.cov$cov)) stop("argument 'm.cov' must have numeric components 'center' and 'cov'") if(length(m.cov$center) != p) stop("Data set and provided center have different dimensions!") ## ?covPlot says it only needs 'cov' and 'center' ## Maybe should be smarter and *test* for non-singularity if(is.numeric(m.cov$crit) && m.cov$crit == 0) stop( "The covariance matrix is singular!") if(is.null(cutoff)) cutoff <- sqrt(qchisq(0.975, p)) ## now "more in line" with plot.lm()'s labeling: if(is.null(labels.id)) labels.id <- as.character(1:n) if(!missing(id.n) && !is.null(id.n)) { id.n <- as.integer(id.n) if(id.n < 0 || id.n > n) stop(sQuote("id.n")," must be in {1,..,",n,"}") } which <- match.arg(which) md <- sqrt(mahalanobis(x, colMeans(x), var(x), tol = tol)) rd <- sqrt(mahalanobis(x, m.cov$center, m.cov$cov, tol = tol)) ## *Never* here : par(mfrow = c(1,1), pty = "m") op <- if (ask) par(ask = TRUE) else list() on.exit(par(op)) if(which == "all" || which == "distance") { if(classic) { opr <- if(prod(par("mfrow")) == 1) par(mfrow = c(1,2), pty = "m") else list() } ## index plot of mahalanobis distances: mydistplot(rd, cutoff, id.n = id.n) if(classic) { ## index plot of robust distances: mydistplot(md, cutoff, classic = TRUE, id.n = id.n) par(opr) } } if(which == "all" || which == "dd") { myddplot(md, rd, cutoff = cutoff, id.n = id.n) # distance-distance plot } if(which == "all" || which == "qqchi2") { if(classic) { opr <- if(prod(par("mfrow")) == 1) par(mfrow = c(1,2), pty = "m") else list() } ## qq-plot of the robust distances versus the ## quantiles of the chi-squared distribution qqplot(rd, p, cutoff = cutoff, id.n = id.n) if(classic) { ## 
qq-plot of the mahalanobis distances qqplot(md, p, cutoff = cutoff, classic = TRUE, id.n = id.n) par(opr) } } if(which == "all" || which == "tolEllipsePlot") { if(p == 2) tolEllipsePlot(x, m.cov = m.cov, cutoff = cutoff, id.n = id.n, classic = classic, tol = tol) else if(which != "all") warning("For tolerance ellipses the dimension 'p' must be 2!") } if(which == "all" || which == "screeplot") { myscreeplot(x, m.cov = m.cov) } } ## end { covPlot } ## ddplot <- function(x,...) { ## covPlot(x, which="dd", ...) ## } ## distplot <- function(x,...) { ## covPlot(x, which="distance", ...) ## } ## chi2qqplot <- function(x,...) { ## covPlot(x, which="qqchi2", ...) ## } ## ellipse() exists in other packages ## ellipse <- function(x,...) { ## covPlot(x, which="tolEllipsePlot", ...) ## } robustbase/R/huberize.R0000644000176200001440000000312514222041416014561 0ustar liggesusers## trimmed mad := trimmed *[M]ean* of [A]bsolute [D]eviations (from the median; more generally 'center') ## = === {no default for trim on purpose} tmad <- function(x, center = median(x), trim, na.rm = FALSE) { stopifnot(is.numeric(trim), length(trim) == 1L, 0 <= trim, trim <= 0.5) if(na.rm) x <- x[!is.na(x)] ## TODO: consistency correction (for non-large) 'n' as a function of trim ## ---- not needed for huberize() though ## n <- length(x) mean(abs(x - center), trim=trim) } ## Estimates mu (optionally, via huberM(.)) and sigma of x ## x: without NA: na.rm=TRUE must have happened ## sets boundaries at M +/- c*sigma ## sets outliers to be equal to lower/upper boundaries huberize <- function(x, M = huberM(x, k=k)$mu, c = k, trim = (5:1)/16, # Lukas Graz' MSc thesis had c(0.25, 0.15, 0.075) k = 1.5, warn0 = getOption("verbose"), saveTrim = TRUE) { stopifnot(is.numeric(M), length(M) == 1, length(trim) >= 1, trim >= 0, diff(trim) < 0) # trim must be strictly decreasing qn. <- Qn(x) j <- 0L while(!is.na(qn.) && qn. == 0 && j < length(trim)) qn. <- tmad(x, center = M, trim = trim[j <- j+1L]) ## ~~~~ if(qn. == 0 && warn0) warning(sprintf("Qn(x) == 0 and tmad(x, trim=%g) == 0", trim[j])) upper <- M + qn.*c # qnorm(c,lower.tail = F) lower <- M - qn.*c x[x > upper] <- upper x[x < lower] <- lower ## store the final 'trim' used (if there was one) as attribute: if(j && saveTrim) structure(x, trim = trim[j]) else x } robustbase/R/rrcov.control.R0000644000176200001440000000430112440116711015554 0ustar liggesusers## rrcov : Scalable Robust Estimators with High Breakdown Point ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. 
## ## You should have received a copy of the GNU General Public License ## along with this program; if not, a copy is available at ## http://www.r-project.org/Licenses/ ## "FIXME": If you change this, you must "sync" with ../man/rrcov.control.Rd ## 1) covMcd()'s default in ./covMcd.R ## 2) ltsReg.default()'s default in ./ltsReg.R ## 3) covComed()s default in ./comedian.R rrcov.control <- function(alpha = 1/2, method = c("covMcd", "covComed", "ltsReg"), nsamp = 500, nmini = 300, kmini = 5, seed = NULL, tolSolve = 1e-14, scalefn = "hrv2012", maxcsteps = 200, trace = FALSE, wgtFUN = "01.original", beta, use.correction = identical(wgtFUN, "01.original"), adjust = FALSE) { method <- match.arg(method) if(missing(beta) || !is.numeric(beta)) beta <- c("covMcd" = 0.975, "ltsReg" = 0.9875, "covComed" = 0.95)[[method]] list(alpha=alpha, nsamp=nsamp, nmini=as.integer(nmini), kmini=as.integer(kmini), seed = as.integer(seed), tolSolve=tolSolve, scalefn=scalefn, maxcsteps=as.integer(maxcsteps), trace=trace, wgtFUN=wgtFUN, beta=beta, use.correction=use.correction, adjust=adjust) } ## allow direct fast access: .scalefn.default <- eval(formals(rrcov.control)$scalefn) ## Only for back compatibility, as some new args did not exist pre 2013-04, ## and callers of ltsReg() / covMcd() may use a "too small" 'control' list: getDefCtrl <- function(nm, defCtrl = rrcov.control()) { callerEnv <- parent.frame() if(is.null(get(nm, envir = callerEnv))) assign(nm, defCtrl[[nm]], envir=callerEnv) } robustbase/R/adjbox.R0000644000176200001440000000735513762451710014236 0ustar liggesusers#### Skewness (MC) - Adjusted Boxplots ### modeled closely after boxplot() etc in R/src/library/graphics/R/boxplot.R : adjbox <- function(x, ...) UseMethod("adjbox") adjbox.default <- function (x, ..., range = 1.5, doReflect=FALSE, width = NULL, varwidth = FALSE, notch = FALSE, outline = TRUE, names, plot = TRUE, border = par("fg"), col = NULL, log = "", pars = list(boxwex = 0.8, staplewex = 0.5, outwex = 0.5), horizontal = FALSE, add = FALSE, at = NULL) { args <- list(x, ...) 
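    ## Split the arguments: named entries of '...' are graphical parameters (passed on to
    ## bxp() below), unnamed ones are taken as further groups of data to be boxplotted.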
namedargs <- if(!is.null(attributes(args)$names)) attributes(args)$names != "" else logical(length(args))# all FALSE ## pars <- c(args[namedargs], pars) groups <- if(is.list(x)) x else args[!namedargs] if(0 == (n <- length(groups))) stop("invalid first argument") if(length(class(groups))) groups <- unclass(groups) if(missing(names)) names <- 1:n names(groups) <- names cls <- sapply(groups, function(x) class(x)[1]) cl <- if(all(cls == cls[1])) cls[1] # else NULL for (i in 1:n) groups[i] <- list(adjboxStats(unclass(groups[[i]]), coef=range, doReflect=doReflect)) # do.conf=notch) stats <- matrix(0, nrow=5, ncol=n) conf <- fence <- matrix(0, nrow=2, ncol=n) ng <- out <- group <- numeric(0) ct <- 1 for(i in groups) { stats[,ct] <- i$stats conf [,ct] <- i$conf fence[,ct] <- i$fence ng <- c(ng, i$n) if((lo <- length(i$out))) { out <- c(out,i$out) group <- c(group, rep.int(ct, lo)) } ct <- ct+1 } if(length(cl) && cl != "numeric") oldClass(stats) <- cl z <- list(stats = stats, n = ng, conf = conf, fence = fence, out = out, group = group, names = names) if(plot) { if(is.null(pars$boxfill) && is.null(args$boxfill)) pars$boxfill <- col do.call("bxp", c(list(z, notch = notch, width = width, varwidth = varwidth, log = log, border = border, pars = pars, outline = outline, horizontal = horizontal, add = add, at = at), args[namedargs])) invisible(z) } else z } adjbox.formula <- function (formula, data = NULL, ..., subset, na.action = NULL) { if(missing(formula) || (length(formula) != 3)) stop("'formula' missing or incorrect") m <- match.call(expand.dots = FALSE) if(is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... <- NULL m$na.action <- na.action # force use of default for this method ## require(stats, quietly = TRUE): model.frame m[[1]] <- as.name("model.frame") mf <- eval(m, parent.frame()) response <- attr(attr(mf, "terms"), "response") adjbox(split(mf[[response]], mf[-response]), ...) } ## modeled after boxplot.stats() from R/src/library/grDevices/R/calc.R : adjboxStats <- function(x, coef = 1.5, a = -4, b = 3, do.conf = TRUE, do.out = TRUE, ...) 
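## adjboxStats(): skewness-adjusted boxplot statistics (Hubert and Vandervieren (2008)):
## with mc = mc(x), the medcouple, the outlier fence is
##    [ Q1 - coef * exp(a*mc) * IQR ,  Q3 + coef * exp(b*mc) * IQR ]    if mc >= 0,
## and with (a, b) replaced by (-b, -a) if mc < 0;  defaults a = -4, b = 3.
## (illustration only, e.g. for right-skewed data:  adjboxStats(rexp(100))$fence )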
{ if(coef < 0) stop("'coef' must not be negative") nna <- !is.na(x) n <- sum(nna)# including +/- Inf stats <- fivenum(x, na.rm = TRUE) iqr <- diff(stats[c(2, 4)]) fence <- rep(NA_real_, 2) if(coef == 0) do.out <- FALSE # no whiskers to be drawn else { ## coef > 0 out <- if (!is.na(iqr)) { medc <- mc(x, ..., na.rm = TRUE) fence <- if (medc >= 0) c(stats[2] - coef * exp(a * medc) * iqr, stats[4] + coef * exp(b * medc) * iqr) else c(stats[2] - coef * exp(-b * medc) * iqr, stats[4] + coef * exp(-a * medc) * iqr) x < fence[1] | fence[2] < x } else !is.finite(x) if (any(out[nna], na.rm = TRUE)) stats[c(1, 5)] <- range(x[!out], na.rm = TRUE) } conf <- if (do.conf) stats[3] + c(-1.58, 1.58) * iqr/sqrt(n) list(stats = stats, n = n, conf = conf, fence = fence, out = if (do.out) x[out & nna] else numeric(0)) } robustbase/R/nlregrob.R0000644000176200001440000004760313266661623014606 0ustar liggesusers#### nlrob.() functions for high breakdown point nlrob() methods ## concept (and original version) from lme4/R/lmer.R getOptfun <- function(optimizer, needArgs = c("fn","par","lower","control")) { if (((is.character(optimizer) && optimizer=="optimx") || deparse(substitute(optimizer))=="optimx") && !("package:optimx") %in% search()) stop(shQuote("optimx")," package must be loaded in order to ", "use ",shQuote('optimizer="optimx"')) optfun <- if (is.character(optimizer)) tryCatch(get(optimizer), error=function(e) NULL) else optimizer if (is.null(optfun)) stop("couldn't find optimizer function ",optimizer ) if (!is.function(optfun)) stop("non-function specified as optimizer") if (any(is.na(match(needArgs, names(formals(optfun)))))) stop("optimizer function must use (at least) formal parameters ", pasteK(sQuote(needArgs))) optfun } ##' Utility for all nlrob.(): Find how and where to get parameter ##' names from, also check lower, upper, and replicate if needed. ##' ##' @param lower possibly unnamed numeric vector ##' @param upper as \code{lower}; both will be replicated to ##' \code{length(pnames)} if that is specified and longer. ##' @param var.nms character vector of which 'pnames' must be a subset of. ##' @param envir if not missing and an \code{\link{environment}: possibly assign ##' 'lower', 'upper' of full length in the environment \code{envir}. .fixupArgs <- function(lower, upper, var.nms, envir) { if(is.null(pnames <- names(lower))) pnames <- names(upper) if(is.null(pnames)) stop("Provide 'upper' or 'lower' with names()") if(any(is.na(match(pnames, var.nms)))) stop("parameter names must appear in 'formula'") hasE <- !missing(envir) && is.environment(envir) npar <- length(pnames) if (npar > 1 && length(lower) == 1) { if(hasE) envir$lower <- rep.int(lower, npar) } else if (length(lower) != npar) stop(gettextf("lower must be either of length %d, or length 1", npar)) if (npar > 1 && length(upper) == 1) { if(hasE) envir$upper <- rep.int(upper, npar) } else if (length(upper) != npar) stop(gettextf("upper must be either of length %d, or length 1", npar)) stopifnot(is.numeric(lower), is.numeric(upper), lower <= upper) pnames } nlrob.MM <- function(formula, data, lower, upper, tol = 1e-6, psi = c("bisquare", "lqq", "optimal", "hampel"), init = c("S", "lts"), ctrl = nlrob.control("MM", psi=psi, init=init, fnscale=NULL, tuning.chi.scale = .psi.conv.cc(psi, .Mchi.tuning.defaults[[psi]]), tuning.psi.M = .psi.conv.cc(psi, .Mpsi.tuning.defaults[[psi]]), optim.control = list(), optArgs = list(...)), ...) 
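## nlrob.MM(): MM-type estimator for the nonlinear regression model --
##  (1) a high-breakdown initial fit: either init = "S" (minimize an M-scale of the residuals)
##      or init = "lts" (minimize the sum of the h smallest squared residuals), both found by
##      the global optimizer JDEoptim();
##  (2) an M-step: minimize sum( rho2((y - yhat)/sigma) ) by optim(method = "L-BFGS-B"),
##      keeping the robust scale 'sigma' from the initial fit fixed.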
{ if(missing(ctrl)) { init <- match.arg(init) psi <- match.arg(psi) force(ctrl) # } else { init <- ctrl$ init psi <- ctrl$ psi } c1 <- ctrl$tuning.chi.scale c2 <- ctrl$tuning.psi.M if(is.character(ctrl$optimizer)) { ### TODO } else if(is.function(ctrl$optimizer)) { ### TODO } else stop(gettextf("'%s' must be character string or function, but is \"%s\"", "ctrl$optimizer", class(ctrl$optimizer)), domain=NA) ## Preliminary psi-specific checks / computations: switch(psi, "lqq" = { # lqqMax = rho(Inf), used in rho.inv() *and* 'constant': c12 <- c1[1]+c1[2] lqqMax <- (c1[1]*c1[3] - 2*c12)/(1-c1[3]) + c12}) rho1 <- function(t) Mchi(t, c1, psi) rho2 <- function(t) Mchi(t, c2, psi) rho.inv <- switch(psi, "bisquare" = function(y) { ## Find x := u^2 which solves cubic eq. 3*x - 3*x^2 + x^3 = y ## <==> (x-1)^3 + 1 = y <==> (1-x)^3 = 1-y <==> x = 1 - (1-y)^(1/3) ## (where we assume 0 <= y <= 1, i.e, y-1 < 0) c1 * sqrt(1 - (1 - y)^(1/3)) }, "lqq" = function(y) { uniroot( function(x) rho1(x) - y, lower = 0, upper = lqqMax )$root }, "optimal" = function(y) { ## Salibian-Barrera, Matias, Willems, Gert, and Zamar, Ruben (2008). ## The fast-tau estimator for regression. ## Journal of Computational and Graphical Statistics 17, 659-682. sqrt(y/1.38) * c1 * 3 }, "hampel" = function(y) { C <- MrhoInf(c1, psi) a <- c1[1]; b <- c1[2]; r <- c1[3] if (y <= a/C) sqrt(2*C*y) else if (y <= (2*b - a)/C) 0.5*a + C/a*y else r + sqrt( r^2 - ( (r - b)*(2*C/a*y + (b - a)) - b*r ) ) }, stop(gettextf("Psi function '%s' not supported yet", psi))) M_scale <- function(sigma, u) sum( rho1(u/sigma) )/nobs - 0.5 objective.initial <- switch(init, "lts" = function(par) { ## and (h, formula, data, pnames) y.hat <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) sum(sort.int( (y - y.hat)^2, partial = h )[1:h]) }, "S" = function(par) { ## and (constant, formula, data, pnames) y.hat <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) res <- y - y.hat ## Rousseeuw, Peter J., and Leroy, Annick M. (1987). ## Robust Regression & Outlier Detection. ## John Wiley & Sons, New York, p. 137. med_abs_res <- median(abs(res)) uniroot(M_scale, lower = constant[1L] * med_abs_res, upper = constant[2L] * med_abs_res, u = res )$ root ## == 'sigma' }, stop(gettextf("Initialization 'init = \"%s\"' not supported (yet)", init))) objective.M <- function(par, sigma) { y.hat <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) sum(rho2( (y - y.hat)/sigma )) } ## => psi(.) / wgt(.) for robustness weights is ## Mpsi(x, c2, psi) or Mwgt(x, c2, psi) formula <- as.formula(formula) dataName <- substitute(data) varNames <- all.vars(formula) obsNames <- rownames(data <- as.data.frame(data)) data <- as.list(data)# to be used as such if (length(formula) == 2L) { ## as nls formula[[3L]] <- formula[[2L]] formula[[2L]] <- 0 } npar <- length(pnames <- .fixupArgs(lower, upper, varNames, environment())) ## ^^^^^^^^^ -> possibly changes (lower, upper) in envir. y <- eval(formula[[2L]], data) nobs <- length(y) stopifnot(nobs >= npar) if (is.null(fnscale <- ctrl$ fnscale)) fnscale <- sum((y - mean(y))^2) ctrl$fnscale <- NULL # remove it there stopifnot(is.numeric(fnscale), fnscale > 0) ## is used in M_scale() in any case, and in init-estim. 
if "S" constant <- c( switch(psi, bisquare = 1/c1, lqq = 1/lqqMax, optimal = 1/c1 * 1/3, hampel = 1/c1[3]), if(nobs %% 2) 2/rho.inv(2/(nobs+2)) else 1/rho.inv(1/(nobs+1))) switch(init, lts = h <- (nobs + npar + 1)%/%2) ## FIXME: "optimizer": initial <- do.call(JDEoptim, c(list(lower, upper, objective.initial, tol=tol, fnscale=fnscale), ctrl$optArgs)) names(initial$par) <- pnames res <- y - eval( formula[[3L]], c(data, initial$par) ) med_abs_res <- median(abs(res)) sigma <- uniroot( M_scale, lower = constant[1L] * med_abs_res, upper = constant[2L] * med_abs_res, u = res )$root M <- optim(initial$par, objective.M, sigma = sigma, method = "L-BFGS-B", lower = lower, upper = upper, control = c(list(fnscale = initial$value, parscale = initial$par), ctrl$optim.control), hessian = TRUE) ## 'hessian': experimental - FIXME: eliminate if unused coef <- setNames(M$par, pnames) status <- if (M$convergence == 0) "converged" else if (M$convergence == 1) "maximum number of iterations reached without convergence" else M$message fit <- eval( formula[[3L]], c(data, coef) ) names(fit) <- obsNames structure(list(call = match.call(), formula=formula, nobs=nobs, coefficients = coef, fitted.values = fit, residuals = y - fit, crit = M$value, initial = initial, Scale = sigma, status = status, counts = M$counts, data = dataName, hessian = M$hessian, ctrl=ctrl), class = "nlrob") } ## nlrob.MM nlrob.tau <- function(formula, data, lower, upper, tol = 1e-6, psi = c("bisquare", "optimal"), ctrl = nlrob.control("tau", psi=psi, fnscale=NULL, tuning.chi.scale = NULL, tuning.chi.tau = NULL, optArgs = list(...)), ...) { if(missing(ctrl)) { psi <- match.arg(psi) force(ctrl) # } else { psi <- ctrl$ psi } if(is.null(.chi.s <- ctrl$tuning.chi.scale)) .chi.s <- switch(psi, bisquare = list(b = 0.20, cc = 1.55), optimal = list(b = 0.5, cc = 0.405)) if(is.null(.chi.t <- ctrl$tuning.chi.tau)) .chi.t <- switch(psi, bisquare = list(b = 0.46, cc = 6.04), optimal = list(b = 0.128, cc = 1.060)) b1 <- .chi.s$b c1 <- .chi.s$cc b2 <- .chi.t$b c2 <- .chi.t$cc ## Preliminary psi-specific checks / computations: switch(psi, "bisquare" = { b1 <- b1/MrhoInf(c1, psi) b2 <- b2/MrhoInf(c2, psi) }) rho1 <- function(t) Mchi(t, c1, psi) rho2 <- function(t) Mchi(t, c2, psi) rho.inv <- switch(psi, "bisquare" = function(y) { c1 * sqrt(1 - (1 - y)^(1/3)) }, "optimal" = function(y) { ## Salibian-Barrera, Matias, Willems, Gert, and Zamar, Ruben (2008). ## The fast-tau estimator for regression. ## Journal of Computational and Graphical Statistics 17, 659-682. sqrt(y/1.38) * c1 * 3 }) M_scale <- function(sigma, u) sum( rho1(u/sigma) )/nobs - b1 tau_scale2 <- function(u, sigma) sigma^2 * 1/b2*sum( rho2(u/sigma) )/nobs objective <- function(par) { fit <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) res <- y - fit ## Rousseeuw, Peter J., and Leroy, Annick M. (1987). ## Robust Regression & Outlier Detection. ## John Wiley & Sons, New York, p. 137. med_abs_res <- median(abs(res)) sigma <- uniroot( M_scale, lower = constant[1L] * med_abs_res, upper = constant[2L] * med_abs_res, u = res )$root tau_scale2(res, sigma) } formula <- as.formula(formula) dataName <- substitute(data) varNames <- all.vars(formula) obsNames <- rownames(data <- as.data.frame(data)) data <- as.list(data)# to be used as such if (length(formula) == 2L) { ## as nls formula[[3L]] <- formula[[2L]] formula[[2L]] <- 0 } npar <- length(pnames <- .fixupArgs(lower, upper, varNames, environment())) ## ^^^^^^^^^ -> possibly changes (lower, upper) in envir. 
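    ## nlrob.tau(): tau-estimator (cf. Yohai and Zamar; see also the fast-tau reference above) --
    ## minimize the tau-scale   tau^2 = sigma^2 * mean(rho2(r/sigma)) / b2   over the parameters,
    ## where sigma = sigma(r) is the M-scale of the residuals defined by  mean(rho1(r/sigma)) = b1 .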
y <- eval(formula[[2L]], data) nobs <- length(y) stopifnot(nobs >= npar) if (is.null(fnscale <- ctrl$ fnscale)) fnscale <- mean((y - mean(y))^2) ctrl$fnscale <- NULL # remove it there stopifnot(is.numeric(fnscale), fnscale > 0) constant <- c( switch(psi, bisquare = 1/c1, optimal = 1/c1 * 1/3), if (nobs %% 2) 2/rho.inv(2/(nobs+2)) else 1/rho.inv(1/(nobs+1))) optRes <- do.call(JDEoptim, c(list(lower, upper, objective, tol=tol, fnscale=fnscale), ctrl$optArgs)) iter <- optRes$iter status <- if (optRes$convergence == 0) "converged" else paste("failed to converge in", iter, "steps") coef <- setNames(optRes$par, pnames) fit <- eval( formula[[3L]], c(data, coef) ) names(fit) <- obsNames structure(list(call = match.call(), formula=formula, nobs=nobs, coefficients = coef, fitted.values = fit, residuals = y - fit, crit = optRes$value, Scale = sqrt(optRes$value), status = status, iter = iter, data = dataName, ctrl=ctrl), class = "nlrob") } ## nlrob.tau nlrob.CM <- function(formula, data, lower, upper, tol = 1e-6, psi = c("bisquare", "lqq", "welsh", "optimal", "hampel", "ggw"), ctrl = nlrob.control("CM", psi=psi, fnscale=NULL, tuning.chi = NULL, optArgs = list(...)), ...) { if(missing(ctrl)) { psi <- match.arg(psi) force(ctrl) # } else { psi <- ctrl$ psi } if (is.null(t.chi <- ctrl$tuning.chi)) t.chi <- switch(psi, bisquare = list(b = 0.5, cc = 1, c = 4.835), stop("unable to find constants for psi function")) ## FIXME: b <- t.chi$b ## b = epsilon (in paper) = fraction of outlier ~= breakdown cc <- t.chi$cc ## cc = k; make c <- t.chi$c ## c = the factor in objective c*rho(.) - log(sigma) rho <- function(t) Mchi(t, cc, psi) M_scale <- function(sigma, u) sum( rho(u/sigma) )/nobs - b formula <- as.formula(formula) dataName <- substitute(data) varNames <- all.vars(formula) obsNames <- rownames(data <- as.data.frame(data)) data <- as.list(data)# to be used as such if (length(formula) == 2L) { ## as nls formula[[3L]] <- formula[[2L]] formula[[2L]] <- 0 } npar <- length(pnames <- .fixupArgs(lower,upper, c(varNames,"sigma"),environment())) ## ^^^^^^^^^ -> possibly changes (lower, upper) in envir. 
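    ## nlrob.CM(): "constrained M"-estimator -- when "sigma" is among the parameter names,
    ## minimize   c * mean(rho((y - fit)/sigma)) + log(sigma)   jointly in (theta, sigma),
    ## subject to the M-scale constraint  mean(rho((y - fit)/sigma)) <= b  (the 'constr' of JDEoptim).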
if ("sigma" %in% pnames) { if ("sigma" %in% varNames || "sigma" %in% names(data)) stop("As \"sigma\" is in 'pnames', do not use it as variable or parameter name in 'formula'") stopifnot(lower[pnames == "sigma"] >= 0) objective <- function(par) { par <- setNames(par, pnames) fit <- eval( formula[[3L]], c(data, par) ) sigma <- par[["sigma"]] c * sum(rho( (y - fit)/sigma ))/nobs + log(sigma) } con <- function(par) { par <- setNames(par, pnames) fit <- eval( formula[[3L]], c(data, par) ) M_scale(par[["sigma"]], y - fit) } } else { ## hmm, this case *really* is not CM properly objective <- function(par) { fit <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) resid <- y - fit sigma <- mad(resid) c * sum(rho( resid/sigma ))/nobs + log(sigma) } con <- NULL } y <- eval(formula[[2L]], data) nobs <- length(y) stopifnot(nobs >= npar) if (is.null(fnscale <- ctrl$ fnscale)) fnscale <- mean((y - mean(y))^2) ctrl$fnscale <- NULL # remove it there stopifnot(is.numeric(fnscale), fnscale > 0) optRes <- do.call(JDEoptim, c(list(lower, upper, objective, constr=con, tol=tol, fnscale=fnscale), ctrl$optArgs)) iter <- optRes$iter status <- if (optRes$convergence == 0) "converged" else paste("failed to converge in", iter, "steps") coef <- setNames(optRes$par, pnames) fit <- eval( formula[[3L]], c(data, coef) ) names(fit) <- obsNames structure(list(call = match.call(), formula=formula, nobs=nobs, coefficients = coef, fitted.values = fit, residuals = y - fit, crit = optRes$value, status = status, iter = iter, data = dataName, ctrl=ctrl), class = "nlrob") } ## nlrob.CM nlrob.mtl <- function(formula, data, lower, upper, tol = 1e-6, ctrl = nlrob.control("mtl", cutoff = 2.5, optArgs = list(...)), ...) { stopifnot(is.numeric(cutoff <- ctrl[["cutoff"]]), length(cutoff) >= 1) trim <- function(t) { # t = residuals Res, or Res / sigma t <- sort.int(t) i <- which(t >= cutoff) h <- if (length(i) > 0) max(hlow, floor(min( (i - 1)/(2*pnorm(t[i]) - 1) ))) else nobs list(h = h, t = t) } formula <- as.formula(formula) dataName <- substitute(data) varNames <- all.vars(formula) obsNames <- rownames(data <- as.data.frame(data)) data <- as.list(data)# to be used as such if (length(formula) == 2L) { ## as nls formula[[3L]] <- formula[[2L]] formula[[2L]] <- 0 } npar <- length(pnames <- .fixupArgs(lower,upper, c(varNames,"sigma"), environment())) ## ^^^^^^^^^ -> possibly changes (lower, upper) in envir. constant <- log(2*pi) if ("sigma" %in% pnames) { if ("sigma" %in% varNames || "sigma" %in% names(data)) stop("As \"sigma\" is in 'pnames', do not use it as variable or parameter name in 'formula'") stopifnot(lower[pnames == "sigma"] >= 0) objective <- function(par) { par <- setNames(par, pnames) fit <- eval( formula[[3L]], c(data, par) ) sigma <- par[["sigma"]] tp <- trim( abs( (y - fit)/sigma ) ) h <- tp$h h*(constant + 2*log(sigma)) + sum(tp$t[1L:h]^2) } } else { ## hmm... 
this is not really MTL objective <- function(par) { fit <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) resid <- y - fit sigma <- mad(resid) tp <- trim( abs(resid/sigma) ) h <- tp$h h*(constant + 2*log(sigma)) + sum(tp$t[1L:h]^2) } } y <- eval(formula[[2L]], data) nobs <- length(y) stopifnot(nobs >= npar) if (is.null(fnscale <- ctrl$ fnscale)) fnscale <- sum((y - mean(y))^2) ctrl$fnscale <- NULL # remove it there stopifnot(is.numeric(fnscale), fnscale > 0) hlow <- (nobs + npar + 1)%/%2 optRes <- do.call(JDEoptim, c(list(lower, upper, objective, tol=tol, fnscale=fnscale), ctrl$optArgs)) coef <- setNames(optRes$par, pnames) crit <- optRes$value iter <- optRes$iter status <- if (optRes$convergence == 0) "converged" else paste("failed to converge in", iter, "steps") fit <- eval( formula[[3L]], c(data, coef) ) names(fit) <- obsNames resid <- y - fit quan <- trim( resid/(if ("sigma" %in% pnames) coef["sigma"] else mad(resid)) )$h structure(list(call = match.call(), formula=formula, nobs=nobs, coefficients = coef, fitted.values = fit, residuals = resid, crit = crit, quan = quan, status = status, iter = iter, data = dataName, ctrl = ctrl), class = "nlrob") } ## nlrob.mtl nlrob.control <- function(method, psi = c("bisquare", "lqq", "welsh", "optimal", "hampel", "ggw"), init = c("S", "lts"), optimizer = "JDEoptim", optArgs = list(), ...) { psi <- match.arg(psi) init <- match.arg(init) dots <- list(...) argNms <- names(dots) ##' argument or default -> return list of length 1 a. <- function(nm,def) { L <- list( if(nm %in% argNms) dots[[nm]] else def ) names(L) <- nm L } switch(method, "M" = { list(method = method) # not yet used }, "MM" = { c(list(method = method, init = init, psi = psi), a.("fnscale", NULL), a.("tuning.chi.scale", .psi.conv.cc(psi, .Mchi.tuning.defaults[[psi]])), a.("tuning.psi.M", .psi.conv.cc(psi, .Mpsi.tuning.defaults[[psi]])), a.("optim.control", list()), list(optimizer = optimizer, optArgs = optArgs)) }, "tau" = { c(list(method = method, psi = psi), a.("fnscale", NULL), a.("tuning.chi.scale", NULL), a.("tuning.chi.tau", NULL), list(optimizer = optimizer, optArgs = optArgs)) }, "CM" = { c(list(method = method, psi = psi), a.("fnscale", NULL), a.("tuning.chi", NULL), list(optimizer = optimizer, optArgs = optArgs)) }, "mtl" = { c(list(method = method), a.("fnscale", NULL), a.("cutoff", 2.5), list(optimizer = optimizer, optArgs = optArgs)) }, stop("Method ", method, "not correctly supported yet")) } robustbase/R/mc.R0000644000176200001440000000727114233260125013353 0ustar liggesusers ## Left Medcouple lmc <- function(x, mx = median(x, na.rm=na.rm), na.rm = FALSE, doReflect = FALSE, ...) { -mc(x[x <= mx], na.rm=na.rm, doReflect=doReflect, ...) } ## Right Medcouple rmc <- function(x, mx = median(x, na.rm=na.rm), na.rm = FALSE, doReflect = FALSE, ...) { mc(x[x >= mx], na.rm=na.rm, doReflect=doReflect, ...) } .optEnv$mc_doScale_msg <- TRUE # initially mc <- function(x, na.rm = FALSE, doReflect = (length(x) <= 100) , doScale = FALSE # was hardwired=TRUE (till 2018-07); then default=TRUE (till 2022-03) , c.huberize = 1e11 # was implicitly = Inf originally , eps1 = 1e-14, eps2 = 1e-15 # << new in 0.93-2 (2018-07..) 
, maxit = 100, trace.lev = 0 , full.result = FALSE ) { x <- as.numeric(x) ina <- is.na(x) if (na.rm) x <- x[!ina] else if (any(ina)) return(NA_real_) ## ==> x is NA-free from here on stopifnot(length(c.huberize) == 1L, c.huberize >= 0) ## For robustbase 0.95-0 (March 2022); drop the message eventually: if(missing(doScale) && .optEnv$mc_doScale_msg && !getOption("mc_doScale_quiet", FALSE)) { message("The default of 'doScale' is FALSE now for stability;\n ", "set options(mc_doScale_quiet=TRUE) to suppress this (once per session) message") .optEnv$mc_doScale_msg <- FALSE } if(is.finite(c.huberize)) x <- huberize(x, c=c.huberize, warn0 = trace.lev > 0, saveTrim=FALSE) rr <- mcComp(x, doReflect, doScale=doScale, eps1=eps1, eps2=eps2, maxit=maxit, trace.lev=trace.lev) if(!(conv1 <- rr[["converged"]]) | (doReflect && !(conv2 <- rr[["converged2"]]))) { stop("mc(): not 'converged' ", if(!conv1) paste("in", rr[["iter"]], "iterations"), if(doReflect && !conv2) paste(if(!conv1)" *and*", "'reflect part' in", rr[["iter2"]], "iterations"), "; try enlarging eps1, eps2 !?\n") } m <- if (doReflect) (rr[["medc"]] - rr[["medc2"]]) / 2 else rr[["medc"]] structure(m, mcComp = if(full.result) rr) } ## eps1 = 1e-13, eps2 = eps1 <==> original code which only had 'eps = 1e-13' ## hardcoded in C code. ## These defaults do *not* make sense here, but in mc(). ## However, currently they are used in ../tests/mc-etc.R mcComp <- function(x, doReflect, doScale, eps1, eps2, maxit = 1000, trace.lev = 1) { stopifnot(is.logical(doReflect), length(doReflect) == 1L, !is.na(doReflect), is.logical(doScale), length(doScale) == 1L, !is.na(doScale), is.1num(eps1), eps1 >= 0, is.1num(eps2), eps2 >= 0, length(maxit <- as.integer(maxit)) == 1, length(trace.lev <- as.integer(trace.lev)) == 1 ) ## Assumption [from caller, = mc()]: 'x' has no NAs (but can have +-Inf) x <- as.numeric(x) n <- as.integer(length(x)) eps <- as.double(c(eps1, eps2)) c.iter <- c(maxit, trace.lev) ## NAOK=TRUE: to allow +/- Inf to be passed ans <- .C(mc_C, x, n, eps = eps, iter = c.iter, medc = double(1) , doScale = doScale , NAOK=TRUE)[c("medc", "eps", "iter")] it <- ans[["iter"]] ans[["converged"]] <- it[2] == 1 ans[["iter"]] <- it[1] if (doReflect) { ## also compute on reflected data a2 <- .C(mc_C, -x, n, eps2 = eps, iter2 = c.iter, medc2 = double(1) , doScale = doScale , NAOK=TRUE)[c("medc2", "iter2", "doScale")] it <- a2[["iter2"]] a2[["converged2"]] <- it[2] == 1 a2[["iter2"]] <- it[1] c(ans, a2) } else ans } robustbase/R/lmrobPredict.R0000644000176200001440000001667412221620231015402 0ustar liggesusers# File .../lmrobPredict.R # Part of the R package 'robustbase', http://www.R-project.org # Based on predict.lm (cf. src/library/stats/R/lm.R) # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # A copy of the GNU General Public License is available at # http://www.r-project.org/Licenses/ # Note that '# *rob' indicate adjustment for the robust case predict.lmrob <- function(object, newdata, se.fit = FALSE, scale = NULL, df = NULL, # *rob interval = c("none", "confidence", "prediction"), level = .95, type = c("response", "terms"), terms = NULL, na.action = na.pass, pred.var = res.var/weights, weights = 1, ...) { tt <- terms(object) if(!inherits(object, "lmrob") && !inherits(object, "glmrob")) # *rob warning("calling predict.lm() ...") # *rob if(missing(newdata) || is.null(newdata)) { mm <- X <- model.matrix.lm(object) mmDone <- TRUE offset <- object$offset } else { Terms <- delete.response(tt) m <- model.frame(Terms, newdata, na.action = na.action, xlev = object$xlevels) if(!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m) X <- model.matrix(Terms, m, contrasts.arg = object$contrasts) offset <- rep.int(0, nrow(X)) if (!is.null(off.num <- attr(tt, "offset"))) for(i in off.num) offset <- offset + eval(attr(tt, "variables")[[i+1]], newdata) if (!is.null(object$call$offset)) offset <- offset + eval(object$call$offset, newdata) mmDone <- FALSE } n <- length(object$residuals) # NROW(qr(object)$qr) p <- object$rank if(is.null(p)) { # *rob df <- Inf p <- sum(!is.na(coef(object))) piv <- seq_len(p) } else { p1 <- seq_len(p) piv <- if(p) qr(object)$pivot[p1] } if(p < ncol(X) && !(missing(newdata) || is.null(newdata))) warning("prediction from a rank-deficient fit may be misleading") beta <- object$coefficients X.piv <- X[, piv, drop = FALSE] predictor <- drop(X.piv %*% beta[piv]) if (!is.null(offset)) predictor <- predictor + offset interval <- match.arg(interval) if (interval == "prediction") { if (missing(newdata)) { # *rob: this and next if statement are combined warning("Predictions on current data refer to _future_ responses") if (missing(weights)) { w <- weights(object) # *rob if (!is.null(w)) { weights <- w warning("Assuming prediction variance inversely proportional to weights used for fitting") } } } if (!missing(newdata) && missing(weights) && !is.null(object$weights) && missing(pred.var)) warning("Assuming constant prediction variance even though model fit is weighted") if (inherits(weights, "formula")){ if (length(weights) != 2L) stop("'weights' as formula should be one-sided") d <- if(missing(newdata) || is.null(newdata)) model.frame(object) else newdata weights <- eval(weights[[2L]], d, environment(weights)) } }## "prediction" interval type <- match.arg(type) if(se.fit || interval != "none") {# *rob: whole 'then' statement is different df <- object$df.residual res.var <- if (is.null(scale)) object$s^2 else scale^2 ip <- if(type != "terms") diag(X.piv %*% object$cov %*% t(X.piv)) else rep.int(0, n) } if (type == "terms") { ## type == "terms" ------------ if(!mmDone){ mm <- model.matrix.lm(object) # *rob: call of model.matrix.lm # instead of model.matrix mmDone <- TRUE } aa <- attr(mm, "assign") ll <- attr(tt, "term.labels") hasintercept <- attr(tt, "intercept") > 0L if (hasintercept) ll <- c("(Intercept)", ll) aaa <- factor(aa, labels = ll) asgn <- split(order(aa), aaa) if (hasintercept) { asgn$"(Intercept)" <- NULL if(!mmDone){ mm <- model.matrix.lm(object) # *rob: call of model.matrix.lm # instead of model.matrix mmDone <- TRUE } avx <- colMeans(mm) termsconst <- sum(avx[piv] * beta[piv]) } nterms <- length(asgn) if(nterms > 0) { predictor <- matrix(ncol = nterms, nrow = NROW(X)) dimnames(predictor) <- list(rownames(X), names(asgn)) if (se.fit || interval != 
"none") { ip <- predictor # *rob: just this assignment is needed } if(hasintercept) X <- sweep(X, 2L, avx, check.margin=FALSE) unpiv <- rep.int(0L, NCOL(X)) unpiv[piv] <- p1 for (i in seq.int(1L, nterms, length.out = nterms)) { iipiv <- asgn[[i]] # Columns of X, ith term ii <- unpiv[iipiv] # Corresponding rows of cov iipiv[ii == 0L] <- 0L predictor[, i] <- if(any(iipiv > 0L)) X[, iipiv, drop = FALSE] %*% beta[iipiv] else 0 if (se.fit || interval != "none"){ ip[, i] <- if(any(iipiv > 0L)){# *rob: next steps modified h.X <- X[, iipiv, drop = FALSE] diag(h.X %*% object$cov[ii, ii] %*% t(h.X)) } else 0 } } if (!is.null(terms)) { predictor <- predictor[, terms, drop = FALSE] if (se.fit) ip <- ip[, terms, drop = FALSE] } } else { # no terms predictor <- ip <- matrix(0, n, 0L) } attr(predictor, 'constant') <- if (hasintercept) termsconst else 0 } ### Now construct elements of the list that will be returned if(interval != "none") { tfrac <- qt((1 - level)/2, df) hwid <- tfrac * switch(interval, confidence = sqrt(ip), prediction = sqrt(ip+pred.var) ) if(type != "terms") { predictor <- cbind(predictor, predictor + hwid %o% c(1, -1)) colnames(predictor) <- c("fit", "lwr", "upr") } else { if (!is.null(terms)) hwid <- hwid[, terms, drop = FALSE] lwr <- predictor + hwid upr <- predictor - hwid } } if(se.fit || interval != "none") { se <- sqrt(ip) if (type == "terms" && !is.null(terms)) se <- se[, terms, drop = FALSE] } if(missing(newdata) && !is.null(na.act <- object$na.action)) { predictor <- napredict(na.act, predictor) if(se.fit) se <- napredict(na.act, se) } if(type == "terms" && interval != "none") { if(missing(newdata) && !is.null(na.act)) { lwr <- napredict(na.act, lwr) upr <- napredict(na.act, upr) } list(fit = predictor, se.fit = se, lwr = lwr, upr = upr, df = df, residual.scale = sqrt(res.var)) } else if (se.fit) list(fit = predictor, se.fit = se, df = df, residual.scale = sqrt(res.var)) else predictor } robustbase/R/huber.R0000644000176200001440000000606714222041416014061 0ustar liggesusers ## A modified "safe" (and more general) Huber estimator: huberM <- function(x, k = 1.5, weights = NULL, tol = 1e-06, mu = if(is.null(weights)) median(x) else wgt.himedian(x, weights), s = if(is.null(weights)) mad(x, center=mu) else wgt.himedian(abs(x - mu), weights), se = FALSE, warn0scale = getOption("verbose")) { ## Author: Martin Maechler, Date: 6 Jan 2003, ff ## implicit 'na.rm = TRUE': if(any(i <- is.na(x))) { x <- x[!i] if(!is.null(weights)) weights <- weights[!i] } n <- length(x) sum.w <- if(!is.null(weights)) { stopifnot(is.numeric(weights), weights >= 0, length(weights) == n) sum(weights) } else n it <- 0L NA. <- NA_real_ if(sum.w == 0) # e.g 'x' was all NA return(list(mu = NA., s = NA., it = it, se = NA.)) # instead of error if(se && !is.null(weights)) stop("Std.error computation not yet available for the case of 'weights'") missS <- missing(s) if(missS && is.na(s)) # e.g. for x = c(-Inf, 1) s <- 0 if (s <= 0) { if(s < 0) stop("negative scale 's'") if(warn0scale && n > 1) warning("scale 's' is zero -- returning initial 'mu'") } else { wsum <- if(is.null(weights)) sum else function(u) sum(u * weights) repeat { it <- it + 1L y <- pmin(pmax(mu - k * s, x), mu + k * s) mu1 <- wsum(y) / sum.w if (is.na(mu1) || abs(mu - mu1) < tol * s) break mu <- mu1 } } list(mu = mu, s = s, it = it, SE = if(se) s * sqrt(tauHuber(x, mu=mu, s=s, k=k) / n) else NA.) 
} ## this is a compatible improvement of MASS' huber() : ## 1) returning median() if mad()=0 ## 2) " NA when y has only NAs (or length 0) if(FALSE) huber <- function (y, k = 1.5, tol = 1e-06) { y <- y[!is.na(y)] n <- length(y) if(n == 0) # e.g 'y' was all na return(list(mu = NA, s = NA))# instead of error mu <- median(y) s <- mad(y, center=mu) if (s == 0) { # FIXME? make this warning optional if(n > 1) warning("scale MAD is zero for this sample") } else repeat { yy <- pmin(pmax(mu - k * s, y), mu + k * s) mu1 <- sum(yy)/n if (abs(mu - mu1) < tol * s) break mu <- mu1 } list(mu = mu, s = s) } ## Originally from /u/ftp/NDK/Source-NDK-9/R/rg2-fkt.R : tauHuber <- function(x, mu, k=1.5, s = mad(x), resid = (x - mu)/s) { ## Purpose: Correction factor Tau for the variance of Huber-M-Estimators ## ------------------------------------------------------------------------- ## Arguments: x = data, mu = location, k = tuning parameter of Huber Psi-function ## ------------------------------------------------------------------------- ## Author: Rene Locher Update: R. Frisullo 23.4.02; M.Maechler (as.log(); s, resid) inr <- abs(resid) <= k ## psi <- ifelse(inr, resid, sign(resid)*k) # psi (x) -- more efficiently: psi <- resid; out <- which(!inr); if(length(out)) psi[out] <- sign(resid[out]) * k psiP <- as.logical(inr)# = ifelse(abs(resid) <= k, 1, 0) # psi'(x) length(x) * sum(psi^2) / sum(psiP)^2 } robustbase/R/MTestimador2.R0000644000176200001440000003647614511464163015306 0ustar liggesusers##-*- mode: R; kept-new-versions: 50; kept-old-versions: 50 -*- #### MT Estimators: [M]-Estimators based on [T]ransformations #### ------------- Valdora & Yohai (2013) ##' Defining the spline to compute the center of the rho function ##' @title Provide mu(lambda) as spline function ##' @param cw tuning parameter for rho ##' @return a function, resulting from \code{\link{splinefun}} ##' @author Victor Yohai; many changes: Martin Maechler mk.m_rho <- function(cw, opt.method = c("L-BFGS-B", "Brent", "Nelder-Mead", "BFGS", "CG", "SANN"), ##optim(): method = c("Nelder-Mead", "BFGS", "CG", "L-BFGS-B", "SANN", "Brent"), ## MM: 'Brent' seems best overall lambda = c(seq(0,2.9, by=0.1), seq(3,100)), reltol = sqrt(.Machine$double.eps), trace = 0, sFile = paste0("MTesSpl_", format(cw), ".rda"), recompute = getOption("robustbase:m_rho_recompute", FALSE)) { ## FIXME: Solution without files, but rather cache inside an environment ## ------ For the default cw, cache even in robustbase namespace! ## Instead of saving splinefun() ... just save (lambda, mm.la), it is much smaller if(recompute) { useFile <- FALSE } else { useFile <- file.exists(sFile) if (useFile) { ## load the spline load(sFile)#-> 'm.approx' ## check if its cw was very close to this one: if(cw.ok <- is.numeric(cw0 <- environment(m.approx)$cw)) cw.ok <- (abs(cw - cw0) < 0.001) } } if(!useFile || !cw.ok) { nl <- length(lambda) mm.la <- numeric(nl) s.la <- sqrt(lambda) ## MM: Speedwise, Brent > L-BFGS-B > BFGS > .. for cw >= ~ 1.5 ## L-BFGS-B > Brent for cw ~= 1 opt.method <- match.arg(opt.method) oCtrl <- list(reltol=reltol, trace=trace) if(opt.method %in% c("Brent", "L-BFGS-B")) { ## use bounds if(opt.method == "L-BFGS-B")# yuck! why is this necessary!! 
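		    ## Note: optim()'s "L-BFGS-B" has no 'reltol'; it stops when the
		    ## reduction is within  factr * .Machine$double.eps , so the
		    ## requested 'reltol' is translated into 'factr' below (for the
		    ## default reltol = sqrt(.Machine$double.eps) this is roughly
		    ## reltol/10, i.e. slightly stricter).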
oCtrl <- list(factr = 1/(10*reltol), trace=trace) for(i in seq_len(nl)) mm.la[i] <- optim(s.la[i], espRho, lam = lambda[i], cw = cw, method = opt.method, control = oCtrl, lower = 0, upper = .01 + 2*s.la[i])$par } else { for(i in seq_len(nl)) mm.la[i] <- optim(s.la[i], espRho, lam = lambda[i], cw = cw, method = opt.method, control = oCtrl)$par } m.approx <- splinefun(lambda, mm.la, method = "monoH.FC") e <- environment(m.approx) assign("lambda.max", max(lambda), envir=e) assign("cw", cw, envir=e) save(m.approx, file = sFile) } m.approx } ## result 'm.approx' will be used in mm(.), and "everywhere" below ####################################################### ##' Tukey's Bisquare (aka "biweight") rho function: rho~() = rho scaled to have rho(Inf) = 1 rho <- function(x,cw) pmin(1, 1 - (1-(x/cw)^2)^3) ## faster: rho <- function(x,cw) Mchi(x, cc=cw, psi="tukey") ## NB: in sumaConPesos(), mm(.), ... we make use of the fact that rho(Inf) = 1 psi <- function(x,cw, deriv=0) Mpsi(x, cc=cw, psi="tukey", deriv=deriv) espRho <- function(lam, xx, cw) { ## compute E := E_lambda [ rho_{cw}( sqrt(Y)-xx ) ], given (lambda, xx, cw) ## for Y ~ Pois(lambda) ; rho(.) = Tukey's Bisquare ## ==> E = \sum_{k=0}^\infty rho( sqrt(k)-xx, .) * dpois(k, .) k <- seq(as.integer((max(0,xx-cw))^2), as.integer((xx+cw)^2)+1L) inner <- (rhoS.k <- rho(sqrt(k)-xx, cw)) < 1 ii <- k[inner] terminos <- rhoS.k[inner] * dpois(ii,lam) if((len.ii <- length(ii)) > 0) { primero <- ii[1] ultimo <- ii[len.ii] ppois(primero-1,lam) + sum(terminos) + ppois(ultimo,lam, lower.tail=FALSE) } else 1 } ################################################# ##' @title Compute m(lambda) := the value of x minimizing espRho(lambda, x, cw) ##' @param lam numeric vector of non-negative values \lambda ##' @param m.approx the spline function to be used for "small" lambda, from mk.m_rho() ##' @return mm <- function(lam, m.approx) { la.max <- environment(m.approx)$lambda.max z <- ((m <- lam) <= la.max) m[z] <- m.approx(lam[z]) if(any(i <- !z)) m[i] <- sqrt(lam[i]) m } ############################################################################### ##' @title Compute the loss function for MT-glmrob() ##' @param beta beta (p - vector) ##' @param x design matrix (n x p) ##' @param y (Poisson) response (n - vector) ##' @param cw tuning parameter 'c' for rho_c(.) ##' @param w weight vector (length n) ##' @param m.approx the spline for the inner part of m_c(.) ##' @return \sum_{i=1}^n w_i \rho(\sqrt(y_i) - m( g(x_i ` \beta) ) ) ##' where g(.) = exp(.) for the Poisson family sumaConPesos <- function(beta,x,y,w, cw, m.approx) { eta <- x %*% beta s <- rho(sqrt(y) - mm(exp(eta), m.approx), cw) sum(s*w) } ############################################################################### beta0IniCP <- function(x,y,cw,w, m.approx, nsubm, trace.lev = 1) { ## computes the initial estimate using subsampling with concentration step stopifnot(is.matrix(x), (nsubm <- as.integer(nsubm)) >= 1) p <- ncol(x) n <- nrow(x) s2.best <- Inf; b.best <- rep(NA_real_, p) kk <- 0 for(l in 1:nsubm) { if(trace.lev) { if(trace.lev > 1) cat(sprintf("%3d:",l)) else cat(".", if(l %% 50 == 0) paste0(" ",l,"\n")) } i.sub <- sample(n, p) estim0 <- as.vector( betaExacto(x[i.sub,], y[i.sub]) ) if(any(is.na(estim0))) ## do not use it next eta <- as.vector(x %*% estim0) ## adev := abs( 1/2 * dev.residuals(.) 
) ## y+(y==0) : log(0) |-> -Inf fine; but if eta == -Inf, we'd get NaN adev <- abs(y*(log(y+(y == 0)) - eta) - (y-exp(eta))) ## poisson()'s dev.resids(): 2 * wt * (y * log(ifelse(y == 0, 1, y/mu)) - (y - mu)) ## == 2*wt* (y * ifelse(y == 0, 0, log(y) - log(mu)) - (y - mu)) ## == 2*wt* ifelse(y == 0, mu, y*(log(y) - log(mu)) - (y - mu)) ## where mu <- exp(eta) if(trace.lev > 1) cat(sprintf(" D=%11.7g ", sum(adev))) half <- ceiling(n/2) srt.d <- sort(adev, partial=half) podador <- adev <= srt.d[half] # those smaller-equal than lo-median(.) xPod <- x[podador,] yPod <- y[podador] length(xPod) + length(yPod) # codetools fitE <- tryCatch(glm(yPod ~ xPod-1, family = poisson()), error = function(e)e) if(inherits(fitE, "error")) { message("glm(.) {inner subsample} error: ", fitE$message) if(trace.lev > 1) cat("\n") ## s2[l] <- Inf } else { ## glm() call succeeded betapod <- as.vector( fitE$coefficients ) if(any(is.na(betapod))) ## do not use it next kk <- kk+1 s2 <- sumaConPesos(betapod, x=x, y=y, w=w, cw=cw, m.approx=m.approx) ## estim.ini[l,] <- betapod if(trace.lev > 1) cat(sprintf("s2=%14.9g", s2)) if(s2 < s2.best) { if(trace.lev > 1) cat(" New best!\n") b.best <- betapod s2.best <- s2 } else if(trace.lev > 1) cat("\n") } } ## s0 <- order(s2) ## beta0ini <- estim.ini[s0[1],] list(beta = b.best, nOksamples = kk, s2 = s2.best) }## beta0IniCP() ##################################################################### betaExacto <- function(x,y) { ## to each subsample assign the maximum likelihood estimator and ## fixing the case mle has NA components p <- ncol(x) fitE <- tryCatch(glm.fit(x=x, y=y, family = poisson()), ## TODO , weights = weights, offset = offset error = function(e)e) if(inherits(fitE, "error")) { message("betaExacto glm(.) error: ", fitE$message) return(rep(NA_real_, p)) } ## else -- glm() succeeded ## if_needed_-- MM finds it unneeded ## beta. <- fitE $ coefficients ## sinNas <- na.exclude(beta.) ## long <- length(sinNas) ## lugaresNas <- na.action(sinNas)[1:(p-long)] ## beta.SinNas <- beta. ## beta.SinNas[lugaresNas] <- 0 ## beta.SinNas fitE $ coefficients } ###--- Utilities for Asymptotic Covariance Matrix ----------------- ##' computes the First Derivative of mm() mmd <- function(lam,cw, m.approx) { qq1 <- qpois(.001,lam) qq2 <- qpois(.999,lam) ind <- qq1:qq2 k.. <- sqrt(ind) - mm(lam, m.approx) dP <- dpois(ind,lam) rr1 <- (-dP+(ind*dP/lam)) * psi(k..,cw) rr2 <- dP*psi(k..,cw, deriv=1) rr1 <- sum(rr1) rr2 <- sum(rr2) list(ind=ind, rr1=rr1, rr2=rr2, d = rr1/rr2) } ##' computes the Second Derivative of mm() mmdd <- function(lam,cw, m.approx) { out <- mmd(lam,cw, m.approx) ## FIXME: can reuse even more from mmd() ! ind <- out[["ind"]] NUM <- out[[2]] DEN <- out[[3]] mm1 <- out[["d"]] ## = mm'(.) k.. 
<- sqrt(ind) - mm(lam, m.approx) dP <- dpois(ind,lam) NUMP <- ddpois(ind,lam) * psi(k..,cw) - (-dP+(ind*dP/lam))* psi(k..,cw, deriv=1) * mm1 DENP <- (-dP+(ind*dP/lam)) * psi(k..,cw, deriv=1) - dP*psi(k..,cw, deriv=2) * mm1 NUMP <- sum(NUMP) DENP <- sum(DENP) (NUMP*DEN - DENP*NUM) / DEN^2 } ############################################################### ddpois <- function(x,lam) { ## The second derivative of the Poisson probability function dpois(x,lam)*(1-(2*x/lam)+((x^2)/(lam^2))-(x/(lam^2))) } ##' Compute asymptotic covariance matrix of the MT estimator covasin <- function(x,y,beta,cw, m.approx,w) { p <- ncol(x) n <- length(y) mm1 <- mm2 <- numeric(n) de <- nu <- matrix(0,p,p) lam <- x%*%beta elam <- exp(lam) r <- sqrt(y) - mm(elam, m.approx) psi0 <- psi(r,cw) psi1 <- psi(r,cw, deriv=1) for ( i in 1:n) { ## FIXME: Make more efficient!! {mmd is used in mmdd()) mm1[i] <- mmd (elam[i], cw, m.approx)[[4]] mm2[i] <- mmdd(elam[i], cw, m.approx) } nu1 <- w*psi0*mm1*elam de1 <- -psi1*(mm1^2)*(elam^2)+psi0*mm2*(elam^2)+psi0*mm1*elam de1 <- w*de1 for (i in 1:n) { ## FIXME (?) -- can be vectorized zzt <- tcrossprod(x[i,]) nu <- nu+ (nu1[i]^2)*zzt de <- de+ de1[i]*zzt } nu <- nu/n de <- solve(de/n) ## Cov_{asympt.} = de %*% nu %*% t(de) / n } ## cw = 2.1, nsubm = 500, maxitOpt = 200, tolOpt = 1e-6, glmrobMT.control <- function(cw = 2.1, nsubm = 500, acc = 1e-06, maxit = 200) { if (!is.numeric(acc) || acc <= 0) stop("value of acc must be > 0") ## if (test.acc != "coef") ## stop("Only 'test.acc = \"coef\"' is currently implemented") ## if (!(any(test.vec == c("coef", "resid")))) ## stop("invalid argument for test.acc") if (!is.numeric(nsubm) || nsubm <= 0) stop("number of subsamples must be > 0") if (!is.numeric(maxit) || maxit <= 0) stop("maximum number of iterations must be > 0") if (!is.numeric(cw) || cw <= 0) stop("value of the tuning constant c (cw) must be > 0") list(cw=cw, nsubm=nsubm, acc=acc, maxit=maxit) } ################################################################################### ##' @param intercept logical, if true, x[,] has an intercept column which should ##' not be used for rob.wts glmrobMT <- function(x,y, weights = NULL, start = NULL, offset = NULL, family = poisson(), weights.on.x = "none", control = glmrobMT.control(...), intercept = TRUE, trace.lev = 1, ...) { ## MAINFUNCTION Computes the MT or WMT estimator for Poisson regression with intercept starting from the estimator computed in the function ## beta0IniC. ## INPUT ## x design matrix with nrows and p columns. ## y respone vector of length n ## cw tuning constant. Default value 2.1 ## iweigths indicator for weights penalizing high leverage points, iweights=1 indicates to use weights iweights=0 ## indicate notto use way. Default value is iw=0, Our simulation study suggests not to use weights. ## nsubm Number of subsamples. 
Default calue nsubm=500 ## OUTPUT ##$initial is the inital estimate (first component is the intercept) ##$final is the final estimate (first component is the intercept) ##$nsamples is the number of well conditioned subsamples ## REQUIRED PACKAGES: tools, rrcov stopifnot(is.numeric(cw <- control$cw), cw > 0, is.numeric(nsubm <- control$nsubm)) if(family$family != "poisson") stop("Currently, only family 'poisson' is supported for the \"MT\" estimator") n <- nrow(x) p <- ncol(x) if (is.null(weights)) weights <- 1 # rep.int(1, n) {are not stored, nor used apart from 'sni / *' } else if(any(weights <= 0)) stop("All weights must be positive") if(!is.null(offset)) stop("non-trivial 'offset' is not yet implemented") ## if (is.null(offset)) ## offset <- rep.int(0, n) else if(!all(offset==0)) ## warning("'offset' not fully implemented") ## Copy-paste from ./glmrobMqle.R [overkill currently: Poisson has sni == 1] sni <- sqrt(as.vector(weights)) V_resid <- function(mu, y) { Vmu <- family$variance(mu) if (anyNA(Vmu)) stop("NAs in V(mu)") if (any(Vmu == 0)) stop( "0s in V(mu)") ## return Pearson residuals : (y - mu)* sni/sqrt(Vmu) } m.approx <- mk.m_rho(cw) w <- robXweights(weights.on.x, x, intercept=intercept) if(is.null(start)) { if(trace.lev) cat("Computing initial estimate with ", nsubm, " sub samples:\n") out <- beta0IniCP(x, y, cw = cw, w = w, m.approx = m.approx, nsubm = nsubm, trace.lev = trace.lev) start <- out[[1]] } else { ## user provided start: if(!is.numeric(start) || length(start) != p) stop(gettextf("'start' must be an initial estimate of beta, of length %d", p), domain=NA) } oCtrl <- list(trace = trace.lev, maxit = control$maxit, ## "L-BFGS-B" specific lmm = 9, factr = 1/(10*control$acc)) if(trace.lev) cat("Optim()izing sumaConPesos()\n") ### FIXME: quite slow convergence e.g. for the Possum data ( ../tests/glmrob-1.R ) ### ----- maybe improve by providing gradient ?? estim2 <- optim(start, sumaConPesos, method = "L-BFGS-B", x = x, y = y, w = w, cw = cw, m.approx = m.approx, control = oCtrl) o.counts <- estim2$counts if(estim2$convergence) ## there was a problem warning("optim(.) non-convergence: ", estim2$convergence, if(nzchar(estim2$message)) paste0("\n", estim2$message)) beta <- estim2$par cov <- covasin(x,y, beta=beta, cw=cw, m.approx=m.approx, w=w) eta <- as.vector(x %*% beta) # + offset mu <- family$linkinv(eta) residP <- V_resid(mu, y)# residP == (here!) residPS ## As sumaConPesos() computes ## eta <- x %*% beta ## s <- rho(sqrt(y) - mm(exp(eta), m.approx), cw) ## sum(s*w) ## we could say that "psi(x) / x" -- weights would be w.r <- Mwgt(sqrt(y) - mm(exp(eta), m.approx), cw, psi="tukey") names(mu) <- names(eta) <- names(residP) # re-add after computation names(beta) <- names(start) <- nmB <- colnames(x) ## maybe: dimnames(cov) <- list(nmB, nmB) list(coefficients = beta, initial = start, family = poisson(), # <- only case for now coefficients = beta, residuals = residP, # s.resid = residPS, fitted.values = mu, linear.predictors = eta, cov = cov, nsubm = nsubm, "nOksub" = out[[2]], converged = (estim2$convergence == 0), iter = o.counts[[1]], optim.counts = o.counts, optim.control = oCtrl, cw=cw, weights.on.x=weights.on.x, w.x = w, w.r = w.r) } robustbase/R/glmrobMqle-DQD.R0000644000176200001440000001276412150222661015465 0ustar liggesusers#### Quasi-Deviance Differences --- for Model Selection #### --------------------------------------------------- -> ./anova-glmrob.R ## MM: These function names are really too long ## but then, they are hidden in the name space ... 
## (Maybe it would be nice to do this as one function with "family" .. ) glmrobMqleDiffQuasiDevB <- function(mu, mu0, y, ni, w.x, phi, tcc) { ## f.cnui <- function(u, y, ni, tcc) { pr <- u/ni Vmu <- pr * (1 - pr) ## = binomial()$variance residP <- (y-pr)*sqrt(ni/Vmu) ## First part: nui nui <- pmax.int(-tcc, pmin.int(tcc, residP)) ## Second part: Enui H <- floor(u - tcc*sqrt(ni*Vmu)) K <- floor(u + tcc*sqrt(ni*Vmu)) ## Actually, floor is not needed because pbinom() can cope ## with noninteger values in the argument q! ## what follows is similar to glmrob.Mqle.EpsiB except a ## different vectorisation h1 <- (if(ni == 1) as.numeric(- (H < 0) + (K >= 1) ) * sqrt(Vmu) else (pbinom(K-1,1,pr) - pbinom(H-1,ni-1,pr) - pbinom(K,ni,pr) + pbinom(H,ni,pr)) * pr * sqrt(ni/Vmu)) ## pmax was needed to get numeric returns from pbinom Enui <- (tcc*(1 - pbinom(K,ni,pr) - pbinom(H,ni,pr)) + h1) return((nui - Enui) / sqrt(ni*Vmu)) } ## f.cnui() nobs <- length(mu) stopifnot(nobs > 0) QMi <- numeric(nobs) ## Numerical integrations for(i in 1:nobs) QMi[i] <- integrate(f.cnui, y = y[i], ni = ni[i], tcc = tcc, subdivisions = 200, lower = mu[i]*ni[i], upper = mu0[i]*ni[i])$value ## robust quasi-deviance ## -2*(sum(QMi1)-sum(QMi2)) ## Andreas' interpretation of (4) and (5) ## -2*(sum(QMi1)-sum(QMi2)/nobs) ## Eva's interpretation of (4) and (5) ## According to Andreas' interpretation -2*sum(QMi*w.x) } ## glmrobMqleDiffQuasiDevB glmrobMqleDiffQuasiDevPois <- function(mu, mu0, y, ni, w.x, phi, tcc) { ## f.cnui <- function(u, y, ni, tcc) { Vmu <- u ## = poisson()$variance residP <- (y-u)/sqrt(Vmu) ## First part: nui nui <- pmax.int(-tcc, pmin.int(tcc, residP)) ## Second part: Enui H <- floor(u - tcc*sqrt(Vmu)) K <- floor(u + tcc*sqrt(Vmu)) ## what follows is similar to Epsipois except a ## different vectorisation h1 <- u/sqrt(Vmu)*(dpois(H,u)- dpois(K,u)) Enui <- tcc*(1 - ppois(K,u) - ppois(H,u)) + h1 return((nui - Enui) / sqrt(Vmu)) } nobs <- length(mu) stopifnot(nobs > 0) QMi <- numeric(nobs) ## Numerical integrations for(i in 1:nobs) QMi[i] <- integrate(f.cnui, y = y[i], ni = ni[i], tcc = tcc, lower = mu[i], upper = mu0[i])$value ## robust quasi-deviance ## -2*(sum(QMi1)-sum(QMi2)) ## Andreas' interpretation of (4) and (5) ## -2*(sum(QMi1)-sum(QMi2)/nobs) ## Eva's interpretation of (4) and (5) ## According to Andreas' interpretation -2*sum(QMi*w.x) }## glmrobMqleDiffQuasiDevPois glmrobMqleDiffQuasiDevGamma <- function(mu, mu0, y, ni, w.x, phi, tcc, variant = c("V1", "Eva1", "Andreas1")) { ## Notation similar to the discrete case (Cantoni & Ronchetti, 2001) f.cnui <- function(u, y, ni, phi, tcc) { s.ph <- sqrt(phi) ## First part: nui sV <- s.ph * u ## = sqrt(dispersion * Gamma()$variance) residP <- (y-u)/sV nui <- pmax.int(-tcc, pmin.int(tcc, residP)) ## Second part: Enui ## what follows is similar to glmrob.Mqle.Epsipois except a ## different vectorisation nu <- 1/phi ## form parameter nu snu <- 1/s.ph ## sqrt (nu) pPtmc <- pgamma(snu - tcc, shape=nu, rate=snu) pPtpc <- pgamma(snu + tcc, shape=nu, rate=snu) Enui <- tcc*(1-pPtpc-pPtmc) + Gmn(-tcc,nu) - Gmn( tcc,nu) ( nui/sV - Enui/u*s.ph ) } f.cnui1 <- function(u, y, ni, phi, tcc) { ## First part: nui sV <- sqrt(phi) * u ## = sqrt(dispersion * Gamma()$variance) residP <- (y-u)/sV nui <- pmax.int(-tcc, pmin.int(tcc, residP)) (nui / sV) } f.cnui2 <- function(u, y, ni, phi, tcc) { ## First part: nui s.ph <- sqrt(phi) sV <- s.ph * u ## = sqrt(dispersion * Gamma()$variance) snu <- 1/s.ph ## sqrt (nu) ## Second part: Enui ## what follows is similar to EpsiGamma except a ## 
different vectorisation nu <- 1/phi ## form parameter nu pPtmc <- pgamma(snu - tcc, shape=nu, rate=snu) pPtpc <- pgamma(snu + tcc, shape=nu, rate=snu) Enui <- tcc*(1-pPtpc-pPtmc) + Gmn(-tcc,nu) - Gmn( tcc,nu) return(Enui/u * s.ph) } nobs <- length(mu) stopifnot(nobs > 0) variant <- match.arg(variant) ## robust quasi-deviance if(variant == "V1") { QMi <- numeric(nobs) ## Numerical integrations for(i in 1:nobs) QMi[i] <- integrate(f.cnui, y = y[i], ni = ni[i], phi=phi, tcc = tcc, lower = mu[i], upper = mu0[i])$value -2*sum(QMi*w.x) } else { ## "Eva1" or "Andreas1"; Using two terms QMi1 <- QMi2 <- numeric(nobs) for(i in 1:nobs) QMi1[i] <- integrate(f.cnui1, y = y[i], ni = ni[i], phi=phi, tcc = tcc, lower = mu[i], upper = mu0[i])$value for(i in 1:nobs) QM2i[i] <- integrate(f.cnui2, y = y[i], ni = ni[i], phi=phi, tcc = tcc, lower = mu[i], upper = mu0[i])$value if(variant == "Eva1") { ## Eva Cantoni's interpretation of (4) and (5) -2*(sum(QMi1)-sum(QMi2)/nobs) } else if (variant == "Andreas1") { ## Andreas' interpretation of (4) and (5) -2*(sum(QMi1)-sum(QMi2)) } else stop("invalid 'variant': ", variant) } } robustbase/R/nlrob.R0000644000176200001440000004677314410263765014114 0ustar liggesusers## Directly use nls()-internals, i.e., its 'm', to get a next 'start' (coef-like list): ## (In principle useful also outside robustbase) .nls.get.start <- function(nls.m) { ## stopifnot(is.list(nls.m), is.function(gg <- nls.m$getPars), ## is.environment(em <- environment(gg))) stopifnot(is.list(nls.m), is.environment(em <- environment(nls.m$getPars))) mget(names(em$ind), em$env) } nlrob <- function (formula, data, start, lower, upper, weights = NULL, na.action = na.fail, method = c("M", "MM", "tau", "CM", "mtl"), psi = .Mwgt.psi1("huber", cc=1.345), scale = NULL, test.vec = c("resid", "coef", "w"), maxit = 20, tol = 1e-06, acc, algorithm = "default", doCov = FALSE, model = FALSE, control = if(method == "M") nls.control() else nlrob.control(method, optArgs = list(trace=trace), ...), trace = FALSE, ...) { ## Purpose: ## Robust fitting of nonlinear regression models. The fitting is ## done by iterated reweighted least squares (IWLS) as in rlm() of ## the package MASS. In addition, see also 'nls'. ## ## --> see the help file, ?nlrob (or ../man/nlrob.Rd in the source) ## ------------------------------------------------------------------------- ##- some checks mf <- call <- match.call() # << and more as in nls() formula <- as.formula(formula) if (length(formula) != 3) stop("'formula' should be a formula of the type 'y ~ f(x, alpha)'") ## Had 'acc'; now use 'tol' which is more universal; 'acc' should work for a while if(!missing(acc) && is.numeric(acc)) { if(!missing(tol)) stop("specifying both 'acc' and 'tol' is invalid") tol <- acc message("The argument 'acc' has been renamed to 'tol'; do adapt your code.") } method <- match.arg(method) dataName <- substitute(data) hasWgts <- !missing(weights) # not eval()ing ! 
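    ## Sketch of the default method = "M" fitting loop (further below):
    ## compute 'Scale' as the (rescaled) MAD of the residuals unless a fixed
    ## 'scale' was supplied, set weights w <- psi(resid/Scale) (times any
    ## prior 'weights'), refit by weighted nls(), and iterate until the
    ## relative change in 'test.vec' drops below 'tol' or 'maxit' is reached.
    ## Illustration only -- 'dat', 'y', 'x', 'a', 'b' are hypothetical names:
    ##   nlrob(y ~ a * exp(b * x), data = dat, start = list(a = 1, b = 0.1))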
## we don't really need 'start' for non-"M" methods, but for the following logic, ## Want 'dataClasses' -> need 'mf' --> 'varNames' -> 'pnames' -> 'start' : varNames <- all.vars(formula) var.nms <- c(varNames, if(method %in% c("CM", "mtl")) "sigma") # <--> "sigma" part of 'pnames' ## FIXME: nls() allows a missing 'start'; we allow *iff* lower | upper has names: if(missing(start) && (!missing(lower) || !missing(upper))) pnames <- .fixupArgs(lower, upper, var.nms) else if(length(pnames <- names(start)) != length(start)) stop("'start' or 'lower' or 'upper' must be fully named (list or numeric vector)") else if(any(is.na(match(pnames, var.nms)))) # check also in .fixupArgs() stop("parameter names must appear in 'formula'") ## If it is a parameter it is not a variable varNames <- varNames[is.na(match(varNames, pnames))] ## do now: need 'dataClasses', hence the model.frame 'mf' for all 'method' cases obsNames <- rownames(data <- as.data.frame(data)) ## From nls: using model.weights() e.g. when formula 'weights = sqrt()' mf$formula <- # replace by one-sided linear model formula as.formula(paste("~", paste(varNames, collapse = "+")), env = environment(formula)) mf[c("start", "lower", "upper", "method", "psi", "scale", "test.vec", "maxit", "tol", "acc", "algorithm", "doCov", "model", "control", "trace")] <- NULL mf[[1L]] <- quote(stats::model.frame) mf <- eval.parent(mf) dataCl <- attr(attr(mf, "terms"), "dataClasses") ## mf <- as.list(mf) if(method != "M") { if(hasWgts) ## FIXME .. should not be hard, e.g. for MM stop("specifying 'weights' is not yet supported for method ", method) if(!missing(psi)) warning(gettextf("For method = \"%s\", currently 'psi' must be specified via 'control'", method), domain=NA) ## lifted from Martin's 'sfsmisc' package : missingCh <- function(x, envir = parent.frame()) { eval(substitute(missing(VAR), list(VAR=as.name(x))), envir = envir) } aNms <- c("start", "na.action", "test.vec", "maxit", "algorithm", "doCov") not.missA <- !vapply(aNms, missingCh, NA, envir=environment()) if(any(not.missA)) { warning(sprintf(ngettext(sum(not.missA), "For method = \"%s\", argument %s is not made use of", "For method = \"%s\", arguments %s are not made use of"), method, pasteK(sQuote(aNms[not.missA]))), domain=NA) } force(control) fixAns <- function(mod) { mod$call <- call # replace the nlrob.() one mod$data <- dataName # (ditto) ctrl <- mod$ctrl if(is.character(psi <- ctrl$psi) && is.numeric(cc <- ctrl$tuning.psi.M)) {# MM: psi <- .Mwgt.psi1(psi, cc=cc) res.sc <- with(mod, residuals/Scale) mod$psi <- psi mod$w <- # as we have no 'weights' yet mod$rweights <- psi(res.sc) } ## else mod$rweights <- mod$psi <- NULL mod$dataClasses <- dataCl if(model) mod$model <- mf mod } ## {fixAns} ## switch(method, ## source for these is all in >>> nlregrob.R <<< "MM" = { return(fixAns(nlrob.MM (formula, data, lower=lower, upper=upper, tol=tol, ctrl= control))) }, "tau" = { return(fixAns(nlrob.tau(formula, data, lower=lower, upper=upper, tol=tol, ctrl= control))) }, "CM" = { return(fixAns(nlrob.CM (formula, data, lower=lower, upper=upper, tol=tol, ctrl= control))) }, "mtl" = { return(fixAns(nlrob.mtl(formula, data, lower=lower, upper=upper, tol=tol, ctrl= control))) }) } ## {non-"M" methods} ## ## else: method == "M", original method, the only one based on 'nls' : env <- environment(formula) if (is.null(env)) env <- parent.frame() if (!((is.list(start) && all(sapply(start, is.numeric))) || (is.vector(start) && is.numeric(start)))) stop("'start' must be a named list or numeric vector") test.vec <- 
match.arg(test.vec) if(missing(lower)) lower <- -Inf if(missing(upper)) upper <- +Inf updateScale <- is.null(scale) if(!updateScale) { ## keep initial scale fixed through iterations (e.g. for "MM") if(is.1num(scale) && scale > 0) Scale <- scale else stop("'scale' must be NULL or a positive number") } nm <- "._nlrob.w" if (nm %in% c(varNames, pnames, names(data))) stop(gettextf("Do not use '%s' as a variable name or as a parameter name", nm), domain=NA) data <- as.list(data)# to be used as such ## 'mf' now defined before "dispatch" to method ! nobs <- nrow(mf) if (hasWgts) hasWgts <- !is.null(weights <- model.weights(mf)) if (hasWgts && any(weights < 0 | is.na(weights))) stop("'weights' must be nonnegative and not contain NAs") ## initialize testvec etc fit <- eval(formula[[3]], c(data, start), env) y <- eval(formula[[2]], data, env) coef <- unlist(start) if(anyNA(data) && (identical(na.action, na.omit) || na.action == "na.omit")) warning("NA's present in data; consider using 'na.action = na.exclude'") resid <- naresid(na.action, y - fit) irls.delta <- function(old, new) sqrt(sum((old - new)^2, na.rm = TRUE)/ max(1e-20, sum(old^2, na.rm = TRUE))) ## Robust loop -- IWLS / IRLS iterations converged <- FALSE status <- "converged" method.exit <- FALSE for (iiter in seq_len(maxit)) { if (trace) cat("robust iteration", iiter, "\n") previous <- get(test.vec) if(updateScale) Scale <- median(abs(resid), na.rm = TRUE)/0.6745 if (Scale == 0) { convi <- 0 method.exit <- TRUE warning(status <- "could not compute scale of residuals") ## FIXME : rather use a "better" Scale in this case, e.g., ## ----- Scale <- min(abs(resid)[resid != 0]) } else { w <- psi(resid/Scale) if (hasWgts) w <- w * weights data$._nlrob.w <- w ## use a variable name the user "will not" use ._nlrob.w <- NULL # workaround for codetools "bug" ### ## Case distinction against "wrong warning" as long as ### ## we don't require R > 3.0.2: out <- ### if(identical(lower, -Inf) && identical(upper, Inf)) ### nls(formula, data = data, start = start, ### algorithm = algorithm, trace = trace, ### weights = ._nlrob.w, ### na.action = na.action, control = control) ### else nls(formula, data = data, start = start, algorithm = algorithm, trace = trace, lower=lower, upper=upper, weights = ._nlrob.w, na.action = na.action, control = control) coef <- unlist(start <- .nls.get.start(out$m)) ## same sequence as in start! Ok for test.vec: resid <- if (!is.null(na.action)) naresid(na.action, residuals(out)) else residuals(out) convi <- irls.delta(previous, get(test.vec)) } converged <- convi <= tol if (converged) break else if (trace) cat(sprintf(" --> irls.delta(previous, %s) = %g -- *not* converged\n", test.vec, convi)) }## for( iiter ...) if(!converged || method.exit) { warning(st <- paste("failed to converge in", maxit, "steps")) status <- if(method.exit) { converged <- FALSE; paste(status, st, sep="; ") } else st } if(hasWgts) { ## or just out$weights ?? tmp <- weights != 0 w[tmp] <- w[tmp]/weights[tmp] } ## --- Estimated asymptotic covariance of the robust estimator rw <- psi(res.sc <- resid/Scale) asCov <- if(!converged || !doCov) NA else { ## a version of .vcov.m(.) below AtWAinv <- chol2inv(out$m$Rmat()) dimnames(AtWAinv) <- list(names(coef), names(coef)) tau <- mean(rw^2) / mean(psi(res.sc, d=TRUE))^2 AtWAinv * Scale^2 * tau } if(is.null(call$algorithm)) call$algorithm <- algorithm ## returned object: == out$m$fitted() [FIXME?] 
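    ## The "nlrob" object assembled just below combines the classical
    ## components (coefficients, fitted.values, residuals) with the robust
    ## ones: the scale estimate 'Scale', the robustness weights 'rweights'
    ## (and 'w'), the asymptotic covariance 'cov' -- computed above only when
    ## the fit converged and doCov = TRUE -- and 'status' / 'iter' from the
    ## IRLS loop.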
fit <- setNames(eval(formula[[3]], c(data, start)), obsNames) structure(class = c("nlrob", "nls"), list(m = out$m, call = call, formula = formula, new.formula = formula, nobs = nobs, coefficients = coef, working.residuals = as.vector(resid), fitted.values = fit, residuals = y - fit, Scale=Scale, w=w, rweights = rw, cov = asCov, test.vec=test.vec, status=status, iter=iiter, psi=psi, data = dataName, dataClasses = dataCl, model = if(model) mf, control = control)) } ##' @title The nlrob() method used ##' @param obj an \code{"nlrob"} object ##' @return characer string .method.nlrob <- function(obj) if(inherits(obj, "nls")) "M" else obj$ctrl$method .vcov.m <- function(object, Scale, resid.sc) { if(.method.nlrob(object) == "M") { AtWAinv <- chol2inv(object$m$Rmat()) stopifnot(length(Scale) == 1, Scale >= 0, is.numeric(resid.sc), length(resid.sc) == nobs(object), is.character(nms.coef <- names(coef(object))), length(nms.coef) == nrow(AtWAinv), is.function(psi <- object$psi)) dimnames(AtWAinv) <- list(nms.coef, nms.coef) tau <- mean(psi(resid.sc)^2) / mean(psi(resid.sc, d=TRUE))^2 AtWAinv * Scale^2 * tau } else if(is.function(psi <- object$psi)) { form <- object$formula ## call method="M", with fixed Scale mM <- nlrob(form, data = eval(object$data, environment(form)), method = "M", start = coef(object), psi = psi, scale = Scale, doCov=TRUE) mM$cov ## stop(".vcov.m() not yet implemented for nlrob.MM objects") ## using 'chol(): --- is wrong, unfortunately ## AtWAinv <- chol2inv(chol(object$hessian)) } else { NA ## instead of error } } ## The 'nls' method is *not* correct formula.nlrob <- function(x, ...) x$formula sigma.nlrob <- function(object, ...) if(!is.null(s <- object$Scale)) s else object$coefficients[["sigma"]] estimethod <- function(object, ...) UseMethod("estimethod") estimethod.nlrob <- function(object, ...) if(is.list(object$m) && inherits(object, "nls")) "M" else object$ctrl$method fitted.nlrob <- function (object, ...) { val <- as.vector(object$fitted.values) if (!is.null(object$na.action)) val <- napredict(object$na.action, val) ##MM: attr(val, "label") <- "Fitted values" val } ## formula() works "by default" predict.nlrob <- function (object, newdata, ...) { if (missing(newdata)) return(as.vector(fitted(object))) if (!is.null(cl <- object$dataClasses)) .checkMFClasses(cl, newdata) if(estimethod(object) == "M") # also for start = list(..) object$m$predict(newdata) else eval(formula(object)[[3]], c(as.list(newdata), coef(object))) } print.nlrob <- function (x, ...) { cat("Robustly fitted nonlinear regression model", if((meth <- .method.nlrob(x)) != "M") paste0(" (method ", meth, ")"), "\n", sep="") cat(" model: ", deparse(formula(x)), "\n") cat(" data: ", deparse(x$data), "\n") print(coef(x), ...) cat(" status: ", x$status, "\n") invisible(x) } residuals.nlrob <- function (object, type = c("response", "working", "pearson"), ...) { type <- match.arg(type) R <- switch(type, "pearson"= { stop("type 'pearson' is not yet implemented") ## as.vector(object$working.residuals) }, "working"= { ## FIXME(?): from nls, these used to *contain* weights, but no longer object$working.residuals }, "response"= { object$residuals }, stop("invalid 'type'"))# ==> programming error, as we use match.arg() if (!is.null(object$na.action)) R <- naresid(object$na.action, R) ## FIXME: add 'names'! ##MM no labels; residuals.glm() does neither: attr(val, "label") <- "Residuals" R } vcov.nlrob <- function (object, ...) 
{ if(is.numeric(cv <- object$cov)) cv else { sc <- object$Scale .vcov.m(object, Scale = sc, resid.sc = as.vector(object$residuals) / sc) } } summary.nlrob <- function (object, correlation = FALSE, symbolic.cor = FALSE, ...) { w <- object$w ## weights * rweights, scaled such that sum(w)=1 n <- sum(w > 0) param <- coef(object) p <- length(param) rdf <- n - p no <- names(object) no <- no[match(c("formula", "residuals", "Scale", "w", "rweights", "cov", "call", "status", "counts", "iter", "control", "ctrl"), no, 0L)] ans <- object[no] conv <- ans$status == "converged" if(is.null(sc <- ans$Scale)) ans$Scale <- sc <- sigma(object) if(conv && !is.matrix(ans$cov)) ans$cov <- .vcov.m(object, Scale = sc, resid.sc = as.vector(object$residuals) / sc) if((ok.cov <- is.matrix(ans$cov))) if(!all(dim(ans$cov) == p)) stop("'cov' must be a p x p matrix") ans$df <- c(p, rdf) cf <- if(ok.cov) { se <- sqrt(diag(ans$cov)) tval <- param/se cbind(param, se, tval, 2 * pt(abs(tval), rdf, lower.tail = FALSE)) } else cbind(param, NA, NA, NA) dimnames(cf) <- list(names(param), c("Estimate", "Std. Error", "t value", "Pr(>|t|)")) ans$coefficients <- cf if(correlation && ok.cov && rdf > 0) { ans$correlation <- ans$cov / outer(se, se) ans$symbolic.cor <- symbolic.cor } class(ans) <- "summary.nlrob" ans } print.summary.nlrob <- function (x, digits = max(3, getOption("digits") - 3), symbolic.cor = x$symbolic.cor, signif.stars = getOption("show.signif.stars"), ...) { cat("\nCall:\n") cat(paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n\n", sep = "") ## cat("\nFormula: ") ## cat(paste(deparse(x$formula), sep = "\n", collapse = "\n"), "\n", sep = "") if(is.null(ctrl <- x$ctrl)) meth <- "M" else { meth <- ctrl$method cat("Method \"", meth, if(!is.null(cc <- ctrl$init)) paste0("\", init = \"", cc), if(!is.null(ps <- ctrl$psi )) paste0("\", psi = \"", ps), "\"\n", sep="") } resid <- x$residuals df <- x$df rdf <- df[2L] cat(if (!is.null(x$weights) && diff(range(x$weights))) "Weighted ", "Residuals:\n", sep = "") if (rdf > 5L) { nam <- c("Min", "1Q", "Median", "3Q", "Max") rq <- if (NCOL(resid) > 1) structure(apply(t(resid), 1, quantile), dimnames = list(nam, dimnames(resid)[[2]])) else setNames(quantile(resid), nam) print(rq, digits = digits, ...) } else print(resid, digits = digits, ...) cat("\nParameters:\n") printCoefmat(x$coefficients, digits = digits, signif.stars = signif.stars, ...) if(x$status == "converged") { cat("\nRobust residual standard error:", format(signif(x$Scale, digits)), "\n") correl <- x$correlation if (!is.null(correl)) { p <- NCOL(correl) if (p > 1) { cat("\nCorrelation of Parameter Estimates:\n") if(is.logical(symbolic.cor) && symbolic.cor) { print(symnum(correl, abbr.colnames = NULL)) } else { correl <- format(round(correl, 2), nsmall = 2, digits = digits) correl[!lower.tri(correl)] <- "" print(correl[-1, -p, drop=FALSE], quote = FALSE) } } } if(is.null(ctrl)) cat("Convergence in", x$iter, "IRWLS iterations\n\n") else { if(length(it <- ctrl$iter) == 1) cat("Convergence in", it, "iterations\n\n") else if(length(cnts <- x$counts) > 0) cat("Convergence after", cnts[["function"]], "function and", cnts[["gradient"]],"gradient evaluations\n\n") else ## length(it) >= 2 : cat("Convergence\n\n") } if(!is.null(x$rweights)) summarizeRobWeights(x$rweights, digits = digits, ...) 
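	## (summarizeRobWeights() summarizes the robustness weights and flags
	##  observations whose weight is essentially zero, i.e. those that were
	##  downweighted as outliers.)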
} else if(meth == "M") cat("** IRWLS iterations did *not* converge!\n\n") else cat("** Iterations did *not* converge!\n\n") invisible(x) } ## Confint(): ideally built on profile, the same as stats:::confint.nls() ## -------- which eventually calls stats:::profile.nls ## Also, do emulate (to some extent) ## str(lme4:::confint.merMod) ## function (object, parm, level = 0.95, method = c("profile", "Wald", "boot"), ## zeta, nsim = 500, boot.type = c("perc", "basic", "norm"), quiet = FALSE, ## oldNames = TRUE, ...) confint.nlrob <- function(object, parm, level = 0.95, method = c("profile", "Wald", "boot"), zeta, nsim = 500, boot.type = c("perc", "basic", "norm"), quiet = FALSE, oldNames = TRUE, ...) { method <- match.arg(method) boot.type <- match.arg(boot.type) if (!missing(parm) && !is.numeric(parm) && method %in% c("profile", "boot")) stop("for method='", method, "', 'parm' must be specified as an integer") switch(method, profile = { stop("profile() method not yet implemented for \"nlrob\" objects. Use method = \"Wald\".") ## hence unused for now : if (!quiet) message("Computing profile confidence intervals ...") utils::flush.console() pp <- if (missing(parm)) { profile(object, signames = oldNames, ...) } else { profile(object, which = parm, signames = oldNames, ...) } confint(pp, level = level, zeta = zeta) }, Wald = { cf <- coef(object) pnames <- names(cf) if (missing(parm)) parm <- pnames else if (is.numeric(parm)) parm <- pnames[parm] a <- (1 - level)/2 a <- c(a, 1 - a) ## for now, a short version of R's formatting in quantile.default(): format_perc <- function(x, digits = max(2L, getOption("digits"))) paste0(formatC(x, format = "fg", width = 1, digits = digits)) pct <- format_perc(a, 3) fac <- qnorm(a) ci <- array(NA, dim = c(length(parm), 2L), dimnames = list(parm, pct)) sdiag <- function(x) if (length(x) == 1) c(x) else diag(x) ses <- sqrt(sdiag(vcov(object)[parm, parm])) ci[] <- cf[parm] + ses %o% fac ci }, boot = { stop("\"boot\" method not yet implemented for \"nlrob\" objects. Use confint(*, method = \"Wald\").") }) } robustbase/R/glmrobPredict.R0000644000176200001440000000547712113111622015547 0ustar liggesusers# File .../glmrobPredict.R # Part of the R package 'robustbase', http://www.R-project.org # Based on predict.glm (cf. src/library/stats/R/) # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # http://www.r-project.org/Licenses/ # # Note that '# *rob' indicate adjustment for the robust case predict.glmrob <- function(object, newdata = NULL, type = c("link", "response", "terms"), se.fit = FALSE, dispersion = NULL, terms=NULL, na.action = na.pass, ...) { type <- match.arg(type) na.act <- object$na.action object$na.action <- NULL # kill this for predict.lm calls object$rweights <- object$w.r # *rob: used for predict.lmrob() if (!se.fit) { ## No standard errors if(missing(newdata)) { pred <- switch(type, link = object$linear.predictors, response = object$fitted.values, terms = predict.lmrob(object, se.fit=se.fit, scale = 1, type="terms", terms=terms, ...) 
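                      ## for type = "terms", the glmrob method reuses
                      ## predict.lmrob() on the linear-predictor scale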
# *rob ) if(!is.null(na.act)) pred <- napredict(na.act, pred) } else { pred <- predict.lmrob(object, newdata, se.fit, scale = 1, type = if(type=="link") "response" else type, terms = terms, na.action = na.action, ...) # *rob switch(type, response = {pred <- family(object)$linkinv(pred)}, link =, terms= ) } } else { ## summary.survreg has no ... argument. if(inherits(object, "survreg")) dispersion <- 1. if(is.null(dispersion) || dispersion == 0) dispersion <- summary(object, dispersion=dispersion)$dispersion residual.scale <- as.vector(sqrt(dispersion)) pred <- predict.lmrob(object, newdata, se.fit, scale = residual.scale, type = if(type=="link") "response" else type, terms = terms, na.action = na.action, ...) # *rob fit <- pred$fit se.fit <- pred$se.fit switch(type, response = { se.fit <- se.fit * abs(family(object)$mu.eta(fit)) fit <- family(object)$linkinv(fit) }, link =, terms=) if( missing(newdata) && !is.null(na.act) ) { fit <- napredict(na.act, fit) se.fit <- napredict(na.act, se.fit) } pred <- list(fit=fit, se.fit=se.fit, residual.scale=residual.scale) } pred } robustbase/R/anova-glmrob.R0000644000176200001440000001337512425760522015351 0ustar liggesusers anova.glmrob <- function(object, ..., test = c("Wald", "QD", "QDapprox")) { dotargs <- list(...) if (!is.null(names(dotargs))) { named <- (names(dotargs) != "") if (any(named)) { warning("the following arguments to 'anova.glmrob' are invalid and", "dropped:\n", pasteK(deparse(dotargs[named]))) dotargs <- dotargs[!named] } } is.glmrob <- vapply(dotargs, inherits, NA, what="glmrob") if(!all(is.glmrob) || !inherits(object, "glmrob")) stop("anova.glmrob() only works for 'glmrob' objects") test <- match.arg(test) if (length(dotargs) > 0) anovaGlmrobList(c(list(object), dotargs), test=test) else { ## ## "'Anova Table' for a single model object stop("'Anova Table' for a single model object not yet implemented") } } anovaGlmrobList <- function (object, test=NULL) { nmodels <- length(object) stopifnot(nmodels >= 2) responses <- as.character(lapply(object, function(x) deparse(formula(x)[[2]]))) if (!all(responses == responses[1])) stop("Not the same response used in the fitted models") nobs <- sapply(object, function(x) length(x$residuals)) if (any(nobs != nobs[1])) stop("models were not all fitted to the same size of dataset") methods <- as.character(lapply(object, function(x) x$method)) if(!all(methods == methods[1])) stop("Not the same method used for fitting the models") note <- paste("Models fitted by method '", methods[1], "'", sep="") tccs <- sapply(object, function(x) length(x$tcc)) if(!all(tccs == tccs[1])) stop("Not the same tuning constant c used in the robust fits") ## tbl <- matrix(rep(NA, nmodels*4), ncol = 4) tbl[1,1] <- nobs[1] - length(coef(object[[1]])) for(k in 2:nmodels) tbl[k,] <- anovaGlmrobPair(object[[k-1]], object[[k]], test=test) ## return dimnames(tbl) <- list(1:nmodels, c("pseudoDf", "Test.Stat", "Df", "Pr(>chisq)")) title <- switch(test, Wald = "Robust Wald Test Table", QD = "Robust Quasi-Deviance Table", QDapprox = "Robust Quasi-Deviance Table Based on a Quadratic Approximation", "") variables <- lapply(object, function(x) paste(deparse(formula(x)), collapse = "\n")) topnote <- paste("Model ", format(1:nmodels), ": ", variables, sep = "", collapse = "\n") structure(as.data.frame(tbl), heading = c(title, "", topnote, note,""), class = c("anova", "data.frame")) } anovaGlmrobPair <- function(obj1, obj2, test) { if(length(coef(obj1)) < length(coef(obj2))){ Sign <- 1 full.mfit <- obj2 reduced.mfit <- obj1 } else { 
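        ## obj1 is the larger (or equal-sized) model: swap the roles and
        ## record the direction in 'Sign', whose sign is carried over to the
        ## test statistic and df reported in the anova table further below.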
Sign <- -1 full.mfit <- obj1 reduced.mfit <- obj2 } X <- model.matrix(full.mfit) asgn <- attr(X, "assign") tt <- terms(full.mfit) tt0 <- terms(reduced.mfit) tl <- attr(tt, "term.labels") tl0 <- attr(tt0, "term.labels") numtl0 <- match(tl0 , tl, nomatch = -1) if(attr(tt0, "intercept") == 1) numtl0 <- c(0, numtl0) if(any(is.na(match(numtl0, unique(asgn))))) stop("Models are not nested!") mod0 <- seq(along = asgn)[!is.na(match(asgn, numtl0))] if (length(asgn) == length(mod0)) stop("Models are not strictly nested") H0ind <- setdiff(seq(along = asgn), mod0) H0coef <- coef(full.mfit)[H0ind] df <- length(H0coef) pp <- df + length(mod0) if(test == "Wald") { t.cov <- full.mfit$cov t.chisq <- sum(H0coef * solve(t.cov[H0ind, H0ind], H0coef)) statistic <- c(chisq = t.chisq) } else if(full.mfit$method=="Mqle" && (test == "QD" || test == "QDapprox")) { matM <- full.mfit$matM if(test == "QDapprox") { ## Difference of robust quasi-deviances ## via the asymptotically equivalent quadratic form matM11 <- matM[mod0, mod0, drop=FALSE] matM12 <- matM[mod0, H0ind, drop=FALSE] matM22 <- matM[H0ind, H0ind, drop=FALSE] matM22.1 <- matM22 - crossprod(matM12, solve(matM11, matM12)) Dquasi.dev <- nrow(X) * c(H0coef %*% matM22.1 %*% H0coef) } else { quasiDev <- switch(full.mfit$family$family, poisson = glmrobMqleDiffQuasiDevPois, binomial = glmrobMqleDiffQuasiDevB, Gamma = glmrobMqleDiffQuasiDevGamma, stop("This family is not implemented")) ## note that qdev and qdev0 do depend on an incorrectly specified ## lower limits in the integration. But this does't matter in ## the following difference, because the difference does not ## deepend on it! (Hence I could use the centered nui ## (cnui= nui - Enui) in quasiDev as the function to be integrated. Dquasi.dev <- quasiDev(mu = full.mfit$fitted.values, mu0 = reduced.mfit$fitted.values, y = full.mfit$y, ni = full.mfit$ni, w.x = full.mfit$w.x, phi=full.mfit$dispersion, tcc = full.mfit$tcc) } ## Asymptotic distribution: variance and weights of the sum of chi2 matQ <- full.mfit$matQ matM11inv <- solve(matM[mod0,mod0]) Mplus <- matrix(0, ncol = pp, nrow = pp) Mplus[mod0, mod0] <- matM11inv d.ev <- Re(eigen(matQ %*% (solve(matM)-Mplus), only.values=TRUE)$values) d.ev <- d.ev[1:df] ## just the q (=df) lagest eigenvalues are needed if(any(d.ev < 0)) warning("some eigenvalues are negative") ## p-value: exact computation for q=1, approximated for q>1 (q=df) statistic <- c(quasi.dev = Dquasi.dev/mean(d.ev)) } else stop("non-implemented test method:", test, "for fitting method", full.mfit$method) ## return c(nrow(X)-pp+df*(Sign<0), Sign*statistic, Sign*df, pchisq(as.vector(statistic), df=df, lower.tail = FALSE)) } robustbase/R/lmrob.R0000644000176200001440000006470514511464163014103 0ustar liggesusers ### The first part of lmrob() much cut'n'paste from lm() - on purpose! lmrob <- function(formula, data, subset, weights, na.action, method = 'MM', model = TRUE, x = !control$compute.rd, y = FALSE, singular.ok = TRUE, contrasts = NULL, offset = NULL, control = NULL, init = NULL, ...) { ## to avoid problems with 'setting' argument ## call lmrob.control here either with or without method arg. if (miss.ctrl <- missing(control)) control <- if (missing(method)) lmrob.control(...) else lmrob.control(method = method, ...) else if (length(list(...))) ## "sophisticated version" of chk.s(...) warning("arguments .. in ", sub(")$", "", sub("^list\\(", "", deparse(list(...), control = c()))), " are disregarded.\n", " Maybe use lmrob(*, control=lmrob.control(....) 
with all these.") ret.x <- x ret.y <- y cl <- match.call() mf <- match.call(expand.dots = FALSE) m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"), names(mf), 0) mf <- mf[c(1, m)] mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") mf <- eval(mf, parent.frame()) mt <- attr(mf, "terms") # allow model.frame to update it y <- model.response(mf, "numeric") w <- as.vector(model.weights(mf)) # NULL if unspecified in call if(!is.null(w) && !is.numeric(w)) stop("'weights' must be a numeric vector") offset <- as.vector(model.offset(mf)) if(!is.null(offset) && length(offset) != NROW(y)) stop(gettextf("number of offsets is %d, should equal %d (number of observations)", length(offset), NROW(y)), domain = NA) if (!miss.ctrl && !missing(method) && method != control$method) { warning("The 'method' argument is different from 'control$method'\n", "Using the former, method = ", method) control$method <- method } if (is.empty.model(mt)) { x <- NULL singular.fit <- FALSE ## to avoid problems below z <- list(coefficients = if(is.matrix(y)) matrix(NA_real_, 0, ncol(y)) else numeric(), residuals = y, scale = NA, fitted.values = 0 * y, cov = matrix(NA_real_,0,0), weights = w, rank = 0, df.residual = if(!is.null(w)) sum(w != 0) else NROW(y), converged = TRUE, iter = 0) if(!is.null(offset)) { z$fitted.values <- offset z$residuals <- y - offset z$offset <- offset } } else { x <- model.matrix(mt, mf, contrasts) contrasts <- attr(x, "contrasts") assign <- attr(x, "assign") p <- ncol(x) if(!is.null(offset)) y <- y - offset if (!is.null(w)) { ## checks and code copied/modified from lm.wfit ny <- NCOL(y) n <- nrow(x) if (NROW(y) != n | length(w) != n) stop("incompatible dimensions") if (any(w < 0 | is.na(w))) stop("missing or negative weights not allowed") zero.weights <- any(w == 0) if (zero.weights) { save.r <- y save.w <- w save.f <- y ok <- w != 0 nok <- !ok w <- w[ok] x0 <- x[nok, , drop = FALSE] x <- x[ ok, , drop = FALSE] n <- nrow(x) y0 <- if (ny > 1L) y[nok, , drop = FALSE] else y[nok] y <- if (ny > 1L) y[ ok, , drop = FALSE] else y[ok] ## add this information to model.frame as well ## need it in outlierStats() ## ?? could also add this to na.action, then ## naresid() would pad these as well. attr(mf, "zero.weights") <- which(nok) } wts <- sqrt(w) save.y <- y x <- wts * x y <- wts * y } ## check for singular fit z0 <- .lm.fit(x, y, tol = control$solve.tol) piv <- z0$pivot rankQR <- z0$rank singular.fit <- rankQR < p if (rankQR > 0) { if (singular.fit) { if (!singular.ok) stop("singular fit encountered") pivot <- piv p1 <- pivot[seq_len(rankQR)] p2 <- pivot[(rankQR+1):p] ## to avoid problems in the internal fitting methods, ## split into singular and non-singular matrices, ## can still re-add singular part later dn <- dimnames(x) x <- x[,p1] attr(x, "assign") <- assign[p1] ## needed for splitFrame to work } if (is.function(control$eps.x)) control$eps.x <- control$eps.x(max(abs(x))) if (!is.null(ini <- init)) { if (is.character(init)) { init <- switch(init, "M-S" = lmrob.M.S(x, y, control, mf=mf), "S" = lmrob.S (x, y, control), stop('init must be "S", "M-S", function or list')) if(ini == "M-S") { ## "M-S" sometimes reverts to "S": ini <- init$control$method ## if(identical(ini, "M-S")) ## control$method <- paste0(ini, control$method) } } else if (is.function(init)) { init <- init(x=x, y=y, control=control, mf=mf) } else if (is.list(init)) { ## MK: set init$weights, init$residuals here ?? ## (needed in lmrob..D..fit) ## or disallow method = D... ? 
would need to fix also ## lmrob.kappa: tuning.psi / tuning.chi choice if (singular.fit) { ## make sure the initial coefficients vector matches ## to the reduced x init$coef <- na.omit(init$coef) if (length(init$coef) != ncol(x)) stop("Length of initial coefficients vector does not match rank of singular design matrix x") } } else stop("invalid 'init' argument") stopifnot(is.numeric(init$coef), is.numeric(init$scale)) ## modify (default) control$method, possibly dropping first letter: if (control$method == "MM" || substr(control$method, 1, 1) == "S") control$method <- substring(control$method, 2) ## check for control$cov argument if (class(init)[1] != "lmrob.S" && control$cov == '.vcov.avar1') control$cov <- ".vcov.w" } # else pass on init=NULL : z <- lmrob.fit(x, y, control, init=init) #-> ./lmrob.MM.R ## --------- if(is.character(ini) && !grepl(paste0("^", ini), control$method)) control$method <- paste0(ini, control$method) if (singular.fit) { coef <- numeric(p) coef[p2] <- NA coef[p1] <- z$coefficients names(coef) <- dn[[2L]] z$coefficients <- coef ## Update QR decomposition (z$qr) ## pad qr and qraux with zeroes (columns that were pivoted to the right in z0) d.p <- p-rankQR n <- NROW(y) z$qr[c("qr","qraux","pivot")] <- list(matrix(c(z$qr$qr, rep.int(0, d.p*n)), n, p, dimnames = list(dn[[1L]], dn[[2L]][piv])), ## qraux: c(z$qr$qraux, rep.int(0, d.p)), ## pivot: piv) } } else { ## rank 0 z <- list(coefficients = if (is.matrix(y)) matrix(NA_real_,p,ncol(y)) else rep.int(NA_real_, p), residuals = y, scale = NA, fitted.values = 0 * y, cov = matrix(NA_real_,0,0), rweights = rep.int(NA_real_, NROW(y)), weights = w, rank = 0, df.residual = NROW(y), converged = TRUE, iter = 0, control=control) if (is.matrix(y)) colnames(z$coefficients) <- colnames(x) else names(z$coefficients) <- colnames(x) if(!is.null(offset)) z$residuals <- y - offset } if (!is.null(w)) { z$residuals <- z$residuals/wts z$fitted.values <- save.y - z$residuals z$weights <- w if (zero.weights) { # compute residuals, fitted, wts... also for the 0-weight obs coef <- z$coefficients coef[is.na(coef)] <- 0 f0 <- x0 %*% coef ## above ok := (w != 0); nok := (w == 0) if (ny > 1) { save.r[ok, ] <- z$residuals save.r[nok, ] <- y0 - f0 save.f[ok, ] <- z$fitted.values save.f[nok, ] <- f0 } else { save.r[ok] <- z$residuals save.r[nok] <- y0 - f0 save.f[ok] <- z$fitted.values save.f[nok] <- f0 } z$residuals <- save.r z$fitted.values <- save.f z$weights <- save.w rw <- z$rweights z$rweights <- rep.int(0, length(save.w)) z$rweights[ok] <- rw } } } if(!is.null(offset)) z$fitted.values <- z$fitted.values + offset z$na.action <- attr(mf, "na.action") z$offset <- offset z$contrasts <- contrasts z$xlevels <- .getXlevels(mt, mf) z$call <- cl z$terms <- mt z$assign <- assign if(control$compute.rd && !is.null(x)) z$MD <- robMD(x, attr(mt, "intercept"), wqr=z$qr) if (model) z$model <- mf if (ret.x) z$x <- if (singular.fit || (!is.null(w) && zero.weights)) model.matrix(mt, mf, contrasts) else x if (ret.y) z$y <- if (!is.null(w)) model.response(mf, "numeric") else y class(z) <- "lmrob" z } if(getRversion() < "3.1.0") globalVariables(".lm.fit") ##' @title Warn about extraneous arguments in the "..." (of its caller) ##' @return ##' @author Martin Maechler, June 2012 chk.s <- function(...) { if(length(list(...))) warning("arguments ", sub(")$", '', sub("^list\\(", '', deparse(list(...), control=c()))), " are disregarded in\n ", deparse(sys.call(-1), control=c()), call. 
= FALSE) } ##' Robust Mahalanobis Distances ##' internal function, used in lmrob() and plot.lmrob() ##' also "wanted" by 'robustloggamma' pkg robMD <- function(x, intercept, wqr, ...) { ## NB: 'wqr' only needed when covMcd() is not (entirely) successful if(intercept == 1) x <- x[, -1, drop=FALSE] if(ncol(x) >= 1) { rob <- tryCatch(covMcd(x, ...), warning = function(w) structure("covMcd produced a warning", class="try-error", condition = w), error = function(e) structure("covMcd failed with an error", class="try-error", condition = e)) if (inherits(rob, "try-error")) { warning("Failed to compute robust Mahalanobis distances, reverting to robust leverages.") .lmrob.hat(wqr = wqr) } else sqrt( mahalanobis(x, rob$center, rob$cov) ) } ## else NULL } ### Method Functions for class lmrob objects ### ### ---------------------------------------- ### ## Many are just wrapper functions for the respective .lm methods ## ---- sorted *ALPHABETICALLY* ---- alias.lmrob <- function(object, ...) { ## Purpose: provide alias() for lmrob objects ## Cannot use alias.lm directly, since it requires a "clean" object$qr, ## i.e., without the robustness weights if (is.null(x <- object[["x"]])) x <- model.matrix(object) weights <- weights(object) if (!is.null(weights) && diff(range(weights))) x <- x * sqrt(weights) object$qr <- qr(x) class(object) <- "lm" alias(object) } ## R (3.1.0)-devel copy of case.names.lm() ...../R/src/library/stats/R/lm.R case.names.lmrob <- function(object, full = FALSE, ...) { w <- weights(object) dn <- names(residuals(object)) if(full || is.null(w)) dn else dn[w!=0] } ## coef(): no own method ==> using coef.default(OO) == OO$coefficients ## ------------- ## use confint.lm instead of confint.default ## mainly to get t instead of normal quantiles ## Now imported from 'stats' -- and S3 registered in ../NAMESPACE , too, but ## still needed for now (R bug fixed in svn rev 84463 - for R 4.4.0) confint.lm <- confint.lm dummy.coef.lm <- dummy.coef.lm family.lmrob <- function(object, ...) gaussian() ## == stats:::family.lm ## fitted.default works for "lmrob" ## base::kappa.lm() is "doomed"; call what kappa.lm() has been calling for years: kappa.lmrob <- function(z, ...) kappa.qr(z$qr, ...) ## == kappa.lm(z, ...) ## instead of stats:::qr.lm() qrLmr <- function(x) { if(!is.list(r <- x$qr)) stop("lmrob object does not have a proper 'qr' component. Rank zero?") r } ## Basically the same as stats:::labels.lm -- FIXME: rank 0 fits? labels.lmrob <- function(object, ...) { tl <- attr(object$terms, "term.labels") asgn <- object$assign[qrLmr(object)$pivot[seq_len(object$rank)]] tl[unique(asgn)] } ## Works via lm's method [which is still exported]: model.matrix.lmrob <- model.matrix.lm ## identical to stats:::nobs.lm {but that is hidden .. and small to copy}: nobs.lmrob <- function(object, ...) if (!is.null(w <- object$weights)) sum(w != 0) else NROW(object$residuals) if(FALSE) ## now replaced with more sophsticated in ./lmrobPredict.R ## learned from MASS::rlm() : via "lm" as well predict.lmrob <- function (object, newdata = NULL, scale = NULL, ...) { class(object) <- c(class(object), "lm") object$qr <- qr(sqrt(object$rweights) * object$x) predict.lm(object, newdata = newdata, scale = object$s, ...) } print.summary.lmrob <- function (x, digits = max(3, getOption("digits") - 3), symbolic.cor = x$symbolic.cor, signif.stars = getOption("show.signif.stars"), showAlgo = TRUE, ...) 
{ cat("\nCall:\n", paste(deparse(x$call, width.cutoff=72), sep = "\n", collapse = "\n"), "\n", sep = "") control <- lmrob.control.minimal(x$control, nobs = nobs(x, use.fallback = TRUE)) cat(" \\--> method = \"", control$method, '"\n', sep = "") ## else cat("\n") resid <- x$residuals df <- x$df rdf <- df[2L] cat(if (!is.null(x$weights) && diff(range(x$weights))) "Weighted ", "Residuals:\n", sep = "") if (rdf > 5L) { nam <- c("Min", "1Q", "Median", "3Q", "Max") rq <- if (NCOL(resid) > 1) structure(apply(t(resid), 1, quantile), dimnames = list(nam, dimnames(resid)[[2]])) else setNames(quantile(resid), nam) print(rq, digits = digits, ...) } else print(resid, digits = digits, ...) ## FIXME: need to catch rdf == 0? if( length(x$aliased) ) { if( !(x$converged) ) { if (x$scale == 0) { cat("\nExact fit detected\n\nCoefficients:\n") } else { cat("\nAlgorithm did not converge\n") if (control$method == "S") cat("\nCoefficients of the *initial* S-estimator:\n") else cat(sprintf("\nCoefficients of the %s-estimator:\n", control$method)) } printCoefmat(x$coef, digits = digits, signif.stars = signif.stars, ...) } else { if (nsingular <- df[3L] - df[1L]) cat("\nCoefficients: (", nsingular, " not defined because of singularities)\n", sep = "") else cat("\nCoefficients:\n") coefs <- x$coefficients if(!is.null(aliased <- x$aliased) && any(aliased)) { cn <- names(aliased) coefs <- matrix(NA, length(aliased), 4, dimnames=list(cn, colnames(coefs))) coefs[!aliased, ] <- x$coefficients } printCoefmat(coefs, digits = digits, signif.stars = signif.stars, na.print="NA", ...) cat("\nRobust residual standard error:", format(signif(x$scale, digits)),"\n") if(nzchar(mess <- naprint(x$na.action))) cat(" (",mess,")\n", sep = "") if(!is.null(x$r.squared) && x$df[1] != attr(x$terms, "intercept")) { cat("Multiple R-squared: ", formatC(x$r.squared, digits = digits)) cat(",\tAdjusted R-squared: ", formatC(x$adj.r.squared, digits = digits), "\n") } correl <- x$correlation if (!is.null(correl)) { p <- NCOL(correl) if (p > 1) { cat("\nCorrelation of Coefficients:\n") if (is.logical(symbolic.cor) && symbolic.cor) { print(symnum(correl), abbr.colnames = NULL) } else { correl <- format(round(correl, 2), nsmall = 2, digits = digits) correl[!lower.tri(correl)] <- "" print(correl[-1, -p, drop = FALSE], quote = FALSE) } } } if(is.numeric(it <- x$iter) && length(it)) cat("Convergence in", it, "IRWLS iterations\n") } cat("\n") if (!is.null(rw <- x$rweights)) { if (any(zero.w <- x$weights == 0)) rw <- rw[!zero.w] eps.outlier <- if (is.function(EO <- control$eps.outlier)) EO(nobs(x)) else EO summarizeRobWeights(rw, digits = digits, eps = eps.outlier, ...) } } else cat("\nNo Coefficients\n") if (showAlgo && !is.null(control)) printControl(control, digits = digits, drop. = "method") invisible(x) } print.lmrob <- function(x, digits = max(3, getOption("digits") - 3), ...) 
{ cat("\nCall:\n", cl <- deparse(x$call, width.cutoff=72), "\n", sep = "") control <- lmrob.control.minimal(x$control, nobs=nobs(x, use.fallback = TRUE)) if(!any(grepl("method *= *['\"]", cl)))## 'method = ".."' not explicitly visible above cat(" \\--> method = \"", control$method, '"\n', sep = "") else cat("\n") if(length((cf <- coef(x)))) { if( x$converged ) cat("Coefficients:\n") else { if (x$scale == 0) { cat("Exact fit detected\n\nCoefficients:\n") } else { cat("Algorithm did not converge\n\n") if (control$method == "S") cat("Coefficients of the *initial* S-estimator:\n") else cat(sprintf("Coefficients of the %s-estimator:\n", control$method)) } } print(format(cf, digits = digits), print.gap = 2, quote = FALSE) } else cat("No coefficients\n") cat("\n") invisible(x) } print.lmrob.S <- function(x, digits = max(3, getOption("digits") - 3), showAlgo = TRUE, ...) { cat("S-estimator lmrob.S():\n") if(length((cf <- coef(x)))) { if (x$converged) cat("Coefficients:\n") else if (x$scale == 0) cat("Exact fit detected\n\nCoefficients:\n") else cat("Algorithm did not converge\n\n") print(format(cf, digits = digits), print.gap = 2, quote = FALSE) } else cat("No coefficients\n") cat("scale = ",format(x$scale, digits=digits), "; ", if(x$converged)"converged" else "did NOT converge", " in ", x$k.iter, " refinement steps\n") if (showAlgo && !is.null(ctrl <- x$control)) printControl(lmrob.control.minimal(ctrl, nobs = nobs(x, use.fallback = TRUE), oStats = !is.null(ctrl$ostats)), digits = digits, drop. = "method") invisible(x) } ## practically identical to stats:::qr.lm : qr.lmrob <- function (x, ...) { if (is.null(r <- x$qr)) stop("lmrob object does not have a proper 'qr' component. Rank must be zero") r } residuals.lmrob <- function(object, ...) residuals.lm(object, ...) ## even simpler than residuals.default(): residuals.lmrob.S <- function(object, ...) object$residuals summary.lmrob <- function(object, correlation = FALSE, symbolic.cor = FALSE, ...) { if (is.null(object$terms)) stop("invalid 'lmrob' object: no terms component") p <- object$rank df <- object$df.residual #was $degree.freedom sigma <- object[["scale"]] aliased <- is.na(coef(object)) cf.nms <- c("Estimate", "Std. Error", "t value", "Pr(>|t|)") if (p > 0) { n <- p + df p1 <- seq_len(p) se <- sqrt(if(length(object$cov) == 1L) object$cov else diag(object$cov)) est <- object$coefficients[object$qr$pivot[p1]] tval <- est/se ans <- object[c("call", "terms", "residuals", "weights", "scale", "rweights", "na.action", "converged", "iter", "control")] ans[is.na(names(ans))] <- NULL # e.g. 
{"na.action", "iter"} for method = "S" if (!is.null(ans$weights)) ans$residuals <- ans$residuals * sqrt(object$weights) ## 'df' vector, modeled after summary.lm() : ans$df <- c(p, rdf, NCOL(Qr$qr)) ## where p <- z$rank ; rdf <- z$df.residual ; Qr <- qr.lm(object) ans$df <- c(p, df, NCOL(object$qr$qr)) ans$coefficients <- if( ans$converged) cbind(est, se, tval, 2 * pt(abs(tval), df, lower.tail = FALSE)) else cbind(est, if(sigma <= 0) 0 else NA, NA, NA) dimnames(ans$coefficients) <- list(names(est), cf.nms) if (p != attr(ans$terms, "intercept")) { df.int <- if (attr(ans$terms, "intercept")) 1L else 0L ## This block is based on code by Olivier Renaud resid <- object$residuals pred <- object$fitted.values resp <- if (is.null(object[["y"]])) pred + resid else object$y wgt <- object$rweights ## scale.rob <- object$scale ## correction = E[wgt(r)] / E[psi'(r)] = E[wgt(r)] / E[r*psi(r)] ctrl <- object$control c.psi <- ctrl$tuning.psi psi <- ctrl$psi correc <- if (psi == 'ggw') { if (isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.95, NA)))) 1.121708 else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.95, NA)))) 1.163192 else if (isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.85, NA)))) 1.33517 else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.85, NA)))) 1.395828 else lmrob.E(wgt(r), ctrl) / lmrob.E(r*psi(r), ctrl) } else if (any(psi == .Mpsi.R.names) && isTRUE(all.equal(c.psi, .Mpsi.tuning.default(psi)))) { switch(psi, bisquare = 1.207617, welsh = 1.224617, # 1.2246131 optimal = 1.068939, hampel = 1.166891, lqq = 1.159232, stop('unsupported psi function -- should not happen')) } else lmrob.E(wgt(r), ctrl) / lmrob.E(r*psi(r), ctrl) resp.mean <- if (df.int == 1L) sum(wgt * resp)/sum(wgt) else 0 yMy <- sum(wgt * (resp - resp.mean)^2) rMr <- sum(wgt * resid^2) ans$r.squared <- r2correc <- (yMy - rMr) / (yMy + rMr * (correc - 1)) ans$adj.r.squared <- 1 - (1 - r2correc) * ((n - df.int) / df) } else ans$r.squared <- ans$adj.r.squared <- 0 ans$cov <- object$cov if(length(object$cov) > 1L) dimnames(ans$cov) <- dimnames(ans$coefficients)[c(1,1)] if (correlation) { ans$correlation <- ans$cov / outer(se, se) ans$symbolic.cor <- symbolic.cor } } else { ## p = 0: "null model" ans <- object ans$df <- c(0L, df, length(aliased)) ans$coefficients <- matrix(ans$coefficients[0L], 0L, 4L, dimnames = list(NULL, cf.nms)) ans$r.squared <- ans$adj.r.squared <- 0 ans$cov <- object$cov } ans$aliased <- aliased # used in print method ans$sigma <- sigma # 'sigma': in summary.lm() & 'fit.models' pkg if (is.function(epsO <- ans$control$eps.outlier)) ans$control$eps.outlier <- epsO(nobs(object)) if (is.function(epsX <- ans$control$eps.x)) ans$control$eps.x <- if(!is.null(o.x <- object[['x']])) epsX(max(abs(o.x))) ## else NULL structure(ans, class = "summary.lmrob") } ## R (3.1.0)-devel copy of variable.names.lm() ...../R/src/library/stats/R/lm.R variable.names.lmrob <- function(object, full = FALSE, ...) { if(full) dimnames(qrLmr(object)$qr)[[2L]] else if(object$rank) dimnames(qrLmr(object)$qr)[[2L]][seq_len(object$rank)] else character() } vcov.lmrob <- function (object, cov = object$control$cov, complete = TRUE, ...) { if(!is.null(object$cov) && (missing(cov) || identical(cov, object$control$cov))) .vcov.aliased(aliased = is.na(coef(object)), object$cov, complete= if(is.na(complete)) FALSE else complete) else { ## cov is typically = ".vcov.w" or ".vcov.avar1", but can be *any* user func. lf.cov <- if (!is.function(cov)) get(cov, mode = "function") else cov lf.cov(object, complete=complete, ...) } } sigma.lmrob <- function(object, ...) 
object$scale weights.lmrob <- function(object, type = c("prior", "robustness"), ...) { type <- match.arg(type) res <- if (type == "prior") { ## Issue warning only if called from toplevel. Otherwise the warning pop ## up at quite unexpected places, e.g., case.names(). if (is.null(object[["weights"]]) && identical(parent.frame(), .GlobalEnv)) warning("No weights defined for this object. Use type=\"robustness\" argument to get robustness weights.") object[["weights"]] } else object[["rweights"]] if (is.null(object$na.action)) res else naresid(object$na.action, res) } #### functions hidden in namespace #### printControl <- function(ctrl, digits = getOption("digits"), str.names = "seed", drop. = character(0), header = "Algorithmic parameters:", ...) { ## Purpose: nicely and sensibly print a 'control' structure ## currently for lmrob(), glmrob() ## Author: Martin Maechler, 2006 ff force(ctrl) # ->> better error msg ## NB: unlist() drops setting=NULL [ok] PR <- function(LST, ...) { if(length(LST)) { if(any(L <- !vapply(LST, function(.) is.atomic(.) || is.null(.), NA))) { ## treat non-{atomic|NULL}: LST[L] <- lapply(LST[L], str2simpLang) } print(unlist(LST), ...) } } ##' maybe generally useful TODO? ---> {utils} or at least {sfsmisc} ? str2simpLang <- function(x) { r <- if(is.null(x)) quote((NULL)) else str2lang(deparse1(x)) if(is.call(r)) format(r) else r } cat(header,"\n") is.str <- (nc <- names(ctrl)) %in% str.names do. <- !is.str & !(nc %in% drop.) is.ch <- vapply(ctrl, is.character, NA) real.ctrl <- vapply(ctrl, function(x) # real, *not* integer-valued length(x) > 0 && is.numeric(x) && any(x %% 1 != 0), NA) PR(ctrl[do. & real.ctrl], digits = digits, ...) ## non-real, non-char ones (typically integers), but dropping 0-length ones PR(ctrl[do. & !is.ch & !real.ctrl], ...) ## char ones PR(ctrl[do. & is.ch], ...) if(any(is.str)) for(n in nc[is.str]) { cat(n,":") str(ctrl[[n]], vec.len = 2) ## 'vec.len = 2' is smaller than normal, but nice for Mersenne seed } } summarizeRobWeights <- function(w, digits = getOption("digits"), header = "Robustness weights:", eps = 0.1 / length(w), eps1 = 1e-3, ...) { ## Purpose: nicely print a "summary" of robustness weights stopifnot(is.numeric(w)) cat(header,"\n") cat0 <- function(...) cat('', ...) n <- length(w) if(n <= 10) print(w, digits = digits, ...) else { n1 <- sum(w1 <- abs(w - 1) < eps1) n0 <- sum(w0 <- abs(w) < eps) if(any(w0 & w1)) warning("weights should not be both close to 0 and close to 1!\n", "You should use different 'eps' and/or 'eps1'") if(n0 > 0 || n1 > 0) { if(n0 > 0) { formE <- function(e) formatC(e, digits = max(2, digits-3), width=1) i0 <- which(w0) maxw <- max(w[w0]) c3 <- paste0("with |weight| ", if(maxw == 0) "= 0" else paste("<=", formE(maxw)), " ( < ", formE(eps), ");") cat0(if(n0 > 1) { cc <- sprintf("%d observations c(%s)", n0, strwrap(paste(i0, collapse=","))) c2 <- " are outliers" paste0(cc, if(nchar(cc)+ nchar(c2)+ nchar(c3) > getOption("width")) "\n ", c2) } else sprintf("observation %d is an outlier", i0), c3, "\n") } if(n1 > 0) cat0(ngettext(n1, "one weight is", sprintf("%s%d weights are", if(n1 == n)"All " else '', n1)), "~= 1.") n.rem <- n - n0 - n1 if(n.rem <= 0) { # < 0 possible if w0 & w1 overlap if(n1 > 0) cat("\n") return(invisible()) } cat0("The remaining", ngettext(n.rem, "one", sprintf("%d ones", n.rem)), "are") if(is.null(names(w))) names(w) <- as.character(seq(along = w)) w <- w[!w1 & !w0] if(n.rem <= 10) { cat("\n") print(w, digits = digits, ...) 
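## (added note:) the <= 10 remaining "intermediate" weights -- neither ~ 0 nor
## ~ 1 -- have just been printed in full; with more of them, only summary(w)
## is shown below.  summarizeRobWeights() is exported and can also be called
## directly, e.g. (sketch, 'm' an lmrob fit):
##   summarizeRobWeights(weights(m, type = "robustness"))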
return(invisible()) } else cat(" summarized as\n") } print(summary(w, digits = digits), digits = digits, ...) } } robustbase/R/ltsPlot.R0000644000176200001440000002352214436463173014427 0ustar liggesusers#### This is from the R package #### #### rrcov : Scalable Robust Estimators with High Breakdown Point #### #### by Valentin Todorov ### This program is free software; you can redistribute it and/or modify ### it under the terms of the GNU General Public License as published by ### the Free Software Foundation; either version 2 of the License, or ### (at your option) any later version. ### ### This program is distributed in the hope that it will be useful, ### but WITHOUT ANY WARRANTY; without even the implied warranty of ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ### GNU General Public License for more details. ### ### You should have received a copy of the GNU General Public License ### along with this program; if not, a copy is available at ### http://www.r-project.org/Licenses/ plot.lts <- function(x, which = c("all", "rqq","rindex", "rfit", "rdiag"), classic = FALSE, ask = (which[1] == "all" && dev.interactive()), id.n, ...) { if (!inherits(x, "lts")) stop("Use only with 'lts' objects") ltsPlot(x, which, classic, ask, id.n, ...) } ltsPlot <- function(x, which = c("all", "rqq","rindex", "rfit", "rdiag"), classic = FALSE, ask = FALSE, id.n, ...) { ##@bdescr ## Make plots for model checking and outlier detection based on ## the LTS regression estimates: ## rqq - normal quantile plot of the LTS and LS residuals ## rindex - standardized LTS/LS Residuals versus index ## rfit - standardized LTS/LS Residuals versus fitted values ## rdiag - regression diagnostic plot ## ##@edescr ## ##@in x : [object] An lts object ##@in which : [character] A plot option, one of: ## rqq: ## rdiag: ## rfit: ## rindex: ## default is "rqq" ##@in classic : [logical] If true the classical plot will be displayed too ## default is classic=FALSE ##@in id.n : [number] number of observations to be identified with a label. label <- function(x, y, ord, lab, id.n, ...) { if(id.n) { n <- length(y) which <- order(ord)[(n - id.n + 1):n] lab <- if(missing(lab)) which else lab[which] ## how to adjust the labels? ## a) adj=0.1 ## b) x=x+xrange ## c) pos=4 (to the left of the observation) ## d) additionaly to pos specify offset=0.2 (fraction of a character) xrange <- par("usr") xrange <- (xrange[2] - xrange[1])/50 text(x[which], y[which], pos = 4, offset = 0.2, lab, ...) } } ## The R function 'qqline' (package::stats) adds a line to a ## normal quantile-quantile plot which passes through the ## first and third quartiles. In S this function returns the ## slope and intercept of the line, but not in R. ## Here we need the slope and intercept in order to sort the ## residuals according to their distance from the line. myqqline <- function(y, datax = FALSE, ...) { y <- quantile(y[!is.na(y)],c(0.25, 0.75)) x <- qnorm(c(0.25, 0.75)) if(datax) { slope <- diff(x)/diff(y) int <- x[1] - slope*y[1] } else { slope <- diff(y)/diff(x) int <- y[1]-slope*x[1] } abline(int, slope, ...) invisible(list(int = int, slope = slope)) } myqqplot <- function(r, classic = FALSE, lab, id.n, ...) { ## Normal QQ-plot of residuals: ## Produces a Quantile-Quantile plot in which the vector r is plotted ## against the quantiles of a standard normal distribution. 
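## (added usage sketch:) plot.lts() / ltsPlot() are typically reached via, e.g.
## (using the base-R 'stackloss' data as in the ltsReg() examples):
##   lts <- ltsReg(stack.loss ~ ., data = stackloss)
##   plot(lts, which = "rqq")                   # -> this myqqplot() helper
##   plot(lts, which = "all", classic = TRUE)   # LTS and LS panels side by side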
xlab <- "Quantiles of the standard normal distribution" ylab <- if(classic) "Standardized LS residual" else "Standardized LTS residual" qq <- qqnorm(r, mgp = mgp, xlab = xlab, ylab = ylab, ...) ll <- myqqline(r, lty = 2, ...) ord <- abs(qq$y - ll$int - ll$slope * qq$x) label(qq$x, qq$y, ord, lab, id.n, ...) } indexplot <- function(r, scale, classic = FALSE, lab, id.n, ...) { ## Index plot of standardized residuals: ## Plot the vector r (LTS or LS residuals) against ## the observation indexes. Identify by a label the id.n ## observations with largest value of r. ## Use classic=FALSE/TRUE to choose the label of the vertical axes ## VT:: 26.12.2004 if(scale == 0) stop("Index plot of standardized residuals is not avalable if scale = 0") xlab <- "Index" ylab <- if(classic) "Standardized LS residual" else "Standardized LTS residual" x <- 1:length(r) y <- r/scale ylim <- c(min(-3, min(y)), max(3, max(y))) plot(x, y, ylim = ylim, mgp = mgp, xlab = xlab, ylab = ylab, ...) label(x, y, ord = abs(y), lab, id.n, ...) abline(h = 0, lty = 4, ...) abline(h = c(-2.5, 2.5), ...) mtext(c("-2.5","2.5"), side = 4, line = 1.2, at = c(-2.5, 2.5), ...) title(main = "Residuals vs Index") } ##' Tukey-Anscombe Plot (rename ?!) fitplot <- function(obj, classic = FALSE, lab, id.n, ...) { ## Standardized residuals vs Fitted values plot: ## Plot the vector r (LTS or LS residuals) against ## the corresponding fitted values. Identify by a ## label the id.n observations with largest value of r. ## Use classic=FALSE/TRUE to choose the label of the vertical axes ## VT:: 26.12.2004 if(obj$scale == 0) stop("Standardized residuals vs Fitted values plot is not avalable if scale = 0") ## x <- obj$X %*% as.matrix(obj$coef) x <- obj$fitted.values y <- obj$residuals/obj$scale ylim <- c(min(-3, min(y)), max(3, max(y))) yname <- names(obj$scale) xlab <- paste("Fitted :", yname) ylab <- if(classic) "Standardized LS residual" else "Standardized LTS residual" plot(x, y, ylim = ylim, mgp = mgp, xlab = xlab, ylab = ylab, ...) label(x, y, ord = abs(y), lab, id.n, ...) abline(h = 0, lty = 4, ...) abline(h = c(-2.5, 2.5), ...) mtext(c("-2.5","2.5"), side = 4, line = 1.2, at = c(-2.5, 2.5), ...) title(main = "Residuals vs Fitted") } ## fitplot() rdiag <- function(obj, classic = FALSE, lab, id.n, ...) { ## Regression diagnostic plot: ## Plot the vector of the standardized residuals against ## the robust distances of the predictor variables ## Identify by a label the id.n observations with largest value of r. 
## Use classic=FALSE/TRUE to choose the label of the vertical axes p <- if(obj$intercept) length(obj$coef) - 1 else length(obj$coef) if(p <= 0) warning("Diagnostic plot is not available for univar\niate location and scale estimation") ## VT:: 26.12.2004 if(obj$scale <= 0) stop("Regression Diagnostic plot is not avalable if scale = 0") if(is.null(obj$RD)) stop("Regression Diagnostic plot is not avalable: option mcd=F was set in ltsReg().") if(obj$RD[1] == "singularity") stop("The MCD covariance matrix was singular.") if(classic) { xlab <- "Mahalanobis distance" ylab <- "Standardized LS residual" } else { xlab <- "Robust distance computed by MCD" ylab <- "Standardized LTS residual" } ## VT:: 18.01.20045 ## set id.n to the number of all outliers: ## regression outliers (weight==0)+ leverage points (RD > cutoff) if(missing(id.n)) { id.n <- length(unique(c(which(obj$RD > sqrt(qchisq(0.975, p))), which(obj$lts.wt == 0)))) } quant <- max(c(sqrt(qchisq(0.975, p)), 2.5)) x <- obj$RD y <- obj$residuals/obj$scale ## xlim <- c(0, max(quant + 0.1, max(x))) ylim <- c(min(-3, min(y)), max(3, max(y))) plot(x, y, ylim = ylim, mgp = mgp, xlab = xlab, ylab = ylab, main = "Regression Diagnostic Plot", ...) ord <- apply(abs(cbind(x/2.5, y/quant)), 1, max) label(x, y, ord = ord, lab, id.n, ...) abline(v = quant, h = c(-2.5, 2.5), ...) mtext(c("-2.5","2.5"), side = 4, line = 1.2, at = c(-2.5, 2.5), ...) } ## rdiag() ## parameters and preconditions which <- match.arg(which) r <- residuals(x) n <- length(r) id.n.missing <- missing(id.n) || is.null(id.n) ## if id.n is missing, it will be set to a default for each plot. if(!id.n.missing) { id.n <- as.integer(id.n) if(id.n < 0 || id.n > n) stop("'id.n' must be in {1,..,",n,"}") } mgp <- c(2.5, 1, 0) # set the margin line (in 'mex' units) for the: ## - axis title, ## - axis labels and ## - axis line. ## The default is 'c(3, 1, 0)'. if(!classic) par(pty = "m") else { opar <- par(mfrow = c(1,2), pty = "m") on.exit(par(opar)) ## calculate the LS regression (using LTS with alpha = 1) ## if intercept, obj$X is augmented with a column of 1s - remove it if(x$intercept && # model with intercept length(dim(x$X)) == 2 && # X is 2-dimensional (nc <- ncol(x$X)) > 1 && # X has more than 1 column all(x$X[,1] == 1)) # the first column of X is all 1s X <- x$X[, -1] else X <- x$X obj.cl <- ltsReg(X, x$Y, intercept = x$intercept, alpha = 1) } if (ask) { op <- par(ask = TRUE) on.exit(par(op)) } ## set id.n to the number of regression outliers (weight==0): nx <- if(id.n.missing) length(which(x$lts.wt == 0)) else id.n if(which == "all" || which == "rqq") { ## VT::20.12.2006 - the standardized residuals are in x$resid ## - no change for the other plot functions - the residuals will be standardized ## inside indexplot(), fitplot(), etc myqqplot(x$resid, id.n = nx, ...) # normal QQ-plot of the LTS residuals if(classic) # normal QQ-plot of the LS residuals myqqplot(obj.cl$resid, classic = TRUE, id.n = nx, ...) } if(which == "all" || which == "rindex") { indexplot(x$residuals, x$scale, id.n = nx, ...) # index plot of the LTS residuals if(classic) # index plot of the LS residuals indexplot(obj.cl$residuals, obj.cl$scale, classic = TRUE, id.n = nx, ...) } if(which == "all" || which == "rfit") { fitplot(x, id.n = nx, ...) if(classic) fitplot(obj.cl, classic = TRUE, id.n = nx, ...) } if(which == "all" || which == "rdiag") { rdiag(x, id.n = id.n, ...) if(classic) rdiag(obj.cl, classic = TRUE, id.n = id.n, ...) 
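## (added note:) all residual panels mark the cutoffs +/- 2.5 for the
## standardized LTS (or LS) residuals; the regression diagnostic plot ("rdiag")
## additionally draws a vertical cutoff at max(sqrt(qchisq(0.975, p)), 2.5)
## for the robust (MCD) distances.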
} } robustbase/R/comedian.R0000644000176200001440000001401712441664610014535 0ustar liggesusers### -*- mode: R ; delete-old-versions: never -*- ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, a copy is available at ## http://www.r-project.org/Licenses/ ### From package 'Biobase' (has only rowMedians + rowQ) / 'matrixStats' ### MM: all type checking now in C ## --- TODO: implement hasNA=NA ==> do check maybe differently than = TRUE ## --> ../src/rowMedians.c + ../src/rowMedians_TYPE-template.h colMedians <- function(x, na.rm=FALSE, hasNA=TRUE, keep.names=TRUE) .Call(R_rowMedians, x, na.rm, hasNA, FALSE, keep.names) rowMedians <- function(x, na.rm=FALSE, hasNA=TRUE, keep.names=TRUE) .Call(R_rowMedians, x, na.rm, hasNA, TRUE, keep.names) ### Maria Anna di Palma, without consistency factor 15.11.2014 ### Fixes by Valentin Todorov ### Martin Maechler: added mad() consistency factor, 27.11.2014 ### new name, class; more compatible to 'covMcd' covComed <- function (X, n.iter = 2, reweight = FALSE, tolSolve = control$ tolSolve,# had 1e-10 hardwired {now 1e-14 default} trace = control$ trace, wgtFUN = control$ wgtFUN, control = rrcov.control()) { ## ATTENTION ## ## Med(abs(X))^2=Med(X*X) only if the number of rows is odd d <- dim(X <- as.matrix(X)) n <- d[1] p <- d[2] if(is.character(wgtFUN)) wgtFUN <- .wgtFUN.covComed[[wgtFUN]](p=p, n=n, control) if(!is.function(wgtFUN)) stop("'wgtFUN' must be a function or a string specifying such a function") madX <- apply(X, 2, mad) I.mad <- 1/madX rho <- I.mad * COM(X) * rep(I.mad, each = p) ## better than ## D <- diag(1/madX) ## rho <- D %*% COM(X) %*% t(D) U <- svd(rho, p, nv = 0L)$u ## DD <- diag(madX) ## Q <- DD %*% U ## invQ <- solve(Q) ## == t(U) %*% D -- since U is orthogonal! t.inv.Q <- I.mad * U # = t(solve(Q)) = t(t(U) * D) == t(D) U = D U Z <- X %*% t.inv.Q ## much faster than for (i in 1:n) Z[i,] <- invQ %*% X[i,] out <- comedian(rho, Z, X) ## Mahalanobis distance for(it in seq_len(n.iter))# allow n.iter = 0 out <- comedian(out$S., out$Z, X) mm <- colMedians(out$Z) mx <- drop(out$Q %*% mm) ## MM: These are "raw" distances compared to covMcd() mah <- mahalanobis(X, mx, out$S., tol = tolSolve) ## compute weights weights <- wgtFUN(mah) covW <- cov.wt(X, wt=weights)[c("cov", "center", "n.obs")] covW$weights <- if(reweight) { ## above 'mah' = 'raw.mah' .. 
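## (added note:) with reweight=TRUE the Mahalanobis distances w.r.t. the
## weighted estimate are stored in $mah and another set of weights is computed,
## cf. the reweighting step in covMcd().  Typical call (sketch, using
## robustbase's 'hbk' data):
##   cc <- covComed(data.matrix(hbk[, 1:3]))
##   which(cc$weights == 0)        # observations flagged as outliers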
==> allow another reweighting as in covMcd() covW$raw.weights <- weights covW$mah <- mahalanobis(X, covW$center, covW$cov, tol = tolSolve) wgtFUN(mah) } else # no re-weighting weights structure(class = "comed", c(list(Z = out$Z, raw.cov = out$S., raw.center = mx, raw.mah = mah, wgtFUN=wgtFUN), covW)) } ##' Martin Maechler's simple proposal for an *adaptive* cutoff ##' i.e., one which does *not* reject outliers in good samples asymptotically .COM.adaptWgt.c <- function(n,p, eps = 0.2 / n^0.3) { ## default eps ==> 1-eps(n=100) ~= 0.95; 1-eps(n=10) ~= 0.90 ## using upper tail: 1.4826 * qchisq(eps, p, lower.tail=FALSE) / qchisq(0.5, p) } ## Default wgtFUN() constructors for covComed(): .wgtFUN.covComed <- list("01.original" = function(p, ...) { cMah <- .COM.adaptWgt.c(p=p, eps = 0.05)# 1 - eps = 0.95 function(d) as.numeric(d < median(d)*cMah) }, "01.flex" = function(p, n, control) { ## 'beta' instead of 0.95 stopifnot(is.1num(beta <- control$beta), 0 <= beta, beta <= 1) cMah <- 1.4826 * qchisq(beta, p) / qchisq(0.5, p) function(d) as.numeric(d < median(d)*cMah) }, "01.adaptive" = function(p, n, ...) { ## 'beta_n' instead of 0.975 cMah <- .COM.adaptWgt.c(n,p) function(d) as.numeric(d < cMah) }, "sm1.flex" = function(p, n, control) { ## 'beta' / smooth weight stopifnot(is.1num(beta <- control$beta), 0 <= beta, beta <= 1) cMah <- 1.4826 * qchisq(beta, p) / qchisq(0.5, p) function(d) smoothWgt(d / median(d), c=cMah, h = 1) }, "sm1.adaptive" = function(p, n, ...) { cMah <- .COM.adaptWgt.c(n=n, p=p) function(d) smoothWgt(d / median(d), c = cMah, h = 1) }, "sm2.adaptive" = function(p, n, ...) { cMah <- .COM.adaptWgt.c(n=n, p=p) function(d) smoothWgt(d / median(d), c = cMah, h = 2) } ) comedian <- function (rho, Z, X) { p <- ncol(X) U <- svd(rho, nv = 0L)$u madX <- apply(X, 2, mad) I.mad <- 1/madX ## D <- diag(madX) ## Q <- D %*% U Q <- madX * U ## invQ <- solve(Q) t.inv.Q <- I.mad * U # = t(solve(Q)) = t(t(U) * D) == t(D) U = D U Z <- X %*% t.inv.Q ## for (i in 1:n) Z[i,] <- invQ %*% X[i,] madZ <- apply(Z, 2, mad) list(Q=Q, Z=Z, S. = tcrossprod(Q * rep(madZ, each=p))) ## better than ## S. = Q %*% diag(madZ)^2 %*% t(Q) } COM <- function(X) { ## Comedian *with* consistency factor. Falk(1997) was without it. stopifnot(is.1num(p <- ncol(X)), p >= 1) med <- colMedians(X) Y <- sweep(X, 2L, med, `-`) COM <- matrix(0., p,p) madY <- numeric(p) for(i in 1:p) { madY[i] <- madYi <- mad(Yi <- Y[,i]) for(j in seq_len(i-1)) { # j <= i ==> madY[j] "exists" COM[j,i] <- COM[i,j] <- median(Yi * Y[,j]) / (madYi * madY[j]) ## COM[i,j] <- median((Y[,i])*(Y[,j])) ## COM[i,j] <- (1.4826^2)*median((Y[,i])*(Y[,j])) } ## j == i : COM[i,i] <- median(Yi^2) / (madYi^2) } ## return [ 1.4826 = formals(mad)$constant = consistency factor of mad()] 1.4826^2 * COM } robustbase/R/psi-rho-funs.R0000644000176200001440000004200312737461431015310 0ustar liggesusers#### Psi(), Rho(), weight() etc functions for M-Estimation and extensions ## Use an S4 class for such function classes ## Follow a similar idea as nlsModel() {in "stats"} which returns ## a list of functions sharing a common {non-small!} environment ## NOTA BENE: Experiments etc are currently in ../misc/experi-psi-rho-funs.R ## --------- (FIXME: move those to ../tests/psi-rho-etc.R and the vignette ## ../vignettes//psi_functions.Rnw (and see ../inst/xtraR/plot-psiFun.R) ## ---> look for 'FIXME' below !!! ## ------- ### A. (Symmetric) Location / Regression ## A single function(x, tuningPars) ## a. 1st argument 'x', numeric; must work vectorized on x ## b. 
further arguments: tuning parameters *with a default* setClass("functionX", contains = "function", validity = function(object) { ## "function" is already because of 'contains' if(names(ff <- formals(object))[1] != "x") return("first argument must be 'x'") f0 <- object(0) fI <- object(Inf) if(!identical(c(f0,fI), object(c(0,Inf)))) return("F(x, *) does not vectorize in 'x'") ## Otherwise : valid TRUE }) ## A functional --- i.e. function of "tuning pars only", such as ## Ep(hc) = Int_{-Inf}^{+Inf} psi(x; hc)^2 dnorm(x) dx ##' This one is *not* checked for vectorization: needed when length(k) > 1 setClass("functionXal", contains = "function") ##' Here F(k) must vectorize in k setClass("functionXal1", contains = "functionXal", validity = function(object) { f0 <- object(0) fI <- object(Inf) if(!identical(c(f0,fI), object(c(0,Inf)))) return("F(k) = I_k[f(.)] does not vectorize in 'k'") ## Otherwise : valid TRUE }) setClass("psi_func", slots = c(rho = "functionX", psi = "functionX", ## psi(x) == d/dx rho(x) = x * wgt(x) wgt = "functionX", ## wgt(x) == psi(x) / x Dpsi = "functionX",## psi'(x) == d/dx psi(x) = rho''(x) Dwgt = "functionX", ## wgt'(x) == d/dx wgt(x) ## tuning parameters, i.e., formals(rho)[-1] tDefs = "numeric",## *named* values of tuning parameters ## FIXME !! {see 4 lines below} Erho = "functionXal", # = E_X[rho(X)]; X~N(0,1); Epsi2 = "functionXal", # = E_X[psi(X)^2]; X~N(0,1); 'A' EDpsi = "functionXal", # = E_X[psi'(X)]; X~N(0,1); 'B' ## name = "character", xtras = "list" ## for flexible extensions.. )) ## FIXME: need other E[] than just wrt N(0,1) ## ----- e.g. for robglm(), need E[] wrt Gamma(.) ### Constructors / "Examples" [the examples are the objects, we'll really use!] psiFunc <- function(rho,psi,wgt, Dpsi,Dwgt, Erho=NULL, Epsi2=NULL, EDpsi=NULL, name, ...) { lent <- length(dotsargs <- list(...)) ## '...' must contain all tuning parameters and their defaults: ## NOTA BENE: Now want at least one tuning parameter.. "worst case": a dummy stopifnot(lent >= 1, length(nt <- names(dotsargs)) == lent, all(nchar(nt)) >= 1) ## Definition of Dwgt is optional if (missing(Dwgt)) Dwgt <- .defDwgt(psi, Dpsi) ## rho, psi,... checking: must have argument names argn <- c("x", nt) for(fnam in list("rho", "psi", "wgt", "Dpsi", "Dwgt", "Erho", "Epsi2", "EDpsi")) { f <- get(fnam, inherits = FALSE) ef <- environment(f) nf <- names(formals(f)) # "x" and "k" for Huber's if (fnam %in% c("Erho", "Epsi2", "EDpsi")) { if(!identical(nf, argn[-1])) stop("arguments of function '",fnam,"' are (", paste(nf, collapse=","),") but should be (", paste(argn[-1],collapse=","),").") formals(f) <- dotsargs } else { if(!identical(nf, argn)) stop("arguments of function '",fnam,"' are (", paste(nf, collapse=","),") but should be (", paste(argn,collapse=","),").") formals(f)[-1] <- dotsargs } environment(f) <- ef assign(fnam, f, inherits = FALSE) } fnctl.typ <- if(lent == 1 && length(dotsargs[[1]]) == 1) "functionXal1" else "functionXal" new("psi_func", rho = new("functionX", rho), psi = new("functionX", psi), wgt = new("functionX", wgt), Dpsi= new("functionX", Dpsi), Dwgt= new("functionX", Dwgt), ## tNams = if(lent) nt else character(0), tDefs = unlist(dotsargs), Erho = new(fnctl.typ, Erho), Epsi2= new(fnctl.typ, Epsi2), EDpsi= new(fnctl.typ, EDpsi), name = if (missing(name)) character(0) else name, xtras= list(tuningP = dotsargs)) } ## Generate default Dwgt function ## Unfortunately, MM can't see how to make this works nicely; ## ._.. 
= args should really be something like 'x, k' {no parens}: .defDwgt <- function(psi, Dpsi) { args <- formals(Dw <- psi)# -> same formals body(Dw) <- substitute({ y <- .X. .X. <- .X.[not0 <- .X. != 0] y[not0] <- ( Dpsi(._..) - psi(._..)/.X. ) / .X. y }, list(.X. = as.name(names(args[1])), ._.. = args)) environment(Dw) <- environment() Dw } ## so we use this "less nice" variant: .defDwgt <- function(psi, Dpsi) { nf <- names(formals(psi)) eval(parse(text = gsub("_,_", paste(nf, collapse=","), gsub("x", nf[1], "function(_,_) { y <- x x <- x[not0 <- x != 0] y[not0] <- ( Dpsi(_,_) - psi(_,_)/x ) / x y }")))) } chgDefaults <- function(object, ...) standardGeneric("chgDefaults") setMethod("chgDefaults", signature("psi_func"), function(object, ...) { lent <- length(dotsargs <- list(...)) ## '...' must contain all tuning parameters and their defaults: stopifnot(lent >= 1, length(nt <- names(dotsargs)) == lent, all(nchar(nt)) >= 1) ## rho "..." must conform to rho, etc: nf <- names(ff <- formals(object@rho)) if(!identical(nf[-1], nt)) stop("invalid tuning parameter names: ", paste(nt, collapse=",")," instead of ", paste(nf[-1],collapse=","),".") for(fnam in list("rho", "psi", "wgt", "Dpsi", "Dwgt", "Erho", "Epsi2", "EDpsi")) { f <- slot(object, fnam) ef <- environment(f) if (is(f, "functionXal")) formals(f) <- dotsargs else formals(f)[-1] <- dotsargs environment(f) <- ef ## lowlevel {faster than}: slot(..) <- new("functionX", f) slot(object, fnam)@.Data <- f } object@tDefs <- unlist(dotsargs) if(identical(nt, names(object@xtras$tuningP)))# TODO: should update even if there are others object@xtras$tuningP <- setNames(eval(dotsargs), nm=nt) object }) .sprintPsiFunc <- function(x, short=FALSE, round=3) { v <- x@tDefs n <- names(v) ## do not print a single dummy parameter "." if (length(n) == 1 && n == ".") v <- numeric(0) if (!length(name <- x@name)) name <- "" if (!short) name <- sprintf("%s psi function", name) if (length(v) >= 1) { if (short) paste(name, paste(n, round(v, round), sep = "=", collapse = "\n"), sep = "\n") else paste0(name, " (", pasteK(n, round(v, round), sep = " = "), ")") } else name } setMethod("show", signature("psi_func"), function(object) cat(.sprintPsiFunc(object), "\n")) ## moved here from inst/xtraR/plot-psiFun.R; called plot.psiFun originally matplotPsi <- function(x, m.psi, psi, par, main = "full", col = c("black", "red3", "blue3", "dark green"), leg.loc = "right", lty = 1, ...) { ## Original Author: Martin Maechler, Date: 13 Aug 2010, 10:17 ## Modified by Manuel Koller, Date: 7 Jan 2013 fExprs <- quote(list(rho(x), psi(x), {psi*minute}(x), w(x) == psi(x)/x, {w*minute}(x))) ## build legend map <- if (is.null(colnames(m.psi))) { 1:(ncol(m.psi)+1) } else { c(1, c(rho=2, psi=3, Dpsi=4, wgt=5, Dwgt=6)[colnames(m.psi)]) } fExprs <- fExprs[map] ## ... title if(is.character(main)) { shortMain <- (main == "short") elist <- list(FF = if(shortMain) fExprs[[2]] else fExprs, PSI = psi, PPP = paste(formatC(par), collapse=",")) tit <- if(shortMain) substitute(FF ~ "etc, with" ~ psi*"-type" == PSI(PPP), elist) else substitute(FF ~~ ~~ " with "~~ psi*"-type" == PSI(PPP), elist) } else tit <- NULL ## plot matplot(x, m.psi, col=col, lty=lty, type="l", main = tit, ylab = quote(f(x)), xlab = quote(x), ...) 
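## (added usage sketch for the psi_func objects defined further below in this
##  file:)
##   huberPsi@psi(c(-3, 0, 3))       # psi(x) at the default  k = 1.345
##   plot(huberPsi)                  # -> the plot() method below, via matplotPsi()
##   chgDefaults(huberPsi, k = 2)    # same psi-family, new tuning constant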
abline(h=0,v=0, lty=3, col="gray30") fE <- fExprs; fE[[1]] <- as.name("expression") legend(leg.loc, inset=.02, eval(fE), col=col, lty=lty, bty="n") invisible(cbind(x=x, m.psi)) } setMethod("plot", signature(x = "psi_func"), function(x, y, which = c("rho", "psi", "Dpsi", "wgt", "Dwgt"), main = "full", col = c("black", "red3", "blue3", "dark green", "light green"), leg.loc = "right", ...) { ## x: psi_func object ## y: points to plot at (x-Axis in plot) which <- match.arg(which, several.ok = TRUE) if(missing(y)) y <- seq(-5, 10, length=1501) ## For backcompatibility: if(!is.null(sm <- list(...)$shortMain)) { if(!missing(main)) stop("You cannot specify both 'main' and the deprecated 'shortMain'") warning("'shortMain' is deprecated and will get defunct.\n", "Use 'main = \"short\"' instead of 'shortMain = TRUE'") if(sm) main <- "short" } tmp <- lapply(which, function(name) slot(x, name)(y)) m.psi <- do.call(cbind, tmp) colnames(m.psi) <- which matplotPsi(y, m.psi, x@name, unlist(formals(x@rho)[-1]), main=main, col=col, leg.loc=leg.loc, ...) }) ##-------- TODO: Rather write short __vignette__ with these formulae: ##' \Phi_j(t) := \int_{-\infty}^t u^j \phi(u) \;du ##' --------- where \phi(.) (= \code{dnorm()}) ##' is the density of the standard normal distribution N(0,1). ##' @title "Truncated" Moments of the Gaussian: Int u^j phi(u) du ##' @param t numeric vector ##' @param j an integer (valued scalar), >= 0 ##' @return Phi_j(t), i.e. a numeric vector of the same length as t. ##' @author Martin Maechler PhiI <- function(t, j = 0) { stopifnot(j == as.integer(j), length(j) == 1, is.numeric(t)) if(j >= 4) ## recursion formula -t^(j-1)*dnorm(t) + (j-1)* PhiI(t, j-2) else switch(j+1, ## 0: pnorm(t), ## 1: -dnorm(t), ## 2: pnorm(t) - t*dnorm(t), ## 3: -(2 + t^2)*dnorm(t)) } if(FALSE) { ## Checking PhiI() visually: tt <- seq(-4,10, length=64) j.max <- 5 oo <- sfsmisc::mult.fig(j.max+1, main = "Checking PhiI(., j)", marP=-c(1,1,1,0)) cols <- c("red2", adjustcolor("blue", 0.25)) for(j in 0:j.max) { curve(PhiI(x, j=j), -4, 10, col=cols[1], main = bquote(j == .(j))) if(j == j.max %/% 2) legend("right", c("PhiI()", "integrate(..)"), col=cols, lwd = c(1,3), lty = c(1,3), inset = 1/40) I <- sapply(tt, function(t) integrate(function(u) u^j * dnorm(u), -Inf, t)$value) lines(tt, I, col= cols[2], lwd=3, lty = 3) } par(oo$old.par) } ## Huber: huberPsi <- psiFunc(rho = function(x, k) { r <- u <- abs(x); I <- u < k r[ I] <- u[I]^2 / 2 r[!I] <- k*(u[!I] - k / 2) r }, psi = function(x, k) pmin.int(k, pmax.int(-k, x)), wgt = function(x, k) pmin.int(1, k/abs(x)), Dpsi = function(x, k) abs(x) <= k, Erho = function(k) {iP <- pnorm(k, lower=FALSE) 1/2 - iP + k*(dnorm(k) - k*iP)}, Epsi2= function(k) ifelse(k < 10, 1 - 2*(k*dnorm(k) + (1-k*k)*pnorm(k, lower=FALSE)), 1), EDpsi= function(k) 2*pnorm(k) - 1, name = "Huber", ## the tuning pars and default: k = 1.345) ## Hampel: hampelPsi <- psiFunc(rho = function(x, k) { u <- abs(x) a <- k[1] ; b <- k[2]; r <- k[3] Lg <- r <= u I <- u < a m1 <- !I & (I2 <- u < b) # a <= u < b : 'constant' m2 <- !I2 & !Lg # b <= u < r : 'descending' x[ I] <- u[I]^2 / 2 x[m1] <- a*(a/2 + (u[m1] - a)) ##x[m2]<- a*(a/2 + (b - a)) + a*(u^2 - b^2)/(2*(r - b)) ##x[m2]<- a*(b - a/2) + a*(u^2 - b^2)/(2*(r - b)) x[m2] <- a*(b - a/2 + (u[m2] - b)*(r - (b+u[m2])/2)/(r - b)) ##u=r: a*(b - a/2 + (b + r)/2) x[Lg] <- a/2*(b - a + r) x }, psi = function(x, k) { ## this is "optimized" ==> factors faster than using ifelse()! 
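## (added description of the vectorized computation below:)
##   |x| <  k[1]          : returned unchanged            (linear part)
##   k[1] <= |x| < k[2]   : k[1]*sign(x)                  (constant part)
##   k[2] <= |x| < k[3]   : descends linearly to 0        (redescending part)
##   |x| >= k[3]          : 0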
u <- abs(x) lrg <- k[3] <= u mid <- k[1] < u & !lrg # constant _and_ descending ## x is result for |x| < k[1] x[lrg] <- 0 if(any(mid)) x[mid] <- k[1] * sign(x[mid])* pmin.int(1, (u[mid] - k[3])/(k[2] - k[3])) x }, wgt = function(x, k) { x <- abs(x) lrg <- k[3] <= x I <- x < k[1] mid <- !I & !lrg # contains constant and descending x[I] <- 1 x[lrg] <- 0 if(any(mid)) x[mid] <- k[1] / x[mid] * pmin.int(1, (x[mid] - k[3])/(k[2] - k[3])) x }, Dpsi = function(x, k) { stopifnot(length(k) == 3, diff(k) >= 0) # for now u <- abs(x) lrg <- k[3] <= u I <- u < k[1] m1 <- !I & (I2 <- u < k[2]) # k_1 <= u < k_2: 'constant' m2 <- !I2 & !lrg # k_2 <= u < k_3 : 'descending' x[lrg | m1] <- 0 x[I ] <- 1 x[m2] <- k[1] / (k[2] - k[3]) x }, Erho = function(k) { names(k) <- c("a","b","r") a <- k[["a"]] ; b <- k[["b"]]; r <- k[["r"]] ph <- dnorm(k) Ph <- pnorm(k) ## rho(x) = c0 for |x| >= r c0 <- a/2*(b - a + r) ## coeff. of rho(x) = a/2(c1 + c2|x| + c2 x^2), for |x| in [b,r] D2 <- r - b c1 <- -(a*r+ b*(b-a)) / D2 c2 <- 2*r / D2 c3 <- - 1 / D2 dPh.rb <- Ph[["r"]] - Ph[["b"]] dph.rb <- ph[["r"]] - ph[["b"]] ## Phi_2(r) - Phi_2(b) := dPh2.rb <- Ph[["r"]] - Ph[["b"]] - r*ph[["r"]] + b*ph[["b"]] ## E[rho(X)] = ## [0,a] : 2* 1/2*(Phi_2(a) - Phi_2(0)) (Ph[["a"]]-a*ph[["a"]] - 1/2) + ## [a,b] : 2* a*( -a/2*(Phi(b) - Phi(a)) + (Phi_1(b) - Phi_1(a)) ) 2*a*(-a/2*(Ph[["b"]]-Ph[["a"]]) + (ph[["a"]] - ph[["b"]])) + ## the upper two can be simplified to ## -1/2 + a*ph[["a"]] + (1+a^2)*Ph[["a"]] -2*a*ph[["b"]] - a^2*Ph[["b"]] + ## [b,r] : a*(c1*dPh.rb + c2*(-dph.rb) + c3*dPh2.rb) + ## [r,Inf] : 2*c0*(1 - Ph[["r"]]) } , Epsi2 = function(k) ## E[psi^2]=: 'A' in Hampel et al.(1986), p.150 { names(k) <- c("a","b","r") a <- k[["a"]] ; r <- k[["r"]] ph <- dnorm(k) Ph <- pnorm(k) Ph2 <- Ph - k*ph # = Phi_2(k) {see PhiI(.) above} 2*(Ph2[["a"]] - 1/2 + a^2*(Ph[["b"]] - Ph[["a"]]) + (a / (r - k[["b"]]))^2 * ( r^2 *(Ph[["r"]] - Ph[["b"]]) -2*r *(ph[["b"]] - ph[["r"]]) + Ph2[["r"]] - Ph2[["b"]])) }, EDpsi= function(k) ## E[psi'] =: 'B' in Hampel et al.(1986) { a <- k[1] ; b <- k[2]; r <- k[3] 2*(pnorm(a) - 1/2 - a* (pnorm(r) - pnorm(b)) / (r - b)) }, name = "Hampel", ## the tuning pars and default: k = c(2,4,8) / 1.345)# 1/1.345 = 0.7435 ## TODO: Biweight : ## ---- -------- but note that we have ## (non-S4) ./biweight-funs.R already {used by lmrob.*()} ## ~~~~~~~~~~~~~~~ if(FALSE) tukeyPsi <- c() ########## ## maybe TODO: Optimal tanh() estimator for location ### B. M-Estimators of Scale --- need chi() and slightly different functionals ### --- ---------------------- ### ## one "challenge" is the a(b) needed in chi(x; a,b) = [x^2 -1 -a]_b^b ## for V-optimal M-Estimates of scale ## --> but that's solved (!) in ./scale-chi-opt.R ## ~~~~~~~~~~~~~~~~~ ## Then, I'd also want the optimal chi for s robustbase/R/plot.lmrob.R0000644000176200001440000001164712514020510015036 0ustar liggesusers## MM: more following of plot.lm() : ~/R/D/r-devel/R/src/library/stats/R/plot.lm.R plot.lmrob <- function (x, which = 1:5, caption = c("Standardized residuals vs. Robust Distances", "Normal Q-Q vs. Residuals", "Response vs. Fitted Values", "Residuals vs. Fitted Values" , "Sqrt of abs(Residuals) vs. 
Fitted Values"), panel = if(add.smooth) panel.smooth else points, sub.caption = deparse(x$call), main = "", compute.MD = TRUE, # maybe (n < 1000 && p < 20) ask = prod(par("mfcol")) < length(which) && dev.interactive(), id.n = 3, labels.id = names(residuals(x)), cex.id = 0.75, label.pos = c(4,2), qqline = TRUE, add.smooth = getOption("add.smooth"), ..., p = 0.025) { if (!inherits(x, "lmrob")) stop("Use only with 'lmrob' objects") if (!is.numeric(which) || any(which < 1) || any(which > 5)) stop("'which' must be in 1:5") show <- rep(FALSE, 5) show[which] <- TRUE r <- residuals(x) n <- length(r) sr <- r/x$scale yh <- fitted(x) if (is.null(id.n)) id.n <- 0 else { id.n <- as.integer(id.n) if(id.n < 0L || id.n > n) stop(gettextf("'id.n' must be in {1,..,%d}", n), domain = NA) } if(id.n > 0L) { ## label the largest residuals if(is.null(labels.id)) labels.id <- paste(1L:n) iid <- 1L:id.n show.r <- sort.list(abs(r), decreasing = TRUE)[iid] ## if(any(show[2L:3L])) ## show.rs <- sort.list(abs(rs), decreasing = TRUE)[iid] text.id <- function(x, y, ind, adj.x = TRUE) { labpos <- if(adj.x) label.pos[1+as.numeric(x > mean(range(x)))] else 3 text(x, y, labels.id[ind], cex = cex.id, xpd = TRUE, pos = labpos, offset = 0.25) } } one.fig <- prod(par("mfcol")) == 1 if (ask) { op <- par(ask = TRUE) on.exit(par(op)) } if (show[1]) { if(is.null(x[['MD']]) && compute.MD) { message("recomputing robust Mahalanobis distances") x$MD <- ## need to recompute robMD(x = if(!is.null(x[['x']])) x$x else if(!is.null(x[['model']])) model.matrix(x, x$model) else stop("need 'model' or 'x' component for robust Mahalanobis distances"), intercept = attr(x$terms,"intercept"), wqr = x$qr) ## try to "cache" them with the object .ge <- .GlobalEnv if(identical(parent.frame(), .ge) && exists((cnx <- as.character(match.call()[["x"]])), .ge)) { assign(cnx, x, envir = .ge) message("saving the robust distances 'MD' as part of ", sQuote(cnx)) } } if(!is.null(xD <- x[['MD']])) { if (p < 0 || p > 1) stop ("Tolerance range must be between 0% to 100%") else chi <- sqrt( qchisq(p = 1-p, df = x$rank) ) ylim <- range(sr, na.rm=TRUE) if(id.n > 0) ylim <- extendrange(r = ylim, f = 0.08) plot(xD, xlab = "Robust Distances", sr, ylab = "Robust Standardized residuals", ylim=ylim, main = main, type = "n", ...) panel(xD, sr, ...) mtext(caption[1], 3, 0.25) if (one.fig) title(sub = sub.caption, ...) if(id.n > 0) { y.id <- sr[show.r] y.id[y.id < 0] <- y.id[y.id < 0] - strheight(" ")/3 text.id(xD[show.r], y.id, show.r) } abline(h = c(2.5,-2.5), lty = 3) abline(v = chi, lty = 3) } } if (show[2L]) { ## Normal qq <- qqnorm(r, ylab = "Residuals", main = main,...) if(qqline) qqline(r, lty = 3, col = "gray50") mtext(caption[2], 3, 0.25) if (one.fig) title(sub = sub.caption, ...) if(id.n > 0) text.id(qq$x[show.r], qq$y[show.r], show.r) } if (show[3]) { y <- if(!is.null(x[['model']])) model.response(x$model) else yh + r m1 <- min(yh,y) m2 <- max(yh,y) plot(yh, y, xlab = "Fitted Values", ylab = "Response", xlim = c(m1,m2), ylim = c(m1,m2), main = main, type = "n", ...) panel(yh, y, ...) mtext(caption[3], 3, 0.25) if (one.fig) title(sub = sub.caption, ...) if(id.n > 0) text.id(yh[show.r], y[show.r], show.r) abline(a = 0,b = 1) } if (show[4]) { plot(yh, r, xlab = "Fitted Values", ylab = "Residuals", main = main, type = "n", ...) panel(yh, r, ...) mtext(caption[4], 3, 0.25) if (one.fig) title(sub = sub.caption, ...) 
if(id.n > 0) { y.id <- r[show.r] y.id[y.id < 0] <- y.id[y.id < 0] - strheight(" ")/3 text.id(yh[show.r], y.id, show.r) } abline(h = c(2.5*x$scale,0,-2.5*x$scale), lty = 3) } if (show[5]) { sqrtabsr <- sqrt(abs(r)) plot(yh, sqrtabsr, xlab = "Fitted Values", ylab = "Sqrt of abs(Residuals)", main = main, type = "n", ...) panel(yh, sqrtabsr, ...) mtext(caption[5], 3, 0.25) if (one.fig) title(sub = sub.caption, ...) if(id.n > 0) text.id(yh[show.r], sqrtabsr[show.r], show.r) } invisible() } robustbase/R/OGK.R0000644000176200001440000002043414123440375013375 0ustar liggesusers####========== Pairwise methods for covariance / correlation ================= ### From: Kjell Konis ### To: R-SIG-Robust@stat.math.ethz.ch, Ricardo Maronna ... ### Cc: Rand Wilcox ... ### Subject: Re: [RsR] [R] M-estimator R function question ### Date: Mon, 5 Dec 2005 10:29:11 +0000 ### Here is an implementation of the OGK estimator completely in R. I ### haven't touched it for a while and I forget how thoroughly I tested ### it so use it with a bit of caution. ### http://www.stats.ox.ac.uk/~konis/pairwise.q ### -------------------------------------------- ##------------------------------------------------------------------------------- ## Computes the orthogonalized pairwise covariance matrix estimate described in ## in Maronna and Zamar (2002). ## Use: pairwise(X, 2, gk.sigmamu, gk, hard.rejection) for the ## Gnanadesikan-Kettenring estimate. ## Alternatively, supply your own functions. ## MM replaced sweep(X, 1, .., '*') which is inefficient! ## == used crossprod() where appropriate ## ## I don't like the names gk.sigmamu() and gk(), ## "gk":= Gnanadesikan-Kettenring; particularly not for the Tau-estimator ## which is not at all related to G.-K. ## ---> replacements s/gk.sigmamu/scaleTau2/ ## s/gk/covGK/ ## -- also in the line of the many other cov*() functions I've renamed ## s/pairwise/covOGK/ ## NOTA BENE: Is *now* consistent, since MM made scaleTau2() consistent ### Documentation -----> ../man/covOGK.Rd ## ============= ================ ##' Compute the mahalanobis distances for *diagonal* var/cov matrix: ##' @param x n x p numeric data matrix ##' @param center numeric p-vector (or length 1 - is recycled) or FALSE ##' @param sd numeric p-vector of "standard deviations" ##' @examples all.equal(mahalanobisD(x, FALSE, sd), ##' mahalanobis (x, rep(0,p), diag(sd^2))) mahalanobisD <- function(x, center, sd) { ## Compute the mahalanobis distances (for diagonal cov). if(!isFALSE(center)) x <- sweep(x, 2L, center, check.margin=FALSE) rowSums(sweep(x, 2L, sd, '/', check.margin=FALSE)^2) } covOGK <- function(X, n.iter = 2, sigmamu, rcov = covGK, weight.fn = hard.rejection, keep.data = FALSE, ...) { stopifnot(n.iter >= 1) call <- match.call() X <- as.matrix(X) p <- ncol(X) if(p < 2) stop("'X' must have at least two columns") Z <- X # as we use 'X' for the (re)weighting U <- diag(p) A <- list() ## Iteration loop. for(iter in 1:n.iter) { ## only a few iterations ## Compute the vector of standard deviations d and ## the covariance matrix U. d <- apply(Z, 2L, sigmamu, ...) Z <- sweep(Z, 2L, d, '/', check.margin=FALSE) for(i in 2:p) { # only need lower triangle of U for(j in 1:(i - 1)) U[i, j] <- rcov(Z[ ,i], Z[ ,j], ...) 
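## (added note:) only the lower triangle of U is filled; eigen(U,
## symmetric = TRUE) below uses exactly that triangle.  Typical call
## (sketch, 'hbk' data assumed as in the covOGK() examples):
##   covOGK(data.matrix(hbk[, 1:3]), sigmamu = scaleTau2)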
} ## Compute the eigenvectors of U and store them as columns of E: ## eigen(U, symmetric) only needs left/lower triangle E <- eigen(U, symmetric = TRUE)$vectors ## Compute A and store it for each iteration A[[iter]] <- d * E ## Project the data onto the eigenvectors Z <- Z %*% E } ## End of orthogonalization iterations. ## Compute the robust location and scale estimates for ## the transformed data. sqrt.gamma <- apply(Z, 2L, sigmamu, mu.too = TRUE, ...) center <- sqrt.gamma[1, ] sqrt.gamma <- sqrt.gamma[2, ] distances <- mahalanobisD(Z, center, sd=sqrt.gamma) ## From the inside out compute the robust location and ## covariance matrix estimates. See equation (5). ## MM[FIXME]: 1st iteration (often the only one!) can be made *much* faster ## ----- covmat <- diag(sqrt.gamma^2) for(iter in n.iter:1) { covmat <- A[[iter]] %*% covmat %*% t(A[[iter]]) center <- A[[iter]] %*% center } center <- as.vector(center) ## Compute the reweighted estimate. First, compute the ## weights using the user specified weight function. weights <- weight.fn(distances, p, ...) sweights <- sum(weights) ## Then compute the weighted location and covariance ## matrix estimates. ## MM FIXME 2 : Don't need any of this, if all weights == 1 ## ----- (which is not uncommon) ==> detect that "fast" wcenter <- colSums(X * weights) / sweights Z <- sweep(X, 2L, wcenter, check.margin=FALSE) * sqrt(weights) wcovmat <- crossprod(Z) / sweights list(center = center, cov = covmat, wcenter = wcenter, wcov = wcovmat, weights = weights, distances = distances, n.iter = n.iter, sigmamu = deparse(substitute(sigmamu)), weight.fn = deparse(substitute(weight.fn)), rcov = deparse(substitute(rcov)), call = call, ## data.name = data.name, data = if(keep.data) X) } ## a version with weights and consistency (but only one tuning const!!) ## is in /u/maechler/R/other-people/Mspline/Mspline/R/scaleTau.R ## scaleTau2 <- function(x, c1 = 4.5, c2 = 3.0, na.rm = FALSE, consistency = TRUE, mu0 = median(x), sigma0 = median(x.), # = MAD(x) {without consistency factor} mu.too = FALSE, iter = 1, tol.iter = 1e-7) { if(na.rm) x <- x[!is.na(x)] n <- length(x) x. <- abs(x - mu0) stopifnot(is.numeric(sigma0), length(sigma0) == 1) # error, not NA .. if(is.na(sigma0))# not needed (?) || (!na.rm && anyNA(x.))) return(c(if(mu.too) mu0, sigma0)) if(sigma0 <= 0) { # no way to get tau-estim. if(!missing(sigma0)) warning("sigma0 =", sigma0," ==> scaleTau2(.) = 0") return(c(if(mu.too) mu0, 0)) } stopifnot(iter >= 1, iter == as.integer(iter), # incl. iter=TRUE is.numeric(tol.iter), tol.iter > 0) nEs2 <- if(!isFALSE(consistency)) { Erho <- function(b) ## E [ rho_b ( X ) ] X ~ N(0,1) 2*((1-b^2)*pnorm(b) - b * dnorm(b) + b^2) - 1 Es2 <- function(c2) ## k^2 * E[ rho_{c2} (X' / k) ] , where X' ~ N(0,1), k= qnorm(3/4) Erho(c2 * qnorm(3/4)) ## the asymptotic E[ sigma^2(X) ] is Es2(c2), {Es2(3) ~= 0.925} : ## TODO: 'n-2' below will probably change; ==> not yet documented ## ---- ==> ~/R/MM/STATISTICS/robust/1d-scaleTau2-small.R ## and ~/R/MM/STATISTICS/robust/1d-scale.R (if(consistency == "finiteSample") n-2 else n) * Es2(c2) } else n sTau2 <- function(sig0) { # also depends on (x., x, c1,c2, Es2) mu <- if(c1 > 0) { # "bi-weight" {in a way that works also with x.=Inf}: w <- pmax(0, 1 - (x. 
/ (sig0 * c1))^2)^2 if(!is.finite(s.xw <- sum(x*w))) { ## x*w \-> NaN when (x,w) = (Inf,0) wpos <- w > 0 w <- w[wpos] s.xw <- sum(x[wpos]*w) } s.xw / sum(w) } else mu0 x <- (x - mu) / sig0 rho <- x^2 rho[rho > c2^2] <- c2^2 ## return c(m = mu, ## basically sqrt(sigma2) := sqrt( sigma0^2 / n * sum(rho) ) : s = sig0 * sqrt(sum(rho)/nEs2)) } # { sTau2() } s0 <- sigma0 if(isTRUE(iter)) iter <- 100000 # "Inf" repeat { m.s <- sTau2(s0) s. <- m.s[["s"]] if((iter <- iter - 1) <= 0 || is.na(s.) || abs(s. - s0) <= tol.iter * s.) break s0 <- s. # and iterate further } ## return c(if(mu.too) m.s[["m"]], s.) } ## Two other simple 'scalefun' to be used for covOGK; ## s_Qn(), s_Sn() are in ./qnsn.R s_mad <- function(x, mu.too= FALSE, na.rm = FALSE) { if (na.rm) x <- x[!is.na(x)] mx <- median(x) c(if(mu.too) mx, mad(x, center = mx)) } s_IQR <- function(x, mu.too= FALSE, na.rm = FALSE) { Qx <- quantile(x, (1:3)/4, na.rm = na.rm, names = FALSE) c(if(mu.too) Qx[2], (Qx[3] - Qx[1]) * 0.5 * formals(mad)$constant) } covGK <- function(x, y, scalefn = scaleTau2, ...) { ## Gnanadesikan-Kettenring's, based on 4*Cov(X,Y) = Var(X+Y) - Var(X-Y) (scalefn(x + y, ...)^2 - scalefn(x - y, ...)^2) / 4 } hard.rejection <- function(distances, p, beta = 0.9, ...) { d0 <- median(distances) * qchisq(beta, p) / qchisq(0.5, p) wts <- double(length(distances))# == 0, but wts[distances <= d0] <- 1.0 wts } ##-- TODO "pairwise QC" ... etc ##--> ~maechler/R/MM/STATISTICS/robust/pairwise-new.R robustbase/R/detmcd.R0000644000176200001440000004123012441664745014224 0ustar liggesusers### -*- mode: R ; delete-old-versions: never -*- ##' Computes the MCD estimator of a multivariate data sets in a \emph{deterministic} ##' way. ##' ##' The MCD estimator is given by the subset of h observations with smallest ##' covariance determinant. The MCD location estimate is then ##' the mean of those h points, and the MCD scatter estimate is ##' their covariance matrix. The default value of h is roughly ##' 0.75n (where n is the total number of observations), but the ##' user may choose each value between n/2 and n. Based on the ##' raw estimates, weights are assigned to the observations such ##' that outliers get zero weight. The reweighted MCD estimator ##' is then given by the mean and covariance matrix of the cases ##' with non-zero weight. ## ##' To compute an approximate MCD estimator deterministically, six initial robust h-subsets are ##' constructed based on robust transformations of variables or robust and ##' fast-to-compute estimators of multivariate location and shape. Then ##' C-steps are applied on these h-subsets until convergence. Note that the ##' resulting algorithm is not fully affine equivariant, but it is often ##' faster than the FAST-MCD algorithm which is affine equivariant ##' (see covMcd()). ##' Note that this function can not handle exact fit situations: if the ##' raw covariance matrix is singular, the program is stopped. In that ##' case, it is recommended to apply the covMcd() function. ##' ##' The MCD method is intended for continuous variables, and assumes that ##' the number of observations n is at least 5 times the number of variables p. ##' If p is too large relative to n, it would be better to first reduce ##' p by variable selection or robust principal components (see the functions ##' robust principal components in package 'rrcov'). ##' ##' @title Compute the MCD estimator of multivariate data in a deterministic way ##' @references ##' Hubert, M., Rousseeuw, P.J. and Verdonck, T. 
(2012), ##' "A deterministic algorithm for robust location and scatter", Journal of ##' Computational and Graphical Statistics, in press. ##' @param x a numerical matrix. The columns represent variables, and rows represent observations. ##' @param h The quantile of observations whose covariance determinant will ##' be minimized. Any value between n/2 and n may be specified. ##' @param hsets.init If one gives here already a matrix with for each column an ##' ordering of the observations (first the one with smallest statistical ##' distance), then the initial shape estimates are not calculated. ##' Default value = NULL. ##' @param save.hsets ##' @param full.h ##' @param scalefn function (or "rule") to estimate the scale. ##' @param maxcsteps ##' @param warn.nonconv.csteps ##' @param warn.wrong.obj.conv ##' @param trace ##' @param names ##' @return ##' @author Valentin Todorov; many tweaks by Martin Maechler .detmcd <- function(x, h, hsets.init=NULL, save.hsets = missing(hsets.init), full.h = save.hsets, scalefn, maxcsteps = 200, warn.nonconv.csteps = getOption("robustbase:warn.nonconv.csteps", TRUE), warn.wrong.obj.conv = getOption("robustbase:warn.wrong.obj.conv",FALSE), trace = as.integer(trace), names = TRUE) { stopifnot(length(dx <- dim(x)) == 2, h == as.integer(h), h >= 1) n <- dx[1] p <- dx[2] stopifnot(p >= 1, n >= 1) scalefn <- robScalefn(scalefn, n) ## kmini <- 5 # number of sub-data sets(if we use them some day) ## # for now we use it as number of rows in the returned ## # matrix 'coeff' for exact fit (also not used currently). ## cutoff <- qchisq(0.975, p) ## chimed <- qchisq(0.5, p) ## Center and scale the data vnms <- colnames(x) # speedup only: store and put back at end z <- doScale(unname(x), center=median, scale=scalefn) z.center <- z$center z.scale <- z$scale z <- z$x ## Assume that 'hsets.init' already contains h-subsets: the first h observations each if(is.null(hsets.init)) { hsets.init <- r6pack(z, h=h, full.h=full.h, scaled=TRUE, scalefn=scalefn) dh <- dim(hsets.init) } else { ## user specified, (even just *one* vector): if(is.vector(hsets.init)) hsets.init <- as.matrix(hsets.init) dh <- dim(hsets.init) if(dh[1] < h || dh[2] < 1) stop("'hsets.init' must be a h' x L matrix (h' >= h) of observation indices") ## TODO?: We could *extend* the sets to large h, even all n ## ====> could input the 'best' sets, also e.g. from fastmcd if(full.h && dh[1] != n) warning("'full.h' is true, but 'hsets.init' has less than n rows") ## stop("When 'full.h' is true, user specified 'hsets.init' must have n rows") if(min(hsets.init) < 1 || max(hsets.init) > n) stop("'hsets.init' must be in {1,2,...,n}; n = ", n) } nsets <- ncol(hsets.init)# typically 6, currently ## Some initializations. hset.csteps <- integer(nsets) bestobj <- Inf for(i in 1:nsets) { if(trace) { if(trace >= 2) cat(sprintf("H-subset %d = observations c(%s):\n-----------\n", i, pasteK(hsets.init[1:h,i]))) else cat(sprintf("H-subset %d: ", i)) } for(j in 1:maxcsteps) { if(j == 1) { obs_in_set <- hsets.init[1:h,i] # start with the i-th initial set } else { # now using 'svd' from last step score <- (z - rep(svd$center, each=n)) %*% svd$loadings mah <- mahalanobisD(score, center=FALSE, sd = sqrt(abs(svd$eigenvalues))) obs_in_set <- sort.list(mah)[1:h] #, partial = 1:h not yet } ## [P,T,L,r,centerX,meanvct] = classSVD(data(obs_in_set,:)); svd <- classPC(z[obs_in_set, ,drop=FALSE], signflip=FALSE) obj <- sum(log(svd$eigenvalues)) if(svd$rank < p) { ## FIXME --> return exact fit property rather than stop() ?? 
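## (added note:) a rank-deficient h-subset means that h or more observations
## lie on a hyperplane; unlike the FAST-MCD algorithm in covMcd(), .detmcd()
## has no exact-fit branch yet and therefore stops: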
stop('More than h of the observations lie on a hyperplane.') ## TODO exactfit <- TRUE ## coeff <- ... } if(j >= 2 && obj == prevdet) { ## MM:: 2014-10-25: objective function check is *not* good enough: if(identical(obs_in_set, prevobs)) break ## else : if(warn.wrong.obj.conv) warning(sprintf( "original detmcd() wrongly declared c-step convergence (obj=%g, i=%d, j=%d)", obj, i,j)) } prevdet <- obj prevobs <- obs_in_set } hset.csteps[i] <- j # how many csteps necessary to converge. if(trace) cat(sprintf("%3d csteps, obj=log(det|.|)=%g", j, obj)) if(obj < bestobj) { if(trace) cat(" = new optim.\n") ## bestset : the best subset for the whole data. ## bestobj : objective value for this set. ## initmean, initcov : the mean and covariance matrix of this set bestset <- obs_in_set bestobj <- obj initmean <- svd$center L <- svd$loadings ## MM speedup: was L Diag L' = L %*% diag(svd$eigenvalues) %*% t(L) initcov <- tcrossprod(L * rep(svd$eigenvalues, each=nrow(L)), L) ## raw.initcov <- initcov ## rew.Hsubsets.Hopt <- bestset ind.best <- i # to determine which subset gives best results. } else if(obj == bestobj) ## store as well: ind.best <- c(ind.best, i) else if(trace) cat("\n") } ## for(i in 1:nsets) if(warn.nonconv.csteps && any(eq <- hset.csteps == maxcsteps)) { p1 <- paste(ngettext(sum(eq), "Initial set", "Initial sets"), pasteK(which(eq))) warning(sprintf("%s did not converge in maxcsteps=%d concentration steps", p1, maxcsteps), domain=NA) } ## reweighting <- FALSE # it happens in covMcd() ## if(reweighting) { ## svd <- classPC(z[bestset, ], signflip=FALSE) # [P,T,L,r,centerX,meanvct] = classSVD(data(bestset,:)); ## mah <- mahalanobisD((z - rep(svd$center, each=n)) %*% svd$loadings, ## FALSE, sqrt(abs(svd$eigenvalues))) ## sortmah <- sort(mah) ## } ## factor <- sortmah[h]/qchisq(h/n, p) ## raw.cov <- factor*initcov ## raw.cov <- initcov ## We express the results in the original units [restoring var.names]: raw.cov <- initcov * tcrossprod(z.scale) raw.center <- initmean * z.scale + z.center if(names) { dimnames(raw.cov) <- list(vnms, vnms) names(raw.center) <- vnms } raw.objective <- bestobj + 2*sum(log(z.scale)) # log(det = obj.best * prod(z.scale)^2) ## raw.mah <- mahalanobis(x, raw.center, raw.cov, tol=1E-14) ## medi2 <- median(raw.mah) list(initcovariance = raw.cov, initmean = raw.center, best = bestset, mcdestimate = raw.objective, # determinant (goes to crit) ## , weights=NULL# FIXME - goes to raw.weights iBest = ind.best, n.csteps = hset.csteps, initHsets = if(save.hsets) hsets.init, exactfit = 0 # <- FIXME ## once we'd test for exact fit, we'd return: ## , coeff=matrix(rep(0, kmini*p), nrow=kmini) ## , kount=0 # FIXME ) } ## .detmcd() robScalefn <- function(scalefn, n) { if(missing(scalefn) || is.null(scalefn)) scalefn <- .scalefn.default if(is.function(scalefn)) scalefn else switch(scalefn, ## Hubert, Rousseeuw, Verdonck, JCGS 2012 : "hrv2012" = if(n < 1000) Qn else scaleTau2, ## Version of 2014: "v2014" = if(n < 5000) Qn else scaleTau2, ## otherwise stop(gettextf("Invalid scalefn='%s': must be function or a valid string", scalefn), domain=NA)) } doScale <- function (x, center, scale) { stopifnot(is.numeric(p <- ncol(x))) ## MM: follow standard R's scale.default() as much as possible centerFn <- is.function(center) doIt <- if(centerFn) { centerName <- deparse(substitute(center)) # "median" typically center <- apply(x, 2L, center) TRUE } else { if(length(center) == p && is.numeric(center)) TRUE else if(missing(center) || is.null(center)) { center <- 0; FALSE } else stop(gettextf("'%s' 
must be a function, numeric vector of length p, or NULL", "center"), domain=NA) } if(doIt) x <- sweep(x, 2L, center, `-`, check.margin=FALSE) scaleFn <- is.function(scale) doIt <- if(scaleFn) { scale <- apply(x, 2L, scale) TRUE } else { if(length(scale) == p && is.numeric(scale)) TRUE else if(missing(scale) || is.null(scale)) { scale <- 1 FALSE } else stop(gettextf("'%s' must be a function, numeric vector of length p, or NULL", "scale"), domain=NA) } if(doIt) { if(any(is.na(scale)) || any(scale < 0)) stop("provide better scale; must be all positive") if(any(s0 <- scale == 0)) { ## FIXME: ### Better and easier alternative (and as "FAST MCD"): return "singular cov.matrix" ### since scale 0 ==> more than 50% points are on hyperplane x[,j] == const. ## find scale if there is any variation; otherwise use s := 1 S <- if(centerFn && centerName == "median") abs else function(.) abs(. - median(.)) non0Q <- function(u) { alph <- c(10:19, 19.75)/20 # not all the way to '1' {=> finite qnorm()} qq <- quantile(S(u), probs=alph, names=FALSE) if(any(pos <- qq != 0)) { ## the first non-0 if there is one i <- which.max(pos) qq[i] / qnorm((alph[i] + 1)/2) } else 1 } scale[s0] <- apply(x[,s0, drop=FALSE], 2L, non0Q) } x <- sweep(x, 2L, scale, `/`, check.margin = FALSE) } ## return list(x=x, center=center, scale=scale) } ##' @title Robust Distance based observation orderings based on robust "Six pack" ##' @param x n x p data matrix ##' @param h integer ##' @param full.h full (length n) ordering or only the first h? ##' @param scaled is 'x' is already scaled? otherwise, apply doScale(x, median, scalefn) ##' @param scalefn function to compute a robust univariate scale. ##' @return a h' x 6 matrix of indices from 1:n; if(full.h) h' = n else h' = h r6pack <- function(x, h, full.h, scaled=TRUE, scalefn = rrcov.control()$scalefn) { ## As the considered initial estimators Sk may have very ## inaccurate eigenvalues, we try to 'improve' them by applying ## a transformation similar to that used in the OGK algorithm. ## ## After that compute the corresponding distances, order them and ## return the indices initset <- function(data, scalefn, P, h) { stopifnot(length(d <- dim(data)) == 2, length(h) == 1, h >= 1) n <- d[1] stopifnot(h <= n) lambda <- doScale(data %*% P, center=median, scale=scalefn)$scale sqrtcov <- P %*% (lambda * t(P)) ## == P %*% diag(lambda) %*% t(P) sqrtinvcov <- P %*% (t(P) / lambda) ## == P %*% diag(1/lambda) %*% t(P) estloc <- colMedians(data %*% sqrtinvcov) %*% sqrtcov centeredx <- (data - rep(estloc, each=n)) %*% P sort.list(mahalanobisD(centeredx, FALSE, lambda))[1:h]# , partial = 1:h } ## ## Compute the raw OGK estimator. For m(.) and s(.) (robust ## univariate estimators of location and scale) use the median ## and Qn for reasons of simplicity (no choice of tuning parameters) ## and to be consistent with the other components of DetMCD. 
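    ## Note: each off-diagonal entry of 'U' in ogkscatter() below is the
    ## Gnanadesikan-Kettenring pairwise covariance
    ##     cov(X,Y) = ( s(X+Y)^2 - s(X-Y)^2 ) / 4 ,
    ## with robust scale s(.) = scalefn(.), i.e., the same quantity that
    ## covGK(X, Y, scalefn) computes.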
## ogkscatter <- function(Y, scalefn, only.P = TRUE) { stopifnot(length(p <- ncol(Y)) == 1, p >= 1) U <- diag(p) for(i in seq_len(p)[-1L]) {# i = 2:p sYi <- Y[,i] ii <- seq_len(i - 1L) for(j in ii) { sYj <- Y[,j] U[i,j] <- (scalefn(sYi + sYj)^2 - scalefn(sYi - sYj)^2) / 4 } ## also set the upper triangle U[ii,i] <- U[i,ii] } ## now done above: U <- lower.tri(U) * U + t(U) # U <- tril(U, -1) + t(U) P <- eigen(U, symmetric=TRUE)$vectors if(only.P) return(P) ## else : Z <- Y %*% t(P) sigz <- apply(Z, 2, scalefn) lambda <- diag(sigz^2) list(P=P, lambda=lambda) } stopifnot(length(dx <- dim(x)) == 2) n <- dx[1] p <- dx[2] ## If scalefn is missing or is NULL, use Qn for smaller data sets (n < 1000) ## and tau-scale of Yohai and Zamar (1988) otherwise. scalefn <- robScalefn(scalefn, n) ## If the data was not scaled already (scaled=FALSE), center and scale using ## the median and the provided function 'scalefn'. if(!scaled) { ## Center and scale the data to (0, 1) - robustly x <- doScale(x, center=median, scale=scalefn)$x } nsets <- 6 hsets <- matrix(integer(), h, nsets) ## Determine 6 initial estimates (ordering of obs) ## 1. Hyperbolic tangent of standardized data y1 <- tanh(x) R1 <- cor(y1) P <- eigen(R1, symmetric=TRUE)$vectors hsets[,1] <- initset(x, scalefn=scalefn, P=P, h=h) ## 2. Spearmann correlation matrix R2 <- cor(x, method="spearman") P <- eigen(R2, symmetric=TRUE)$vectors hsets[,2] <- initset(x, scalefn=scalefn, P=P, h=h) ## 3. Tukey normal scores y3 <- qnorm((apply(x, 2L, rank) - 1/3)/(n + 1/3)) R3 <- cor(y3, use = "complete.obs") P <- eigen(R3, symmetric=TRUE)$vectors hsets[,3] <- initset(x, scalefn=scalefn, P=P, h=h) ## 4. Spatial sign covariance matrix znorm <- sqrt(rowSums(x^2)) ii <- znorm > .Machine$double.eps x.nrmd <- x x.nrmd[ii,] <- x[ii, ] / znorm[ii] SCM <- crossprod(x.nrmd)# / (n-1) not needed for e.vectors P <- eigen(SCM, symmetric=TRUE)$vectors hsets[,4] <- initset(x, scalefn=scalefn, P=P, h=h) ## 5. BACON ind5 <- order(znorm) half <- ceiling(n/2) Hinit <- ind5[1:half] covx <- cov(x[Hinit, , drop=FALSE]) P <- eigen(covx, symmetric=TRUE)$vectors hsets[,5] <- initset(x, scalefn=scalefn, P=P, h=h) ## 6. Raw OGK estimate for scatter P <- ogkscatter(x, scalefn, only.P = TRUE) hsets[,6] <- initset(x, scalefn=scalefn, P=P, h=h) ## Now combine the six pack : if(full.h) hsetsN <- matrix(integer(), n, nsets) for(k in 1:nsets) ## sort each of the h-subsets in *increasing* Mah.distances { xk <- x[hsets[,k], , drop=FALSE] svd <- classPC(xk, signflip=FALSE) # [P,T,L,r,centerX,meanvct] = classSVD(xk) if(svd$rank < p) ## FIXME: " return("exactfit") " stop('More than half of the observations lie on a hyperplane.') score <- (x - rep(svd$center, each=n)) %*% svd$loadings ord <- order(mahalanobisD(score, FALSE, sqrt(abs(svd$eigenvalues)))) if(full.h) hsetsN[,k] <- ord else hsets[,k] <- ord[1:h] } ## return if(full.h) hsetsN else hsets } ## {r6pack} robustbase/R/covMcd.R0000644000176200001440000012465414440373076014206 0ustar liggesusers### This is originally from the R package #### #### rrcov : Scalable Robust Estimators with High Breakdown Point #### #### by Valentin Todorov ## I would like to thank Peter Rousseeuw and Katrien van Driessen for ## providing the initial code of this function. ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. 
## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, a copy is available at ## http://www.r-project.org/Licenses/ ## No longer hidden in namespace : ## easier to explain when user-available & documented if h.alpha.n <- function(alpha, n, p) { ## Compute h(alpha) := size of subsample, given alpha, (n,p) ## Same function for covMcd() and ltsReg() n2 <- (n+p+1) %/% 2 floor(2 * n2 - n + 2 * (n - n2) * alpha) } ## MM: the way it's set up, *must* be kept in sync with rrcov.control()'s ## defaults --> ./rrcov.control.R : covMcd <- function(x, cor = FALSE, raw.only = FALSE, alpha = control$ alpha, nsamp = control$ nsamp, nmini = control$ nmini, kmini = control$ kmini, scalefn=control$scalefn, maxcsteps=control$maxcsteps, initHsets = NULL, save.hsets = FALSE, names = TRUE, seed = control$ seed, tolSolve = control$ tolSolve, # had 1e-10 hardwired {now 1e-14 default} trace = control$ trace, use.correction = control$ use.correction, wgtFUN = control$ wgtFUN, control = rrcov.control()) { logdet.Lrg <- 50 ## <-- FIXME add to rrcov.control() and then use that ## Analyze and validate the input parameters ... if(length(seed) > 0) { if(length(seed) < 3L || seed[1L] < 100L) stop("invalid 'seed'. Must be compatible with .Random.seed !") if(!is.null(seed.keep <- get0(".Random.seed", envir = .GlobalEnv, inherits = FALSE))) on.exit(assign(".Random.seed", seed.keep, envir = .GlobalEnv)) assign(".Random.seed", seed, envir = .GlobalEnv) } ## For back compatibility, as some new args did not exist pre 2013-04, ## and callers of covMcd() may use a "too small" 'control' list: defCtrl <- if(missing(control)) control else rrcov.control() if(missing(wgtFUN)) getDefCtrl("wgtFUN", defCtrl) if(is.null (nmini)) getDefCtrl("nmini", defCtrl) ## vt::03.02.2006 - added options "best" and "exact" for nsamp ## nsamp will be further analized in the wrapper .fastmcd() if(is.numeric(nsamp) && nsamp <= 0) stop("Invalid number of trials nsamp = ",nsamp, "!") if(is.data.frame(x)) x <- data.matrix(x, rownames.force=FALSE) else if (!is.matrix(x)) x <- matrix(x, length(x), 1, dimnames = if(names) list(names(x), deparse(substitute(x)))) if(!names) dimnames(x) <- NULL # (speedup) ## drop all rows with missing values (!!) : ok <- is.finite(x %*% rep.int(1, ncol(x))) x <- x[ok, , drop = FALSE] if(!length(dx <- dim(x))) stop("All observations have missing values!") n <- dx[1]; p <- dx[2] if(names) dimn <- dimnames(x) ## h(alpha) , the size of the subsamples h <- h.alpha.n(alpha, n, p) if(n <= p + 1) # ==> floor((n+p+1)/2) > n - 1 -- not Ok stop(if (n <= p) # absolute barrier! "n <= p -- you can't be serious!" else "n == p+1 is too small sample size for MCD") ## else if(n < 2 * p) { ## p+1 < n < 2p warning("n < 2 * p, i.e., possibly too small sample size") ## was stop("Need at least 2*(number of variables) observations ") } ## jmin <- (n + p + 1) %/% 2 ## if(alpha < 1/2) ## FIXME? shouldn't we rather test 'alpha < jmin/n' ? 
## stop("The MCD must cover at least", jmin, "observations") ## MM: I think this should be sufficient; ## we should even omit the (n < 2p) warning if(h > n) stop("Sample size n < h(alpha; n,p) := size of \"good\" subsample") else if(2*h < n) warning("subsample size h < n/2 may be too small") if(is.character(wgtFUN)) { if(is.function(mkWfun <- .wgtFUN.covMcd[[wgtFUN]])) wgtFUN <- mkWfun(p=p, n=n, control) } if(!is.function(wgtFUN)) stop(gettextf("'wgtFUN' must be a function or one of the strings %s.", pasteK(paste0('"',names(.wgtFUN.covMcd),'"'))), domain=NA) ## vt::03.02.2006 - raw.cnp2 and cnp2 are vectors of size 2 and will ## contain the correction factors (concistency and finite sample) ## for the raw and reweighted estimates respectively. Set them ## initially to 1. If use.correction is false (not the default), ## the finite sample correction factor will not be used ## (neither for the raw estimates nor for the reweighted ones) raw.cnp2 <- cnp2 <- c(1,1) ans <- list(call = match.call(), nsamp = nsamp, method = sprintf("MCD(alpha=%g ==> h=%d)", alpha, h)) if(h == n) { ## <==> alpha ~= 1 : Just compute the classical estimates -------- mcd <- cov(x) #MM: was cov.wt(x)$cov loc <- as.vector(colMeans(x)) obj <- determinant(mcd, logarithm = TRUE)$modulus[1] if ( -obj/p > logdet.Lrg ) { ans$cov <- mcd if(names) dimnames(ans$cov) <- list(dimn[[2]], dimn[[2]]) if (cor) ans$cor <- cov2cor(ans$cov) ans$center <- loc if(names && length(dimn[[2]])) names(ans$center) <- dimn[[2]] ans$n.obs <- n ans$singularity <- list(kind = "classical") weights <- 1 } else { mah <- mahalanobis(x, loc, mcd, tol = tolSolve) ## VT:: 01.09.2004 - bug in alpha=1 weights <- wgtFUN(mah) # 0/1 sum.w <- sum(weights) ans <- c(ans, cov.wt(x, wt = weights, cor = cor)) ## cov.wt() -> list("cov", "center", "n.obs", ["wt", "cor"]) ## Consistency factor for reweighted MCD -- ok for default wgtFUN only: FIXME if(sum.w != n) { cnp2[1] <- .MCDcons(p, sum.w/n) ans$cov <- ans$cov * cnp2[1] } obj <- determinant(mcd, logarithm = TRUE)$modulus[1] if( -obj/p > logdet.Lrg ) { ans$singularity <- list(kind = "reweighted.MCD") } else { mah <- mahalanobis(x, ans$center, ans$cov, tol = tolSolve) weights <- wgtFUN(mah) # 0/1 } } ans$alpha <- alpha ans$quan <- h ans$raw.cov <- mcd ans$raw.center <- loc if(names && !is.null(nms <- dimn[[2]])) { names(ans$raw.center) <- nms dimnames(ans$raw.cov) <- list(nms,nms) } ans$crit <- obj # was exp(obj); but log-scale is "robust" against under/overflow ans$method <- paste(ans$method, "\nalpha = 1: The minimum covariance determinant estimates based on", n, "observations \nare equal to the classical estimates.") ans$mcd.wt <- rep.int(NA, length(ok)) ans$mcd.wt[ok] <- weights if(names && length(dimn[[1]])) names(ans$mcd.wt) <- dimn[[1]] ans$wt <- NULL ans$X <- x if(names) { if(length(dimn[[1]])) dimnames(ans$X)[[1]] <- names(ans$mcd.wt)[ok] else dimnames(ans$X) <- list(seq(along = ok)[ok], NULL) } if(trace) cat(ans$method, "\n") ans$raw.cnp2 <- raw.cnp2 ans$cnp2 <- cnp2 class(ans) <- "mcd" return(ans) } ## end { alpha = 1 <==> h = n } mcd <- if(nsamp == "deterministic") { ans$method <- paste("Deterministic", ans$method) .detmcd (x, h, hsets.init = initHsets, save.hsets=save.hsets, # full.h=full.h, scalefn=scalefn, maxcsteps=maxcsteps, trace=as.integer(trace), names=names) } else { ans$method <- paste0("Fast ", ans$method, "; nsamp = ", nsamp, "; (n,k)mini = (", nmini,",",kmini,")") .fastmcd(x, h, nsamp, nmini, kmini, trace=as.integer(trace)) } ## Compute the consistency correction factor for the raw MCD ## (see 
calfa in Croux and Haesbroeck) calpha <- .MCDcons(p, h/n) ## VT::19.3.2007 correct <- if(use.correction) .MCDcnp2(p, n, alpha) else 1. raw.cnp2 <- c(calpha, correct) if(p == 1) { ## ==> Compute univariate location and scale estimates ans$method <- paste("Univariate", ans$method) scale <- sqrt(calpha * correct) * as.double(mcd$initcovariance) center <- as.double(mcd$initmean) if(abs(scale - 0) < 1e-07) { ans$singularity <- list(kind = "identicalObs", q = h) ans$raw.cov <- ans$cov <- matrix(0) ans$raw.center <- ans$center <- center ans$n.obs <- n ans$alpha <- alpha ans$quan <- h if(names && !is.null(nms <- dimn[[2]][1])) { names(ans$raw.center) <- names(ans$center) <- nms dimnames(ans$raw.cov) <- dimnames(ans$cov) <- list(nms,nms) } ans$crit <- -Inf # = log(0) weights <- as.numeric(abs(x - center) < 1e-07) # 0 / 1 } ## end { scale ~= 0 } else { ## Compute the weights for the raw MCD in case p=1 weights <- wgtFUN(((x - center)/scale)^2) # 0/1 sum.w <- sum(weights) ans <- c(ans, cov.wt(x, wt = weights, cor=cor)) if(sum.w != n) { ## VT::05.04.2023 ## The correct consistency correction factor for the reweighted estimate ## would be .MCDcons(p, 0.975) and not .MCDcons(p, sum.w/n) - see mail from ## Andreas Alfons from 29.01.2020 and Croux and Haesbroeck (1999), equations 4.1 and 4.2. ## cdelta.rew <- .MCDcons(p, sum.w/n) ## VT::19.3.2007 cdelta.rew <- .MCDcons(p, 0.975) correct.rew <- if(use.correction) .MCDcnp2.rew(p, n, alpha) else 1. cnp2 <- c(cdelta.rew, correct.rew) ans$cov <- cdelta.rew * correct.rew * ans$cov } ans$alpha <- alpha ans$quan <- h ans$raw.cov <- as.matrix(scale^2) ans$raw.center <- as.vector(center) if(names && !is.null(nms <- dimn[[2]][1])) { dimnames(ans$raw.cov) <- list(nms,nms) names(ans$raw.center) <- nms } ans$crit <- ## log(det) = log(sum(sort((x - as.double(mcd$initmean))^2, partial = h)[1:h])/max(1,h-1)) center <- ans$center scale <- as.vector(sqrt(ans$cov)) weights <- wgtFUN(((x - center)/scale)^2) } ## end{ scale > 0 } } ## end p=1 else { ## p >= 2 : --------------------------------------------------------- ## Apply correction factor to the raw estimates ## and use them to compute weights mcd$initcovariance <- matrix(calpha * correct * mcd$initcovariance, p,p) if(raw.only || mcd$exactfit != 0) { ## If not all observations are in general position, i.e. more than ## h observations lie on a hyperplane, the program still yields ## the MCD location and scatter matrix, the latter being singular ## (as it should be), as well as the equation of the hyperplane. ## VT::31.08.2022 - raw.only was not implemeted for the case nsamp="deterministic" ## (if the FORTRAN code is not called mcd$coeff and mcd$weights do not exist). ## Reported by Aurore Archimbaud if(!is.null(mcd$coeff)) dim(mcd$coeff) <- c(5, p) ans$cov <- ans$raw.cov <- mcd$initcovariance ans$center <- ans$raw.center <- as.vector(mcd$initmean) if(names && !is.null(nms <- dimn[[2]])) { dimnames(ans$cov) <- list(nms, nms) names(ans$center) <- nms } ans$n.obs <- n if(mcd$exactfit != 0) { ## no longer relevant: ## if(mcd$exactfit == -1) ## stop("The program allows for at most ", mcd$kount, " observations.") ## if(mcd$exactfit == -2) ## stop("The program allows for at most ", mcd$kount, " variables.") if(!(mcd$exactfit %in% c(1,2,3))) stop("Unexpected 'exactfit' code ", mcd$exactfit, ". 
Please report!") ## new (2007-01) and *instead* of older long 'method' extension; ## the old message is still *printed* via .MCDsingularityMsg() ## ## exactfit is now *passed* to result instead of coded into 'message': ans$singularity <- list(kind = "on.hyperplane", exactCode = mcd$exactfit, p = p, h = h, count = mcd$kount, coeff = mcd$coeff[1,]) ans$crit <- -Inf # = log(0) weights <- mcd$weights } else { ## VT::31.08.2022 - raw.only was not implemeted for the case nsamp="deterministic" ans$raw.only <- TRUE ans$crit <- mcd$mcdestimate weights <- mcd$weights if(is.null(mcd$weights)) { ## FIXME? here, we assume that mcd$initcovariance is not singular: mah <- mahalanobis(x, mcd$initmean, mcd$initcovariance, tol = tolSolve) weights <- wgtFUN(mah) } } ans$alpha <- alpha ans$quan <- h if(names && !is.null(nms <- dimn[[2]])) { names(ans$raw.center) <- nms dimnames(ans$raw.cov) <- list(nms,nms) } } ## end (raw.only || exact fit) else { ## have general position (exactfit == 0) : ------------------------ ## FIXME? here, we assume that mcd$initcovariance is not singular: mah <- mahalanobis(x, mcd$initmean, mcd$initcovariance, tol = tolSolve) weights <- wgtFUN(mah) sum.w <- sum(weights) ans <- c(ans, cov.wt(x, wt = weights, cor=cor)) ## simple check for singularity, much cheaper than determinant() below: sing.rewt <- any(apply(ans$cov == 0, 2, all)) ## Compute and apply the consistency correction factor for ## the reweighted cov if(!sing.rewt && sum.w != n) { ## VT::05.04.2023 ## The correct consistency correction factor for the reweighted estimate ## would be .MCDcons(p, 0.975) and not .MCDcons(p, sum.w/n) - see mail from ## Andreas Alfons from 29.01.2020 and Croux and Haesbroeck (1999), equations 4.1 and 4.2. ## cdelta.rew <- .MCDcons(p, sum.w/n) ## VT::19.3.2007 cdelta.rew <- .MCDcons(p, 0.975) correct.rew <- if(use.correction) .MCDcnp2.rew(p, n, alpha) else 1. cnp2 <- c(cdelta.rew, correct.rew) ans$cov <- cdelta.rew * correct.rew * ans$cov } ##vt:: add also the best found subsample to the result list ans$best <- sort(as.vector(mcd$best)) ans$alpha <- alpha ans$quan <- h ans$raw.cov <- mcd$initcovariance ans$raw.center <- as.vector(mcd$initmean) if(names && !is.null(nms <- dimn[[2]])) { names(ans$raw.center) <- nms dimnames(ans$raw.cov) <- list(nms,nms) } ans$raw.weights <- weights ans$crit <- mcd$mcdestimate # now in log scale! ## 'mah' already computed above ans$raw.mah <- mah ## mahalanobis(x, ans$raw.center, ans$raw.cov, tol = tolSolve) ## Check if the reweighted scatter matrix is singular. 
if(sing.rewt || - determinant(ans$cov, logarithm = TRUE)$modulus[1]/p > logdet.Lrg) { ans$singularity <- list(kind = paste0("reweighted.MCD", if(sing.rewt)"(zero col.)")) ans$mah <- mah } else { mah <- mahalanobis(x, ans$center, ans$cov, tol = tolSolve) ans$mah <- mah weights <- wgtFUN(mah) } } ## end{ not exact fit } } ## end{ p >= 2 } ans$mcd.wt <- rep.int(NA, length(ok)) ans$mcd.wt[ok] <- weights if(names) { if(length(dimn[[1]])) names(ans$mcd.wt) <- dimn[[1]] if(length(dimn[[1]])) dimnames(x)[[1]] <- names(ans$mcd.wt)[ok] else dimnames(x) <- list(seq(along = ok)[ok], NULL) } ans$X <- x ans$wt <- NULL if(trace) cat(ans$method, "\n") ans$raw.cnp2 <- raw.cnp2 ans$cnp2 <- cnp2 if(nsamp == "deterministic") ans <- c(ans, mcd[c("iBest","n.csteps", if(save.hsets) "initHsets")]) class(ans) <- "mcd" ## warn if we have a singularity: if(is.list(ans$singularity)) warning(paste(strwrap(.MCDsingularityMsg(ans$singularity, ans$n.obs)), collapse="\n"), domain=NA) ## return ans } ## {covMcd} smoothWgt <- function(x, c, h) { ## currently drops all attributes, dim(), names(), etc ## maybe add 'keep.attributes = FALSE' (and pass to C) .Call(R_wgt_flex, x, c, h) } ##' Martin Maechler's simple proposal for an *adaptive* cutoff ##' i.e., one which does *not* reject outliers in good samples asymptotically .MCDadaptWgt.c <- function(n,p) { eps <- 0.4 / n ^ 0.6 # => 1-eps(n=100) ~= 0.975; 1-eps(n=10) ~= 0.90 ## using upper tail: qchisq(eps, p, lower.tail=FALSE) } ## Default wgtFUN()s : .wgtFUN.covMcd <- list("01.original" = function(p, ...) { cMah <- qchisq(0.975, p) function(d) as.numeric(d < cMah) }, "01.flex" = function(p, n, control) { ## 'control$beta' instead of 0.975 ## FIXME: update rrcov.control() to accept 'beta' stopifnot(is.1num(beta <- control$beta), 0 <= beta, beta <= 1) cMah <- qchisq(beta, p) function(d) as.numeric(d < cMah) }, "01.adaptive" = function(p, n, ...) { ## 'beta_n' instead of 0.975 cMah <- .MCDadaptWgt.c(n,p) function(d) as.numeric(d < cMah) }, "sm1.orig" = function(p, n, ...) { cMah <- qchisq(0.975, p) function(d) smoothWgt(d, c = cMah, h = 1) }, "sm2.orig" = function(p, n, ...) { cMah <- qchisq(0.975, p) function(d) smoothWgt(d, c = cMah, h = 2) }, "sm1.adaptive" = function(p, n, ...) { cMah <- .MCDadaptWgt.c(n,p) function(d) smoothWgt(d, c = cMah, h = 1) }, "sm2.adaptive" = function(p, n, ...) { cMah <- .MCDadaptWgt.c(n,p) function(d) smoothWgt(d, c = cMah, h = 2) }, "smE.adaptive" = function(p, n, ...) { cMah <- .MCDadaptWgt.c(n,p) ## TODO: find "theory" for h = f(cMah), or better c=f1(n,p); h=f2(n,p) function(d) smoothWgt(d, c = cMah, h = max(2, cMah/4)) } ) .MCDsingularityMsg <- function(singList, n.obs) { stopifnot(is.list(singList)) switch(singList$kind, "classical" = { "The classical covariance matrix is singular." }, "reweighted.MCD" = { "The reweighted MCD scatter matrix is singular." 
}, "identicalObs" = { sprintf("Initial scale 0 because more than 'h' (=%d) observations are identical.", singList$q) }, "on.hyperplane" = { stopifnot(c("p", "count", "coeff") %in% names(singList)) obsMsg <- function(m, n) paste("There are", m, "observations (in the entire dataset of", n, "obs.)", "lying on the") invisible(obsMsg)# <- codetools with(singList, c(switch(exactCode, ## exactfit == 1 : "The covariance matrix of the data is singular.", ## exactfit == 2 : c("The covariance matrix has become singular during", "the iterations of the MCD algorithm."), ## exactfit == 3: paste0("The ", h, "-th order statistic of the absolute deviation of variable ", which(coeff == 1), " is zero.")), if(p == 2) { paste(obsMsg(count, n.obs), "line with equation ", signif(coeff[1], digits= 5), "(x_i1-m_1) +", signif(coeff[2], digits= 5), "(x_i2-m_2) = 0", "with (m_1,m_2) the mean of these observations.") } else if(p == 3) { paste(obsMsg(count, n.obs), "plane with equation ", signif(coeff[1], digits= 5), "(x_i1-m_1) +", signif(coeff[2], digits= 5), "(x_i2-m_2) +", signif(coeff[3], digits= 5), "(x_i3-m_3) = 0", "with (m_1,m_2) the mean of these observations." ) } else { ## p > 3 ----------- paste(obsMsg(count, n.obs), "hyperplane with equation ", "a_1*(x_i1 - m_1) + ... + a_p*(x_ip - m_p) = 0", "with (m_1, ..., m_p) the mean of these observations", "and coefficients a_i from the vector a <- ", paste(deparse(zapsmall(coeff)), collapse="\n ")) })) }, ## Otherwise stop("illegal 'singularity$kind'") ) ## end{switch} } nobs.mcd <- function (object, ...) object$n.obs print.mcd <- function(x, digits = max(3, getOption("digits") - 3), print.gap = 2, ...) { cat("Minimum Covariance Determinant (MCD) estimator approximation.\n", "Method: ", x$method, "\n", sep="") if(!is.null(cl <- x$call)) { cat("Call:\n") dput(cl) } if(is.list(x$singularity)) cat(strwrap(.MCDsingularityMsg(x$singularity, x$n.obs)), sep ="\n") if(identical(x$nsamp, "deterministic")) cat("iBest: ", pasteK(x$iBest), "; C-step iterations: ", pasteK(x$n.csteps), "\n", sep="") ## VT::29.03.2007 - solve a conflict with fastmcd() in package robust - ## also returning an object of class "mcd" xx <- if(!is.null(x$crit)) format(x$crit, digits = digits) else if (!is.null(x$raw.objective)) format(log(x$raw.objective), digits = digits) else NA cat("Log(Det.): ", xx , "\n\nRobust Estimate of Location:\n") print(x$center, digits = digits, print.gap = print.gap, ...) cat("Robust Estimate of Covariance:\n") print(x$cov, digits = digits, print.gap = print.gap, ...) invisible(x) } summary.mcd <- function(object, ...) { class(object) <- c("summary.mcd", class(object)) object } print.summary.mcd <- function(x, digits = max(3, getOption("digits") - 3), print.gap = 2, ...) { print.mcd(x, digits = digits, print.gap = print.gap, ...) # see above ## hmm, maybe not *such* a good idea : if(!is.null(x$cor)) { cat("\nRobust Estimate of Correlation: \n") dimnames(x$cor) <- dimnames(x$cov) print(x$cor, digits = digits, print.gap = print.gap, ...) } cat("\nEigenvalues:\n") print(eigen(x$cov, only.values = TRUE)$values, digits = digits, ...) if(!is.null(x$mah)) { cat("\nRobust Distances: \n") print(summary(x$mah, digits = digits), digits = digits, ...) } if(!is.null(wt <- x$mcd.wt)) summarizeRobWeights(wt, digits = digits) invisible(x) } ## NOTE: plot.mcd() is in ./covPlot.R ! 
## ---- ~~~~~~~~~~~ ### Consistency and Finite Sample Correction Factors ### .MCDcons() .MCDcnp2() & .MCDcnp2.rew() ### now exported and documented in ../man/covMcd.Rd ##' Compute the consistency correction factor for the MCD estimate ##' (see calfa in Croux and Haesbroeck) ##' @param p ##' @param alpha alpha ~= h/n = quan/n ##' also use for the reweighted MCD, calling with alpha = 0.975 MCDcons <- # <- *not* exported, but currently used in pkgs rrcov, rrcovNA .MCDcons <- function(p, alpha) { qalpha <- qchisq(alpha, p) caI <- pgamma(qalpha/2, p/2 + 1) / alpha 1/caI } MCDcnp2 <- # <- *not* exported, but currently used in pkg rrcovNA ##' Finite sample correction factor for raw MCD: .MCDcnp2 <- function(p, n, alpha) { stopifnot(0 <= alpha, alpha <= 1, length(alpha) == 1) if(p > 2) { ## "alfaq" "betaq" "qwaarden" coeffqpkwad875 <- matrix(c(-0.455179464070565, 1.11192541278794, 2, -0.294241208320834, 1.09649329149811, 3), ncol = 2) coeffqpkwad500 <- matrix(c(-1.42764571687802, 1.26263336932151, 2, -1.06141115981725, 1.28907991440387, 3), ncol = 2) y.500 <- log( - coeffqpkwad500[1, ] / p^coeffqpkwad500[2, ] ) y.875 <- log( - coeffqpkwad875[1, ] / p^coeffqpkwad875[2, ] ) A.500 <- cbind(1, - log(coeffqpkwad500[3, ] * p^2)) A.875 <- cbind(1, - log(coeffqpkwad875[3, ] * p^2)) coeffic.500 <- solve(A.500, y.500) coeffic.875 <- solve(A.875, y.875) fp.500.n <- 1 - exp(coeffic.500[1]) / n^coeffic.500[2] fp.875.n <- 1 - exp(coeffic.875[1]) / n^coeffic.875[2] } else if(p == 2) { fp.500.n <- 1 - exp( 0.673292623522027) / n^0.691365864961895 fp.875.n <- 1 - exp( 0.446537815635445) / n^1.06690782995919 } else if(p == 1) { fp.500.n <- 1 - exp( 0.262024211897096) / n^0.604756680630497 fp.875.n <- 1 - exp(-0.351584646688712) / n^1.01646567502486 } ## VT:18.04.2007 - use simulated correction factors for several p and n: ## p in [1, 20] n in [2*p, ...] if(alpha == 0.5 && !is.na(fp.x <- MCDcnp2s$sim.0(p, n))) fp.500.n <- 1/fp.x fp.alpha.n <- if(alpha <= 0.875) fp.500.n + (fp.875.n - fp.500.n)/0.375 * (alpha - 0.5) else ## 0.875 < alpha <= 1 fp.875.n + (1 - fp.875.n)/0.125 * (alpha - 0.875) 1/fp.alpha.n } ## end{.MCDcnp2 } MCDcnp2.rew <- # <- *not* exported, but currently used in pkg rrcovNA ##' Finite sample correction factor for *REW*eighted MCD .MCDcnp2.rew <- function(p, n, alpha) { stopifnot(0 <= alpha, alpha <= 1, length(alpha) == 1) if(p > 2) { ## "alfaq" "betaq" "qwaarden" coeffrewqpkwad875 <- matrix(c(-0.544482443573914, 1.25994483222292, 2, -0.343791072183285, 1.25159004257133, 3), ncol = 2) coeffrewqpkwad500 <- matrix(c(-1.02842572724793, 1.67659883081926, 2, -0.26800273450853, 1.35968562893582, 3), ncol = 2) y.500 <- log( - coeffrewqpkwad500[1, ] / p^ coeffrewqpkwad500[2, ] ) y.875 <- log( - coeffrewqpkwad875[1, ] / p^ coeffrewqpkwad875[2, ] ) A.500 <- cbind(1, - log(coeffrewqpkwad500[3, ] * p^2)) coeffic.500 <- solve(A.500, y.500) A.875 <- cbind(1, - log(coeffrewqpkwad875[3, ] * p^2)) coeffic.875 <- solve(A.875, y.875) fp.500.n <- 1 - exp(coeffic.500[1]) / n^ coeffic.500[2] fp.875.n <- 1 - exp(coeffic.875[1]) / n^ coeffic.875[2] } else if(p == 2) { fp.500.n <- 1 - exp( 3.11101712909049 ) / n^ 1.91401056721863 fp.875.n <- 1 - exp( 0.79473550581058 ) / n^ 1.10081930350091 } else if(p == 1) { fp.500.n <- 1 - exp( 1.11098143415027 ) / n^ 1.5182890270453 fp.875.n <- 1 - exp( -0.66046776772861) / n^ 0.88939595831888 } ## VT:18.04.2007 - use simulated correction factors for several p and n: ## p in [1, 20] n in [2*p, ...] 
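    ## (when alpha == 0.5 and a simulated value is available, it replaces the
    ##  asymptotic 'fp.500.n' below; the alpha = 0.5 and 0.875 factors are then
    ##  combined by linear inter-/extrapolation in alpha, and 1/fp.alpha.n is
    ##  returned as the multiplicative finite-sample correction for the
    ##  *reweighted* covariance matrix)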
if(alpha == 0.5 && !is.na(fp.x <- MCDcnp2s$sim.rew(p, n))) fp.500.n <- 1/fp.x fp.alpha.n <- if(alpha <= 0.875) fp.500.n + (fp.875.n - fp.500.n)/0.375 * (alpha - 0.5) else ## 0.875 < alpha <= 1 fp.875.n + (1 - fp.875.n)/0.125 * (alpha - 0.875) 1/fp.alpha.n } ## end{.MCDcnp2.rew } .fastmcd <- function(x, h, nsamp, nmini, kmini, trace = 0) { dx <- dim(x) n <- dx[1] p <- dx[2] ## parameters for partitioning {equal to those in Fortran !!} ## kmini <- 5 ## nmini <- 300 stopifnot(length(kmini <- as.integer(kmini)) == 1, kmini >= 2L, is.1num(nmini), is.finite(nmaxi <- as.double(nmini)*kmini), nmaxi * p < .Machine$integer.max) nmaxi <- as.integer(nmaxi) km10 <- 10*kmini ## vt::03.02.2006 - added options "best" and "exact" for nsamp ## nLarge <- 100000 # was 5000 before Nov.2009 -- keep forever now; user can say "exact" if(is.numeric(nsamp) && (nsamp < 0 || nsamp == 0 && p > 1)) { nsamp <- -1 } else if(nsamp == "exact" || nsamp == "best") { if(n > 2*nmini-1) { warning("Options 'best' and 'exact' not allowed for n greater than 2*nmini-1 =", 2*nmini-1,".\nUsing default.\n") nsamp <- -1 } else { myk <- p + 1 ## was 'p'; but p+1 ("nsel = nvar+1") is correct nall <- choose(n, myk) msg <- paste("subsets of size", myk, "out of", n) if(nall > nLarge && nsamp == "best") { nsamp <- nLarge warning("'nsamp = \"best\"' allows maximally ", format(nLarge, scientific=FALSE), " subsets;\ncomputing these ", msg, immediate. = TRUE) } else { ## "exact" or ("best" & nall < nLarge) nsamp <- 0 ## all subsamples -> special treatment in Fortran if(nall > nLarge) { msg <- paste("Computing all", nall, msg) if(nall > 10*nLarge) warning(msg, "\n This may take a", if(nall/nLarge > 100) " very", " long time!\n", immediate. = TRUE) else message(msg) } } } } if(!is.numeric(nsamp) || nsamp == -1) { # still not defined ## set it to the default : nsamp.def <- rrcov.control()$nsamp warning(gettextf("Invalid number of trials nsamp=%s. Using default nsamp=%d.", format(nsamp), nsamp.def), domain=NA) nsamp <- nsamp.def } if(nsamp > (mx <- .Machine$integer.max)) { warning("nsamp > i_max := maximal integer -- not allowed;\n", " set to i_max = ", mx) nsamp <- mx } ## Allocate temporary storage for the Fortran implementation, ## directly in the .Fortran() call. ## (if we used C + .Call() we would allocate all there, and be quite faster!) .Fortran(rffastmcd, x = if(is.double(x)) x else as.double(x), n = as.integer(n), p = as.integer(p), ## = 'nvar' in Fortran nhalff = as.integer(h), nsamp = as.integer(nsamp), # = 'krep' nmini = as.integer(nmini), kmini = kmini, initcovariance = double(p * p), initmean = double(p), best = rep.int(as.integer(10000), h), mcdestimate = double(1), ## = 'det' weights = integer(n), exactfit = integer(1), # output indicator: 0: ok; 1: ..., 2: .. coeff = matrix(double(5 * p), nrow = 5, ncol = p), ## plane kount = integer(1), adjustcov = double(p * p), ## used in ltsReg() ! 
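             ## (the remaining arguments are work arrays allocated here for the
             ##  Fortran routine; only the components selected in the trailing
             ##  '[ c("initcovariance", ..., "adjustcov") ]' are returned to R)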
## integer(1), ## << 'seed' no longer used temp = integer(n), index1 = integer(n), index2 = integer(n), indexx = integer(n), nmahad = double(n), ndist = double(n), am = double(n), am2 = double(n), slutn = double(n), med = double(p), mad = double(p), sd = double(p), means = double(p), bmeans= double(p), w = double(p), fv1 = double(p), fv2 = double(p), rec = double(p+1), sscp1 = double((p+1)*(p+1)), cova1 = double(p * p), corr1 = double(p * p), cinv1 = double(p * p), cova2 = double(p * p), cinv2 = double(p * p), z = double(p * p), cstock = double(10 * p * p), # (10,nvmax2) mstock = double(10 * p), # (10,nvmax) c1stock = double(km10 * p * p), # (km10,nvmax2) m1stock = double(km10 * p), # (km10,nvmax) dath = double(nmaxi * p), # (nmaxi,nvmax) cutoff = qchisq(0.975, p), chimed = qchisq(0.5, p), i.trace= as.integer(trace) )[ ## keep the following ones: c("initcovariance", "initmean", "best", "mcdestimate", "weights", "exactfit", "coeff", "kount", "adjustcov") ] } ## ## VT:18.04.2007 - use simulated correction factors for several p and n ## and alpha = 1/2 (the default in rrcov.control()) ## ~~~~~~~~~~~ ## p in [1, 20] n in [2*p, ...] ## see the modifications in.MCDcnp2() and.MCDcnp2.rew ## ## VT::08.06.2007 - fixed the simulated values (especially for p=1) ## VT::11.05.2007 - reduce the usage of the simulated correction factors to only those that ## are definitvely wrong (negative or very large). This is done by: ## a) reducing p.max ## b) reducing n.max ## NB: In general, "wrong" are the factors for the reweighted matrix, but whenever a simulated ## value for the reweighted is used, the corresponding simulated must be used for the raw too. ## ## MM::2014-04 : MCDcnp2s <- local({ p.min <- 1L p.max <- 9L # was 20 ncol <- 20L # the number of column in the matrices n.min <- as.integer( ### p = 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 c(1, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40)) n.max <- as.integer( c(2, 6, 10, 13, 16, 18, 20, 20, 20, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60)) ##was c(22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60) ## these are the right (simulated) values for n.max n.min.rew <- n.min n.max.rew <- n.max m.0 <- matrix( c(1, 3.075819, 1.515999, 2.156169, 1.480742, 1.765485, 1.460206, 1.603707, 1.427429, 1.504712, 1.334528, 1.48297, 1.355308, 1.383867, 1.319241, 1.36065, 1.307467, 1.365596, 1.255259, 1.352741, 1.239381, 3.15342, 1.799889, 2.258497, 1.688312, 1.906779, 1.548203, 1.724785, 1.500873, 1.573442, 1.417137, 1.540805, 1.395945, 1.472596, 1.394247, 1.377487, 1.337394, 1.369354, 1.333378, 1.3181, 1.313813, 1.315528, 2.12777, 2.718898, 1.993509, 2.220433, 1.820585, 1.97782, 1.672455, 1.770151, 1.587478, 1.685352, 1.539295, 1.584536, 1.499487, 1.50702, 1.41952, 1.449058, 1.393042, 1.432999, 1.369964, 1.400997, 1.333824, 2.950549, 2.145387, 2.382224, 1.927077, 2.032489, 1.8371, 1.877833, 1.710891, 1.756053, 1.620778, 1.657761, 1.558978, 1.56257, 1.508633, 1.534406, 1.46709, 1.468734, 1.432529, 1.455283, 1.386975, 1.417532, 2.229573, 2.494447, 2.016117, 2.190061, 1.877996, 1.978964, 1.767284, 1.836948, 1.677372, 1.743316, 1.616383, 1.655964, 1.55484, 1.594831, 1.502185, 1.543723, 1.467005, 1.491123, 1.44402, 1.446915, 1.401578, 2.580264, 2.109121, 2.240741, 1.944719, 2.043397, 1.821808, 1.89725, 1.748788, 1.786988, 1.659333, 1.697012, 1.610622, 1.616503, 1.538529, 1.562024, 1.499964, 1.529344, 1.474519, 1.483264, 1.441552, 1.434448, 2.165233, 2.320281, 2.007836, 2.086471, 1.884052, 1.950563, 1.76926, 
1.843328, 1.708941, 1.741039, 1.627206, 1.644755, 1.580563, 1.593402, 1.527312, 1.568418, 1.501462, 1.502542, 1.464583, 1.467921, 1.431141, 2.340443, 2.048262, 2.161097, 1.926082, 1.995422, 1.81446, 1.853165, 1.738533, 1.784456, 1.679444, 1.696463, 1.612931, 1.629483, 1.548186, 1.580026, 1.52198, 1.531111, 1.482914, 1.484824, 1.442726, 1.447838, 2.093386, 2.185793, 1.948989, 2.02804, 1.867137, 1.907732, 1.771923, 1.800413, 1.691612, 1.720603, 1.642705, 1.649769, 1.589028, 1.598955, 1.539759, 1.55096, 1.503965, 1.50703, 1.471349, 1.469791, 1.436959, 2.218315, 1.997369, 2.041128, 1.887059, 1.928524, 1.79626, 1.827538, 1.716748, 1.735696, 1.658329, 1.664211, 1.599286, 1.611511, 1.553925, 1.562637, 1.516805, 1.529894, 1.476064, 1.482474, 1.453253, 1.458467, 2.0247, 2.07899, 1.921976, 1.949376, 1.824629, 1.851671, 1.744713, 1.765647, 1.683525, 1.685592, 1.625113, 1.624961, 1.571921, 1.581223, 1.535257, 1.537464, 1.497165, 1.504879, 1.468682, 1.469319, 1.448344, 2.092315, 1.941412, 1.969843, 1.844093, 1.866133, 1.766145, 1.783829, 1.703613, 1.709714, 1.646078, 1.654264, 1.594523, 1.598488, 1.545105, 1.555356, 1.514627, 1.521353, 1.483958, 1.487677, 1.449191, 1.459721, 1.958987, 1.985144, 1.87739, 1.879643, 1.786823, 1.799642, 1.720015, 1.724688, 1.663539, 1.662997, 1.609267, 1.615124, 1.56746, 1.562026, 1.520586, 1.52503, 1.493008, 1.502496, 1.471983, 1.468546, 1.435064, 1.994706, 1.880348, 1.894254, 1.805827, 1.815965, 1.744296, 1.743389, 1.665481, 1.681644, 1.624466, 1.626109, 1.584028, 1.5818, 1.54376, 1.547237, 1.504878, 1.515087, 1.479032, 1.47936, 1.450758, 1.45073, 1.892685, 1.91087, 1.825301, 1.827176, 1.745363, 1.746115, 1.693373, 1.701692, 1.648247, 1.637112, 1.594648, 1.592013, 1.554849, 1.55013, 1.522186, 1.520901, 1.492606, 1.493072, 1.460868, 1.46733, 1.440956, 1.92771, 1.835696, 1.841979, 1.775991, 1.766092, 1.703807, 1.708791, 1.654985, 1.655917, 1.602388, 1.611867, 1.570765, 1.573368, 1.53419, 1.529033, 1.506767, 1.503596, 1.481126, 1.471806, 1.444917, 1.451682, 1.850262, 1.855034, 1.778997, 1.789995, 1.718871, 1.717326, 1.667357, 1.666291, 1.619743, 1.631475, 1.582624, 1.58766, 1.546302, 1.545063, 1.512222, 1.517888, 1.489127, 1.487271, 1.466722, 1.463618, 1.444137, 1.8709, 1.794033, 1.80121, 1.736376, 1.740201, 1.673776, 1.682541, 1.638153, 1.642294, 1.604417, 1.597721, 1.559534, 1.559108, 1.533942, 1.529348, 1.499517, 1.501586, 1.473147, 1.473031, 1.457615, 1.452348, 1.805753, 1.812952, 1.746549, 1.747222, 1.696924, 1.694957, 1.652157, 1.650568, 1.607807, 1.613666, 1.577295, 1.570712, 1.543704, 1.538272, 1.515369, 1.517113, 1.487451, 1.491593, 1.464514, 1.464658, 1.439359, 1.823222, 1.758781, 1.767358, 1.70872, 1.712926, 1.666956, 1.667838, 1.62077, 1.621445, 1.592891, 1.58549, 1.55603, 1.559042, 1.521501, 1.523342, 1.499913, 1.501937, 1.473359, 1.472522, 1.452613, 1.452448), ncol = ncol) m.rew <- matrix( c(1, 0.984724, 0.970109, 0.978037, 0.979202, 0.982933, 1.001461, 1.026651, 0.981233, 1.011895, 1.017499, 0.964323, 1.026574, 1.006594, 0.980194, 1.009828, 0.998083, 0.966173, 1.009942, 0.99916, 1.021521, 2.216302, 1.418526, 1.635601, 1.31402, 1.33975, 1.251798, 1.210917, 1.133114, 1.150666, 1.138732, 1.096822, 1.076489, 1.058343, 1.045746, 1.036743, 1.008929, 1.049537, 1.028148, 1.027297, 1.020578, 1.00074, 1.73511, 2.06681, 1.545905, 1.659655, 1.456835, 1.47809, 1.331966, 1.334229, 1.231218, 1.220443, 1.198143, 1.193965, 1.142156, 1.146231, 1.124661, 1.112719, 1.089973, 1.070606, 1.082681, 1.061243, 1.053191, 2.388892, 1.847626, 1.96998, 1.630723, 1.701272, 1.521008, 
1.553057, 1.382168, 1.414555, 1.326982, 1.321403, 1.265207, 1.264856, 1.200418, 1.21152, 1.17531, 1.168536, 1.140586, 1.14457, 1.111392, 1.112031, 1.968153, 2.168931, 1.784373, 1.894409, 1.667912, 1.693007, 1.545176, 1.582428, 1.45319, 1.480559, 1.371611, 1.358541, 1.330235, 1.30264, 1.257518, 1.244156, 1.221907, 1.22455, 1.178965, 1.177855, 1.166319, 2.275891, 1.866587, 2.014249, 1.750567, 1.829363, 1.650019, 1.689043, 1.562539, 1.561359, 1.473378, 1.488554, 1.411097, 1.416527, 1.35117, 1.361044, 1.30205, 1.299037, 1.250265, 1.260083, 1.218665, 1.236027, 1.95771, 2.074066, 1.847385, 1.905408, 1.71393, 1.768425, 1.63908, 1.67234, 1.564992, 1.562337, 1.49229, 1.499573, 1.420813, 1.424067, 1.383947, 1.378726, 1.33062, 1.330071, 1.279404, 1.295302, 1.263947, 2.164121, 1.871024, 1.979485, 1.782417, 1.84489, 1.706023, 1.734857, 1.622782, 1.634869, 1.55196, 1.554423, 1.482325, 1.509195, 1.440726, 1.436328, 1.386335, 1.396277, 1.347939, 1.346732, 1.310242, 1.309371, 1.938822, 2.050409, 1.834863, 1.882536, 1.737494, 1.761608, 1.65742, 1.687579, 1.591863, 1.60158, 1.520982, 1.535234, 1.470649, 1.486485, 1.42892, 1.435574, 1.384132, 1.382329, 1.343281, 1.346581, 1.315111, 2.063894, 1.880094, 1.907246, 1.78278, 1.806648, 1.6952, 1.720922, 1.63084, 1.635274, 1.565423, 1.56171, 1.512015, 1.4986, 1.463903, 1.456588, 1.422856, 1.407325, 1.376724, 1.373923, 1.346464, 1.34259, 1.898389, 1.950406, 1.812053, 1.849175, 1.72649, 1.737651, 1.646719, 1.655112, 1.587601, 1.597894, 1.539877, 1.53329, 1.495054, 1.490548, 1.445249, 1.446037, 1.410272, 1.412274, 1.375797, 1.369604, 1.341232, 1.992488, 1.830452, 1.857314, 1.758686, 1.763822, 1.683215, 1.679543, 1.619269, 1.608512, 1.565, 1.562282, 1.498869, 1.51325, 1.470912, 1.464654, 1.427573, 1.439301, 1.402308, 1.391006, 1.37074, 1.367573, 1.855502, 1.891242, 1.77513, 1.790618, 1.706443, 1.713098, 1.642896, 1.636577, 1.580366, 1.581752, 1.542937, 1.531668, 1.487894, 1.492039, 1.460304, 1.449762, 1.4219, 1.420953, 1.390137, 1.388677, 1.360506, 1.908277, 1.802091, 1.806128, 1.723757, 1.727249, 1.659883, 1.670056, 1.605209, 1.611481, 1.558846, 1.551762, 1.512951, 1.511515, 1.468948, 1.476073, 1.441508, 1.434997, 1.412687, 1.406782, 1.380452, 1.375924, 1.811415, 1.822311, 1.740544, 1.739355, 1.68127, 1.685342, 1.620281, 1.622572, 1.579611, 1.570103, 1.529881, 1.530097, 1.490041, 1.4947, 1.457329, 1.456344, 1.423363, 1.428653, 1.399988, 1.390069, 1.376594, 1.837723, 1.76039, 1.771031, 1.697404, 1.690915, 1.634409, 1.63713, 1.589594, 1.586521, 1.552974, 1.545571, 1.505923, 1.512794, 1.477833, 1.477821, 1.444241, 1.44452, 1.419258, 1.421297, 1.394924, 1.389393, 1.779716, 1.781271, 1.706031, 1.71224, 1.655099, 1.654284, 1.608878, 1.605955, 1.565683, 1.565938, 1.523594, 1.531235, 1.492749, 1.486786, 1.457635, 1.461416, 1.432472, 1.430164, 1.404441, 1.400021, 1.378273, 1.798932, 1.735577, 1.727031, 1.671049, 1.677601, 1.624427, 1.617626, 1.579533, 1.579987, 1.544635, 1.538715, 1.504538, 1.50726, 1.477163, 1.477084, 1.450861, 1.444496, 1.428416, 1.422813, 1.400185, 1.39552, 1.750193, 1.752145, 1.690365, 1.692051, 1.642391, 1.63858, 1.600144, 1.596401, 1.558305, 1.555932, 1.525968, 1.522984, 1.491563, 1.492554, 1.467575, 1.45786, 1.437545, 1.430893, 1.413983, 1.409386, 1.391943, 1.762922, 1.701346, 1.704996, 1.6556, 1.655548, 1.611964, 1.615219, 1.569103, 1.571079, 1.540617, 1.541602, 1.503791, 1.50195, 1.478069, 1.47678, 1.452458, 1.451732, 1.429144, 1.426547, 1.40363, 1.402647), ncol = ncol) rm(ncol) list( sim.0 = function(p, n) { p. 
<- p - p.min + 1L if(p.min <= p && p <= p.max && n.min[p.] <= n && n <= n.max[p.]) { nind <- n - n.min[p.] + 1L m.0[nind, p.] ##= } else NA }, sim.rew = function(p, n) { p. <- p - p.min + 1L if(p.min <= p && p <= p.max && n.min.rew[p.] <= n && n <= n.max.rew[p.]) { nind <- n - n.min.rew[p.] + 1L m.rew[nind, p.] ##=== } else NA }) }) ## end{MCDcnp2s} if(FALSE) { ## For experimentation: ls.str( ee <- environment(MCDcnp2s$sim.0) ) matplot(1:21, ee$m.0, type = "o", xlab = "p - p.min + 1") } robustbase/R/BYlogreg.R0000644000176200001440000003605214410263765014477 0ustar liggesusers#### http://www.econ.kuleuven.be/public/NDBAE06/programs/roblog/ : #### #### August 06, 2010 2:14 PM 9121 BYlogreg.r.txt == BYlogreg.R (*this* original) #### May 04, 2005 9:24 AM 6702 BYlogreg.txt == BYlogreg.R.~2005~ #### May 04, 2005 9:25 AM 6720 WBYlogreg.txt == WBYlogreg.R #### #### Sep. 27, 2017: available at #### #### NB: Splus original Version of this file: BYlogreg.ssc in the #### -- in FunctionsRob/ (from FunctionsRob.zip) from Wiley's book supplements #### http://www.wiley.com/legacy/wileychi/robust_statistics/robust.html #### see my ../misc/MMY-book/Wiley-supplements/FunctionsRob/BYlogreg.ssc ## Computation of the estimator of Bianco and Yohai (1996) in logistic regression ## ------------- ## Christophe Croux, Gentiane Haesbroeck ## (thanks to Kristel Joossens and Valentin Todorov for improving the code) - ## ==> Now "contains" both the *weighted* and regular, unweighted BY-estimator ## ## This program computes the estimator of Bianco and Yohai in ## logistic regression. By default, an intercept term is included ## and p parameters are estimated. ## ## For more details we refer to ## Croux, C., and Haesbroeck, G. (2003), ## ``Implementing the Bianco and Yohai estimator for Logistic Regression'', ## Computational Statistics and Data Analysis, 44, 273-295 ## ## Changes by Martin Maechler, ---> ../man/BYlogreg.Rd ## ------------------ BYlogreg <- function(x0, y, initwml=TRUE, # w.x=NULL, addIntercept=TRUE, const=0.5, kmax = 1000, maxhalf = 10, sigma.min = 1e-4, trace.lev=0) { if(!is.numeric(y)) y <- as.numeric(y) ## if(!is.null(w.x)) ## warning("x weights 'w.x' are not yet made use of") if(!is.null(dim(y))) { if(ncol(y) != 1) stop("y is not onedimensional") y <- as.vector(y) } n <- length(y) if(is.data.frame(x0)) { x0 <- data.matrix(x0) } else if (!is.matrix(x0)) { x0 <- matrix(x0, length(x0), 1, dimnames = list(names(x0), deparse(substitute(x0)))) } if(nrow(x0) != n) stop("Number of observations in x and y not equal") na.x <- !is.finite(rowSums(x0)) na.y <- !is.finite(y) ok <- !(na.x | na.y) if(!all(ok)) { x0 <- x0[ok, , drop = FALSE] y <- y [ok] # y[ok, , drop = FALSE] } if(addIntercept) { x <- cbind("Intercept" = 1, x0) } else { # x0 := x without the "intercept column" x <- x0 all1 <- apply(x == 1, 2, all) if(any(all1)) x0 <- x[,!all1, drop = FALSE] else message("no intercept in the model") } dx <- dim(x) n <- dx[1] if(n == 0) stop("All observations have missing values!") p <- dx[2] # == ncol(x) family <- binomial() ## Computation of the initial value of the optimization process gstart <- if(initwml) { ###_ FIXME: Should allow many more schemes: ###_ 1) using MVE with much less singular cases ###_ 2) Instead of {0,1}-weighting with cutoff, w/ weights --> 0 *continuously* ### --> glm() with "prior" weights instead of 'subset' ## hp <- floor(n*(1-0.25))+1 ## mcdx <- cov.mcd(x0, quantile.used =hp,method="mcd") ## rdx=sqrt(mahalanobis(x0,center=mcdx$center,cov=mcdx$cov)) ## mcdx <- CovMcd(x0, alpha=0.75) ## rdx 
<- sqrt(getDistance(mcdx)) mcd <- covMcd(x0, alpha=0.75) ## ----- FIXME: argument! D <- sqrt( mahalanobis(mcd$X, mcd$center, mcd$cov) ) vc <- sqrt(qchisq(0.975, p-1)) ## ----- FIXME: 'vc' should be argument! wrd <- D <= vc ### FIXME_2: use weights and "weights.on.x' as in Mqle ( ./glmrobMqle.R ) ## glm(y~x0, family=binomial, subset = wrd)$coef glm.fit(x[wrd,,drop=FALSE], y[wrd], family=family)$coef } else { glm.fit(x, y, family=family)$coef } sigma1 <- 1/sqrt(sum(gstart^2)) xistart <- gstart*sigma1 stscores <- x %*% xistart ## Initial value for the objective function oldobj <- mean(phiBY3(stscores/sigma1, y, const)) converged <- FALSE kstep <- 1L while(kstep < kmax && !converged) { unisig <- function(sigma) mean(phiBY3(stscores/sigma, y, const)) ## ------ optimsig <- nlminb(sigma1, unisig, lower=0)# "FIXME" arguments to nlminb() ## ====== if(trace.lev) cat(sprintf("k=%2d, s1=%12.8g: => new s1= %12.8g", kstep, sigma1, optimsig$par))# MM: jhalf =!?= 1 here ?? sigma1 <- optimsig$par if(sigma1 < sigma.min) { if(trace.lev) cat("\n") warning(gettextf("Implosion: sigma1=%g became too small", sigma1)) kstep <- kmax #-> *no* convergence } else { ## gamma1 <- xistart/sigma1 scores <- stscores/sigma1 newobj <- mean(phiBY3(scores, y,const)) oldobj <- newobj grad.BY <- colMeans((derphiBY3(scores,y,const) %*% matrix(1,ncol=p))*x) h <- -grad.BY + c(grad.BY %*% xistart) * xistart finalstep <- h/sqrt(sum(h^2)) if(trace.lev) { if(trace.lev >= 2) cat(sprintf(", obj=%12.9g: ", oldobj)) cat("\n") } ## FIXME repeat { ... } {{next 4 lines are also inside while(..) below}} xi1 <- xistart+finalstep xi1 <- xi1/sum(xi1^2) scores1 <- (x %*% xi1)/sigma1 newobj <- mean(phiBY3(scores1,y,const)) ## If 'newobj' is not better, try taking a smaller step size: hstep <- 1. jhalf <- 1L while(jhalf <= maxhalf & newobj > oldobj) { hstep <- hstep/2 xi1 <- xistart+finalstep*hstep xi1 <- xi1/sqrt(sum(xi1^2)) scores1 <- x %*% xi1/sigma1 newobj <- mean(phiBY3(scores1,y,const)) if(trace.lev >= 2) cat(sprintf(" jh=%2d, hstep=%13.8g => new obj=%13.9g\n", jhalf, hstep, newobj)) jhalf <- jhalf+1L } converged <- not.improved <- (jhalf > maxhalf && newobj > oldobj) if(not.improved) { ## newobj is "worse" and step halving did not improve message("Convergence Achieved") } else { jhalf <- 1L xistart <- xi1 oldobj <- newobj stscores <- x %*% xi1 kstep <- kstep+1L } } } ## while( kstep ) if(kstep == kmax) { warning(gettextf("No convergence in %d steps.", kstep), domain=NA) list(convergence=FALSE, objective=0, coefficients= rep(NA,p)) } else { gammaest <- xistart/sigma1 V <- vcovBY3(x, y, const, estim=gammaest, addIntercept=FALSE) list(convergence=TRUE, objective=oldobj, coefficients=gammaest, cov = V, sterror = sqrt(diag(V)), iter = kstep) } } ### -- FIXME: nlminb() allows many tweaks !! ### -- ----- but we use nlminb() for ONE-dim. minimization over { sigma >= 0 } - really?? 
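## A small usage sketch (not run; purely illustrative). It assumes the 'vaso'
## data set shipped with this package, with 0/1 response 'Y' and numeric
## covariates 'Volume' and 'Rate':
if(FALSE) {
    data(vaso)
    Xv <- as.matrix(vaso[, c("Volume", "Rate")])
    yv <- vaso[, "Y"]
    fitWBY <- BYlogreg(Xv, yv, initwml = TRUE)  # weighted BY : robust (MCD-based) start
    fitBY  <- BYlogreg(Xv, yv, initwml = FALSE) # unweighted BY: plain ML start
    cbind(WBY = fitWBY$coefficients, BY = fitBY$coefficients,
          "SE(WBY)" = fitWBY$sterror)
}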
## MM: my version would rather use optimize() over over log(sigma) glmrobBY.control <- function(maxit = 1000, const = 0.5, maxhalf = 10) ## FIXME: sigma.min ## MM: 'acc' seems a misnomer to me, but it's inherited from MASS::rlm ## TODO acc = 1e-04, test.acc = "coef", tcc = 1.345) { ## if (!is.numeric(acc) || acc <= 0) ## stop("value of acc must be > 0") ## if (test.acc != "coef") ## stop("Only 'test.acc = \"coef\"' is currently implemented") ## if (!(any(test.vec == c("coef", "resid")))) ## stop("invalid argument for test.acc") if(!is.numeric(maxit) || maxit <= 0) stop("maximum number of \"kstep\" iterations must be > 0") if(!is.numeric(maxhalf) || maxhalf <= 0) stop("maximal number of *inner* step halvings must be > 0") ## if (!is.numeric(tcc) || tcc <= 0) ## stop("value of the tuning constant c (tcc) must be > 0") if(!is.numeric(const) || const <= 0) stop("value of the tuning constant c ('const') must be > 0") list(## acc = acc, consttest.acc = test.acc, const=const, maxhalf=maxhalf, maxit=maxit #, tcc = tcc ) } ##' @param intercept logical, if true, X[,] has an intercept column which should ##' not be used for rob.wts glmrobBY <- function(X, y, weights = NULL, start = NULL, offset = NULL, method = c("WBY","BY"), weights.on.x = "none", control = glmrobBY.control(...), intercept = TRUE, trace.lev = 0, ...) { ### THIS is *NOT* exported method <- match.arg(method) if(!is.null(weights) || any(weights != 1)) ## FIXME (?) stop("non-trivial prior 'weights' are not yet implemented for \"BY\"") if(!is.null(start)) stop(" 'start' cannot yet be passed to glmrobBY()") if(!is.null(offset)) stop(" 'offset' is not yet implemented for \"BY\"") const <- if(is.null(cc <- control$const )) 0.5 else cc kmax <- if(is.null(cc <- control$maxit )) 1e3 else cc maxhalf <- if(is.null(cc <- control$maxhalf)) 10 else cc if(!identical(weights.on.x, "none")) stop(gettextf("'weights.on.x = \"%s\"' is not implemented", format(weights.on.x)), domain=NA) ## w.x <- robXweights(weights.on.x, X=X, intercept=intercept) ## ## MM: all(?) the BY3() functions below would need to work with weights... r <- BYlogreg(x0=X, y=y, initwml = (method == "WBY"), ## w.x=w.x, addIntercept = !intercept, ## add intercept if there is none const=const, kmax=kmax, maxhalf=maxhalf, ## FIXME sigma.min (is currently x-scale dependent !????) trace.lev=trace.lev) ## FIXME: make result more "compatible" with other glmrob() methods r } ### Functions needed for the computation of estimator of Bianco and Yohai ---------------------- ## From their paper: ## A last remark is worth mentioning: when huge outliers occur in ## the logistic regression setting, often numerical imprecision occurs in the computation ## of the deviances given by ## d(s;y_i)= -y_i log F(s) - (1-y_i) log{1-F(s)} . ## ## Instead of directly computing this expression, it can be seen that a ## numerically more stable and accurate formula is given by ## log(1 + exp(-abs(s))) + abs(s)* ((y-0.5)*s < 0) ## in which the second term equals abs(s) if the observation is misclassified, 0 otherwise. dev1 <- function(s,y) log(1+exp(-abs(s))) + abs(s)*((y-0.5)*s<0) dev2 <- function(s,y) log1p(exp(-abs(s))) + abs(s)*((y-0.5)*s<0) dev3 <- function(s,y) -( y * plogis(s, log.p=TRUE) + (1-y)*plogis(s, lower.tail=FALSE, log.p=TRUE)) ## MM[FIXME]: first tests indicate that dev3() is clearly more accurate than ## their dev1() !! ## MM{FIXME2}: In code below have (or "had") three cases of same formula, but ## with 's>0' instead of 's<0' : This is == dev?(-s, y) !! 
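## Quick numerical illustration of the three equivalent deviance formulas
## (not run; dev1(), dev2(), dev3() are removed again right below):
if(FALSE) {
    s <- c(-30, -5, 0, 5, 30)
    y <- c(  0,  1, 1, 0,  1)
    cbind(dev1 = dev1(s, y), dev2 = dev2(s, y), dev3 = dev3(s, y))
    ## all three agree up to rounding error; dev2() [log1p()] and
    ## dev3() [plogis(*, log.p=TRUE)] are the numerically more careful versions
}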
## for now, 100% back-compatibility: devBY <- dev1 rm(dev1, dev2, dev3) ## MM: This is from my vignette, but *not* used log1pexp <- function(x) { if(has.na <- any(ina <- is.na(x))) { y <- x x <- x[ok <- !ina] } t1 <- x <= 18 t2 <- !t1 & (tt <- x <= 33.3) r <- x r[ t1] <- log1p(exp(x[t1])) r[ t2] <- { x2 <- x[t2]; x2 + exp(-x2) } r[!tt] <- x[!tt] if(has.na) { y[ok] <- r ; y } else r } phiBY3 <- function(s,y,c3) { s <- as.double(s) ## MM FIXME log(1 + exp(-.)) ... but read the note above !! --- dev. <- devBY(s,y) ## FIXME: GBY3Fs() computes the 'dev' above *again*, and ## GBY3Fsm() does with 's>0' instead of 's<0' rhoBY3(dev.,c3) + GBY3Fs(s,c3) + GBY3Fsm(s,c3) } rhoBY3 <- function(t,c3) { ec3 <- exp(-sqrt(c3)) t*ec3* (t <= c3) + (ec3*(2+(2*sqrt(c3))+c3) - 2*exp(-sqrt(t))*(1+sqrt(t)))* (t > c3) } psiBY3 <- function(t,c3) { exp(-sqrt(c3)) *(t <= c3) + exp(-sqrt( t)) *(t > c3) } ## MM: This is shorter (but possibly slower when most t are <= c3 : ## psiBY3 <- function(t,c3) exp(-sqrt(pmax(t, c3))) ##' d/dt psi(t, c3) derpsiBY3 <- function(t, c3) { r <- t r[in. <- (t <= c3)] <- 0 if(any(out <- !in.)) { t <- t[out] st <- sqrt(t) r[out] <- -exp(-st)/(2*st) } r } ## MM: FIXME this is not used above sigmaBY3 <- function(sigma,s,y,c3) { mean(phiBY3(s/sigma,y,c3)) } derphiBY3 <- function(s,y,c3) { Fs <- exp(-devBY(s,1)) ds <- Fs*(1-Fs) ## MM FIXME: use expm1() dev. <- devBY(s,y) Gprim1 <- devBY(s,1) Gprim2 <- devBY(-s,1) -psiBY3(dev.,c3)*(y-Fs) + ds*(psiBY3(Gprim1,c3) - psiBY3(Gprim2,c3)) } der2phiBY3 <- function(s, y, c3) { s <- as.double(s) Fs <- exp(-devBY(s,1)) ds <- Fs*(1-Fs) ## MM FIXME: use expm1() dev. <- devBY(s,y) Gprim1 <- devBY(s,1) Gprim2 <- devBY(-s,1) der2 <- derpsiBY3(dev.,c3)*(Fs-y)^2 + ds*psiBY3(dev.,c3) der2 <- der2+ ds*(1-2*Fs)*(psiBY3(Gprim1,c3) - psiBY3(Gprim2,c3)) der2 - ds*(derpsiBY3(Gprim1,c3)*(1-Fs) + derpsiBY3(Gprim2,c3)* Fs ) } GBY3Fs <- function(s,c3) { e.f <- exp(0.25)*sqrt(pi) ## MM FIXME: Fs = exp(..) and below use log(Fs) !! Fs <- exp(-devBY(s,1)) resGinf <- e.f*(pnorm(sqrt(2)*(0.5+sqrt(-log(Fs))))-1) ## MM FIXME: use expm1(): resGinf <- (resGinf+(Fs*exp(-sqrt(-log(Fs)))))*as.numeric(s <= -log(exp(c3)-1)) resGsup <- ((Fs*exp(-sqrt(c3)))+(e.f*(pnorm(sqrt(2)*(0.5+sqrt(c3)))-1))) * as.numeric(s > -log(exp(c3)-1)) resGinf + resGsup } GBY3Fsm <- function(s,c3) { e.f <- exp(0.25)*sqrt(pi) ## MM FIXME: Fsm = exp(..) and below use log(Fsm) !! 
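    ## (mirror image of GBY3Fs() above, evaluated at F(-s) = 1 - F(s);
    ##  phiBY3() above is  rhoBY3(dev) + GBY3Fs(s) + GBY3Fsm(s) )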
Fsm <- exp(-devBY(-s,1)) resGinf <- e.f*(pnorm(sqrt(2)*(0.5+sqrt(-log(Fsm))))-1) ## MM FIXME: use expm1(): resGinf <- (resGinf+(Fsm*exp(-sqrt(-log(Fsm))))) * as.numeric(s >= log(exp(c3)-1)) resGsup <- ((Fsm*exp(-sqrt(c3)))+(e.f*(pnorm(sqrt(2)*(0.5+sqrt(c3)))-1))) * as.numeric(s < log(exp(c3)-1)) resGinf + resGsup } ## Compute the standard erros of the estimates - ## this is done by estimating the asymptotic variance of the normal ## limiting distribution of the BY estimator - as derived in Bianco ## and Yohai (1996) ## sterby3 <- function(x0, y, const, estim, addIntercept) { sqrt(diag(vcovBY3(x0, y, const=const, estim=estim, addIntercept=addIntercept))) } vcovBY3 <- function(z, y, const, estim, addIntercept) { stopifnot(length(dim(z)) == 2) if(addIntercept) z <- cbind(1, z) d <- dim(z) n <- d[1] p <- d[2] argum <- z %*% estim matM <- IFsqr <- matrix(0, p, p) for(i in 1:n) { myscalar <- as.numeric(der2phiBY3(argum[i],y[i], c3=const)) zzt <- tcrossprod(z[i,]) matM <- matM + myscalar * zzt IFsqr <- IFsqr + derphiBY3(argum[i],y[i], c3=const)^2 * zzt } matM <- matM/n matMinv <- solve(matM) IFsqr <- IFsqr/n ## Now, asymp.cov = matMinv %*% IFsqr %*% t(matMinv) ## provide vcov(): the full matrix (matMinv %*% IFsqr %*% t(matMinv))/n } robustbase/R/classPC.R0000644000176200001440000000361513710224622014303 0ustar liggesusers##' @title Simple Matrix Rank ====> ../man/rankMM.Rd rankMM <- function(A, tol = NULL, sv = svd(A,0,0)$d) { d <- dim(A) stopifnot(length(d)==2, length(sv) == min(d), min(d) >= 1L, all(diff(sv) <= 0)) # must be sorted decreasingly if(is.null(tol)) tol <- max(d) * .Machine$double.eps * abs(sv[1L]) else stopifnot(is.numeric(tol), tol >= 0) sum(sv >= tol) } ##' Flip the signs of the loadings ##' - comment from Stephan Milborrow .signflip <- function(loadings) { apply(loadings, 2L, function(x) if(x[which.max(abs(x))] < 0) -x else x) } ##' @title Classical Principal Components ... ==> ../man/classPC.Rd classPC <- function(x, scale=FALSE, center=TRUE, signflip=TRUE, via.svd = n > p, scores=FALSE) { if(!is.numeric(x) || !is.matrix(x)) stop("'x' must be a numeric matrix") else if((n <- nrow(x)) <= 1) stop("The sample size must be greater than 1 for svd") p <- ncol(x) x <- scale(x, center=center, scale=scale) ## ----- if(isTRUE(scale)) scale <- attr(x, "scaled:scale") if(isTRUE(center)) center <- attr(x, "scaled:center") if(via.svd) { svd <- svd(x, nu=0) rank <- rankMM(x, sv=svd$d) loadings <- svd$v[,1:rank, drop=FALSE] eigenvalues <- (svd$d[1:rank])^2 /(n-1) ## FIXME: here .^2; later sqrt(.) } else { ## n <= p; was "kernelEVD" e <- eigen(tcrossprod(x), symmetric=TRUE) evs <- e$values tolerance <- n * max(evs) * .Machine$double.eps rank <- sum(evs > tolerance) evs <- evs[ii <- seq_len(rank)] eigenvalues <- evs / (n-1) ## MM speedup, was: crossprod(..) %*% diag(1/sqrt(evs)) loadings <- crossprod(x, e$vectors[,ii]) * rep(1/sqrt(evs), each=p) } ## VT::15.06.2010 - signflip: flip the sign of the loadings if(signflip) loadings <- .signflip(loadings) list(rank=rank, eigenvalues=eigenvalues, loadings=loadings, scores = if(scores) x %*% loadings, center=center, scale=scale) } robustbase/R/AAA.R0000644000176200001440000001224514531132254013335 0ustar liggesusers ## if(getRversion() < "2.13") { ## nobs <- function (object, ...) UseMethod("nobs") ## ## also used for mlm fits *and* lmrob : ## nobs.lm <- function(object, ...) ## if(!is.null(w <- object$weights)) sum(w != 0) else NROW(object$residuals) ## ## for glmrob : ## nobs.glm <- function(object, ...) 
sum(!is.na(object$residuals)) ## } ## Here and in NAMESPACE: if(getRversion() < "3.1.0") { ## cut'n'paste from R's source src/library/stats/R/confint.R format.perc <- function(probs, digits) ## Not yet exported, maybe useful in other contexts: ## quantile.default() sometimes uses a version of it paste(format(100 * probs, trim = TRUE, scientific = FALSE, digits = digits), "%") confint.lm <- function(object, parm, level = 0.95, ...) { cf <- coef(object) pnames <- names(cf) if(missing(parm)) parm <- pnames else if(is.numeric(parm)) parm <- pnames[parm] a <- (1 - level)/2 a <- c(a, 1 - a) fac <- qt(a, object$df.residual) # difference from default method pct <- format.perc(a, 3) ci <- array(NA, dim = c(length(parm), 2L), dimnames = list(parm, pct)) ses <- sqrt(diag(vcov(object)))[parm] # gives NA for aliased parms ci[] <- cf[parm] + ses %o% fac ci } ## cut'n'paste from R's source src/library/stats/R/dummy.coef.R dummy.coef.lm <- function(object, use.na=FALSE, ...) { Terms <- terms(object) tl <- attr(Terms, "term.labels") int <- attr(Terms, "intercept") facs <- attr(Terms, "factors")[-1, , drop=FALSE] Terms <- delete.response(Terms) vars <- all.vars(Terms) xl <- object$xlevels if(!length(xl)) { # no factors in model return(as.list(coef(object))) } nxl <- setNames(rep.int(1, length(vars)), vars) tmp <- unlist(lapply(xl, length)) ## ?? vapply(xl, length, 1L) nxl[names(tmp)] <- tmp lterms <- apply(facs, 2L, function(x) prod(nxl[x > 0])) nl <- sum(lterms) args <- setNames(vector("list", length(vars)), vars) for(i in vars) args[[i]] <- if(nxl[[i]] == 1) rep.int(1, nl) else factor(rep.int(xl[[i]][1L], nl), levels = xl[[i]]) dummy <- do.call("data.frame", args) pos <- 0 rn <- rep.int(tl, lterms) rnn <- rep.int("", nl) for(j in tl) { i <- vars[facs[, j] > 0] ifac <- i[nxl[i] > 1] if(length(ifac) == 0L) { # quantitative factor rnn[pos+1] <- j } else if(length(ifac) == 1L) { # main effect dummy[ pos+1L:lterms[j], ifac ] <- xl[[ifac]] rnn[ pos+1L:lterms[j] ] <- as.character(xl[[ifac]]) } else { # interaction tmp <- expand.grid(xl[ifac]) dummy[ pos+1L:lterms[j], ifac ] <- tmp rnn[ pos+1L:lterms[j] ] <- apply(as.matrix(tmp), 1L, function(x) paste(x, collapse=":")) } pos <- pos + lterms[j] } ## some terms like poly(x,1) will give problems here, so allow ## NaNs and set to NA afterwards. mf <- model.frame(Terms, dummy, na.action=function(x)x, xlev=xl) mm <- model.matrix(Terms, mf, object$contrasts, xl) if(any(is.na(mm))) { warning("some terms will have NAs due to the limits of the method") mm[is.na(mm)] <- NA } coef <- object$coefficients if(!use.na) coef[is.na(coef)] <- 0 asgn <- attr(mm,"assign") res <- setNames(vector("list", length(tl)), tl) for(j in seq_along(tl)) { keep <- asgn == j ij <- rn == tl[j] res[[j]] <- setNames(drop(mm[ij, keep, drop=FALSE] %*% coef[keep]), rnn[ij]) } if(int > 0) { res <- c(list("(Intercept)" = coef[int]), res) } class(res) <- "dummy_coef" res } }# if R <= 3.1.0 ## Not exported; used for faster checking, e.g., on CRAN doExtras <- function() { interactive() || nzchar(Sys.getenv("R_robustbase_check_extra")) || identical("true", unname(Sys.getenv("R_PKG_CHECKING_doExtras"))) } if(getRversion() < "3.5") { isTRUE <- function(x) is.logical(x) && length(x) == 1L && !is.na(x) && x isFALSE <- function(x) is.logical(x) && length(x) == 1L && !is.na(x) && !x if(getRversion() < "3.3") { sigma <- function(object, ...) UseMethod("sigma") ## For completeness, and when comparing with nlrob() results: sigma.nls <- function(object, ...) 
## sqrt ( sum( R_i ^ 2) / (n - p) ) : sqrt( deviance(object) / (nobs(object) - length(coef(object))) ) } } ## shortcut -- used often in print() etc: pasteK <- function(...) paste(..., collapse = ", ") ## stopifnot(..) helper : is.1num <- function(x) is.numeric(x) && length(x) == 1L ##' return 'x' unless it is NULL where you'd use 'orElse' `%||%` <- function(x, orElse) if(!is.null(x)) x else orElse ##' Augment a vcov-matrix by NA rows & cols when needed; from */R/src/library/stats/R/vcov.R .vcov.aliased <- asNamespace("stats")$.vcov.aliased %||% function(aliased, vc, complete = TRUE) { ## Checking for "NA coef": "same" code as in print.summary.lm() in ./lm.R : if(complete && nrow(vc) < (P <- length(aliased)) && any(aliased)) { ## add NA rows and columns in vcov cn <- names(aliased) VC <- matrix(NA_real_, P, P, dimnames = list(cn,cn)) j <- which(!aliased) VC[j,j] <- vc VC } else # default vc } ## e.g. for once-per-session warnings: .optEnv <- new.env(parent = emptyenv(), hash = FALSE) robustbase/R/glmrob.R0000644000176200001440000002561213325654420014243 0ustar liggesusersglmrob <- function (formula, family, data, weights, subset, na.action, start = NULL, offset, method = c("Mqle", "BY", "WBY", "MT"), weights.on.x = c("none", "hat", "robCov", "covMcd"), control = NULL, model = TRUE, x = FALSE, y = TRUE, contrasts = NULL, trace.lev = 0, ...) { call <- match.call() if (is.character(family)) family <- get(family, mode = "function", envir = parent.frame()) if (is.function(family)) family <- family() fami <- family$family if(is.null(fami)) stop(gettextf("'%s' is not a valid family (see ?family)", as.character(call[["family"]])), domain=NA) if (!(fami %in% c("binomial", "poisson", "Gamma", "gaussian"))) { stop(gettextf("Robust GLM fitting not yet implemented for family %s", fami), domain=NA) } if (missing(data)) data <- environment(formula) ## mf <- match.call(expand.dots = FALSE) m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"), names(mf), 0) mf <- mf[c(1, m)] mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") mf <- eval(mf, parent.frame()) if(identical(method, "model.frame")) return(mf) mt <- attr(mf, "terms") Y <- model.response(mf, "any")# "numeric" or "factor" if (length(dim(Y)) == 1) { nm <- rownames(Y) dim(Y) <- NULL if (!is.null(nm)) names(Y) <- nm } X <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else matrix(NA_real_, NROW(Y), 0) weights <- model.weights(mf) offset <- model.offset(mf) if (!is.null(weights) && any(weights < 0)) stop("'weights' must be non-negative") if (!is.null(offset) && length(offset) != NROW(Y)) stop(gettextf("Number of offsets is %d, should rather equal %d (number of observations)", length(offset), NROW(Y)), domain=NA) method <- match.arg(method) meth. <- if(method == "WBY") "BY" else method ### FIXME: the whole 'control' should be changed to "copy" lmrob() and lmrob.control() ## ------- --> *one* exported glmrob.control() function with 'method' and switch() inside... ## see >>> ./lmrob.MM.R if(is.null(control)) # -> use e.g., glmrobMqle.control() control <- get(paste0("glmrob", meth., ".control"))(...) 
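## (illustrative note:) the  get(paste0("glmrob", meth., ".control"))(...)
## call just above resolves the per-method control constructor by name, e.g.
##   meth. = "Mqle"              --> glmrobMqle.control(...)
##   meth. = "BY" ("BY" / "WBY") --> glmrobBY.control(...)
##   meth. = "MT"                --> glmrobMT.control(...)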
if(missing(weights.on.x) || is.character(weights.on.x)) weights.on.x <- match.arg(weights.on.x) else if(!(is.function(weights.on.x) || is.list(weights.on.x) || (is.numeric(weights.on.x) && length(weights.on.x) == NROW(Y)))) stop("'weights.on.x' must be a string, function, list or numeric n-vector") if(!is.null(start) && !is.numeric(start)) { ## initialization methods if(!is.character(start)) stop("'start' must be a numeric vector, NULL, or a character string") start <- switch(start, "lmrob" =, "lmrobMM" = { if(!is.null(weights)) warnings("weights are not yet used in computing start estimate") lmrob.fit(x = X, y = family$linkinv(Y), control=lmrob.control())$coefficients }, stop("invalid 'start' string")) } fit <- switch(method, "cubif" = stop("For method 'cubif', use glmRob() from package 'robust'") , "Mqle" = ## --> ./glmrobMqle.R glmrobMqle(X = X, y = Y, weights = weights, start = start, offset = offset, family = family, weights.on.x = weights.on.x, control = control, intercept = attr(mt, "intercept") > 0, trace=trace.lev), "BY" =, "WBY" = { if(fami != "binomial") stop(gettextf( "method='%s' is only applicable for binomial family, but family=\"\"", method, fami), domain=NA) ### FIXME: use glmrobBY(..) with these arguments, including 'weights' glmrobBY(X=X, y=Y, weights=weights, start=start, method=method, ## == "BY" / "WBY" weights.on.x = weights.on.x, control = control, intercept = attr(mt, "intercept") > 0, trace.lev=trace.lev) }, "MT" = { glmrobMT(x=X,y=Y, weights=weights, start=start, offset = offset, family=family, weights.on.x=weights.on.x, control=control, intercept = attr(mt, "intercept") > 0, trace.lev=trace.lev) }, stop("invalid 'method': ", method)) ##- if (any(offset) && attr(mt, "intercept") > 0) { ##- fit$null.deviance <- glm.fit(x = X[, "(Intercept)", drop = FALSE], ##- y = Y, weights = weights, offset = offset, ##- control = control, intercept = TRUE)$deviance ##- } fit$na.action <- attr(mf, "na.action") if (model) fit$model <- mf if (x) fit$x <- X if (!y) ## fit$y <- NULL warning("setting 'y = FALSE' has no longer any effect") fit <- c(fit, list(call = call, formula = formula, terms = mt, data = data, offset = offset, control = control, method = method, prior.weights = if(is.null(weights)) rep.int(1, nrow(X)) else weights, contrasts = attr(X, "contrasts"), xlevels = .getXlevels(mt, mf))) class(fit) <- c("glmrob", "glm") fit } summary.glmrob <- function(object, correlation=FALSE, symbolic.cor=FALSE, ...) { dispersion <- object$dispersion if(is.null(dispersion)) dispersion <- 1 coefs <- object$coefficients aliased <- is.na(coefs)# needs care; also used in print method if(any(aliased)) coefs <- coefs[!aliased] covmat <- object$cov s.err <- sqrt(diag(covmat)) zvalue <- coefs/s.err pvalue <- 2 * pnorm(-abs(zvalue)) coef.table <- cbind("Estimate" = coefs, "Std. Error" = s.err, "z value" = zvalue, "Pr(>|z|)" = pvalue) ans <- c(object[c("call", "terms", "family", "iter", "control", "method", "residuals", "fitted.values", "w.r", "w.x")], ## MM: should rather keep more from 'object' ? ## currently, cannot even print the asympt.efficiency! list(deviance=NULL, df.residual=NULL, null.deviance=NULL, df.null= NULL, df= NULL, ## (because of 0 weights; hmm,...) 
aliased = aliased, coefficients = coef.table, dispersion = dispersion, cov.scaled = covmat)) if (correlation) { ans$correlation <- cov2cor(covmat) ans$symbolic.cor <- symbolic.cor } structure(ans, class = "summary.glmrob") } ## almost a copy of vcov.glm() [if that didn't have summmary.glm() explicitly] vcov.glmrob <- function (object, ...) { so <- summary(object, corr = FALSE, ...) ## so$dispersion * so$cov.unscaled ## changed from cov.unscaled to cov.scaled so$cov.scaled } print.glmrob <- function (x, digits = max(3, getOption("digits") - 3), ...) { cat("\nCall: ", deparse(x$call), "\n\n") if (length(coef(x))) { cat("Coefficients") if (is.character(co <- x$contrasts)) cat(" [contrasts: ", apply(cbind(names(co), co), 1, paste, collapse = "="), "]") cat(":\n") print.default(format(x$coefficients, digits = digits), print.gap = 2, quote = FALSE) } else cat("No coefficients\n\n") cat("\nNumber of observations:", length(x$residuals), "\nFitted by method ", sQuote(x$method), "\n") invisible(x) } print.summary.glmrob <- function (x, digits = max(3, getOption("digits") - 3), symbolic.cor = x$symbolic.cor, signif.stars = getOption("show.signif.stars"), ...) { cat("\nCall: ", deparse(x$call), "\n\n") if (length(cf <- coef(x))) { if(nsingular <- sum(x$aliased)) # glm has df[3] - df[1] cat("\nCoefficients: (", nsingular, " not defined because of singularities)\n", sep = "") else cat("\nCoefficients:\n") printCoefmat(cf, digits = digits, signif.stars = signif.stars, na.print = "NA", ...) summarizeRobWeights(x$w.r * x$w.x, digits = digits, header = "Robustness weights w.r * w.x:", ...) } else cat("No coefficients\n\n") n <- length(x$residuals) cat("\nNumber of observations:", n, "\nFitted by method", sQuote(x$method)," (in", x$iter, "iterations)\n") cat("\n(Dispersion parameter for ", x$family$family, " family taken to be ", format(x$dispersion), ")\n\n",sep = "") if(any(!is.null(unlist(x[c("null.deviance", "deviance")])))) cat(apply(cbind(paste(format(c("Null", "Residual"), justify="right"), "deviance:"), format(unlist(x[c("null.deviance", "deviance")]), digits=max(5, digits + 1)), " on", format(unlist(x[c("df.null", "df.residual")])), " degrees of freedom\n"), 1L, paste, collapse=" "), "\n", sep = "") else cat("No deviance values available \n") correl <- x$correlation if (!is.null(correl)) { p <- NCOL(correl) if (p > 1) { cat("\nCorrelation of Coefficients:\n") if (isTRUE(symbolic.cor)) { print(symnum(correl, abbr.colnames=NULL)) } else { correl <- format(round(correl, 2), nsmall=2, digits=digits) correl[!lower.tri(correl)] <- "" print(correl[-1, -p, drop=FALSE], quote=FALSE) } } } printControl(x$control, digits = digits) cat("\n") invisible(x) } weights.glmrob <- function(object, type = c("prior", "robustness"), ...) { type <- match.arg(type) w <- if (type == "prior") { ## Issue warning only if called from toplevel. Otherwise the warning pop ## up at quite unexpected places, e.g., case.names(). if (is.null(object[["weights"]]) && identical(parent.frame(), .GlobalEnv)) warning("No weights defined for this object. Use type=\"robustness\" argument to get robustness weights.") object[["weights"]] } else object$w.r * object$w.x ## those also used summarizeRobWeights(x$w.r * x$w.x, ..) if (is.null(object$na.action)) w else naresid(object$na.action, w) } ## Stems from a copy of residuals.glm() in ## ~/R/D/r-devel/R/src/library/stats/R/glm.R residuals.glmrob <- function(object, type = c("deviance", "pearson", "working", "response", "partial"), ...) 
{ type <- match.arg(type) y <- object$y r <- object$residuals mu <- object$fitted.values wts <- object$prior.weights # ok p <- length(object$coefficients) switch(type, deviance=, pearson=, response= if(is.null(y)) { mu.eta <- object$family$mu.eta eta <- object$linear.predictors ## we cannot use 'r <- ...$residuals' __ FIXME __ stop("need non-robust working residuals for this model type") y <- mu + r * mu.eta(eta) }) res <- switch(type, ## deviance = if(object$df.residual > 0) { deviance = if((nobs(object) - p) > 0) { d.res <- sqrt(pmax.int((object$family$dev.resids)(y, mu, wts), 0)) ifelse(y > mu, d.res, -d.res) } else rep.int(0, length(mu)), pearson = (y-mu)*sqrt(wts)/sqrt(object$family$variance(mu)), working = r, response = y - mu, partial = r ) if(!is.null(object$na.action)) res <- naresid(object$na.action, res) if (type == "partial") ## need to avoid doing naresid() twice. res <- res+predict(object, type="terms") res } robustbase/R/tolEllipse.R0000644000176200001440000001100112440116711015051 0ustar liggesusers#### This is from the R package #### #### rrcov : Scalable Robust Estimators with High Breakdown Point #### #### by Valentin Todorov ### This program is free software; you can redistribute it and/or modify ### it under the terms of the GNU General Public License as published by ### the Free Software Foundation; either version 2 of the License, or ### (at your option) any later version. ### ### This program is distributed in the hope that it will be useful, ### but WITHOUT ANY WARRANTY; without even the implied warranty of ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ### GNU General Public License for more details. ### ### You should have received a copy of the GNU General Public License ### along with this program; if not, a copy is available at ### http://www.r-project.org/Licenses/ ## I would like to thank Peter Filzmoser for providing the initial code of ## this function. tolEllipsePlot <- function(x, m.cov = covMcd(x), cutoff = NULL, id.n = NULL, classic = FALSE, tol = 1e-07, xlab = "", ylab = "", main = "Tolerance ellipse (97.5%)", txt.leg = c("robust", "classical"), col.leg = c("red", "blue"), lty.leg = c("solid","dashed")) { ##@bdescr ## Tolerance Ellipse Plot: ## Plots the 97.5% tolerance ellipse of the bivariate data set (x). ## The ellipse is defined by those data points whose distance (dist) ## is equal to the squareroot of the 97.5% chisquare quantile with ## 2 degrees of freedom. ##@edescr ## ##@in x : [matrix] A data.frame or matrix, n > 2*p ##@in m.cov : [mcd object] An object of type mcd - its attributes ## center and cov will be used ##@in cutoff : [number] Distance needed to flag data points outside the ellipse ##@in outflag : [logical] Whether to print the labels of the outliers ##@in tol : [number] tolerance to be used for computing the inverse see 'solve'. ## defaults to 1e-7 ## MM: This is nothing else but a version cluster::ellipsoidPoints() !! 
-- FIXME ellips <- function(loc, cov) { ## calculates a 97,5% ellipsoid ## input: data set, location and covariance estimate, cutoff dist <- sqrt(qchisq(0.975, 2)) A <- solve(cov) eA <- eigen(A) ev <- eA$values lambda1 <- max(ev) lambda2 <- min(ev) eigvect <- eA$vectors[, order(ev)[2]] z <- seq(0, 2 * pi, by = 0.01) z1 <- dist/sqrt(lambda1) * cos(z) z2 <- dist/sqrt(lambda2) * sin(z) alfa <- atan(eigvect[2]/eigvect[1]) r <- matrix(c(cos(alfa), - sin(alfa), sin(alfa), cos(alfa)), ncol = 2) t(loc + t(cbind(z1, z2) %*% r)) # xmin <- min(x, z[, 1]) } ## parameters and preconditions if(is.data.frame(x)) x <- data.matrix(x) if(!is.matrix(x) || !is.numeric(x)) stop("x is not a numeric dataframe or matrix.") n <- dim(x)[1] p <- dim(x)[2] if(p != 2) stop("Dimension {= ncol(x)} must be 2!") if(!is.numeric(m.cov$center) || !is.numeric(m.cov$cov)) stop("argument 'm.cov' must have numeric components 'center' and 'cov'") x.loc <- m.cov$center x.cov <- n/(n - 1) * m.cov$cov xM <- colMeans(x) z1 <- ellips(loc = xM, cov = n/(n - 1) * cov.wt(x)$cov) z2 <- ellips(loc = x.loc, cov = x.cov) x1 <- c(min(x[, 1], z1[, 1], z2[, 1]), max(x[,1],z1[,1], z2[,1])) y1 <- c(min(x[, 2], z1[, 2], z2[, 2]), max(x[,2],z1[,2], z2[,2])) md <- sqrt(mahalanobis(x, xM, cov(x), tol=tol)) rd <- sqrt(mahalanobis(x,m.cov$center, m.cov$cov, tol=tol)) ## Note: the *calling* function may pass a 'missing' value if(missing(cutoff) || is.null(cutoff)) cutoff <- sqrt(qchisq(0.975, df = 2)) if(missing(id.n) || is.null(id.n)) id.n <- sum(rd > cutoff) ### (2,1) is wrong for 'classic' -- we *overplot*: ## if(classic) ## opr <- if(prod(par("mfrow"))== 1) par(mfrow=c(1,2), pty="m") else list() ## MM: this is *NOT* good : ## else par(mfrow = c(1, 1)) ## 1. Robust tolerance ## define the plot, plot a box, plot the "good" points, ## plot the outliers either as points or as numbers depending on outflag, ## plot the ellipse, write a title of the plot plot(x, xlim = x1, ylim = y1, xlab = xlab, ylab = ylab, main = main) box() xrange <- par("usr") xrange <- xrange[2] - xrange[1] if(id.n >= 1) { ind <- sort(rd, index.return=TRUE)$ix[(n-id.n+1):n] text(x[ind, 1] + xrange/50, x[ind, 2], ind) } points(z2, type = "l", lty=lty.leg[1], col=col.leg[1]) ## 2. Classical tolerance if(classic){ points(z1, type = "l", lty=lty.leg[2], col=col.leg[2]) legend("bottomright", txt.leg, lty = lty.leg, col = col.leg) ## par(opr) } invisible() } robustbase/R/ltsReg.R0000644000176200001440000007073414440373076014232 0ustar liggesusers#### This is originally from the R package #### #### rrcov : Scalable Robust Estimators with High Breakdown Point #### #### by Valentin Todorov ## I would like to thank Peter Rousseeuw and Katrien van Driessen for ## providing the initial code of this function. ### This program is free software; you can redistribute it and/or modify ### it under the terms of the GNU General Public License as published by ### the Free Software Foundation; either version 2 of the License, or ### (at your option) any later version. ### ### This program is distributed in the hope that it will be useful, ### but WITHOUT ANY WARRANTY; without even the implied warranty of ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ### GNU General Public License for more details. ### ### You should have received a copy of the GNU General Public License ### along with this program; if not, a copy is available at ### http://www.r-project.org/Licenses/ ltsReg <- function(x, ...) 
UseMethod("ltsReg") ltsReg.formula <- function(formula, data, subset, weights, na.action, model = TRUE, x.ret = FALSE, y.ret = FALSE, contrasts = NULL, offset, ...) { cl <- match.call() ## method <- match.arg(method) ## keep only the arguments which should go into the model frame mf <- match.call(expand.dots = FALSE) m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"), names(mf), 0) mf <- mf[c(1, m)] mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") mf <- eval.parent(mf) ## if (method == "model.frame") return(mf) mt <- attr(mf, "terms") y <- model.response(mf, "numeric") ## was model.extract(mf, "response") if (is.empty.model(mt)) { # "y ~ 0" : no coefficients x <- offset <- NULL fit <- list(method = "ltsReg for empty model", coefficients = numeric(0), residuals = y, fitted.values = 0 * y, lts.wt = 1 + 0 * y, rank = 0, intercept = FALSE, df.residual = length(y)) ## alpha = alpha from "..." class(fit) <- "lts" } else { w <- model.weights(mf) offset <- model.offset(mf) x <- model.matrix(mt, mf, contrasts) ## Check if there is an intercept in the model. ## A formula without intercept looks like this: Y ~ . -1 ## If so, remove the corresponding column and use intercept=TRUE ## in the call to ltsReg.default(); by default, intercept=FALSE. xint <- match("(Intercept)", colnames(x), nomatch = 0) if(xint) x <- x[, -xint, drop = FALSE] fit <- ltsReg.default(x, y, intercept = (xint > 0), ...) } ## 3) return the na.action info fit$na.action <- attr(mf, "na.action") fit$offset <- offset ## 4) return the contrasts used in fitting: possibly as saved earlier. fit$contrasts <- attr(x, "contrasts") fit$xlevels <- .getXlevels(mt, mf) fit$call <- cl fit$terms <- mt if(model) fit$model <- mf if(x.ret) fit$x <- x # or? if(xint == 0) x else x[, c(2:p,1), drop=FALSE] if(y.ret) fit$y <- y fit } ltsReg.default <- function (x, y, intercept = TRUE, alpha = control$ alpha, nsamp = control$ nsamp, adjust = control$ adjust, mcd = TRUE, qr.out = FALSE, yname = NULL, seed = control$ seed, trace = control$ trace, use.correction = control$ use.correction, wgtFUN = control$ wgtFUN, control = rrcov.control(), ...) { ## Analyze and validate the input parameters ... ## if a control object was supplied, take the option parameters from it, ## but if single parameters were passed (not defaults) they will override the ## control object. ### MM: FIXME: this sucks ('control' may contain *some* but not all parts!): if(!missing(control)) { defCtrl <- rrcov.control() # default control if(is.null(alpha) && control$alpha != defCtrl$alpha) alpha <- control$alpha if(nsamp == defCtrl$nsamp) nsamp <- control$nsamp if(identical(seed, defCtrl$seed)) seed <- control$seed if(use.correction == defCtrl$use.correction) use.correction <- control$use.correction if(adjust == defCtrl$adjust) adjust <- control$adjust } else defCtrl <- control ## == rrcov.control() ## For back compatibility, as some new args did not exist pre 2013-04, ## and callers of covMcd() may use a "too small" 'control' list: if(missing(wgtFUN)) getDefCtrl("wgtFUN", defCtrl) if(length(seed) > 0) { if(length(seed) < 3L || seed[1L] < 100L) stop("invalid 'seed'. 
Must be compatible with .Random.seed !") if(!is.null(seed.keep <- get0(".Random.seed", envir = .GlobalEnv, inherits = FALSE))) on.exit(assign(".Random.seed", seed.keep, envir = .GlobalEnv)) assign(".Random.seed", seed, envir = .GlobalEnv) } if(alpha < 1/2 || alpha > 1) stop("alpha not inside [1/2, 1]") ## FIXME: change this analogously to covMcd()'s and covComedian()'s ## quantiel <- qnorm(0.9875) if(is.character(wgtFUN)) { switch(wgtFUN, "01.original" = { cW <- qnorm(0.9875) wgtFUN <- function(r) as.numeric(abs(r) <= cW) }, stop("unknown 'wgtFUN' specification: ", wgtFUN)) } else if(!is.function(wgtFUN)) stop("'wgtFUN' must be a function or a string specifying one") ## vt::03.02.2006 - raw.cnp2 and cnp2 are vectors of size 2 and will ## contain the correction factors (concistency and finite sample) ## for the raw and reweighted estimates respectively. Set them initially to 1. ## If use.correction is set to FALSE (default=TRUE), the finite sample correction ## factor will not be used (neither for the raw estimates nor for the reweighted) raw.cnp2 <- rep(1,2) cnp2 <- rep(1,2) ##cat("++++++ Entering ltsReg() ...\n") y <- data.matrix(y) if (!is.numeric(y)) stop("y is not a numeric") if (dim(y)[2] != 1) stop("y is not onedimensional") oneD <- (missing(x) || is.null(x) || NCOL(x) == 0) ## location model - no x if(oneD) { x <- matrix(1, nrow(y), 1) } else { ## x is present if(is.data.frame(x)) x <- data.matrix(x) else if (!is.matrix(x)) x <- matrix(x, length(x), 1, dimnames = list(names(x), deparse(substitute(x)))) } if (nrow(x) != nrow(y)) stop("Number of observations in x and y not equal") na.x <- !is.finite(rowSums(x)) na.y <- !is.finite(y) ok <- !(na.x | na.y) x <- x[ok, , drop = FALSE] y <- y[ok, , drop = FALSE] dx <- dim(x) n <- dx[1] if (n == 0) stop("All observations have missing values!") dimny <- dimnames(y) rownames <- dimny[[1]] yn <- if(!is.null(yname)) yname else if(!is.null(dimny[[2]])) dimny[[2]] has.yn <- !is.null(yn) if(!has.yn) yn <- "Y" storage.mode(y) <- "double" storage.mode(x) <- "double" if (!oneD) { is.const <- function(x) { c1 <- range(x) c1[1] == c1[2] } if (any(apply(x, 2, is.const))) stop("There is at least one constant column. Remove it and set intercept=TRUE") } ##cat("++++++ Prepare: Ready.\n") xn <- (dnx <- dimnames(x))[[2]] xn <- if(!is.null(xn)) xn else if (dx[2] > 1) paste("X", 1:dx[2], sep = "") else if (dx[2]) "X" ## else : p = 0 dimnames(x) <- list(dnx[[1]], xn) # also works if(is.null(dnx)) y <- as.vector(y) if(all(x == 1)) { ## includes 'oneD' and empty x (p = 0) if(qr.out) { warning("'qr.out = TRUE' for univariate location is disregarded") qr.out <- FALSE } h <- h.alpha.n(alpha, n, dx[2]) p <- 1 if (alpha == 1) { scale <- sqrt(drop(cov.wt(as.matrix(y))$cov)) center <- as.vector(mean(y)) ## xbest <- NULL } else { sh <- .fastmcd(as.matrix(y), as.integer(h), nsamp = 0, # (y *is* 1-dim.!) 
nmini = 300, kmini = 5) center <- as.double(sh$initmean) qalpha <- qchisq(h/n, 1) calphainvers <- pgamma(qalpha/2, 1/2 + 1)/(h/n) raw.cnp2[1] <- calpha <- 1/calphainvers raw.cnp2[2] <- correct <- LTScnp2(1, intercept = intercept, n, alpha) if(!use.correction) # do not use finite sample correction factor raw.cnp2[2] <- correct <- 1.0 scale <- sqrt(as.double(sh$initcovariance)) * sqrt(calpha) * correct ## xbest <- sort(as.vector(sh$best)) # fastmcd in the univariate case does not return inbest[] } resid <- y - center ans <- list(method = "Univariate location and scale estimation.", best = NULL, # xbest, coefficients = center, alpha = alpha, quan = h, raw.coefficients = center, raw.resid = resid/scale, raw.weights = rep.int(NA, length(na.y))) if(abs(scale) < 1e-07) { ans$raw.weights[ok] <- weights <- as.numeric(abs(resid) < 1e-07) ans$scale <- ans$raw.scale <- 0 ans$crit <- 0 ans$method <- paste(ans$method, "More than half of the data are equal!",sep="\n") } else { ans$raw.scale <- scale ans$raw.weights[ok] <- weights <- wgtFUN(resid/scale) sum.w <- sum(weights) reweighting <- cov.wt(as.matrix(y), wt = weights) ans$coefficients <- reweighting$center ans$scale <- sqrt(sum.w/(sum.w - 1) * drop(reweighting$cov)) resid <- y - ans$coefficients ans$crit <- sum(sort((y - center)^2, partial = h)[1:h]) if (sum.w != n) { qdelta.rew <- qchisq(sum.w/n, 1) cdeltainvers.rew <- pgamma(qdelta.rew/2, 1/2 + 1)/(sum.w/n) cdelta.rew <- sqrt(1/cdeltainvers.rew) correct.rew <- if(use.correction) LTScnp2.rew(1, intercept = intercept, n, alpha) else 1 cnp2 <- c(cdelta.rew, correct.rew) ans$scale <- ans$scale * cdelta.rew * correct.rew } weights <- wgtFUN(resid/ans$scale) } fitted <- ans$coefficients ans$resid <- resid/ans$scale ans$rsquared <- 0 ans$intercept <- intercept if(has.yn) names(ans$coefficients) <- names(ans$raw.coefficients) <- yn } ## end {all(x == 1)} -- else { ## ------------------ usual non-trivial case --------------------- if(mcd) ## need 'old x' later X <- x if (intercept) { ## intercept must be *last* (<- fortran code) {"uahh!"} x <- cbind(x, "Intercept" = 1) dx <- dim(x) xn <- colnames(x) } p <- dx[2] if (n <= 2 * p) stop("Need more than twice as many observations as variables.") ## VT:: 26.12.2004 ## Reorder the coefficients so that the intercept is at the beginning .. getCoef <- ## simple wrapper (because of above "intercept must be") if(p > 1 && intercept) function(cf) cf[c(p, 1:(p - 1))] else function(cf) cf ans <- list(alpha = alpha, raw.weights = rep.int(NA, length(na.y))) if(alpha == 1) { ## alpha == 1 ----------------------- ## old, suboptimal: z <- lsfit(x, y, intercept = FALSE) z <- lm.fit(x, y) qrx <- z$qr cf <- z$coef names(cf) <- xn ans$raw.coefficients <- getCoef(cf) resid <- z$residuals ans$quan <- h <- n s0 <- sqrt((1/(n - p)) * sum(resid^2)) ##cat("++++++ B - alpha == 1... 
- s0=",s0,"\n") if(abs(s0) < 1e-07) { fitted <- x %*% z$coef ans$raw.weights[ok] <- weights <- as.numeric(abs(resid) <= 1e-07) ans$scale <- ans$raw.scale <- 0 ans$coefficients <- ans$raw.coefficients } else { ans$raw.scale <- s0 ans$raw.resid <- resid / s0 ans$raw.weights[ok] <- weights <- wgtFUN(ans$raw.resid) sum.w <- sum(weights) ## old, suboptimal: z <- lsfit(x, y, wt = weights, intercept = FALSE) z <- lm.wfit(x, y, w = weights) ans$coefficients <- getCoef(z$coef) fitted <- x %*% z$coef ans$scale <- sqrt(sum(weights * resid^2)/(sum.w - 1)) if (sum.w != n) { qn.w <- qnorm((sum.w + n)/(2 * n)) cdelta.rew <- 1/sqrt(1 - (2 * n)/(sum.w/qn.w) * dnorm(qn.w)) ans$scale <- ans$scale * cdelta.rew } ans$resid <- resid/ans$scale weights <- wgtFUN(ans$resid) } names(ans$coefficients) <- getCoef(xn) s1 <- sum(resid^2) ans$crit <- s1 sh <- (if (intercept) y - mean(y) else y) ^ 2 ans$rsquared <- max(0, min(1, 1 - (s1/sh))) ans$method <- "Least Squares Regression." } ## end {alpha == 1} : "classical" else { ## alpha < 1 ----------------------------------------------- coefs <- rep(NA, p) names(coefs) <- xn qrx <- if(qr.out) qr(x) else qr(x)[c("rank", "pivot")] rk <- qrx$rank if (rk < p) stop("x is singular") ## else : h <- h.alpha.n(alpha, n, rk) z <- .fastlts(x, y, h, nsamp, intercept, adjust, trace=as.integer(trace)) if(z$objfct < 0) stop("no valid subsample found in LTS - set 'nsamp' or rather use lmrob.S()") ## vt:: lm.fit.qr == lm.fit(...,method=qr,...) cf <- lm.fit(x[z$inbest, , drop = FALSE], y[z$inbest])$coef if(any(ic <- is.na(cf))) stop(gettextf("NA coefficient (at %s) from \"best\" subset", paste(which(ic), collapse =","))) ans$best <- sort(z$inbest) fitted <- x %*% cf resid <- y - fitted piv <- 1:p coefs[piv] <- cf ## FIXME? why construct 'coefs' so complicatedly? use 'cf' ! ans$raw.coefficients <- getCoef(coefs) ans$quan <- h correct <- if(use.correction) LTScnp2(p, intercept = intercept, n, alpha) else 1 raw.cnp2[2] <- correct s0 <- sqrt(mean(sort(resid^2, partial = h)[1:h])) sh0 <- s0 qn.q <- qnorm((h + n)/ (2 * n)) s0 <- s0 / sqrt(1 - (2 * n)/(h / qn.q) * dnorm(qn.q)) * correct if (abs(s0) < 1e-07) { ans$raw.weights[ok] <- weights <- as.numeric(abs(resid) <= 1e-07) ans$scale <- ans$raw.scale <- 0 ans$coefficients <- ans$raw.coefficients } else { ans$raw.scale <- s0 ans$raw.resid <- resid/ans$raw.scale ans$raw.weights[ok] <- weights <- wgtFUN(resid/s0) sum.w <- sum(weights) ## old, suboptimal: z1 <- lsfit(x, y, wt = weights, intercept = FALSE) z1 <- lm.wfit(x, y, w = weights) ans$coefficients <- getCoef(z1$coef) fitted <- x %*% z1$coef resid <- z1$residuals ans$scale <- sqrt(sum(weights * resid^2)/(sum.w - 1)) if (sum.w == n) { cdelta.rew <- 1 correct.rew <- 1 } else { qn.w <- qnorm((sum.w + n)/(2 * n)) cnp2[1] <- cdelta.rew <- 1 / sqrt(1 - (2 * n)/(sum.w / qn.w) * dnorm(qn.w)) correct.rew <- if (use.correction) ## use finite sample correction LTScnp2.rew(p, intercept = intercept, n, alpha) else 1 cnp2[2] <- correct.rew ans$scale <- ans$scale * cdelta.rew * correct.rew } ans$resid <- resid/ans$scale weights <- wgtFUN(ans$resid) } ## unneeded: names(ans$coefficients) <- names(ans$raw.coefficients) ans$crit <- z$objfct if (intercept) { sh <- .fastmcd(as.matrix(y), as.integer(h), nsamp = 0, # (y *is* 1-dim.!) nmini = 300, kmini = 5) y <- as.vector(y) ## < ?? 
sh <- as.double(sh$adjustcov) iR2 <- (sh0/sh)^2 } else { s1 <- sum(sort(resid^2, partial = h)[1:h]) sh <- sum(sort(y^2, partial = h)[1:h]) iR2 <- s1/sh } ans$rsquared <- if(is.finite(iR2)) max(0, min(1, 1 - iR2)) else 0 attributes(resid) <- attributes(fitted) <- attributes(y) ans$method <- "Least Trimmed Squares Robust Regression." } ## end { alpha < 1 } ans$intercept <- intercept if (abs(s0) < 1e-07) ans$method <- paste(ans$method, "\nAn exact fit was found!") if (mcd) { ## compute robust distances {for diagnostics, eg. rdiag()plot} mcd <- covMcd(X, alpha = alpha, use.correction=use.correction) if ( -determinant(mcd$cov, logarithm = TRUE)$modulus > 50 * p) { ans$RD <- "singularity" } else { ans$RD <- rep.int(NA, length(na.y)) ans$RD[ok] <- sqrt(mahalanobis(X, mcd$center, mcd$cov)) names(ans$RD) <- rownames } } } ## end { nontrivial 'x' } ans$lts.wt <- rep.int(NA, length(na.y)) ans$lts.wt[ok] <- weights ans$residuals <- rep.int(NA, length(na.y)) ans$residuals[ok] <- resid ans$fitted.values <- rep.int(NA, length(na.y)) ans$fitted.values[ok] <- fitted names(ans$fitted.values) <- names(ans$residuals) <- names(ans$lts.wt) <- rownames if(has.yn) { ## non-sense otherwise: names(ans$scale) <- names(ans$raw.scale) <- yn names(ans$rsquared) <- names(ans$crit) <- yn } ans$Y <- y ans$X <- if(p > 1 && intercept) x[, c(p, 1:(p - 1))] else x dimnames(ans$X) <- list(rownames[ok], names(ans$coefficients)) if (qr.out) ans$qr <- qrx ans$raw.cnp2 <- raw.cnp2 ans$cnp2 <- cnp2 class(ans) <- "lts" ans$call <- match.call() ans } ## {ltsReg.default} summary.lts <- function (object, correlation = FALSE, ...) { z <- object r <- z$residuals f <- z$fitted int <- z$intercept w <- as.vector(z$lts.wt) n <- sum(w) Qr <- qr(w * z$X)# 'w * z$X': more efficient than t(t(object$X) %*% diag(w)) p <- Qr$rank p1 <- seq(length = p) ## even for p = 0 rdf <- n - p mss <- if(int) { m <- sum(w * f /sum(w)) sum(w * (f - m)^2) } else sum(w * f^2) rss <- sum(w * r^2) r <- sqrt(w) * r resvar <- rss/rdf R <- if (p > 0) chol2inv(Qr$qr[p1, p1, drop = FALSE]) else matrix(NA_real_,p,p) ## no need to reorder R anymore, since 'X' already has "intercept first" se <- sqrt(diag(R) * resvar) est <- z$coefficients tval <- est/se ans <- c(z[c("call", "terms")], ## not again attr(ans, "call") <- attr(z,"call") list(residuals = r, coefficients = { cbind("Estimate" = est, "Std. Error" = se, "t value" = tval, "Pr(>|t|)" = 2*pt(abs(tval), rdf, lower.tail = FALSE)) }, sigma = sqrt(resvar), df = c(p, rdf, NCOL(Qr$qr)))) df.int <- if(int) 1 else 0 if(p - df.int > 0) { ans$r.squared <- mss/(mss + rss) ans$adj.r.squared <- 1 - (1 - ans$r.squared) * ((n - df.int)/rdf) ans$fstatistic <- c(value = (mss/(p - df.int))/resvar, numdf = p - df.int, dendf = rdf) } else ans$r.squared <- ans$adj.r.squared <- 0 ans$cov.unscaled <- R dimnames(ans$cov.unscaled) <- dimnames(ans$coefficients)[c(1,1)] if (correlation) { ans$correlation <- (R * resvar)/outer(se, se) dimnames(ans$correlation) <- dimnames(ans$cov.unscaled) } class(ans) <- "summary.lts" ans } print.lts <- function (x, digits = max(3, getOption("digits") - 3), ...) { cat("\nCall:\n", deparse(x$call), "\n\n", sep = "") if (length(coef(x))) { cat("Coefficients:\n") print.default(format(coef(x), digits = digits), print.gap = 2, quote = FALSE) cat("\nScale estimate", format(x$scale, digits = digits) ,"\n\n") } else cat("No coefficients\n") invisible(x) } print.summary.lts <- function(x, digits = max(3, getOption("digits") - 3), signif.stars = getOption("show.signif.stars"), ...) ## signif.stars = FALSE, ...) 
## ^^^^^ (since they are not quite correct ?) { cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n\n", sep = "") resid <- x$residuals df <- x$df rdf <- df[2] cat("Residuals (from reweighted LS):\n") ## "cut & paste" from print.summary.lm(): if(rdf > 5) { nam <- c("Min", "1Q", "Median", "3Q", "Max") rq <- if(length(dim(resid)) == 2) structure(apply(t(resid), 1, quantile), dimnames = list(nam, dimnames(resid)[[2]])) else structure(quantile(resid), names = nam) print(rq, digits = digits, ...) } else if(rdf > 0) { print(resid, digits = digits, ...) } else { # rdf == 0 : perfect fit! cat("ALL", df[1], "residuals are 0: no residual degrees of freedom!\n") } if(NROW(x$coefficients)) { if (nsingular <- df[3] - df[1]) cat("\nCoefficients: (", nsingular, " not defined because of singularities)\n", sep = "") else cat("\nCoefficients:\n") printCoefmat(x$coefficients, digits = digits, signif.stars = signif.stars, ...) } else cat("\nNo coefficients\n") cat("\nResidual standard error:", format(signif(x$sigma, digits)), "on", rdf, "degrees of freedom\n") if(!is.null(x$fstatistic)) { cat("Multiple R-Squared:", formatC(x$r.squared, digits = digits)) cat(",\tAdjusted R-squared:",formatC(x$adj.r.squared,digits = digits), "\nF-statistic:", formatC(x$fstatistic[1], digits = digits), "on", x$fstatistic[2], "and", x$fstatistic[3], "DF, p-value:", format.pval(pf(x$fstatistic[1], x$fstatistic[2], x$fstatistic[3], lower.tail = FALSE), digits = digits), "\n") } correl <- x$correlation if(!is.null(correl)) { p <- NCOL(correl) if(p > 1) { cat("\nCorrelation of Coefficients:\n") correl <- format(round(correl, 2), nsmall = 2, digits = digits) correl[!lower.tri(correl)] <- "" print(correl[-1, -p, drop = FALSE], quote = FALSE) } } cat("\n") invisible(x) } ### --- Namespace hidden (but parsed once and for all) : ------------- ##' Compute Finite Sample Correction Factor for the "raw" LTSreg() scale LTScnp2 <- function(p, intercept = intercept, n, alpha) { stopifnot(0.5 <= alpha, alpha <= 1) if (intercept) p <- p - 1 stopifnot(p == as.integer(p), p >= 0) if (p == 0) { fp.500.n <- 1 - exp( 0.262024211897096) / n^ 0.604756680630497 fp.875.n <- 1 - exp(-0.351584646688712) / n^ 1.01646567502486 if ((0.5 <= alpha) && (alpha <= 0.875)) { fp.alpha.n <- fp.500.n + (fp.875.n - fp.500.n)/0.375 * (alpha - 0.5) fp.alpha.n <- sqrt(fp.alpha.n) } if ((0.875 < alpha) && (alpha < 1)) { fp.alpha.n <- fp.875.n + (1 - fp.875.n)/0.125 * (alpha - 0.875) fp.alpha.n <- sqrt(fp.alpha.n) } } else { ## p >= 1 if (p == 1) { if (intercept) { fp.500.n <- 1 - exp( 0.630869217886906 ) / n^ 0.650789250442946 fp.875.n <- 1 - exp( 0.565065391014791 ) / n^ 1.03044199012509 } else { fp.500.n <- 1 - exp(-0.0181777452315321) / n^ 0.697629772271099 fp.875.n <- 1 - exp(-0.310122738776431 ) / n^ 1.06241615923172 } } else { ## --- p > 1 --- if (intercept) { ## "alfaq" "betaq" "qwaarden" coefgqpkwad875 <- matrix(c(-0.458580153984614, 1.12236071104403, 3, -0.267178168108996, 1.1022478781154, 5), ncol = 2) coefeqpkwad500 <- matrix(c(-0.746945886714663, 0.56264937192689, 3, -0.535478048924724, 0.543323462033445, 5), ncol = 2) } else { ## "alfaq" "betaq" "qwaarden" coefgqpkwad875 <- matrix(c(-0.251778730491252, 0.883966931611758, 3, -0.146660023184295, 0.86292940340761, 5), ncol = 2) coefeqpkwad500 <- matrix(c(-0.487338281979106, 0.405511279418594, 3, -0.340762058011, 0.37972360544988, 5), ncol = 2) } y.500 <- log(- coefeqpkwad500[1, ] / p^ coefeqpkwad500[2, ]) y.875 <- log(- coefgqpkwad875[1, ] / p^ coefgqpkwad875[2, ]) A.500 <- cbind(1, - 
log(coefeqpkwad500[3, ] * p^2)) coeffic.500 <- solve(A.500, y.500) A.875 <- cbind(1, - log(coefgqpkwad875[3, ] * p^2)) coeffic.875 <- solve(A.875, y.875) fp.500.n <- 1 - exp(coeffic.500[1]) / n^ coeffic.500[2] fp.875.n <- 1 - exp(coeffic.875[1]) / n^ coeffic.875[2] } if(alpha <= 0.875) fp.alpha.n <- fp.500.n + (fp.875.n - fp.500.n)/0.375 * (alpha - 0.5) else ## 0.875 < alpha <= 1 fp.alpha.n <- fp.875.n + (1 - fp.875.n)/0.125 * (alpha - 0.875) }## else (p >= 1) return(1/fp.alpha.n) } ## LTScnp2 ##' Compute Finite Sample Correction Factor for the REWeighted LTSreg() scale LTScnp2.rew <- function(p, intercept = intercept, n, alpha) { stopifnot(0.5 <= alpha, alpha <= 1) if (intercept) p <- p - 1 stopifnot(p == as.integer(p), p >= 0) if (p == 0) { fp.500.n <- 1 - exp( 1.11098143415027) / n^ 1.5182890270453 fp.875.n <- 1 - exp(-0.66046776772861) / n^ 0.88939595831888 if(alpha <= 0.875) fp.alpha.n <- fp.500.n + (fp.875.n - fp.500.n)/0.375 * (alpha - 0.5) else ## 0.875 < alpha <= 1 fp.alpha.n <- fp.875.n + (1 - fp.875.n)/0.125 * (alpha - 0.875) ## MM: sqrt() {below} is ''different logic'' than below.. (??) fp.alpha.n <- sqrt(fp.alpha.n) } else { if (p == 1) { if (intercept) { fp.500.n <- 1 - exp(1.58609654199605 ) / n^ 1.46340162526468 fp.875.n <- 1 - exp(0.391653958727332) / n^ 1.03167487483316 } else { fp.500.n <- 1 - exp( 0.6329852387657) / n^ 1.40361879788014 fp.875.n <- 1 - exp(-0.642240988645469) / n^ 0.926325452943084 } } else { ## --- p > 1 --- if (intercept) { ## "alfaq" "betaq" "qwaarden" coefqpkwad875 <- matrix(c(-0.474174840843602, 1.39681715704956, 3, -0.276640353112907, 1.42543242287677, 5), ncol = 2) coefqpkwad500 <- matrix(c(-0.773365715932083, 2.02013996406346, 3, -0.337571678986723, 2.02037467454833, 5), ncol = 2) } else { ## "alfaq" "betaq" "qwaarden" coefqpkwad875 <- matrix(c(-0.267522855927958, 1.17559984533974, 3, -0.161200683014406, 1.21675019853961, 5), ncol = 2) coefqpkwad500 <- matrix(c(-0.417574780492848, 1.83958876341367, 3, -0.175753709374146, 1.8313809497999, 5), ncol = 2) } y.500 <- log( - coefqpkwad500[1, ] / p^ coefqpkwad500[2, ]) y.875 <- log( - coefqpkwad875[1, ] / p^ coefqpkwad875[2, ]) A.500 <- cbind(1, - log(coefqpkwad500[3, ] * p^2)) coeffic.500 <- solve(A.500, y.500) A.875 <- cbind(1, - log(coefqpkwad875[3, ] * p^2)) coeffic.875 <- solve(A.875, y.875) fp.500.n <- 1 - exp(coeffic.500[1]) / n^ coeffic.500[2] fp.875.n <- 1 - exp(coeffic.875[1]) / n^ coeffic.875[2] } if(alpha <= 0.875) fp.alpha.n <- fp.500.n + (fp.875.n - fp.500.n)/0.375 * (alpha - 0.5) else ## 0.875 < alpha <= 1 fp.alpha.n <- fp.875.n + (1 - fp.875.n)/0.125 * (alpha - 0.875) }## else (p >= 1) return(1/fp.alpha.n) } ## LTScnp2.rew .fastlts <- function(x, y, h.alph, nsamp, intercept, adjust, trace = 0) { dx <- dim(x) n <- dx[1] p <- dx[2] ## Parameters for partitioning --- *IDENTICAL* to those in ../src/rfltsreg.[fc] kmini <- 5 nmini <- 300 km10 <- 10*kmini nmaxi <- nmini*kmini ## vt::03.02.2006 - added options "best" and "exact" for nsamp if(!missing(nsamp)) { if(trace) cat("non-missing nsamp = ", nsamp, "\n") if(is.numeric(nsamp) && nsamp <= 0) { warning("Invalid number of trials nsamp=",nsamp,"! Using default.\n") nsamp <- -1 } else if(nsamp == "exact" || nsamp == "best") { myk <- p if(n > 2*nmini-1) { warning("'nsamp' options 'best' and 'exact' not allowed for n greater than ", 2*nmini-1,". Will use default.\n") nsamp <- -1 } else { ## FIXME: Add a test case for this ! 
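## (illustrative note:) 'nall' below counts all size-p subsets, choose(n, p);
## e.g. n = 30, p = 3 gives 4060, so all subsets are enumerated, whereas
## n = 50, p = 3 gives 19600 > 5000, so nsamp = "best" is capped at 5000
## (with a warning) while nsamp = "exact" still enumerates all of them
## (with a message that this may take a very long time).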
nall <- choose(n, myk) if(nall > 5000 && nsamp == "best") { nsamp <- 5000 warning("Maximum 5000 subsets allowed for option 'best'.\n", "Computing 5000 subsets of size ",myk," out of ",n,"\n") } else { nsamp <- 0 #all subsamples if(nall > 5000) cat("Computing all ",nall," subsets of size ", myk, " out of ",n, "\n This may take a very long time!\n") } } } if(nsamp == -1) { ## still not defined - set it to the default nsamp <- rrcov.control()$nsamp } } nsamp <- as.integer(nsamp) ## y <- as.matrix(y) ## xy <- matrix(0, ncol = p + 1, nrow = n) xy <- cbind(x, y) storage.mode(xy) <- "double" # {keeping dim(.)} storage.mode(n) <- "integer" storage.mode(p) <- "integer" ; p1 <- p+1L # integer storage.mode(h.alph) <- "integer" ## Allocate temporary storage for the fortran implementation temp <- index1 <- index2 <- integer(n) weights <- aw2 <- aw <- residu <- yy <- nmahad <- ndist <- am <- am2 <- slutn <- double(n) .Fortran(rfltsreg, ## -> ../src/rfltsreg.f xy = xy, n, p, h.alph, # = nhalff nsamp, # = krep inbest = integer(h.alph), objfct = -1.,# double, if remains at -1 : have *nothing* found intercept = as.integer(intercept), intadjust = as.integer(adjust), nvad = as.integer(p1), datt = matrix(0., ncol = p1, nrow = n), weights, temp, index1, index2, aw2, aw, residu, yy, nmahad, ndist, am, am2, slutn, jmiss = integer(p1), ## integer jmiss(nvad) --> p+1 xmed = double(p1), ## double xmed(nvad) --> p+1 xmad = double(p1), ## double xmad(nvad) a = double(p1), ## double a(nvad) da = double(p1), ## double da(nvad) h = matrix(0., p, p1), ## double h(nvar,nvad) p*(p+1) hvec = double(p*(p1)), ## double hvec(nvar*nvad) p*(p+1) c = matrix(0., p, p1), ## double c(nvar,nvad) p*(p+1) cstock = matrix(0., 10, p*p),## double cstock(10,nvar*nvar) 10*p*p mstock = matrix(0., 10, p), ## double mstock(10,nvar) 10*p c1stock =matrix(0., km10, p*p),## double c1stock(km10,nvar*nvar) km10*p*p m1stock =matrix(0., km10, p),## double m1stock(km10,nvar) km10*p dath = matrix(0., nmaxi, p1),## double dath(nmaxi,nvad) nmaxi*(p+1) sd = double(p), ## double sd(nvar) p means = double(p), ## double means(nvar) p bmeans= double(p), ## double means(nvar) p i.trace= as.integer(trace))[ c("inbest", "objfct") ] } robustbase/R/lmrob.MM.R0000644000176200001440000016671514555212477014426 0ustar liggesusers## The "regularized" psi-function names: ## .R: the redescending ones: .Mpsi.R.names <- c('bisquare', 'lqq', 'welsh', 'optimal', 'hampel', 'ggw') ## .M: the monotone ones: .Mpsi.M.names <- c('huber') ## Note: there could be more: non-redescending, non-monotone {such as Cauchy score} .Mpsi.names <- c(R= .Mpsi.R.names, M= .Mpsi.M.names) ##' This allows synonyms as "Tukey" *and* partial matches such as "opt" : .regularize.Mpsi <- function(psi, redescending = TRUE) { stopifnot(is.character(psi), length(psi) == 1) psi <- tolower(psi) psi <- switch(psi, 'tukey'= , 'biweight'= "bisquare", ## otherwise keep psi) nms <- if(redescending) .Mpsi.R.names else .Mpsi.names if (is.na(i <- pmatch(psi, nms))) stop(gettextf("'psi' should be one of %s", pasteK(dQuote(nms))), domain = NA) nms[i] } .Mpsi.tuning.defaults <- list( 'huber' = 1.345 , 'bisquare' = 4.685061 , 'welsh' = 2.11 , 'ggw' = c(-0.5, 1.5, .95, NA) ## (min{slope}, b , eff, bp) , 'lqq' = c(-0.5, 1.5, .95, NA) ## (min{slope}, b/c, eff, bp) , 'optimal' = 1.060158 , 'hampel' = c(1.5, 3.5, 8) * 0.9016085 ## a, b, r ) .Mpsi.tuning.default <- function(psi) { if(is.null(p <- .Mpsi.tuning.defaults[[psi]])) stop(gettextf("invalid 'psi'=%s; possibly use .regularize.Mpsi(%s)", psi, "psi, redescending=FALSE"), 
domain=NA) p } .Mchi.tuning.defaults <- list( ## Here, psi must be redescending! -> 'huber' not possible 'bisquare' = 1.54764 , 'welsh' = 0.5773502 , 'ggw' = c(-0.5, 1.5, NA, .50) ## (min{slope}, b , eff, bp) , 'lqq' = c(-0.5, 1.5, NA, .50) ## (min{slope}, b/c, eff, bp) , 'optimal' = 0.4047 , 'hampel' = c(1.5, 3.5, 8) * 0.2119163 ## a, b, r ) .Mchi.tuning.default <- function(psi) { if(is.null(p <- .Mchi.tuning.defaults[[psi]])) stop(gettextf("invalid 'psi'=%s; possibly use .regularize.Mpsi(%s)", psi, "psi"), domain=NA) p } lmrob.control <- function(setting, seed = NULL, nResample = 500, tuning.chi = NULL, bb = 0.5, tuning.psi = NULL, max.it = 50, groups = 5, n.group = 400, k.fast.s = 1L, best.r.s = 2L, k.max = 200L, maxit.scale = 200L, k.m_s = 20L, ## ^^^^^^^^^^^ had MAX_ITER_FIND_SCALE 200 in ../src/lmrob.c refine.tol= 1e-7, rel.tol = 1e-7, scale.tol = 1e-10, # new, was hardcoded to EPS_SCALE = 1e-10 in C code solve.tol = 1e-7, # hardcoded to TOL_INVERSE 1e-7 in ../src/lmrob.c zero.tol = 1e-10, # new, was hardcoded to EPS_SCALE = 1e-10 in C code trace.lev = 0, # both for init.est. lmrob.S() *and* lmrob.fit mts = 1000L, subsampling = c("nonsingular", "simple"), compute.rd = FALSE, method = 'MM', psi = 'bisquare', numpoints = 10L, cov = NULL, split.type = c("f", "fi", "fii"), fast.s.large.n = 2000, ## only for outlierStats() [2014]: eps.outlier = function(nobs) 0.1 / nobs, eps.x = function(maxx) .Machine$double.eps^(.75)*maxx, compute.outlier.stats = method, warn.limit.reject = 0.5, warn.limit.meanrw = 0.5, ...) { p.ok <- missing(psi) # if(p.ok) psi does not need regularization if (!missing(setting)) { if (setting %in% c('KS2011', 'KS2014')) { if (missing(method)) method <- 'SMDM' psi <- if(p.ok) 'lqq' else .regularize.Mpsi(psi) ; p.ok <- TRUE if (missing(max.it)) max.it <- 500L if (missing(k.max)) k.max <- 2000L if (missing(cov) || is.null(cov)) cov <- '.vcov.w' if (setting == 'KS2014') { if (missing(best.r.s)) best.r.s <- 20L if (missing(k.fast.s)) k.fast.s <- 2L if (missing(nResample)) nResample <- 1000L } } else { warning("Unknown setting '", setting, "'. Using defaults.") } } else { if(p.ok && grepl('D', method)) psi <- 'lqq' if (missing(cov) || is.null(cov)) cov <- if(method %in% c('SM', 'MM')) ".vcov.avar1" else ".vcov.w" } if(!p.ok) psi <- .regularize.Mpsi(psi) subsampling <- match.arg(subsampling) ## in ggw, lqq: if tuning.{psi|chi} are non-standard, calculate coefficients: compute.const <- (psi %in% c('ggw', 'lqq')) if(is.null(tuning.chi)) tuning.chi <- .Mchi.tuning.default(psi) else ## wd like to compute.const *always* -- but slightly changes KS2011/14 !! if(compute.const) tuning.chi <- .psi.const(tuning.chi, psi) if(is.null(tuning.psi)) tuning.psi <- .Mpsi.tuning.default(psi) else ## wd like to compute.const *always* -- but slightly changes KS2011/14 !! 
if(compute.const) tuning.psi <- .psi.const(tuning.psi, psi) `class<-`( c(list(setting = if (missing(setting)) NULL else setting, seed = as.integer(seed), nResample=nResample, psi=psi, tuning.chi=tuning.chi, bb=bb, tuning.psi=tuning.psi, max.it=max.it, groups=groups, n.group=n.group, best.r.s=best.r.s, k.fast.s=k.fast.s, k.max=k.max, maxit.scale=maxit.scale, k.m_s=k.m_s, refine.tol=refine.tol, rel.tol=rel.tol, scale.tol=scale.tol, solve.tol=solve.tol, zero.tol=zero.tol, trace.lev=trace.lev, mts=mts, subsampling=subsampling, compute.rd=compute.rd, method=method, numpoints=numpoints, cov=cov, split.type = match.arg(split.type), fast.s.large.n=fast.s.large.n, eps.outlier = eps.outlier, eps.x = eps.x, compute.outlier.stats = sub("^MM$", "SM", compute.outlier.stats), warn.limit.reject = warn.limit.reject, warn.limit.meanrw = warn.limit.meanrw), list(...)), "lmrobCtrl") } ## base within.list, used in ../NAMESPACE : ## S3method(within, lmrobCtrl, within.list) fails unless it is in *our* namespace: ## R bug fixed in svn rev 84463 - for R 4.4.0 within.list <- within.list print.lmrobCtrl <- function(x, ...) { cat("lmrob.control() --> \"lmrobCtrl\" object with", length(x),"components:\n") str(x, no.list=TRUE, ...) invisible(x) } ##' e.g. update(, maxit.scale = 400) update.lmrobCtrl <- function(object, ...) { stopifnot(is.list(object) ## all updating args must be named: , length(dNms <- ...names()) == ...length() ## all updating names must exist in lmrobCtrl object , dNms %in% names(object) ) dots <- list(...) if("setting" %in% dNms && !identical(object[["setting"]], dots[["setting"]])) stop("update(*, setting = ) is not allowed") do.psi <- (hPsi <- "psi" %in% dNms) && object[["psi"]] != (psi <- dots[["psi"]]) if("method" %in% dNms && object[["method"]] != (method <- dots[["method"]])) { ## new method --> possibly update psi *and* cov if(!do.psi && grepl('D', method)) { psi <- 'lqq' do.psi <- TRUE } do.cov <- any(ic <- dNms == "cov") && object[["cov"]] != (cov <- dots[["cov"]]) if (!do.cov || is.null(cov)) cov <- if(method %in% c('SM', 'MM')) ".vcov.avar1" else ".vcov.w" object[["cov"]] <- cov # and drop from "to do": dNms <- dNms[!ic] dots <- dots[!ic] } if(do.psi) { # new psi --> update compute.const <- (psi %in% c('ggw', 'lqq')) if(!("tuning.chi" %in% dNms)) { # update tuning.chi <- .Mchi.tuning.default(psi) if(compute.const) tuning.chi <- .psi.const(tuning.chi, psi) object[["tuning.chi"]] <- tuning.chi } if(!("tuning.psi" %in% dNms)) { tuning.psi <- .Mpsi.tuning.default(psi) if(compute.const) tuning.psi <- .psi.const(tuning.psi, psi) object[["tuning.psi"]] <- tuning.psi } object[["psi"]] <- psi # and possibly drop from "to do": if(hPsi) { dNms <- dNms[i <- dNms != "psi"] dots <- dots[i] } } object[dNms] <- dots object } ##' Modify a \code{\link{lmrob.control}} list to contain only parameters that ##' were actually used. Currently used for \code{\link{print}()}ing of lmrob ##' objects. ##' ##' @title Minimize lmrob control to non-redundant parts ##' @param cl a list, typically the 'control' component of a ##' \code{\link{lmrob}()} call, or the result of \code{\link{lmrob.control}()}. ##' @param n number of observations == nobs()) .. 
##' @return list: the (typically) modified \code{cl} ##' @author Martin Maechler {from Manuel's original code} lmrob.control.minimal <- function(cl, nobs, oStats = TRUE) { if(!length(cl)) return(cl) shrtM <- sub("^(S|M-S).*", "\\1", cl$method) p.MS <- c("k.m_s", "split.type") p.Lrg.n <- c("groups", "n.group") p.fastS <- c(p.Lrg.n, "refine.tol", "best.r.s", "k.fast.s") ## outlierStats() parts: p.oStat <- c("eps.outlier", "eps.x", "compute.outlier.stats", "warn.limit.reject", "warn.limit.meanrw") if(!oStats) ## e.g., for lmrob.S() but *NOT* for lmrob(*, method="S") cl[p.oStat] <- NULL switch(shrtM, "S" = { # remove all M-S specific control pars cl[p.MS] <- NULL # if large_n is not used, remove corresp control pars if (nobs <= cl$fast.s.large.n) cl[p.Lrg.n] <- NULL }, "M-S" = # remove all fast S specific control pars cl[p.fastS] <- NULL, ## else: do not keep parameters used by initial ests. only cl[c("tuning.chi", "bb", "nResample", p.fastS, "k.max", p.MS, "mts", "subsampling")] <- NULL ) if (!grepl("D", meth <- cl$method)) { cl$numpoints <- NULL if(meth == 'SM') cl$method <- 'MM' } cl } lmrob.fit.MM <- function(x, y, control) ## defunct .Defunct("lmrob.fit(*, control) with control$method = 'SM'") ## .Deprecated() till robustbase 0.92-6 (2016-05-28) lmrob.fit <- function(x, y, control, init=NULL, mf=NULL) { if(!is.matrix(x)) x <- as.matrix(x) if(!missing(mf)) .Defunct("'mf' argument is now defunct") ## old notation: MM -> SM if (control$method == "MM") control$method <- "SM" ## Assumption: if(is.null(init)) method = "S..." else method = "..." ## --------- where "..." consists of letters {"M", "D"} est <- if (is.null(init)) { ## --- initial S estimator if ((M1 <- substr(control$method,1,1)) != 'S') { warning(gettextf("Initial estimator '%s' not supported; using S-estimator instead", M1), domain = NA) substr(control$method,1,1) <- 'S' } init <- lmrob.S(x, y, control = control) 'S' } else { stopifnot(is.list(init)) if (is.null(init$converged)) init$converged <- TRUE if (is.null(init$control)) { init$control <- control M <- init$control$method <- 'l' } else if(!length(M <- init$control$method) || !nzchar(M)) M <- "l" M } stopifnot(is.numeric(init$coef), length(init$coef) == ncol(x), is.numeric(init$scale), init$scale >= 0) if (est != 'S' && control$cov == '.vcov.avar1') { warning( ".vcov.avar1 can only be used when initial estimator is S; using .vcov.w instead") control$cov <- ".vcov.w" } trace.lev <- control$trace.lev if (init$converged) { ## --- loop through the other estimators; build up 'est' string method <- sub(paste0("^", est), '', control$method) if(trace.lev) { cat(sprintf("init converged (remaining method = \"%s\") -> coef=\n", method)) print(init$coef) } for (step in strsplit(method,'')[[1]]) { ## now we have either M or D steps est <- paste0(est, step) init <- switch(step, ## 'control' may differ from 'init$control' when both (init, control) are spec. ## D(AS)-Step D = lmrob..D..fit(init, x, control=control, method = init$control$method), ## M-Step M = lmrob..M..fit(x = x, y = y, obj = init, control=control, method = init$control$method), stop('only M and D are steps supported after "init" computation')) if(trace.lev) { cat(sprintf("step \"%s\" -> new coef=\n", step)); print(init$coef) } ## break if an estimator did not converge if (!init$converged) { warning(gettextf( "%s-step did NOT converge. 
Returning unconverged %s-estimate", step, est), domain = NA) break } } } else { if(trace.lev) { cat(sprintf("init *NOT* converged; init$scale = %g, init$coef:\n ", init$scale)) print(init$coef) } warning("initial estim. 'init' not converged -- will be return()ed basically unchanged") } ## << FIXME? qr(.) should be available from earlier if (is.null(init$qr)) init$qr <- qr(x * sqrt(init$rweights)) if (is.null(init$rank)) init$rank <- init$qr$rank control$method <- est ## ~= original 'method', but only with the steps executed. init$control <- control ## --- covariance estimate init$cov <- if (init$scale == 0) { ## exact fit matrix(0, ncol(x), ncol(x), dimnames=list(colnames(x), colnames(x))) } else if (!init$converged || is.null(x)) { NA } else { if (is.null(control$cov) || control$cov == "none") NA else { lf.cov <- if (!is.function(control$cov)) get(control$cov, mode='function') else control$cov lf.cov(init, x=x) } } df <- NROW(y) - init$rank ## sum(init$r?weights)-init$rank init$degree.freedom <- init$df.residual <- df init }## end{lmrob.fit} globalVariables("r", add=TRUE) ## below and in other lmrob.E() expressions .vcov.w <- function(obj, x=obj$x, complete = FALSE, # <- differing from vcov.lmrob()s default scale=obj$scale, cov.hubercorr=ctrl$cov.hubercorr, cov.dfcorr=ctrl$cov.dfcorr, cov.resid=ctrl$cov.resid, cov.corrfact=ctrl$cov.corrfact, cov.xwx=ctrl$cov.xwx) { ## set defaults ctrl <- obj$control if (is.null(cov.hubercorr)) cov.hubercorr <- !grepl('D', ctrl$method) else if (!is.logical(cov.hubercorr)) stop(':.vcov.w: cov.hubercorr must be logical (or NULL)') valid.corrfact <- c('tau', 'empirical', 'asympt', 'hybrid', 'tauold') if (is.null(cov.corrfact)) { cov.corrfact <- if (cov.hubercorr) 'empirical' else 'tau' } else if(length(cov.corrfact) != 1 || is.na(match(cov.corrfact, valid.corrfact))) stop(":.vcov.w: cov.corrfact must be one of ", pasteK(dQuote(valid.corrfact))) valid.dfcorr <- c("mean", "none", "mn.vc", "varc", "mn.df") ## old: cov.dfcorr = -1 | 0 | 1 | 2 | 3 if (is.null(cov.dfcorr)) { cov.dfcorr <- if (cov.hubercorr || cov.corrfact %in% c('tau', 'hybrid')) "mn.vc" else "mean" } else if(length(cov.dfcorr) != 1 || is.na(match(cov.dfcorr, valid.dfcorr))) stop(":.vcov.w: cov.dfcorr must be one of ", pasteK(dQuote(valid.dfcorr))) valid.cov.resid <- c('final', 'initial', 'trick') if (is.null(cov.resid)) cov.resid <- 'final' ## do warn only for *specified* cov.resid: else if (cov.resid == 'final' && (class(obj)[1] == 'lmrob.S')) warning("ignoring cov.resid == 'final' since est != final") else if (length(cov.resid) != 1L || is.na(match(cov.resid, valid.cov.resid))) stop("cov.resid must be one of ", pasteK(dQuote(valid.cov.resid))) if (is.null(cov.xwx)) cov.xwx <- TRUE # == _THE_ typical case: not part of 'obj$control' else if (!is.logical(cov.xwx)) stop(':.vcov.w: cov.xwx must be logical (or NULL)') if (is.null(x)) x <- model.matrix(obj) ## set psi and c.psi psi <- ctrl$psi if (is.null(psi)) stop('parameter psi is not defined') c.psi <- if (cov.resid == 'initial') ctrl$tuning.chi else if (ctrl$method %in% c('S', 'SD')) ctrl$tuning.chi else ctrl$tuning.psi if (!is.numeric(c.psi)) stop("parameter 'tuning.psi' is not numeric") ## MM: lmrob(..., method = "S") triggers this wrongly if (is.null(scale)) { warning(":.vcov.w: scale missing, using D scale") scale <- lmrob..D..fit(obj)$scale } n <- NROW(x) ## --- calculations: matrix part ## weighted xtx.inv matrix w <- if (cov.xwx) obj$rweights else rep(1,n) ## use qr-decomposition from lm.wfit (this already includes the robustness weights) 
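## A minimal sketch of the matrix part computed below (not run; 'X' and 'w' are
## placeholders for a design matrix and a robustness-weight vector, not objects
## defined at this point):
##   wqr  <- qr(X * sqrt(w))               # weighted QR, as lm.wfit() would produce
##   cinv <- tcrossprod(solve(qr.R(wqr)))  # = (X' diag(w) X)^{-1}
## 'cinv' is later rescaled by scale^2, the sample-size correction 'sscorr' and the
## psi-dependent correction factor 'corrfact'.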
## update qr decomposition if it is missing or we don't want the robustness weights if (!is.qr(obj$qr) || !cov.xwx) obj$qr <- qr(x * sqrt(w)) p <- if (is.null(obj$rank)) obj$qr$rank else obj$rank cinv <- if(is.qr(obj$qr)) tryCatch(tcrossprod(solve(qr.R(obj$qr))), error = function(e)e) if(inherits(cinv, 'error')) cinv <- matrix(NA,p,p) ## --- calculation: correction factor if (cov.corrfact == 'asympt') { ## asympt correction factor if(cov.hubercorr) warning("option 'cov.hubercorr' is ignored for cov.corrfact = \"asympt\"") ## precalculated default values if applicable corrfact <- if (psi == 'ggw') { if ( isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.95, NA)))) 1.052619 else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.95, NA)))) 1.0525888644 else if (isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.85, NA)))) 1.176479 else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.85, NA)))) 1.176464 else lmrob.E(psi(r)^2, ctrl) / lmrob.E(r*psi(r), ctrl)^2 ## MK: using r*psi(r) instead of psi'(r) is much more accurate ## when using Gauss-Hermite quadrature (= default in lmrob.E()) ## but integrate(.) is more accurate } else if (isTRUE(all.equal(c.psi, .Mpsi.tuning.default(psi)))) { switch(psi, bisquare = 1.0526317574, welsh = 1.0526704649, optimal = 1.0526419204, hampel = 1.0526016980, lqq = 1.0526365291, stop(':.vcov.w: unsupported psi function')) } else lmrob.E(psi(r)^2, ctrl) / lmrob.E(r*psi(r), ctrl)^2 ## r*psi(r): see above varcorr <- 1 } else { ## empirical, approx or hybrid correction factor rstand <- if (cov.resid == 'initial') { ## if the last estimator was a D or T estimator ## then use obj$init$init otherwise use obj$init ## that way for SMD we use the S residuals (and S scale) ## and for SMDM we use the M residuals (and D scale) lobj <- if (grepl('[DT]$',ctrl$method)) obj$init$init else obj$init resid(lobj) / lobj$scale } else if (cov.resid == 'trick') { ## residuals are in fact from earlier estimator, use its scale to standardize them obj$init$resid / obj$init$scale } else obj$resid / scale tau <- if (cov.corrfact %in% c('tau', 'hybrid', 'tauold')) { ## added hybrid here if (!is.null(obj$tau)) obj$tau else if (!is.null(obj$init$tau)) obj$init$tau else stop("(tau / hybrid / tauold): tau not found in 'obj'") } else rep(1,n) rstand <- rstand / tau r.psi <- Mpsi(rstand, c.psi, psi) r.psipr <- Mpsi(rstand, c.psi, psi, deriv = 1) if (any(is.na(r.psipr))) warning(":.vcov.w: Caution. Some psi'() are NA") ## mpp = E[ psi'(.) 
] mpp2 = mpp^2 mpp2 <- (mpp <- mean(r.psipr, na.rm=TRUE))^2 ## Huber's correction hcorr <- if (cov.hubercorr) { vpp <- sum((r.psipr - mpp)^2) / n # vpp := var[psi.prime] ## ~= var(r.psipr, na.rm=TRUE) ~= Var[ psi'( e_i / (sigma * tau_i) ) ] (1 + p/n * vpp/mpp2)^2 } else 1 ## sample size correction for var(r.psi^2) ## use tau if 'tau' correction factor, but only if it is available varcorr <- if (cov.corrfact == 'tau' && any(tau != 1)) 1 / mean(tau^2) else n / (n - p) ## changed from 1 / mean(tau) ## if hybrid: replace B^2 (= mpp2) by asymptotic value if (cov.corrfact == 'hybrid') { mpp2 <- if (psi == 'ggw') { if ( isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.95, NA)))) 0.7598857 else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.95, NA)))) 0.6817983 else if (isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.85, NA)))) 0.4811596 else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.85, NA)))) 0.411581 else lmrob.E(r*psi(r), ctrl)^2 ## more accurate than psi'(r) } else if (isTRUE(all.equal(c.psi, .Mpsi.tuning.default(psi)))) switch(psi, bisquare = 0.5742327, welsh = 0.5445068, optimal = 0.8598825, hampel = 0.6775217, lqq = 0.6883393, stop(':.vcov.w: unsupported psi for "hybrid" correction factor')) else lmrob.E(r*psi(r), ctrl)^2 ## more accurate than psi'(r) } ## A / B^2 * Huber.corr : corrfact <- mean({ if (cov.corrfact == 'tauold') 1 else tau^2 } * r.psi^2)/mpp2 * hcorr } ## simple sample size correction sscorr <- switch(cov.dfcorr, # old: codes in {-1, 0, 1,2,3 } "mean" = mean(w), # -1 "mn.vc" = mean(w) * varcorr, # 1 "none" = 1, # 0 "varc" = varcorr, # 2 "mn.df" = mean(w)^2 / (1 - p / sum(w)), # 3 stop("invalid 'cov.dfcorr': ", cov.dfcorr)) structure(scale^2 * sscorr * corrfact * .vcov.aliased(aliased = is.na(coef(obj)), vc=cinv, complete=complete), ## scale^2 * a/b2 * Huber's correction * Cinv -- with attributes weights = w, scale = scale, scorr = sscorr, corrfact = corrfact) }## end{.vcov.w} .vcov.avar1 <- function(obj, x=obj$x, complete = FALSE, # <- differing from vcov.lmrob()s default posdef.meth = c("posdefify", "orig")) { ## was .vcov.MM stopifnot(is.list(ctrl <- obj$control)) ## works only for MM & SM estimates: if (!is.null(ctrl$method) && !ctrl$method %in% c('SM', 'MM')) stop('.vcov.avar1() supports only SM or MM estimates') ## set psi and chi constants psi <- chi <- ctrl$psi if (is.null(psi)) stop('parameter psi is not defined') stopifnot(is.numeric(c.chi <- ctrl$tuning.chi), is.numeric(c.psi <- ctrl$tuning.psi)) ## need (r0, r, scale, x, c.psi,c.chi, bb) r0 <- obj$init$resid r <- resid(obj) scale <- obj$scale if (is.null(x)) x <- model.matrix(obj) bb <- 1/2 ## this is always 1/2 for S estimates by convention ### --- start code from .vcov.MM --- ## scaled residuals n <- length(r) stopifnot(is.matrix(x), n == nrow(x)) if(n != length(r0)) stop("initial estimate residuals length differs from final ones. 
Typically must refit w/ lmrob()") r.s <- r / scale # final scaled residuals r0.s <- r0 / scale # initial scaled residuals w <- Mpsi(r.s, cc = c.psi, psi = psi, deriv = 1) w0 <- Mchi(r0.s, cc = c.chi, psi = chi, deriv = 1) p <- ncol(x) # possibly p > rankMatrix(x) in singular/aliased case ## 'complete' handling for singular/aliased case if(is.na(complete)) { ## previous default: work with full rank-deficient 'x' } else { aliased <- is.na(coef(obj)) if(any(aliased)) x <- x[, !aliased] if(isTRUE(complete)) { ## nothing } else { ## isFALSE(complete) : p <- obj$rank } } ## FIXME for multivariate y : x.wx <- crossprod(x, x * w) if(inherits(A <- tryCatch(solve(x.wx) * scale, error=function(e)e), "error")) { warning("X'WX is almost singular. Consider using cov = \".vcov.w\"") A <- tryCatch(solve(x.wx, tol = 0) * scale, error=function(e)e) if(inherits(A, "error")) stop("X'WX is singular. Rather use cov = \".vcov.w\"") } a <- A %*% (crossprod(x, w * r.s) / mean(w0 * r0.s)) w <- Mpsi( r.s, cc = c.psi, psi = psi) ## 3) now the standard part (w, x, r0.s, n, A,a, c.chi, bb) w0 <- Mchi(r0.s, cc = c.chi, psi = chi) # rho() Xww <- crossprod(x, w * w0) u1 <- A %*% crossprod(x, x * w^2) %*% (n * A) u2 <- a %*% crossprod(Xww, A) u3 <- A %*% tcrossprod(Xww, a) u4 <- mean(w0^2 - bb^2) * tcrossprod(a) ## list(cov = matrix((u1 - u2 - u3 + u4)/n, p, p), ## wt = w / r.s, a = a) ### --- end code from .vcov.MM --- ret <- (u1 - u2 - u3 + u4)/n ## this might not be a positive definite matrix ## check eigenvalues (symmetric: ensure non-complex) ev <- eigen(ret, symmetric = TRUE) if (any(neg.ev <- ev$values < 0)) { ## there's a problem posdef.meth <- match.arg(posdef.meth) if(ctrl$trace.lev) message("fixing ", sum(neg.ev), " negative eigen([",p,"])values") Q <- ev$vectors switch(posdef.meth, "orig" = { ## remove negative eigenvalue: ## transform covariance matrix into eigenbasis levinv <- solve(Q) cov.eb <- levinv %*% ret %*% Q ## set vectors corresponding to negative ev to zero cov.eb[, neg.ev] <- 0 ## cov.eb[cov.eb < 1e-16] <- 0 ## and transform back ret <- Q %*% cov.eb %*% levinv }, "posdefify" = { ## Instead of using require("sfsmisc") and ## ret <- posdefify(ret, "someEVadd",eigen.m = ev,eps.ev = 0) lam <- ev$values lam[neg.ev] <- 0 o.diag <- diag(ret)# original one - for rescaling dn <- dimnames(ret)# to preserve ret <- Q %*% (lam * t(Q)) ## == Q %*% diag(lam) %*% t(Q) ## rescale to the original diagonal values ## D <- sqrt(o.diag/diag(ret)) where they are >= 0 : if(any(o.diag < 0)) warning(".vcov.avar1: negative diag() fixed up; consider 'cov=\".vcov.w.\"' instead") D <- sqrt(pmax.int(0, o.diag)/diag(ret)) ret <- D * ret * rep(D, each = nrow(Q)) ## == diag(D) %*% ret %*% diag(D) if(!is.null(dn)) dimnames(ret) <- dn }, stop("invalid 'posdef.meth': ", posdef.meth)) } if(isTRUE(complete)) ret <- .vcov.aliased(aliased, ret) attr(ret,"weights") <- w / r.s if(!any(neg.ev)) attr(ret,"eigen") <- ev ret }## end{.vcov.avar1} lmrob..M..fit <- function (x = obj$x, y = obj$y, beta.initial = obj$coef, scale = obj$scale, control = obj$control, obj, mf, method = obj$control$method) #<- also when 'control' is not obj$control { c.psi <- .psi.conv.cc(control$psi, control$tuning.psi) ipsi <- .psi2ipsi(control$psi) stopifnot(is.matrix(x)) if(!missing(mf)) .Defunct("'mf' argument is now defunct") n <- nrow(x) p <- ncol(x) if (is.null(y) && !is.null(obj$model)) y <- model.response(obj$model, "numeric") stopifnot(length(y) == n, length(c.psi) > 0, c.psi >= 0, scale >= 0, length(beta.initial) == p) trace.lev <- 
as.integer(control$trace.lev) ret <- .C(R_lmrob_MM, x = as.double(x), y = as.double(y), n = as.integer(n), p = as.integer(p), beta.initial = as.double(beta.initial), scale = as.double(scale), coefficients = double(p), residuals = double(n), iter = as.integer(control$max.it), c.psi = as.double(c.psi), ipsi = as.integer(ipsi), loss = double(1), rel.tol = as.double(control$rel.tol), converged = logical(1) , trace.lev = trace.lev )[c("coefficients", "scale", "residuals", "loss", "converged", "iter")] ## FIXME?: Should rather warn *here* in case of non-convergence ret$fitted.values <- drop(x %*% ret$coefficients) names(ret$coefficients) <- colnames(x) names(ret$residuals) <- rownames(x) ret$rweights <- lmrob.rweights(ret$residuals, scale, control$tuning.psi, control$psi) ret$control <- control if (!missing(obj)) { ## "copy" from 'obj' to the return value 'ret' : if(trace.lev) cat("lmrob..MM..fit(*, obj) --> updating .. ") if (!grepl('M$', method)) { ## update method if it's not there already method <- paste0(method, 'M') } if (!is.null(obj$call)) { ret$call <- obj$call ret$call$method <- method } if (method %in% c('SM', 'MM')) { ret$init.S <- obj } else { ret$init <- obj[intersect(names(obj), c("coefficients", "scale", "residuals", "loss", "converged", "iter", "rweights", "fitted.values", "control", "ostats", "init.S", "init", "kappa", "tau"))] class(ret$init) <- 'lmrob' ret <- c(ret, obj[intersect(names(obj), c("df.residual", "degree.freedom", "xlevels", "terms", "model", "x", "y", "na.action", "contrasts", "MD"))]) } ret$qr <- qr(x * sqrt(ret$rweights)) ret$rank <- ret$qr$rank if(trace.lev) cat(" qr(x * rweights) -> rank=", ret$rank) ## if there is a covariance matrix estimate available in obj ## update it, if possible, else replace it by the default .vcov.w if (!is.null(obj$cov)) { if (!method %in% c('SM', 'MM') && ret$control$cov == '.vcov.avar1') ret$control$cov <- '.vcov.w' lf.cov <- if (!is.function(ret$control$cov)) get(ret$control$cov, mode='function') else ret$control$cov if(trace.lev) cat(", cov() matrix ") ret$cov <- lf.cov(ret, x=x) } if (!is.null(obj$assign)) ret$assign <- obj$assign if (method %in% control$compute.outlier.stats) { ## only true for last step in lmrob.fit() if(trace.lev) cat(", outlierStats() ") ret$ostats <- outlierStats(ret, x, control) } if(trace.lev) cat("\n") } class(ret) <- "lmrob" ret }## --- lmrob..M..fit ##' Compute S-estimator for linear model -- using "fast S" algorithm --> ../man/lmrob.S.Rd lmrob.S <- function (x, y, control, trace.lev = control$trace.lev, only.scale = FALSE, mf) { if (!is.matrix(x)) x <- as.matrix(x) n <- nrow(x) p <- ncol(x) if(!missing(mf)) .Defunct("'mf' argument is now defunct") nResample <- if(only.scale) 0L else as.integer(control$nResample) groups <- as.integer(control$groups) nGr <- as.integer(control$n.group) large_n <- (n > control$fast.s.large.n) if (large_n) { if (nGr <= p) stop("'control$n.group' must be larger than 'p' for 'large_n' algorithm") if (nGr * groups > n) # {NB: integer overflow here *also* signals error} stop("'groups * n.group' must be smaller than 'n' for 'large_n' algorithm") if (nGr <= p + 10) ## FIXME (be smarter ..) warning("'control$n.group' is not much larger than 'p', probably too small") } if (length(seed <- control$seed) > 0) { # not by default if(length(seed) < 3L || seed[1L] < 100L) stop("invalid 'seed'. 
Must be a valid .Random.seed !") if(!is.null(seed.keep <- get0(".Random.seed", envir = .GlobalEnv, inherits = FALSE))) on.exit(assign(".Random.seed", seed.keep, envir = .GlobalEnv)) assign(".Random.seed", seed, envir = .GlobalEnv) if(trace.lev) { cat("Assigning .Random.seed to .GlobalEnv: "); str(seed) stopifnot(identical(seed, globalenv()$.Random.seed)) } } bb <- as.double(control$bb) c.chi <- .psi.conv.cc(control$psi, control$tuning.chi) best.r <- as.integer(control$best.r.s) stopifnot(length(c.chi) > 0, c.chi >= 0, length(bb) > 0, length(best.r) > 0, best.r >= 1, length(y) == n, n > 0) b <- .C(R_lmrob_S, # --> ../src/lmrob.c x = as.double(x), y = as.double(y), n = as.integer(n), p = as.integer(p), nResample = nResample, scale = if(only.scale) mad(y, center=0) # initial scale else double(1), coefficients = double(p), as.double(c.chi), .psi2ipsi(control$psi), bb, best_r = best.r, groups = groups, n.group = nGr, k.fast.s = as.integer(control$k.fast.s), k.iter = as.integer(control$k.max), maxit.scale = as.integer(control$maxit.scale), refine.tol= as.double(control$refine.tol), inv.tol = as.double(control$solve.tol), scale.tol = as.double(control$scale.tol), zero.tol = as.double(control$zero.tol), converged = logical(1), trace.lev = as.integer(trace.lev), mts = as.integer(control$mts), ss = .convSs(control$subsampling), fast.s.large.n = as.integer(if (large_n) control$fast.s.large.n else n+1L) ## avoids the use of NAOK = TRUE for control$fast.s.large.n == Inf )[if(only.scale) "scale" else c("y", # the residuals (on return) "coefficients", "scale", "k.iter", "converged")] scale <- b$scale if (scale < 0) stop("C function R_lmrob_S() exited prematurely") if (scale == 0) warning("S-estimated scale == 0: Probably exact fit; check your data") if(trace.lev) if(only.scale) cat(sprintf("lmrob.S(): scale = %g\n", scale)) else { cat(sprintf("lmrob.S(): scale = %g; coeff.=\n", scale)); print(b$coefficients) } if(only.scale) return(scale) ## --- ----- b$residuals <- setNames(b$y, rownames(x)) b$fitted.values <- y - b$y # y = fitted + res b$y <- NULL # rm'it names(b$coefficients) <- colnames(x) ## robustness weights b$rweights <- lmrob.rweights(b$residuals, scale, control$tuning.chi, control$psi) ## set method argument in control control$method <- 'S' b$control <- control ## add call if called from toplevel if (identical(parent.frame(), .GlobalEnv)) b$call <- match.call() class(b) <- 'lmrob.S' if ("S" %in% control$compute.outlier.stats)# not by default b$ostats <- outlierStats(b, x, control) b }## --- lmrob.S() lmrob..D..fit <- function(obj, x=obj$x, control = obj$control, mf, method = obj$control$method) #<- also when 'control' is not obj$control { if (is.null(control)) stop('lmrob..D..fit: control is missing') if (!obj$converged) stop('lmrob..D..fit: prior estimator did not converge, stopping') if(!missing(mf)) .Defunct("'mf' argument is now defunct") if (is.null(x)) x <- model.matrix(obj) w <- obj$rweights if (is.null(w)) stop('lmrob..D..fit: robustness weights undefined') if (is.null(obj$residuals)) stop('lmrob..D..fit: residuals undefined') r <- obj$residuals psi <- control$psi if (is.null(psi)) stop('lmrob..D..fit: parameter psi is not defined') c.psi <- .psi.conv.cc(psi, if (method %in% c('S', 'SD')) control$tuning.chi else control$tuning.psi) if (!is.numeric(c.psi)) stop('lmrob..D..fit: parameter tuning.psi is not numeric') obj$init <- obj[names(obj)[na.omit(match( c("coefficients","scale", "residuals", "loss", "converged", "iter", "ostats", "rweights", "fitted.values", "control", "init.S", 
"init"), names(obj)))]] obj$init.S <- NULL if (is.null(obj$kappa)) obj$kappa <- lmrob.kappa(obj, control) kappa <- obj$kappa if (is.null(obj$tau)) obj$tau <- lmrob.tau(obj, x, control) tau <- obj$tau ## get starting value for root search (to keep breakdown point!) scale.1 <- sqrt(sum(w * r^2) / kappa / sum(tau^2*w)) ret <- .C(R_find_D_scale, r = as.double(r), kappa = as.double(kappa), tau = as.double(tau), length = as.integer(length(r)), scale = as.double(scale.1), c = as.double(c.psi), ipsi = .psi2ipsi(psi), type = 3L, ## dt1 as only remaining option rel.tol = as.double(control$rel.tol), k.max = as.integer(control$k.max), converged = logical(1))[c("converged", "scale")] obj$scale <- if(ret$converged) ret$scale else NA obj$converged <- ret$converged if (!grepl('D$', method)) { ## append "D" to method if it's not there already method <- method if (method == 'MM') method <- 'SM' method <- paste0(method, 'D') } ## update call if (!is.null(obj$call)) obj$call$method <- method obj$control <- control class(obj) <- "lmrob" ## if there is a covariance matrix estimate available in obj ## update it, if possible, else replace it by the default ## .vcov.w if (!is.null(obj$cov)) { if (control$cov == '.vcov.avar1') control$cov <- '.vcov.w' lf.cov <- if (!is.function(control$cov)) get(control$cov, mode='function') else control$cov obj$cov <- lf.cov(obj, x=x) } if (method %in% control$compute.outlier.stats) obj$ostats <- outlierStats(obj, x, control) obj }## --- lmrob..D..fit globalVariables(c("psi", "wgt", "r"), add=TRUE) ## <- lmrob.E( ) lmrob.kappa <- function(obj, control = obj$control) { if (is.null(control)) stop('control is missing') if (control$method %in% c('S', 'SD')) control$tuning.psi <- control$tuning.chi fun.min <- function(kappa) lmrob.E(psi(r)*r - kappa*wgt(r), control = control) uniroot(fun.min, c(0.1, 1))$root } ## "FIXME" How to get \hat{tau} for a simple *M* estimate here ?? 
## lmrob.tau() is used in lmrob..D..fit() lmrob.tau <- function(obj, x=obj$x, control = obj$control, h, fast = TRUE) { if(is.null(control)) stop("'control' is missing") if(missing(h)) h <- if (is.null(obj$qr)) .lmrob.hat(x, obj$rweights) else .lmrob.hat(wqr = obj$qr) ## speed up: use approximation if possible if (fast && !control$method %in% c('S', 'SD')) { c.psi <- control$tuning.psi tfact <- tcorr <- NA switch(control$psi, optimal = if (isTRUE(all.equal(c.psi, 1.060158))) { tfact <- 0.94735878 tcorr <- -0.09444537 }, bisquare = if (isTRUE(all.equal(c.psi, 4.685061))) { tfact <- 0.9473684 tcorr <- -0.0900833 }, welsh = if (isTRUE(all.equal(c.psi, 2.11))) { tfact <- 0.94732953 tcorr <- -0.07569506 }, ggw = if (isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.95, NA)))) { tfact <- 0.9473787 tcorr <- -0.1143846 } else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.95, NA)))) { tfact <- 0.94741036 tcorr <- -0.08424648 }, lqq = if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.95, NA)))) { tfact <- 0.94736359 tcorr <- -0.08594805 }, hampel = if (isTRUE(all.equal(c.psi, c(1.35241275, 3.15562975, 7.212868)))) { tfact <- 0.94739770 tcorr <- -0.04103958 }, {}) if (!is.na(tfact)) return(sqrt(1 - tfact*h) * (tcorr*h + 1)) } ## else "non-fast" -- need to compute the integrals : ## kappa kappa <- if(is.null(obj$kappa)) lmrob.kappa(obj, control) else obj$kappa ## local variables ## n <- length(h) ## set psi and cpsi psi <- control$psi if (is.null(psi)) stop('parameter psi is not defined') cpsi <- if (control$method %in% c('S', 'SD')) control$tuning.chi else control$tuning.psi cpsi <- .psi.conv.cc(psi, cpsi)# has its test ipsi <- .psi2ipsi(psi) ## constant for stderr of u_{-i} part and other constants inta <- function(r) .Mpsi(r, cpsi, ipsi)^2 * dnorm(r) intb <- function(r) .Mpsi(r, cpsi, ipsi, deriv = 1) * dnorm(r) ## intc <- function(r) .Mpsi(r, cpsi, ipsi) * r * dnorm(r) # changed from psi/e to psi*e ta <- integrate(inta, -Inf,Inf)$value tb <- integrate(intb, -Inf,Inf)$value ## tE <- integrate(intc, -Inf,Inf)$value ## calculate tau for unique h hu <- unique(h) nu <- length(hu) ## Initialize tau vector tau <- numeric(length=nu) tc <- ta/tb^2 ## --- Gauss-Hermite integration gh <- ghq(control$numpoints) ghz <- gh$nodes ghw <- gh$weights ## Calulate each tau_i for (i in 1:nu) { ## stderr of u_{-i} part s <- sqrt(tc*(hu[i]-hu[i]^2)) tc2 <- hu[i]/tb ## function to be integrated fun <- function(w, v, sigma.i) { t <- (v - tc2*.Mpsi(v, cpsi, ipsi) + w*s)/sigma.i psi.t <- .Mpsi(t, cpsi, ipsi) (psi.t*t - kappa*psi.t/t) * dnorm(v)*dnorm(w) } ## integrate over w wint <- function(v, sigma.i) { ## sapply(v,function(v.j) integrate(fun,-Inf,Inf,v.j,sigma.i)$value) sapply(v, function(v.j) sum(fun(ghz, v.j, sigma.i)*ghw)) } ## integrate over v vint <- function(sigma.i) { ## integrate(wint,-Inf,Inf,sigma.i)$value sum(wint(ghz, sigma.i)*ghw) } ## find tau tau[i] <- uniroot(vint, c(if (hu[i] < 0.9) 3/20 else 1/16, 1.1))$root } tau[match(h, hu)] } lmrob.tau.fast.coefs <- function(cc, psi) { ## function that calculates the coefficients for 'fast' mode of lmrob.tau ctrl <- lmrob.control(tuning.psi = cc, psi = psi) levs <- seq(0, 0.8, length.out = 80) ## calculate taus taus <- lmrob.tau(list(), control=ctrl, h=levs, fast=FALSE) ## calculate asymptotic approximation of taus ta <- lmrob.E(psi(r)^2, ctrl, use.integrate = TRUE) tb <- lmrob.E(psi(r, 1), ctrl, use.integrate = TRUE) tfact <- 2 - ta/tb^2 taus.0 <- sqrt(1 - tfact * levs) ## calculate correction factor ### FIXME: use lmrob.fit() directly : tcorr <- coef(lmrob(taus / taus.0 - 1 ~ levs - 1)) 
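## ('tcorr' above is the robustly fitted slope of the relative deviation
##  taus/taus.0 - 1 as a function of leverage; the "fast" branch of lmrob.tau()
##  then uses  sqrt(1 - tfact*h) * (tcorr*h + 1)  with these two constants.)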
c(tfact = tfact, tcorr = tcorr) } lmrob.hatmatrix <- function(x, w = rep(1, NROW(x)), wqr = qr(sqrt(w) * x), names = FALSE) { H <- tcrossprod(qr.qy(wqr, diag(1, NROW(x), x$rank))) if(names && !is.null(rnms <- dimnames(wqr$qr)[[1L]])) dimnames(H) <- list(rnms,rnms) H } .lmrob.hat <- function(x, w = rep(1, NROW(x)), wqr = qr(sqrt(w) * x), names = TRUE) { if (missing(wqr) && !is.matrix(x)) x <- as.matrix(x) ## Faster than computing the whole hat matrix, and then diag(.) : ## == diag(lmrob.hatmatrix(x, w, ...)) h <- pmin(1, rowSums(qr.qy(wqr, diag(1, NROW(wqr$qr), wqr$rank))^2)) if(names && !is.null(rnms <- dimnames(wqr$qr)[[1L]])) names(h) <- rnms h } hatvalues.lmrob <- function(model, ...) { if (is.null(wqr <- model$qr)) .lmrob.hat(model$x, model$rweights) else .lmrob.hat(wqr = wqr) } ##' psi |--> ipsi \in \{0,1,...6} : integer codes used in C .psi2ipsi <- function(psi) { psi <- .regularize.Mpsi(psi, redescending=FALSE) i <- match(psi, c( 'huber', 'bisquare', 'welsh', 'optimal', ## 0 1 2 3 'hampel', 'ggw', 'lqq' ## 4 5 6 )) if(is.na(i)) stop("internal logic error in psi() function name: ", psi, " Please report!") i - 1L } ##' Given psi() fn (as string), possibly convert the tuning-constant vector cc ##' such that it "fits" to psi(). ##' ##' @param psi a string such as \code{"lqq"}. ##' @param cc numeric tuning-constant vector, for "ggw" and "lqq", ideally ##' with an \code{\link{attr}}ibute \code{"constants"} as from ##' \code{\link{lmrob.control}(.)$tuning.psi} or from ##' \code{\link{.psi.const}(psi, *)}. .psi.conv.cc <- function(psi, cc) { if (!is.character(psi) || length(psi) != 1) stop("argument 'psi' must be a string (denoting a psi function)") if(!is.numeric(cc)) stop("tuning constant 'cc' is not numeric") ## "FIXME": For (ggw, lqq) this is much related to .psi.const() below switch(tolower(psi), 'ggw' = { ## Input: 4 parameters, (minimal slope, b, efficiency, breakdown point) _or_ c(0, a,b,c, m.rho) ## Output 'k': either k in {1:6} or k = c(0, k[2:5]) ## prespecified 6 cases all treated in C ( ../src/lmrob.c ) via these codes: if ( isTRUE(all.equal(cc, c(-.5, 1 , 0.95, NA)))) return(1) else if (isTRUE(all.equal(cc, c(-.5, 1 , 0.85, NA)))) return(2) else if (isTRUE(all.equal(cc, c(-.5, 1. , NA, 0.5)))) return(3) else if (isTRUE(all.equal(cc, c(-.5, 1.5, 0.95, NA)))) return(4) else if (isTRUE(all.equal(cc, c(-.5, 1.5, 0.85, NA)))) return(5) else if (isTRUE(all.equal(cc, c(-.5, 1.5, NA, 0.5)))) return(6) else if (length(cc) == 5 && cc[1] == 0 || (length(cc <- attr(cc, 'constants')) == 5 && cc[1] == 0)) return(cc) else stop('Coefficients for ',psi,' function incorrectly specified.\n', 'Use c(minimal slope, b, efficiency, breakdown point) [6 hard-coded special cases]\n', ' or c(0, a,b,c, max_rho) as from .psi.const(',psi,', cc).') }, 'lqq' = { ## Input: 4 parameters, (minimal slope, b/c, efficiency, breakdown point) _or_ (b, c, s) [length 3] ## Output: k[1:3] = (b, c, s) if (isTRUE(all.equal(cc, c(-.5, 1.5, 0.95, NA)))) return(c(1.4734061, 0.9822707, 1.5)) else if (isTRUE(all.equal(cc, c(-.5, 1.5, NA, 0.5)))) return(c(0.4015457, 0.2676971, 1.5)) else if (length(cc) == 3 || length(cc <- attr(cc, 'constants')) == 3) return(cc) else stop('Coefficients for ',psi,' function incorrectly specified.\n', 'Use c(minimal slope, b, efficiency, breakdown point) [2 special cases]\n', ' or c(b, c, s) as from .psi.const(',psi,', cc).') }, 'hampel' = { ## just check length of coefficients if (length(cc) != 3) stop('Coef. 
for Hampel psi function not of length 3') }, { ## otherwise: should have length 1 if (length(cc) != 1) stop('Coef. for psi function ', psi,' not of length 1') }) return(cc) } ##' @title For GGW's psi(), find x with minimal slope, and the min.slope ##' @param a "scale" of GGW's psi ##' @param b exponent of GGW's psi ##' @param c "huber-cutoff" of GGW's psi ##' @param ... further arguments passed to optimize(), notably 'tol' ##' @return the return value of optimize(): list(minimum, objective) ##' @author Manuel Kohler and Martin Maechler .psi.ggw.mxs <- function(a, b, c, tol = .Machine$double.eps^0.25) { ipsi <- .psi2ipsi('ggw') ccc <- c(0, a, b, c, 1) ## == .psi.conv.cc('ggw', cc=c(0, a, b, c, 1)) optimize(.Mpsi, c(c, max(a+b+2*c, 0.5)), ccc=ccc, ipsi=ipsi, deriv = 1, tol = tol) } .psi.ggw.ms <- function(a, b, c, tol = .Machine$double.eps^0.25) ## find minimal slope .psi.ggw.mxs(a, b, c, tol=tol)[["objective"]] .psi.ggw.finda <- function(ms, b, c, tol = .Machine$double.eps^0.25, maxiter = 1000, ms.tol = tol / 64,...) ## find constant 'a' (reparametrized to 1/o scale). { val <- uniroot(function(a) .psi.ggw.ms(1/a, b, c, tol=ms.tol) - ms, c(200, if (b > 1.4) 1/400 else if (b > 1.3) 1/50 else 1/20), tol=tol, maxiter=maxiter) 1/val$root } .psi.ggw.eff <- function(a, b, c) ## calculate asymptotic efficiency { ipsi <- .psi2ipsi('ggw') ccc <- c(0, a, b, c, 1) lmrob.E(.Mpsi(r, ccc, ipsi, deriv=1), use.integrate = TRUE)^2 / lmrob.E(.Mpsi(r, ccc, ipsi) ^2, use.integrate = TRUE) } .psi.ggw.bp <- function(a, b, c, ...) { ## calculate kappa ipsi <- .psi2ipsi('ggw') abc <- c(0, a, b, c) nc <- integrate(.Mpsi, 0, Inf, ccc = c(abc, 1), ipsi=ipsi, ...)$value lmrob.E(.Mchi(r, ccc = c(abc, nc), ipsi), use.integrate = TRUE) } .psi.ggw.findc <- function(ms, b, eff = NA, bp = NA, subdivisions = 100L, rel.tol = .Machine$double.eps^0.25, abs.tol = rel.tol, tol = .Machine$double.eps^0.25, ms.tol = tol/64, maxiter = 1000) { ## find c by eff for bp c. <- if (!is.na(eff)) { if (!is.na(bp)) warning('tuning constants for ggw psi: both eff and bp specified, ignoring bp') ## find c by matching eff tryCatch(uniroot(function(x) .psi.ggw.eff(.psi.ggw.finda(ms, b, x, ms.tol=ms.tol), b, x) - eff, c(0.15, if (b > 1.61) 1.4 else 1.9), tol=tol, maxiter=maxiter)$root, error=function(e)e) } else { if (is.na(bp)) stop("neither breakdown point 'bp' nor efficiency 'eff' specified") ## find c by matching bp tryCatch(uniroot(function(x) .psi.ggw.bp(.psi.ggw.finda(ms, b, x, ms.tol=ms.tol), b, x) - bp, c(0.08, if (ms < -0.4) 0.6 else 0.4), tol=tol, maxiter=maxiter)$root, error=function(e)e) } if (inherits(c., 'error')) stop(gettextf('unable to find constants for "ggw" psi function: %s', c.$message), domain=NA) a <- .psi.ggw.finda(ms, b, c., ms.tol=ms.tol) nc <- integrate(.Mpsi, 0, Inf, ccc= c(0, a, b, c., 1), ipsi = .psi2ipsi('ggw'))$value ## return c(0, a, b, c., nc) } lmrob.efficiency <- function(psi, cc, ccc = .psi.conv.cc(psi, cc=cc), ...) { ipsi <- .psi2ipsi(psi) integrate(function(x) .Mpsi(x, ccc=ccc, ipsi=ipsi, deriv=1)*dnorm(x), -Inf, Inf, ...)$value^2 / integrate(function(x) .Mpsi(x, ccc=ccc, ipsi=ipsi)^2 *dnorm(x), -Inf, Inf, ...)$value } lmrob.bp <- function(psi, cc, ccc = .psi.conv.cc(psi, cc=cc), ...) { ipsi <- .psi2ipsi(psi) integrate(function(x) .Mchi(x, ccc=ccc, ipsi=ipsi)*dnorm(x), -Inf, Inf, ...)$value } ##' @title Find tuning constant 'c' for "lqq" psi function ---> ../man/psiFindc.Rd ##' @param cc numeric vector = c(min_slope, b/c, eff, bp) ; ##' typically 'eff' or 'bp' are NA and will be computed ##' .... 
##' @return constants for c function: (b, c, s) == (b/c * c, c, s = 1 - min_slope) .psi.lqq.findc <- function(ms, b.c, eff = NA, bp = NA, interval = c(0.1, 4), subdivisions = 100L, rel.tol = .Machine$double.eps^0.25, abs.tol = rel.tol, tol = .Machine$double.eps^0.25, maxiter = 1000) { ## b.c == b/c bcs <- function(cc) c(b.c*cc, cc, 1-ms) t.fun <- if (!is.na(eff)) { ## 'eff' specified if (!is.na(bp)) warning("tuning constants for \"lqq\" psi: both 'eff' and 'bp' specified, ignoring 'bp'") ## find c by b, s and eff function(c) lmrob.efficiency('lqq', bcs(c), subdivisions=subdivisions, rel.tol=rel.tol, abs.tol=abs.tol) - eff } else { if (is.na(bp)) stop('Error: neither breakdown point nor efficiency specified') ## breakdown point 'bp' specified function(c) lmrob.bp('lqq', bcs(c), subdivisions=subdivisions, rel.tol=rel.tol, abs.tol=abs.tol) - bp } c. <- tryCatch(uniroot(t.fun, interval=interval, tol=tol, maxiter=maxiter)$root, error=function(e)e) if (inherits(c., 'error')) stop(gettextf('unable to find constants for "lqq" psi function: %s', c.$message), domain=NA) else bcs(c.) } ##' For ("ggw", "lqq"), if cc is not one of the predefined ones, ##' compute the tuning constants numerically, from the given specs (eff / bp). ##' Much related to .psi.conv.cc() above .psi.const <- function(cc, psi) { switch(psi, "ggw" = { ## only calculate for non-standard coefficients if (isTRUE(all.equal(cc, c(-.5, 1, 0.95, NA))) || isTRUE(all.equal(cc, c(-.5, 1, 0.85, NA))) || isTRUE(all.equal(cc, c(-.5, 1, NA, 0.5))) || isTRUE(all.equal(cc, c(-.5, 1.5, 0.95, NA))) || isTRUE(all.equal(cc, c(-.5, 1.5, 0.85, NA))) || isTRUE(all.equal(cc, c(-.5, 1.5, NA, 0.5)))) { ## treated with in C code: in ../src/lmrob.c, functions *_ggw() } else attr(cc, 'constants') <- .psi.ggw.findc(ms=cc[[1]], b=cc[[2]], eff=cc[[3]], bp=cc[[4]]) }, "lqq" = { ## use pre-computed values for (the two) "standard" coefficients: attr(cc, 'constants') <- ## b.c :== b/c if (isTRUE(all.equal(cc, c(-.5, 1.5, 0.95, NA)))) c(1.4734061, 0.9822707, 1.5) # as in .psi.conv.cc() {FIXME? only in 1 place} else if (isTRUE(all.equal(cc, c(-.5, 1.5, NA, 0.5)))) c(0.4015457, 0.2676971, 1.5) else .psi.lqq.findc(ms=cc[[1]], b.c=cc[[2]], eff=cc[[3]], bp=cc[[4]]) }, stop("method for psi function ", psi, " not implemented")) cc } Mpsi <- function(x, cc, psi, deriv=0) { x[] <- .Call(R_psifun, x, .psi.conv.cc(psi, cc), .psi2ipsi(psi), deriv) x } .Mpsi <- function(x, ccc, ipsi, deriv=0) .Call(R_psifun, x, ccc, ipsi, deriv) Mchi <- function(x, cc, psi, deriv=0) { x[] <- .Call(R_chifun, x, .psi.conv.cc(psi, cc), .psi2ipsi(psi), deriv) x } .Mchi <- function(x, ccc, ipsi, deriv=0) .Call(R_chifun, x, ccc, ipsi, deriv) Mwgt <- function(x, cc, psi) { x[] <- .Call(R_wgtfun, x, .psi.conv.cc(psi, cc), .psi2ipsi(psi)) x } .Mwgt <- function(x, ccc, ipsi) .Call(R_wgtfun, x, ccc, ipsi) ## only for nlrob() -- and to use instead of MASS:::psi.huber etc: ## returns a *function* a la psi.huber() : .Mwgt.psi1 <- function(psi, cc = .Mpsi.tuning.default(psi)) { ipsi <- .psi2ipsi(psi) ccc <- .psi.conv.cc(psi, cc) ## return function *closure* : function(x, deriv = 0) if(deriv) .Mpsi(x, ccc, ipsi, deriv=deriv) else .Mwgt(x, ccc, ipsi) } ##' The normalizing constant for rho(.) <--> rho~(.) 
MrhoInf <- function(cc, psi) { cc <- .psi.conv.cc(psi, cc) .Call(R_rho_inf, cc, .psi2ipsi(psi)) } .MrhoInf <- function(ccc, ipsi) .Call(R_rho_inf, ccc, ipsi) lmrob.rweights <- function(resid, scale, cc, psi, eps = 16 * .Machine$double.eps) { stopifnot(is.numeric(scale), length(scale) == 1L, scale >= 0) if (scale == 0) { ## exact fit m <- max(ar <- abs(resid), na.rm=TRUE) if(m == 0) numeric(length(ar)) else as.numeric(ar <= eps * m)# 1 iff res ~= 0 } else Mwgt(resid / scale, cc, psi) } lmrob.E <- function(expr, control, dfun = dnorm, use.integrate = FALSE, obj, ...) { expr <- substitute(expr) if (missing(control) && !missing(obj)) control <- obj$control lenvir <- if (!missing(control)) { psi <- control$psi if (is.null(psi)) stop('parameter psi is not defined') c.psi <- control[[if (control$method %in% c('S', 'SD')) "tuning.chi" else "tuning.psi"]] if (!is.numeric(c.psi)) stop('tuning parameter (chi/psi) is not numeric') list(psi = function(r, deriv = 0) Mpsi(r, c.psi, psi, deriv), chi = function(r, deriv = 0) Mchi(r, c.psi, psi, deriv), ## change? wgt = function(r) Mwgt(r, c.psi, psi)) ## change? } else list() pf <- parent.frame() FF <- function(r) eval(expr, envir = c(list(r = r), lenvir), enclos = pf) * dfun(r) if (isTRUE(use.integrate)) { integrate(FF, -Inf,Inf, ...)$value ## This would be a bit more accurate .. *AND* faster notably for larger 'numpoints': ## } else if(use.integrate == "GQr") { ## require("Gqr")# from R-forge [part of lme4 project] ## ## initialize Gauss-Hermite Integration ## GH <- GaussQuad(if(is.null(control$numpoints)) 13 else control$numpoints, ## "Hermite") ## ## integrate ## F. <- function(r) eval(expr, envir = c(list(r = r), lenvir), enclos = pf) ## sum(GH$weights * F.(GH$knots)) } else { ## initialize Gauss-Hermite Integration gh <- ghq(if(is.null(control$numpoints)) 13 else control$numpoints) ## integrate sum(gh$weights * FF(gh$nodes)) } } ghq <- function(n = 1, modify = TRUE) { ## Adapted from gauss.quad in statmod package ## which itself has been adapted from Netlib routine gaussq.f ## Gordon Smyth, Walter and Eliza Hall Institute n <- as.integer(n) if(n<0) stop("need non-negative number of nodes") if(n==0) return(list(nodes=numeric(0), weights=numeric(0))) ## i <- seq_len(n) # 1 .. n i1 <- seq_len(n-1L) muzero <- sqrt(pi) ## a <- numeric(n) b <- sqrt(i1/2) A <- numeric(n*n) ## A[(n+1)*(i-1)+1] <- a # already 0 A[(n+1)*(i1-1)+2] <- b A[(n+1)*i1] <- b dim(A) <- c(n,n) vd <- eigen(A,symmetric=TRUE) n..1 <- n:1L w <- vd$vectors[1, n..1] w <- muzero * w^2 x <- vd$values[n..1] # = rev(..) list(nodes=x, weights= if (modify) w*exp(x^2) else w) } ##' (non)singular subsampling - code to be passed to C, as `ss` in ../src/lmrob.c .convSs <- function(ss) switch(ss, "simple"= 0L, "nonsingular"= 1L, stop(gettextf("unknown setting for 'subsampling': %s", ss), domain=NA)) outlierStats <- function(object, x = object$x, control = object$control, epsw = control$eps.outlier, epsx = control$eps.x , warn.limit.reject = control$warn.limit.reject , warn.limit.meanrw = control$warn.limit.meanrw , shout = NA) { stopifnot(is.logical(shout), length(shout) == 1L) # should we "shout"? ## look at all the factors in the model and count ## for each level how many observations were rejected. ## Issue a warning if there is any level where more than ## warn.limit.reject observations were rejected or ## the mean robustness weights was <= warn.limit.meanrw rw <- object$rweights ## ^^^^^^^^^^^^^^^ not weights(..., type="robustness") as we ## don't want naresid() padding here. 
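## A usage sketch (not run; 'm' and 'd' are hypothetical, e.g.
## m <- lmrob(y ~ group, data = d) with a factor 'group'):
##   outlierStats(m, x = model.matrix(m))
## returns one row per design-matrix column that contains (near-)zero entries
## (typically the factor dummies) plus "Overall", with columns N.nonzero,
## N.rejected, Ratio and Mean.RobWeight as computed in cc() below.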
if (NROW(x) != length(rw)) stop("number of rows in 'x' and length of 'object$rweights' must be the same") if (is.function(epsw)) epsw <- epsw(nobs(object, use.fallback = TRUE)) if (!is.numeric(epsw) || length(epsw) != 1) stop(gettextf("'%s' must be a number or a function of %s which returns a number", "epsw", "nobs(obj.)"), domain = NA) if (is.function(epsx)) epsx <- epsx(max(abs(x))) if (!is.numeric(epsx) || length(epsx) != 1) stop(gettextf("'%s' must be a number or a function of %s which returns a number", "epsx", "max(abs(x))"), domain = NA) cc <- function(idx) { # (rw, epsw) nnz <- sum(idx) ## <- if this is zero, 'Ratio' and 'Mean.RobWeight' will be NaN rj <- abs(rw) < epsw Fr <- sum(rj[idx]) c(N.nonzero = nnz, N.rejected = Fr, Ratio = Fr / nnz, Mean.RobWeight = mean(rw[idx])) } xnz <- abs(x) > epsx report <- t(apply(cbind(Overall=TRUE, xnz[, colSums(xnz) < NROW(xnz)]), 2, cc)) if(!isFALSE(shout)) { ## NA or TRUE lbr <- logical(nrow(report)) # == rep(FALSE, ..) if (!is.null(warn.limit.reject)) { lbr <- report[, "Ratio"] >= warn.limit.reject shout <- shout || any(lbr & !is.na(lbr)) } if (!is.null(warn.limit.meanrw)) { lbr <- lbr | report[, "Mean.RobWeight"] <= warn.limit.meanrw shout <- shout || any(lbr & !is.na(lbr)) } if(!is.na(shout)) { # is true nbr <- rownames(report)[lbr] attr(report, "warning") <- paste("Possible local breakdown of", paste0("'", nbr, "'", collapse=", ")) warning("Detected possible local breakdown of ", control$method, "-estimate in ", if (length(nbr) > 1) paste(length(nbr), "coefficients") else "coefficient", " ", paste0("'", nbr, "'", collapse=", "), ".", if ("KS2014" %in% control$setting) "" else "\nUse lmrob argument 'setting=\"KS2014\"' to avoid this problem." ) } } report } robustbase/MD50000644000176200001440000003702314555227612012751 0ustar liggesusers66887f2a4a84d1b8823efaadb4d2e817 *ChangeLog edafd162f229515c699a1e12080e3189 *DESCRIPTION b4885ab3a2711ac13563e2d7914205b4 *NAMESPACE 9c662b447d447065f1970cf8d9ba1539 *R/AAA.R d37bb3049fbf35e2f99ef3b0a868ccfd *R/BYlogreg.R 19d5e77537bfa1947069068a0e15b1ea *R/MTestimador2.R 05c9151dad5fb4bf5953fba77aaacf51 *R/OGK.R 4481cfaf01b87a16269d156e056b0a9f *R/adjbox.R 3c3888392a6105b40a1fb9e8291a4be2 *R/adjoutlyingness.R 959de6a22a12a273711cc6975c9dad24 *R/anova-glmrob.R 7372a74eb2fe5b4c97fc7315e924c234 *R/anova.lmrob.R 186ce151b871686bd85921f1843ed90e *R/biweight-funs.R abd567be7414be168c4d18995c5d67b3 *R/classPC.R b8872e7f5f158e3f742c71a8ae030ed0 *R/comedian.R bf0e306e91a9ba8682a2f229abb85321 *R/covMcd.R 6deaedfb710eeaa98ddb990382b51a74 *R/covPlot.R 31276c72c47df1cb6aaadc1d91f38d5a *R/detmcd.R 79316c1ca445f99587e3b7d583656aed *R/glmrob.R 47e68c210ff891cecec23aa54369ec8f *R/glmrobMqle-DQD.R 24d0de58576935e718ea701a10b2d4af *R/glmrobMqle.R 3f50418b4f9fd47918b8881bab58b6bb *R/glmrobPredict.R 0ca734c2ef526612ce55d4345cc2d606 *R/huber.R 08d32e9833aa6b86a3cf19a16e6b63c0 *R/huberize.R 51183c78cf39a9a79081b202fb76c5b7 *R/lmrob.M.S.R bba321ab0d6c97b4f0808b99d68648e1 *R/lmrob.MM.R 12529ead85f14dcb515c73cc5b5a5ce7 *R/lmrob.R 8130354e92350a040360b85bd36995bf *R/lmrobPredict.R 1264577d6a3ac8d697488244038a44f0 *R/ltsPlot.R 2c5513eab26503cb0e1b03fe602a65a5 *R/ltsReg.R a4c54a5e0f152cab1bb20dbe1d645414 *R/mc.R 621fe5b6d8087287f219f347b5fd5439 *R/nlregrob.R 91a0b9152a3ce7ca267a185336feea2f *R/nlrob.R 544e1b62b77ca840ea441f126a88ac9b *R/plot.lmrob.R 53800cd013755518211c5a47d48a345f *R/psi-rho-funs.R 8abd2b0e98ad2f49011e6a6daff6540f *R/qnsn.R 1d34823c1885d395c7ec8af83fcf57b8 *R/rrcov.control.R 0ee6d9e98d6746d0a2159988002c87c7 
*R/tolEllipse.R 165858e109f0e833a1c6bae0fdb6ba1d *TODO 8faf250d78a120ba4c7ea2186efddd24 *build/partial.rdb ac44bacffbdebbf542bbd2d482c2f24f *build/vignette.rds ae030dea61f2f374479a49afe8077a6d *data/Animals2.tab 19e4eaf0977f829715ae36606a377bb8 *data/CrohnD.rda 238aac3ea05d306a86708f1bb39dcdc7 *data/NOxEmissions.rda 37e841b5fc4a056e5f931d43c9eb4603 *data/SiegelsEx.R ab736be6db06a387200ce0f6cdedc697 *data/aircraft.rda 017c2bbdd22e598584d8045afde64ffd *data/airmay.rda 20bdc21492ae84b462c7080ee6aaaad6 *data/alcohol.rda bdf56c95aa99517460a387f5fa1da399 *data/ambientNOxCH.rda 48a56104b0c39bf40fd10c8b19131eca *data/biomassTill.rda 3cf0efaac9b7ef93f8603bbf2d6cb337 *data/bushfire.rda 6e659a95f595d45c911308bc253c661a *data/carrots.rda 090b86c587b70c48367bb030fe2f9d15 *data/cloud.rda 4c30ccfadda6ce5afb38e46b07da22e8 *data/coleman.rda 1961f8041c73e1bf90c5847c108c5ae1 *data/condroz.rda d4d34970ff2fa110fbc21e3f2b227704 *data/cushny.R 03cfc37554676f5b36f9e269c0bd7244 *data/delivery.rda 7baef2ba826e98bb5cf01bce930ca49e *data/education.rda d737cb0f2d23c29f2a26e96276aaee13 *data/epilepsy.rda 7104e0d903ad8fc993b13257772413f4 *data/exAM.tab e941fedb2c9e4c6f71c7997f226a8d82 *data/foodstamp.rda d173f6f42e6ec9addb714375ae4b4483 *data/hbk.rda 2cf47a5fbdfc0f13ace274e431afe686 *data/heart.rda 46364afa47910f07eda003121c6205d5 *data/kootenay.tab b7ea6d1b22c131ee080418d729dc0742 *data/lactic.rda 692c41d4efee538cddea9859f6d230d3 *data/los.rda 453aeaf5b65b5d166f0b62d9b62e77a7 *data/milk.rda 8b64e711004e709cb46ccef1d935bf16 *data/pension.rda bb1fb728be30d2c32afc8881b0c5802d *data/phosphor.rda 1010341878a68422ebf3d09b2e053c5b *data/pilot.rda 735465ec97480fb1a6fe586e85792f98 *data/possumDiv.rda 60f04bab62d0679fcaf5231d7baa2e92 *data/pulpfiber.rda 5d86b54bde8121b6dfd3c801dddf2c5b *data/radarImage.rda 3075d6647227e039ec8fbae2d2a88b9c *data/salinity.rda 3a57947173594810a0e44798404afac7 *data/starsCYG.rda c44feedbebdeaf0fcd6419986b201800 *data/steamUse.rda a7a3edcf24050d5ff10e4b2367e16cd9 *data/telef.rda b8244eaef97350532d9124998430d396 *data/toxicity.rda 0b9904a2e9bf30bdfed52ca1f238257b *data/vaso.rda 40b3c269b89c039779bba44dd63d4a3b *data/wagnerGrowth.rda 2108ebd3782f664ce37eeb36d4d2bac4 *data/wood.rda dec2cf4733637e4293db3a103be23266 *data/x30o50.R 5735d59a0fd8350221e71c757aff3022 *demo/00Index 4eaa659c5287d2433f0c497b91501654 *demo/determinMCD.R 42fec336be1de77af06d15399c21b436 *inst/CITATION 5d05206ddf06ffd03093afc086a6400e *inst/Copyrights e43ed5e20ab53c1bb67b24fe731132f0 *inst/NEWS.Rd da7f5aa078e39b61fc11c70f79acda31 *inst/doc/aggr_results.Rdata fd283291fb16f251cb6d0899d4e9310b *inst/doc/asymptotic.max.bias.Rdata eba4ff0f00afa90a6de4faeb8fbeb118 *inst/doc/error.distributions.R e87e964dd949e0d5d3ac6677cf8ce850 *inst/doc/estimating.functions.R 84e4b173d0410d358ae5ed7d782d8c28 *inst/doc/fastMcd-kmini.R c8992760bbc781798df7d3dee8839d56 *inst/doc/fastMcd-kmini.Rnw f8640c912fcc0fd7f2831003107903ba *inst/doc/fastMcd-kmini.pdf 2eb6dae5393fc8f1f3613ea7eacf362f *inst/doc/graphics.functions.R aa4176684274cf23ab72b699568976de *inst/doc/lmrob_simulation.R ad9971a72d917743efbdb264b7057d97 *inst/doc/lmrob_simulation.Rnw 777d2f1ee6b2649e781334a97cabc370 *inst/doc/lmrob_simulation.pdf f52efc8694f09fdff19c6f54fe501d45 *inst/doc/psi_functions.R 5dc951a423ecf0cea599f84e69eb1055 *inst/doc/psi_functions.Rnw 919b0e74c27563148b826a0358b916ab *inst/doc/psi_functions.pdf 8fbff7afee995686ab14f490bfdd3bd7 *inst/doc/simulation.functions.R faa9ecb70de3e867cb0b0cb8f56f3074 *inst/doc/simulation.init.R e571b79e7316389c9251508b0a64775f 
*inst/external/d1k27.rda 2697fe55d9d2cd8b788e735f21fce763 *inst/include/robustbase.h 40a78aabaa45179068cf49084e280002 *inst/po/de/LC_MESSAGES/R-robustbase.mo 90290c9dfd9d241caf2eae13d29345e5 *inst/po/de/LC_MESSAGES/robustbase.mo 762910cf64bbd318d025291144129e42 *inst/po/en@quot/LC_MESSAGES/R-robustbase.mo 6fdc9161c95acfa8acb95c2694087540 *inst/po/en@quot/LC_MESSAGES/robustbase.mo cab4f7f139761c02c5281de692c2541e *inst/xtraR/ex-funs.R 130761ff8fdad503b3eabb72b95676ab *inst/xtraR/lmrob-trace_lev.R 2701625bdc27b364058c331b8f773c4e *inst/xtraR/m-s_fns.R a9bcb5636de95b43522e387217b7ed4a *inst/xtraR/mcnaive.R d0a0b73f433a52990c0266bc30d23d7a *inst/xtraR/platform-sessionInfo.R 87049deac986592f8a86dc25370a2832 *inst/xtraR/plot-psiFun.R 4e590639e1d9c65186294080cea011d4 *inst/xtraR/styleData.R df9ad021e5fe4f589da91f5ee859ec35 *inst/xtraR/subsample-fns.R ec07bde3c1debafbc01e5222fc7d1c4c *inst/xtraR/test-tools.R d59c9d987e5930b5518e5f115a12fc93 *inst/xtraR/test_LTS.R 1faee3e2b0c2c41380c3e25d3f5fc067 *inst/xtraR/test_MCD.R cc9b23ec76ba93fbd5c15656bffca0ad *man/Animals2.Rd 72a11539f6913d536aa7b1661770e74e *man/BYlogreg.Rd 64236af69ca22e65d23c6fded4f74428 *man/CrohnD.Rd aa9d84f5c4734da142178b4ae12d8083 *man/M.psi.Rd 6824d0737bc148c4db2525b346dbb22a *man/NOxEmissions.Rd 08467d483cced3e914773083f6d5ce3a *man/Qn.Rd 083dcbb9a8612332ac5e46ecf5815782 *man/SiegelsEx.Rd c3f637a0a5930223093b8ec3ad839b87 *man/Sn.Rd 1de951f6a203408a59c2bca6aec5bdda *man/adjOutlyingness.Rd 360fd8e7b1c72e10c2925ebb8d8f9a0c *man/adjbox.Rd e5ab4ece8120c2eec58539399edfac64 *man/adjboxStats.Rd 10c2c0bbc3e33094b52a0ca24204ca97 *man/aircraft.Rd adc69c47692a6374530016dd06284aab *man/airmay.Rd 64c8c4a203a83f12f93a4799bc3fbfde *man/alcohol.Rd 7ea04b818be06098d3d5695068c3aa69 *man/ambientNOxCH.Rd 66453a19f77de93edbc0bbe04c5cf097 *man/anova.glmrob.Rd cbd3b489c97a7f8a2af5f6f5d061da76 *man/anova.lmrob.Rd ee08a50d99ae4fded27606eec359ea0b *man/biomassTill.Rd 28a095234e74bb3a25b9d5b97245e6d0 *man/bushfire.Rd 162c39b3fe90cadbf7ef351522e3a282 *man/carrots.Rd 072f4e54832ae70552419daa0539f165 *man/chgDefaults-methods.Rd 2aa4b0be1d4af9f052af28d677c7cc08 *man/classPC.Rd 81bd9b7ce5691a0603f3cf4c7c6bd7eb *man/cloud.Rd f99a65e293aca936bae4d50b059fc857 *man/colMedians.Rd 1c06929304084ebd0f599ef857aa8714 *man/coleman.Rd 5e60414598e3a152c9894b47d1d29128 *man/condroz.Rd 6ebef31bebcc045b5484160facc4c439 *man/covComed.Rd d6040abc4784c9ed2698ea79185ab670 *man/covMcd.Rd 5cf99856fcd52c766bb062890465eab9 *man/covOGK.Rd 58a839ee1a490ad81e9f460aae800dd0 *man/covPlot.Rd 7a4c3b20b2d890cda14449a844ec1954 *man/cushny.Rd ffc71a09096c7ff759123bdb52811331 *man/delivery.Rd 951faf63d07a05ff53ca1d58ea8a65b3 *man/education.Rd 4c4b144e1b46f1587eb78d9131ee9454 *man/epilepsy.Rd 98afee28d8bfdbcccd61dd43e23bc1ef *man/estimethod.Rd 217b4a7355c2af12eeed03d3a1dbfaf1 *man/exAM.Rd 584abcc49b18120f1421ed11eecf0859 *man/foodstamp.Rd 5d06f4a85eae6ab0ec6eed5d0d3d9acf *man/fullRank.Rd 9e691245c9e0ba04ff72965a52165f89 *man/functionX-class.Rd cfa1f98cd587600bd8351e2342f7f2d3 *man/functionXal-class.Rd 5991ee1fa39a30d70fe961062fdd88b3 *man/glmrob.Rd 93e97c9b0bc9b0fbfacb666b1dfba939 *man/glmrob.control.Rd 84768b70921a2bbc585b95b8c4ed16c3 *man/h.alpha.n.Rd 3c1636c9c383c9211383efa380286309 *man/hbk.Rd f7215a5e99b27a7c233a9165944ec9d3 *man/heart.Rd a95b1266c928eaec27f8a07005c7b922 *man/huberM.Rd b2d8d45ce15d346fd1e8df0262c0dc5b *man/huberize.Rd 0e5741ffbae2c8ab72d6c806a9875a23 *man/kootenay.Rd b3904bd9a90c4a1744c7a10287729796 *man/lactic.Rd f9d8358491b2a12b00b2f8a46840c94e *man/lmc-rmc.Rd 
421854278c0594171587e36b9fca8b52 *man/lmrob..D..fit.Rd 89dff1f58856e585719ac5100339be5b *man/lmrob..M..fit.Rd 2ed49fd73bc9195b09b89dc5fa3a01a5 *man/lmrob.M.S.Rd f65774c31c61ab60b11597877e1dc62e *man/lmrob.Rd b75a6d5c20fc53f3ed65f5226643e179 *man/lmrob.S.Rd 538f9547818cc78fb0c33b073853afee *man/lmrob.control.Rd 96b0314be3351c453b49f9fd99c9f854 *man/lmrob.fit.Rd 0ff3ff25d0c6407c0ca8a31c4135b827 *man/lmrob.lar.Rd d622e40aaa83ee46cd18eb6358781768 *man/los.Rd b67da02f6a6e8c88a465f9a6a1ab1933 *man/ltsPlot.Rd 3870a4fdc63baa4ecd11650abe209585 *man/ltsReg.Rd c4e8c8f778af02cb0f59b0cbc6d2a5ac *man/mc.Rd 7f90043269895933a5037812df80e98e *man/milk.Rd 35a4e8718b06d2eacda6e04a62b0a7dd *man/nlrob-algos.Rd 4f524b3a96b0e8a77c9ff7386b446f35 *man/nlrob.Rd 776e076e5e8c6958b4ffd9497d107e1b *man/nlrob.control.Rd a0174525d9be249cd7655ea3d21d5790 *man/outlierStats.Rd 73ac7b614b68fde4b56425dcd14c3a7e *man/pension.Rd 8cd4b8129699cba4317d4811937a9556 *man/phosphor.Rd 59bf70cca397d8ccaab9963f939462db *man/pilot.Rd 2cad8d62257bac1749e3c2f84871b2d5 *man/plot-methods.Rd 526daa81051c7dc411213912c5177f93 *man/plot.lmrob.Rd 9d897884dcdf746556c3d17c6f38dd9d *man/possumDiv.Rd 666a83e065be3fa28732922ac7bd0e2b *man/predict.glmrob.Rd b029d6d4e196a3617062e86120d20703 *man/predict.lmrob.Rd e8b57d5568e0ea910048d168849a0a8c *man/print.lmrob.Rd ec23a0d599e8694219f01f037dfa2826 *man/psiFindc.Rd df4376b1f8f500c99501d230a8ce225f *man/psiFunc.Rd 525bd5bf9d6fcfbfba6aef15f78ee9dc *man/psi_func-class.Rd b51db5a87f42e8420d22fdc796aefce4 *man/pulpfiber.Rd 9e3deaf3d63b851fddfa0684aced3527 *man/r6pack.Rd b17174d919be4a796d8431f01b2c351e *man/radarImage.Rd b3767d3ecc4211f6d8a11e6e89594e0f *man/rankMM.Rd fa74cc0d6c0aedc3cf0ba3965253b17c *man/residuals.glmrob.Rd 8d1fe97485ffc542a81a1ddf35001741 *man/robustbase-internal.Rd 1ffeeeda93ef6545e9e4f3e5ea7c3040 *man/rrcov.control.Rd 362bbf4ab0688eeff8703da6f0d8e2e7 *man/salinity.Rd 1fec7004e56dfaeb1b8db9ec7a0cf7c0 *man/scaleTau2.Rd f23358f6a5f3ba91ad590a1f97398c7c *man/sigma.Rd edb9e05b5fe937a216f49b05858768e8 *man/smoothWgt.Rd 47ec76a4330aaba272533c17baacff82 *man/splitFrame.Rd 2fb66f97bca59867ac54b8be9c7bc214 *man/starsCYG.Rd 4526219d6f49e21ae4e58ca17be1891e *man/steamUse.Rd d7189923c3153490b9abb2d43a52e440 *man/summarizeRobWeights.Rd be2fbf543226e5b43fcd4a3e1ffe8f9f *man/summary.glmrob.Rd ae03d2d8c3d04a3f4e6ce0aa9c0b3cf7 *man/summary.lmrob.Rd 8219765216d02b84cdbece813ef82fe1 *man/summary.lts.Rd 48c1775d93b2517b44554f48b0090c54 *man/summary.mcd.Rd 047c1d860a5d9d936abc2fb9de819554 *man/summary.nlrob.Rd e8d1c797428107b3174396b0d75b40b3 *man/telef.Rd 61eed38c560d7fca1d5a9127b8baf70e *man/tolEllipsePlot.Rd 2cf58f13cbf2bf1324c6d7aa3e410af5 *man/toxicity.Rd 426909afe46975b17f2409fb50e17f41 *man/tukeyPsi1.Rd ed7afcd97d343cc453602f724b0a0df6 *man/vaso.Rd 64cf61e38da1d48c16c348cd89a5df83 *man/wagnerGrowth.Rd 971d8b3f3dca2ffec5980cb423226f2f *man/weights.lmrob.Rd 281444c1e74efb2422bc9b31c6d46dca *man/wgt.himedian.Rd d85c77f6ee7bfc5f5d6fc78ef947f7e3 *man/wood.Rd 3204032f46beebb47c7ed7ff36b0fcb2 *man/xtrData.Rd 3be12d598177fd392ce38edb4062b702 *po/R-de.po aabff5feb60ff8aff7185da691dc8644 *po/R-robustbase.pot d5a446c5df9af043d2f9d0fd1dd6b959 *po/de.po 09e3948827597a1c23eb38cedce78806 *po/robustbase.pot b5ec8eb59d50008ef39073a7446433e1 *po/update-me.sh 6670f8885ea9588ef2905cb7b368a500 *src/Makevars 174d300e4520673a0114f4d6eeba98a5 *src/R-rng4ftn.c 37cbb87bdffad7231d34bdec470100b2 *src/eigen.f 7b7e6a30a73de4f1ab1ec57707a8f3fa *src/init.c b8df3c8d0031240222773b331a938db6 *src/lmrob.c d347ac615edadda99f64a8432cf68419 
*src/mc.c 3d0b26ff876ec838a6251234417d7edc *src/monitor.c 07fcab6140b70af6edb46216af7a99c6 *src/qn_sn.c 28999bf54b45a15bee797d8e903c8986 *src/rf-common.f 7f21eefc9396c72aef5e022d5b231488 *src/rffastmcd.f c142a095ec08ea082ea84133dfa39ca2 *src/rfltsreg.f 17021e6a9619d735c5384e47d089caef *src/rllarsbi.f 93f5113f34542949020302b4d70e9c14 *src/rob-utils.c 8e55d7db42d0b0b88c8ede22297849f2 *src/robustbase.h 364118eacaee8ff4915c5f50097ad460 *src/rowMedians.c bc28fcadc56c4269604e622f460a42a1 *src/rowMedians_TYPE-template.h d2a3478b9dbb06d56cbf729dc0fe9681 *src/templates-types.h a3a2fe76cc1fbb676f92d2da764fcaa1 *src/templates-types_undef.h 178577cceffdc422ed73a07d4f89d085 *src/wgt_himed.c 0d3d1bebd878a77ed6d2e17a34b3a265 *src/wgt_himed_templ.h 5d18a48b53c3942cd8ba54b71aa86d5c *tests/LTS-specials.R 53111611c3e4dbfe6fdd55f89db16ba0 *tests/MCD-specials.R 6807dbadfd0a2c714169c466c10b85fd *tests/MCD-specials.Rout.save a9e012cd653dc67380b0d98dbf8c98c2 *tests/MT-tst.R 95d0d4a3215fd0c5c15017bf4264898a *tests/NAcoef.R 2c40e197c40a1629e3d82ec5f48342dd *tests/NAcoef.Rout.save 2c7b55a37d3d91bb7ac967daa654c0cb *tests/OGK-ex.R 2061968598e36f8ef9af0f6b3afea782 *tests/OGK-ex.Rout.save 95abceee1da97f2efc59a83579d3fd0e *tests/Qn-Sn-plots.R ee5551e06a97b2fa119de6e770e5e3a8 *tests/Rsquared.R de9d552599570c228f91cb4254b6746a *tests/Rsquared.Rout.save 71ce3f6b1a05667b2f4222d6f23a2a1d *tests/binom-ni-small.R 06ecf1e2d46b910a4aa0d648c48ffffd *tests/binom-ni-small.Rout.save f6db581dcd9c17bbe848995b33c58ebe *tests/binom-no-x.R 11f032be4f820f1326178912faa59eff *tests/comedian-tst.R 4fdc17e44096a893d3f2536213577153 *tests/exact-fit-categorical.R ed78593a1cb730b3a48709a3161d2cb9 *tests/glmrob-1.R 860395b194cbcf95736cf42be73f53f8 *tests/glmrob-specials.R d04013e64537927c1824d112d3fffae8 *tests/huber-etc.R 5deceffddfe5185c60c0bdde86a6e43a *tests/huber-etc.Rout.save 9cb15ced67c1892f22950f121ba2abaf *tests/large-values.R 6fbc9fe8346753a07f45d8ab56417151 *tests/lmrob-data.R 25d5025f65e217da76cb5854cf1382d0 *tests/lmrob-ex12.R f5816abd2a713770944d0078b2dda687 *tests/lmrob-methods.R 4def9d8c53c2a7d99455a0f7b435476e *tests/lmrob-methods.Rout.save 52977658c43665aafd4508975647b258 *tests/lmrob-psifns.R c9124d4362d0b4391061ea9a20f7ee57 *tests/lmrob-psifns.Rout.save ef94a8db6bf1fdb52d5e49dff4c5951a *tests/m-s-estimator.R fc2e03a5567789ba9cf6dda9f2c17173 *tests/mc-etc.R 1a6476e0514f3b2ba76560fcfc0f49fb *tests/mc-strict.R cef5181f8c1343505936180e17e80641 *tests/nlregrob-tst.R 7f867316f9b5fba73e9a971d680fda7c *tests/nlrob-tst.R 354982e2b341cbbc2d723935a6c0ca93 *tests/poisson-ex.R a2ccec3324ea3363e9fc2dcbb0aeab9e *tests/psi-rho-etc.R 39e466468e228fad715bc725f30eb678 *tests/psi-rho-etc.Rout.save a9b4b4ba78cc408071e294b4969bf99b *tests/small-sample.R 76445671c74a4ba3fc5e6ff008d902b4 *tests/small-sample.Rout.save 7a8b1f9604304826d8dcbedb7c8c5dbb *tests/subsample.R 91a7dbe2f427b417675bf21252fbd4ff *tests/tlts.R 2f36514f3f511d80bc2f08c296161193 *tests/tlts.Rout.save dbf309eb5429b937bb50b5b6fd57651c *tests/tmcd.R 9c23a8eae9aac1fbe6745808da05aaa0 *tests/weights.R 0801277ffcaa81e1599077a66981e46d *tests/weights.Rout.save c0c81b768815c0d6b240a0dadb065ea5 *tests/wgt-himed-xtra.R 8eca80b4303ef72bfaccc3ede681c0d6 *tests/wgt-himed.R 56f1be5e2e78bef6d128f3317fe64b9b *tests/wgt-himed.Rout.save c8992760bbc781798df7d3dee8839d56 *vignettes/fastMcd-kmini.Rnw ad9971a72d917743efbdb264b7057d97 *vignettes/lmrob_simulation.Rnw 5dc951a423ecf0cea599f84e69eb1055 *vignettes/psi_functions.Rnw f15d124e194c8c441ba8bc0833143243 *vignettes/robustbase.bib 
robustbase/inst/0000755000176200001440000000000014555212560013405 5ustar liggesusersrobustbase/inst/Copyrights0000644000176200001440000000243410353034413015455 0ustar liggesusersThe C code for Qn() and Sn(), src/qnsn.c is based on Fortran code that has been available from http://www.agoras.ua.ac.be/ (Antwerp Group On Robust & Applied Statistics) (-> Programs -> Robust) by {at the time} Peter Rousseeuw (rousse@wins.uia.ac.be) Christophe Croux (croux@wins.uia.ac.be) Department of Mathematics and Computing Universitaire Instelling Antwerpen Universiteitsplein 1 B-2610 Wilrijk (Antwerp) Belgium and carries the note: This file contains fortran functions for two new robust estimators of scale denoted as Qn and Sn, described in Rousseeuw and Croux (1993). These estimators have a high breakdown point and a bounded influence function. The implementation given here is very fast (running in O(n log n) time) and needs little storage space. Rousseeuw, P.J. and Croux, C. (1993) "Alternatives to the Median Absolute Deviation", Journal of the American Statistical Association, Vol. 88, 1273-1283. This software may be used and copied freely for scientific and/or non-commercial purposes, provided reference is made to the above mentioned paper; whereas Martin Maechler got explicit permission from P.Rousseeuw to licence it under the GNU Public Licence. ---------------------------------------------------------------------------- robustbase/inst/doc/0000755000176200001440000000000014555212626014155 5ustar liggesusersrobustbase/inst/doc/fastMcd-kmini.pdf0000644000176200001440000017477214555212626017350 0ustar liggesusers
robustbase/inst/doc/                    [directory]
robustbase/inst/doc/fastMcd-kmini.pdf   [binary PDF (built vignette); stream content omitted]
robustbase/inst/doc/psi_functions.pdf   [binary PDF (built vignette); stream content omitted]
[ȍ=uS$.R=,-IN\fA`]KWJ{(Y.< N^aTY5T 3Y :h~JvZ\BlA`}ق]ئ;N-h7|zMWoA\NDO" tO (}1Wt,۰FJ.-jhq*~)kq*A`E4j22KV)CwNA]KVI x~@RWi -}ʇd >S$ڦ[S6]$or'(A4XMSXVJ<Zj|:E\bXk)*hdoӺU0VfﮠL]љ*]4ACCm NY\lYV)r;8;]vO?A+>?֪",kު4,(]WW5 j?hjD'SXi# jסR >UX},uZ},k},(^U)՗Za,/]v-/]%NV^TU*՗jׂH֗X-U5[PdzUʒҧ< އ+-7=O 0}Jњ\X*UR\lՕ'W6nzY X>v}UAt*VCX Wŵl>I֪.k.v 1ӧ[m2] 3} ZsXϮ= Wt]>u>]mWsb;kW^%Lr ݵ)'Xh0 ދ(t=FO%DOڵoPdWW㵫ꓠv|vt[a]G(6_uvP[_J濶X]$ cF`~œxQ@JX  avm+ZcT'>C iX=EuڵjQPk![OSA:J!<HcGjipBg MV7[˻u>pB1ӊS6׆~k̴}iutAM4TInkI { ɔvHhhشD6 68s=! >,5 ؎yX߰pn%kE>684a*Mc7&u D@᧚l9$`{v L.zi-hQ vi8Sp*OMV Z:SS9\f? ֎'~nስc/,]5vZYc~#k+1 _˃_(buz 8jvXzp:Ok<* 8}bl}iE32^rk촪soJ-|[clb}N+N h:dЈMCUuVdSPR#g77ZM+\⌮q ܱ*{- ױÉA\M0m &խ55tZm9lwlV3O̳S?@ї)(H6KYCE5MIAٛAWC7q:Oŷ޵#/WdT£Ba)(bdX|Ӫnޙ"޹d(8xtk몣7lj:üZ\Be4Zƚ8aºɱ*L,*<<ٮ|,'OŊtcwOdRS%fӽSt5z ]C#ZA8NKs9S-ٰk;p/ dRshdHTn+7U|CCEprWBzpxa‘ebj촢ZDWumi8P0y9Ieٮ"X S%u,rS+p_ulK DC 8i"koE:>*U2No]_7z+fL>Mi@NkզD4vH篊z*` e@b<U=V8*T O SA*=:8Z5p? 5p*H S ۻSjTGAtǪpVUMu"6*`ߠi촢5;US쮱 :UD`"bA%>NN+\'Ӛ1N|19pSup[CDtQBDt <hTtRrඝ_aN"#)q(l_^sfۙԵpDtMWBD$|SE-In4WNT/VUOS}*;#"Ði[LUkCk#x[VdC4tH2-} *R i@4vLQV@nRMG72.RӀ x D< Or>FOWH="= H;$ 9< E>r/Ӏh#|DçGeJ > }^4| z i@? 8NOnDGn)jPRk_ܝmy=¶釖ζlrh)-ʀSMzOȌ\RvHCx ^ZD8t1G&lrH88=RCƼ0Y2X,QMqH-"zl0$Ct9Nu@YɗlR=5ytiOqT}ߣf@aO<bꃴsPڮ@=jPm*:5{2[Aa=as|qQ;ucnc : @h 3ߣzG+E@g{yšu70V|5Rf*a"TXEh 4qH0=$K4Vqޣ=HBf @=*)5 z>՛€@a{z}@=k?tއ :BhtdZatᣢ=Zmh!zdQtGC!QQr|)JBh|Z9/a,#PTVGEE>y@Cr$ [: ᣞj~o|mD=Eh|?$C>G:q} @CY;qhi*B>!aqfҍ!?.i]Bh %G{Mɇ.^E=Z"?.C;*G+ QM?>V4mU4*2٫#lz()m &qQ+ & V(X|=&O"2¢o `]|f]wkc|@_~>OVw$9h;<`8$X<`Tb裘1! <`fL{QL˨RYHL{RN(vX_lmN4`l5F MNҀ{%) 644)Iu4`1Ѐe7ƚF\Wbv&%i1),5eBL2uc,`lH!Id&$i2Z4`YYlr,$ v49IxI0bi,ױH/Ia4`{I4`0~þvu.\IqiM,`%C&'Y[g,`~NבL[|Y1' 7 v dEc,`Yik20mQ< XF}40mlJ!,Mwb$`Մ$ X+٤T0Hnb$`UՄ$ XGdJFQ#fBL$ZYF}H,nbRLeMLiaav :6MB ~Ku&&I2&&I2LLi\׮"qCw8rl1FvMLe)LLi^$`эE v@LLeT JML庉iLSL#˵$"{ ¾v8:RLĔ8&ri?ci` i`"F0mlUuL+SNNU co;nQ*ReDLNRig`qEvHQ39I=39IkZKQ)r5m(I;4`ڍ$ XZ = XdӮ&e MNe-&'i2!L4`r$ "&'iv! X@#lo9<`v`{;݃X@}!pvvsRh8<w!X@#py|, [y=( 360Dfہ8 X dl6ְbDk͔GV5{фw]aN~>~>G"x# IFE0]bT%A" YS! 40t!e" 1UH´a/104j/0 4 GXpً#! CTɈHA%AT]B01~ /! s9d9b|a/!^#v9d9|a!_GMuyaQQ7aGJ|y`o ,=TWaؒLJۇM̤404) tF7}64b}XJeiayӇhK&qa7);l3-o0&q/);,m9w ҸÐgBwҸö3as) 107&q]3mfalaLF#\֛8 qFF 2.Mô ܮ!q,Az208loo0 Dkp 5.*#ێMUM8 Feap8a0|Kebq́8 ML#L8 ɯ3118Ly(of8߼aML [iaW{7o\x&9א7 ͔ML ۞\axîf3ia}ӆ)󓑄6 I Y4ְM"Yî{-?5 2_ap7kMNc r5HӒab ӓ' +צ2Ұͳ W]a媋{H K14Lݮ.zW# S7az^6*)r5G-W:EEԂ,xIΰLc"vSrzʰ&')x +g~ SGɩa71Iޅ)soh {|2\ SI0 ?).FVlta $20d +(s01Sy8V7}1)bB1L}<&$4XbB1L=C&$4RcI[è0ΰ'P8Êք+ iaFVЬ|# S6Lia-QՑF/KYa~3:laiKYeg[¨ob1ζ,GEYu.ic Sa|&g2}ELj[aykY-:撳Z ulګjaKe-97:I2ʔLc@FUY u%f@*Y uHdXԵkbv Ŵca.5LFb6V .9 tX u@acH틋XԅmrN cG9R|ܬa g܉th"k1!ծa1*F`Eڰ F꽷KX5a71eTMZT8X $%E!鉦/ְY2VLLF4nabJz/> 94!$gULJr)rQLMΓ?n0Θ Z+I) FsF0TqeiaHE"V*4eR1L]d ӌ a,<&2itˤ$c&ne 3/L HIIΰVaӧ!eXF%l70V"f7'FqE0E(1epo4hTd 3aA,2ɄYav4*&bgWc*d3ʰaJI0pǬe09MOH0$ %4GȄT0 њd "a%+QeY cz)2JMHR1{nQ=YN2LUyMiayРYжkX+a_c@բt^Cְ /aNo5LsLPiȾ.śda$ P JҰbb6,c->iƋ5,\d $3k+d 4abLLiB[5$0cr8LLNe198,rP}a\9L|$˃ >n0 ˃̏8| 4$q ˈ2${qXFON0M?a|hr8Layhù8LsMNq]H|4Z)iz<40M4^.%Ӝz#b|V8L.aO18LS5]Sؓh.Oދaib<ܼaR`цi\a[jb0m }%&i4m&&y4vz0ELLΐY&&y2\ T@7$mX$m 8vMV { AmX@Xy:aaGq KO=2X KO=S#ZzFzmGHz#>uAaGX}/ڰ#):iS6, ,?z!mX@OkO=S6, T!mX@X~zZja!mX@ʡ 8?iSSt҆dOeaaG!цyaɇ6,?OBڰCr Oa`GꡤF֠:aa GAuVɇmX@Z,[a(_XR iϏEڰCr҆ya:TXCPd|Z!Cu#FB !F+D}э6, VBVrцf%fцd>Ȉfц$fцyF+FuH;?=E5ڰ0ڰ006, 0HqFVzvцըQڰSQIqF|F`a)`aa9CH6ڰ 006, R Hfц$fц6# Hƪц:jaɇ`ai`a!mX@I}FyFza!mX@ʩ H? sц_hXCHrXm @?U iRCh2E%mXF~F҆TCQ6# Hц6# H9h2mX@amX@amGHr|,҆dц$6, H H9h3h3ahja!mX@6, aȇjaifa1% mXamX@amX@ڡFhzmX@V5ڰya6,6, 6, F@%dCh}-v[}ݩ! 
S_@Xg\ S_@Xg֙xu&ĸ ֙xu&aɃX#3H;$/W {u&-AXg6 Lu MŽ}aGXm~h5DM}L  N]`5'͊NR=}!yXщCڡֹ##+:qN`U'2!Vuzt}:q}hu dF||,%}Vv+;q8fuH }i`\rM_pv~1νy$n/ my$n/ w v~9ܦ_@jv~lrv~ŽyR_v~1|$jG/ %W;Sv H>׈e;<]Rbt Ȉ/ 9*jG/ % n0Zct H#/%O 8%g;䨦_@Zv~a;<]cc H~=V;^Rbx H?$g;?_@ꩦl_@ܧ_@fL[Rc{ a/ j H}a;fK˟C_@aXhX .i##Z0V?X? Xm<3`sH9#V?X?h!##V?X?COW?8!#ڬӟ!#0V?CQW?X<K`usuS<M]b#0V?X? :9:yku8>ֈo}GaZ? X=7| 7C>7A>7\F>7Ѐ =BP~6]| `H0(]dM0NK ܈U1h:)BWI0GȤb$ߓ=@f-z+6L$$ Vi%n +dh@Ua*VC)6wPH"cO0ISD,EFA`*LlTa 8#2JJ VTur$'ơm>K16æU_d_ǖ`Ad藑yJT3z#ϛ0%}pf@׍绻IL`7j>,F` b318`z"H[>X}J)/Ԩ&\zuVHb %t>ޠ[Co[J v2@2S]&@8[`)&g tI.~Ou_P}7* l>UOOML Bb7 &g"4ոqmijpYIZ`)}&[*BĦ[LhD}}{GcW7W{Jx=Hw] T;dc~QH&{*5YQnX0M Onz$6[e="8.=g6[c J`鍻w|$0"$?2"ဃlc}^ćF?>7q+--,O:SOW"4 }&412L:"NM,cmTA2vc |5垶*2[wyٮ$L'&4F]f =qN'Є랏:% RtcP^ nȦl |bzI9$LUQq2+8upM Wt.S$6J$L 4D[$|-3ȤTn|bO!芤"tC )Bnwd%~]",NAS$vPͨȄc7vTn>0I^̤CC$%wg{lE}1 >b/ 8XM9rwݗ 2"wHc=\yNT,nĞtB  ND7|WSy+y# Y|%ykbMDY3 {!TȰ_1-ƪiޓÁdy4o!Uf7'Ls"*LF*L4a7ηQ捙L2[[Vfyku!,oLvr,o f7o~3 =~ll}RIX11[>Kc7U>Kc7RӘ ^hg7V;w&yO0{2#Ix$oy0{2kI&y$odW$o,ib2{ o3l53.ۯTt_4c0 %?n=Fr,ͺv o z($?Ђ0[WF3fH*6 o4tMޓo*ewcEq2ӻѹ&X]ndWݺsnқؕݓ^0o2k=ލxjed]NDNHg's2onv#Cr䛹/&1{n4{RWScbdF&vŻ#%Ogb7%޵3{vy YW Ѽ&iݨJHJ1 Ӻ`N7>UxYɏz2Mtt@֜/MNɖSz N=YS -ROCf4?zILX8)(ArknpZO"L=wL֢9w.pLm]w2#ruUm6yH-`WRIzD2ۼk=Mlv֌C#&dd>){܅m^trkblA2۽g3dOlXDGH>93?B>=g~"|'rg~|'zh#=g~ODOD(?GOT99ϟ9ԝeȹȹ!ȹ*ȹ3ȹ<ȹEOO䜫OO䜴Oqȹzȹyy=y^Dwz=EOx= _D|=EOx=_D~=EOD=(XDO0a=EO$=XDXDOcq=ёM(艱(5YD;LHb&z=-f'jVڴD;ivhjզ%!MK$AIih6'mNۜD;9vos$!bD;ȨC,h+$b4sp&$!MHæhV7!nB݄D; vw`&$EHCʛh7!MoBބDoo6lD;H؈v}Pf#FCh7o6؈vf#Yhg"l6وvFf#ZIhE,27Ɉvn&#hhl2DZɈv&#Z h'l.Tv";3 #yY,aߦ s0; _'O:u)N dʞ#^lXdxS`_E>XDxe`ᧁEXDxw`E>!XDx`7EK("\Kp6" (HDx  '$"ZptA"g$"cpA"‚$"lvD!9@D"z{ ",|"  ",a:D1E@DaDQfJ0[" & "0" (p0!ˆ *YayC- `0ׂ!€ ."`0! a(D؊a=Bw2" @09! 4"@\!— 6"@!M< "00 uDAa}A=zDqC ˆ>"C "C$ >~(CDs>Dq CD*}`!Їx@"}!$Ї7 |@ !B+"| !B4>DqCDz>D4!CD"|!L<"|!U%}{!B\zJ q1CDʂ"rzX!k"z()9CB"jy8$9.$;CgdB2}]HЅ]+S_x?Hj~_VиܼB 5'ZVǃbBc|,C pٲ%d!)hPPmYBCYR  [W`!pU,4vۼ Iq_R`fDzOZ(PshYBͶe IWpK{BR/|O~"+$GRBR݄+4*X%ABm}a˗+LK+$=PBR$X!)}RBR'VH[T! 
uA$"PQE *4JR)*$e\؁ >j-+TH赀 6z-BCR cPSH)$a_'Klb IJd`C(Шbc(BRnB_/)4 ,RB I| R`VKG=GC!KtP,xTPDOJjN$.+QD!)u)QHʡϖJ(4<>" jɿLdBRR$x|T2QxB< I-rˊTʘ--8!)w"?pBc(P|YqBR| '$e*e=R)\+rqNHNHNhaqЙr )P)Йr 'r )йr )Ъ\pBQ.8UP.8s:S.8.8szv=] DVgWA)ǫN/ΧL㳢Q[[!_ZUVe@<r*?_BQ QhR*;ORWyuuۇ_/}]߿zkkJk:wW74{}}6_ojvsV-[~=imf]dVޤ[bkؚٟ>|_彿65-I7G~/m޾G[UrFv}EUjI'O¶yͭjw=u4>K~v~ոw.הA=ATT@2.>z'oGq^S6mMsS5Z}ԫ=o_*2M˛[;͟?ϫbлŲ.V#O'}I?lYG}x߿yW7޽~y g^]9;*o1ogh<ȗjI58qNN}кQ=nrl|9؟o>A9 36-m<[f$넞~~B8pmn>wk'endstream endobj 114 0 obj << /Filter /FlateDecode /Length 37611 >> stream xܽ˯-G1G6!m8- CzPb>$K$K_ked xwȌXn~IwOqw^Q}Oo<_~>zu{7~6?jo嚧{u{=6|?է[{xW}oh5J/~\_~~t׭S}iy۶_~ݗ?}wZg >u^g?'/_O?_|}_=3ˏ?|G_[i/_j7 4Wz7g/?m׋Zo?[RqpVG3?p{g֏~Z-/Տ_I1??QgGy{'KrgW<|?7G~:^zngE|_ü鋿m?uܹm{mu+?4[rͷ'7Z q}Uī|/>|VZ>ޗv׻zͯKَ~8·[˿mMP!K]Z_7D|8|h7cނ5k/?۹ | }_Oq =mqyAϋlIyxec5o}/ԏ`_o}{5~7Vף5~~ٽsW׳DJ}ee/}cJ}ҡsSp+rkl{,{TqvkNIϱJo_~?_~2y g >ݷSNT>돎߃>ߝeyZ|S^Er~?OK(PGgu91d>P9쳱;pP W>ySwO;q9Oy|>_k9f>ڿw:z_xmmȫ?r;dT͎/O=ްR|q h_\Y9k6ϰn4B~滯?8ei[7mKsF뵕s`ǻ9|cmo_~9>dqlylᐑ{q|w|\wHˏod~h;F/~ W󏎱<tw:_;^,gӀg7އoW\{= )4?D __~w_[Jmhd=9alA_\lϯז_~.s1uߪ.EƼGClOԯ|𾫍6&rw;ѿl}J#ow?^α1_o7s 7ֿOG͇_io}g.ޤ Mcz-'ݳIG)?S?R_j:9?^a0xrژ>1}38 7YnstCi7Q*/7~~y9N~}7|w/}A폾P+9grJȇ~r/r1c:!D_^Gk8\8\ "{Yw+[g+=fuc\mׇL?Ko?`cyIwpN:ߌ3_:7N縹ûc]b?'ͻ>8inwx[(sR/P}61gxm8 pSRp TMR-ԯ+mvͱ_\@M?bZZqmc eUJ}=yIMB4yD|+d\p4Ҧq6riA&]7D<44RTѹa@(Е2䮛5Hit7ꢣ̕s1t6¹ nӞbI7F!J ϽI>H`VZ_brEsG5LsB?tAfǏwV22o|j}`KR+K\@'6jjK?OeD7¹3n|?EٸVۿ0eřFa./lʋ!rӏfj8ޱOC@Tθ5O{>2_ 6pX/މsm8.]μ w'~gt> :iCniӚ,*sTR6\\8wpS/ f=^}v'N;:hoIiqc3{Wxpfiێ1X7_~>yK7Op }^&6_ٳJi|1Չ;ZSP-ƏiRΔ a|[)р9sy\sq?xS9jj`o' *ު>q?lvت7cCKo:hwwVaNu -{JBp:zxp/Uǝ>p>Oq0j_{#7 Rvu*Dř{*g4sZ>'ӫ,oN>ѐhuCAz*g~oTv T.v:_)ξgi&1;ͦR0A:.@66W|A(4w1gtbF%;#o|vlc3Ō#aS7ڦuNϘ3PDQ_.過~`G/uS|^:s<tnZfՠ4;+!s_q⟦=.*߽p m>*΍!]Bc/-U5L˛?̏}@߮iֺioa/t+o?Fty0Ol\ũ1 Wa-0 #8Ƽi ,)]˼袥|s!PyJjv KB'MNeC…Z *2ڥ[@*41΍UT75|NVa. 87^[ C7;z膠\|xPۅn*hn^kdyÀפt6|#H pxڿ擆μ[irS8]srǴݩpG< z{y|ͷj(d`oplo[G|1ڽxf1o5F uUlP^m%փ׼06u*;vM; _At7,uu qذQkS(^_Gɶ:8mCSLk{N>Ijͽs0OŃT*77ʁWDWܦI:qcTxzCkq*Tm LqSi\!Qx|c*FTt̊WRCPƛttzIS0{Wξs"܊훻T91l֝'#5<yqݝ'S1QhL 0*]VTT6Qsg>=! ߞdzw(MTvU11G7b6ur_5iW;N޼JS[]9ZMz)7'5f_(4Ү9sVnkt7{2o 6ToĜ`;=_SW3_9ALūib!p*UurݥjCb*[:۽0 7 %|ys>SWe)d+OnU,5piÜ̩X7'ΨodžsIO#\aJ%R1~Qk[;^*mg/k89-xZ;NP*(6|smZ| 8uQXT؈ݡ4w9QaLC{*Ulm]bR:Ξ&Z!:;;&EPTN>8fsޟ2i*-uN΅wnZSIVin8yc3_`ߠ7|M+׫~#) Jfۖwwy+ RUp= ӊ91)sF,bGt*R;}TLiYg) CGr:pt*6P[⹛v>znrTG%1}'Tt>S Z0f*|uLT+:ŠP!M}گLJWS zzWS'uf[Z~ Ѽ9Frlcv ΍.a}ckO?+o6Ir.ױwg_˻y/VwqЛĹg%Xxst,c\Zǭjwmɍe;><4]FQr=K=4GӉgtձI:~-S6J_gsm0؍Bp9S~lv^t.3މE:'ݦ˼ G;sk6]߾U9GmxNO1cf@/E&j 4YiSrBMBP6sm`/W |xZeA;'rax1^ ??Gm7M}qB5ySMA~te1Z4]*v..1\X|czi:"i:?} wا ܦ.d,ž8r7 c~pL2>c[sʵ4Sax6Hlmai8dbi8m2nn~pn{ܲ i}_ vL骞vΞf8t78g lt3~:"i:Qi6IS4#QS4#X2S'ئ0M3ơf`(U_$/S4-"idLEnӌhpfl`f#"2M3d*rf n2M3e*rf `etL}4S4lO?#q"iFro= wLH>v ܦ;԰/s%aO 1)1԰K,Swj܍(0KTB[" ث)\,Oհ%XjX?c"^! A}SNƋ9auϠ>GM+Zqk w91} om `D_ٵZ6E(C_z ]#Ϡ\juuKQD_N1- ˈAl_(`FE(c ZVϐHMNZ*fgeQ}ٰSΆ7N#s9CA-ᢌ>xRb%~KX*cKYBWB:CAkm eDҶ9* q+6˟a G`Gp/! vGr"c3PpK0sC%XVϠw;:hMO  ;a `f#ױDzO"w4xê~ G&g3قx΄ Gpz:_o(Xq ݦw!s9 ilw%2K;uaٖkȀluɒd7gdRNEmrEF&djKf'SܙmٟSDSg2yXHpH4mmRQۯDXqi,9iϹ?^vn v&CGl&̱M3wJ4Dvhy'6ld16~DL E4gVi&78Ǿ$ 7䲗%Gy 4<*(4slzC7쐍kənegӝV904s!읿o%{*J4'@ffTk ~- u۹dѷRifONifC;Z;YsQiVhϥz@@Zj4$)MTmlxDh9Υn mmCC; ԬаQ b*J6rS8]b%Qtfifa(4WLE٦Yad hpDhd/K-)04m4l GKюw;f,ѐ^Rc*J6!SQiik3o쟵HLt!܇ I;J(^!#]P\=e*5u uPbghye lyw[,~u)CťdS^sÝP+RlaYk[$JdS dSE~ VF\SQ)hUɦ 0hLSz*ց*:61աvtwV^wPуc=}yJ3d>J3e}yW wM1hP~3n~ylbdmξX*'ݥ aZRt4UElR(yΆ_4NR4zR zSF=y 'a:ჾvDU1>.)Z%18EwRSI)Ƒk]y\hF҆BZ@\R*iN7 ' S;ڙГ*ޯO]5R*Z†3:! 
C_i;sn7FBE 0͎=,oUz+m1+?"+a+ t~ٵz|}TdX\Rqˎ Ѭbgëco)Mp Eo)myY|iGvwY]S Z !9FN!N} s*z|TS{Aq SU+ZLXFӡ@j)KԀJWiCX*5\MTl}cLbC(pQvJV7Xt?6YIq.u5p_UʩՆzPBR*: KiuY=*m^.gW5b4;3u:P;= #~}=g6)r8R嚚+*]t6RR=Ty⒰(<:EO);;On7{~[~# ]hOAR(e4;xp^va>;%En4\~ŽTyJ!Se-JQ^Dq,79KSt&g)Cn~x`G;}؋BXPNY[[ ЁwyK C3\S^.{uCRkxaChr 8٧\Z] c/n;+R\Pt4m3I\BqU ҙ\gD,/RKRÁE3]Bi=hH_]ʝ*ER}Y+KYg> S|WP:VuڡB3 iR+jn}܈M'oBt }DsJ8pSTJ _SS=<Su7/4 ufy >(=O;4K唣 8\";젧"G)vR;RDz=r3>0(= Io4 ~ӓK+O"!\22QWw4ÕH; j+т& Y psŽiPRmBe8S& `kEwSFf&!3g9J+U~RMW⌗_&ےVBu&(ثTNô;dGp=hbaw+p&rܛƢV&D .oʩ;)7!綏bGD>-"O#] ?=5rtSRp#G)JCR虊ս ~7Y;l|Vk"O9T%yJhLVhgkcW)yJȦ/+r}CBM|e Hq[9m:o>: >ΰ"?)Kw(n;|-??1'p5:/\ v&RN+BQ;>$AQ eX]]R86+rBѪ ;a%eQ ES-~xrX~C(yJH-b"O) /pHK(_WU9_͆IWÊ<"O)@(_}nEUgJ)\R.?TjqlxP+~OERXnS}vdPe^ՋZNeRtUn |*B9S-$o>y"ٲK9KoeY!Y/E͊PvߊPd!uڢ-=$OAS(b")1H8ESn琻4&U#zWqW5? *Do$,?֭u{r+ko/b(m]w-n擢+ؚo?gǧ 8\*m^p奈JlSBeTn_[֙@(뷢(L-M% 0\desKv+Ƿ'2[|k O`aO?+2v< [ksH^s/>G[eGžԎGi̫~)vb.|7~cH|SѰ5_0 n/政8 hqO? ::-@Q;>FE5MB?B?FHP-=fRTLm>FE *|b>Ʋ:cZz̢RTKoS[*"*>FI`)t=W1**oXEnTJ^Wuq bQ0KK KF[*> cT |Lun*]*1][| >VԘ*!Q1U^Wtc܂UfS{ܻCNM?z§[# he_c塚ަDo,#*RAuye''bD[qi "+XEt"qLS-4+"q1?UCd(&=&UC!sDH!,*gX=V!,c%+0pK3xKXal.; w: J z0Xe DctH_aQ +pdfqИ`XAD:c$?%XacE$q +ȥ~ȝЙ+(1*4;V_q 1J+2K11ԍ+1 790yూ$+"]xHwu#à6 ¢qLjA_KeꞸcR=2mXN%v;VP,E c*ћhXAc%x `@q2€mu _tbq!>yyˈ@"|}:}Bu@s .=xe kXdXA{Ƙ Pacƹ/xKXAlm^M"| XAXfXR>>cw:c5sdh{Y=2ӂ=V{nXA!Ymfԫg߯u5WL 5A1omc)"=- ;|0LS3GE豂|, rJd:=VPQcMX|&Ǧ1>=bq,u{㭻ceD>@8J4'XaAKP챂t3k{ Gsci>F=[ *pC>HW+S+9Rf Ғ{3 RRu +،UEJ>V$ecU1Nާ.XAUȘRzd|fׂ u=m 3rf$&U`5{a=Vfc4T_챂Z*ghXAJk=Fʄ- \|W#f=VP7h7' A+!#0c41M|`WOOH>Vf%} ,? =F{םB*mh* }+p c%ׅ+ȯt_zFJ =VYi!9*k &F=V驂f:tǸ{HC"BqK.%N1.vtXǸz 76qMc-7q͍qh+Ǹiac|/|BeHi_JW1Lۯj$iSnrRgǸNcw [SǴ_ 8a[ce8u|Jߵo)Bq?Dk8^''KYBq} 8/ 9V-cÿ:5SԱLn5J1n ^FM [#*]d\3I1ykW}WL[ pcYٚ $0Ah:FYSChev% qU<tFCʈqnc\oB1tƸ0Qc\DR%bQʈqs :ƥSǸ udHq-{W]+}Moρ[ cǸhWO2v+}MƎqEi1 rw{;= L ٬7aǸ}A1¨ⷱcލcdXc\3 ;ձvŽ U׎T&:Ƶ :ƽ9cD| tT :uOqw2AǸIm :uʹ:M]ٶqo:6AVqۨ2AǴf W~Kw.1n&Tf ajSқ\M፥`ѹ%c\F?|椙c\oe<\Z s 3M=)c\]Wb̶.15s KIq3c6U:Cj_9;h03l1dbGB*+cnz@1R2r 6VY ocHTW F *j *2s `5A!nj뒍CM#!PCƤYk19S5#`kca]c{0C2{ycjzݼ1>J0n 6vO2n y79PTCycj17vs7Yo".8qc6n~>OƘ+ޘ߷7:ǐX+`JVOPSӯcՠ1j lSc{C`hw]88@ 88P]c0S2p )#} QQ1VCQ(cGu*]n|T`qncĨ88NLa3cH41lfJs ۉ9ш43`;+9pL CTԯc\51ĎшC;CرWI;"q$c4:vn;1]31Qj )M;#&c4!1t3;]DcQh@1N?1rl] ,5-9H-9̀,3ν1;fk;bAQ' !lC iعsl?o1Sn܉[\` nZX @v|ı$\HX -cMX~ıjcv"&ı<$UosȱԄD!29kIr UtrXLKq kLC8$hm^ƌ;rCcXFfMV3r/ &Jc^hLcDJ1DSeةd9^ F֌K^2ܭ3r Tc o]cXfqxf!qY}1 rpDaXu;w9RLck&!^9vD1DaЩcV}q ı# $q UeU7cGD%r f 0Bc,SrO)B!c+#ǸS9ā;L2o 8Ɛ<>ycȯPqU%o匫ư+1n Ofw7ddر;>'qc,#70   1˴#4AA⹁CދƣidLAÆjm i8f7LЄV7Hۈ,)ϗ nh*RAEѷ05aNsEҒ#YL#TO *۹7 8FYB um蓀c'6M,QUWd*%Mq[Ɣ8VL'_8VIQDCMF++z j8E{{]1oJv{p, o GphRnoNpO{f 8V3s rHϴg llxctƘ襺Y\ `]8F 9Ect<\ LE Fѡ 9Fg3*Ji2spU5s+sw]ce̟s!ZsvH(XE u`Ig>SH0rdh=`#3J1lDӁvgZ zaZCcM&{ic܊1J1hcL"m}ꦍqѐ-G_cFc1@ic榁cd{զD70Cƪnyc 07Jw|q*ca\%Xe9=5\1Fo qwXWl|EXE>C7VQ/mI2q#U&1H1x8Rı=LS<,XEQCDxe8x5S+cܶspr ѭp<>֨1F4AoIMf}5M-ƌ1lAƵ1cTfľ3kY1*V3vGHgn9c ָ X(ƨh5j?^"*&]Z@6ƸNEشʅ~$Ϳ%n̏\QQn 7FEӀqc ^Q1nzqc`_]ӿl#gL-S}UՕcZ)J1hmwUloi5AŠ8A`xcTLPoA:JB1A BǨR%Xe"ER(zMc\W")B\(v 8FAƄc0L1*GQ1J;J*cTtʴ`Vw>U`]&1mXGuQQmsJQ0J1*晉;F/q*P&`)cT1(n ZӢ1P/QP젎Q1 L1*jc|?UIioIbc;Db|cTA( :FEsMC΃:FE~MSfZ.,6uD1 zQccHSQ1J1* :F,+Aǜ\̱ H)dQQ pCǨ% C*6:FT4QǨ=3u,ttLѺAǨ3tJv? :F%0d[L1 ?o&u3Ǩ4s9~TbE[2Ǩ*&3vE0v >[ ]3Ǩ`+AǨtx*E:ƤuU7tj:v]cTt&A(&XT1*ƾ:FACǨ9` Q1{K1h7u]Qag6tN^ =lC*nxQ6tJ`,1*ZnDcŁˊ g)-Q1GK1*fm13<*%t:cL1pj:F/QǨxb؝1*\A(lVNw :Fh:AǨh0tp0Q1M1*b/:* bcN rBѨf]ctbQ0gc~pdQ0Js=ӊ1*N9FECcT1s0.+"%ARxKu>3ǨB`bcT "sb<ӈjSҭ[ E=1*+nxe W1 %&30qʰ"_)&tA*$oA`cTJM5rb:0M1*J cT #rp"Q zPtbAQP+6Y1*.+8Fx;q44q{Ҟp=P"Q1$jsz)6O)N/Iڜ^ړ>U^]w0cTL*N/I*N/uNe (jK{K{RK{Ko9;/cTLj.i`FQ1*lwviqb..1*! 
9F0#K1 <_ړv:4r$9P_v:'M1*LbR)fAEqq"Q1TN1*zMc:Qt"Q1ıK'ȱ;7cT4$9F% d򘮊Ux?6?&#[qc4]9K&Cƍ=yKWxWd0nlU~(B٭t qcrGKT+rJv")H{(;n{H0<"X-gp{(;tO.I[kG4 ttb.X}b.GX UBN(6C.O@ a ?t ]@Wp& t%S'@WO*pM%I,HN`F* e:p`L`FT^N.Nb.Ј%Q9@df-X Z1hx(\L\(@ 4Z0 4~Q?@UbWV`Tb@#jP'Rjx@J` ~[@}D 2P 2P$ƌ(vewL/n4POQ=)@=FG\*hhHh D .vգ|z @Tsc4|e#4Xٴ[ TyӨd4R1 M;OfU84 DŘ"XOl * t@3—fQGl^wl ***o6*nSQKbU6 E_i7bxm0*{ QѰj0k@T%.!0K DEu1%0X.PAڋ"X@Q-@aSDCd A7h * DHV|к]&Q2"<> #2k.$2*XO8':C6fUu4]@)`6"@.~l bI{*#6 f-@T IC ܊FEDQ>XwK۴TL@/ -DJW-DzQ88oI"@)4a2ЂW4QDd *(LbJM& xIfDZFQQ01z@T+""F(h5Wd Ld 4GїT4N DE'0I'QQ?1HZ5hDDڢ2W "s` *= IRG@u "CӉ@T̐JU*HPܛDZ &Qc2SHVk8 -MbWӃHoDJ d *Mad)0? e0SQb0<[Ȍ Q1Gd *" D 3"Q V"h202η(ǃXR0bGH **oi$= 2bFr DE50MЭ#*; -@TT *avP[6 T{ *>*l=rƌo@Tj6. t$;*L(Ѝ(P Dΰ!1n;0@|TX ,+b Y:ި_G3o@i[X '."XqfʛǗZ I@,@.brD xޛ$2>鸠U!Mt6Jh qEmR>@KyEr0pJh Nhs4=4 CsDFo?뭚 JoXm/bA!۷ .*(`{^KՃR hXEPqEP- ^i/ôϊҾVVVA!ڷ *(@{ ^Eg߂@EP1E`hyl=JVBV:VpUnAE{=r  YW~fB{v?V?y?U>U0\)UUp<9?Uyuw[8krU)Vu YƞQ*SBJ!Tзg3g\\\*QnAw:-8-8-6-4UB{>|VuRg\\En=>0\,НEr]h=`va<,B}Y)9)igsR"Zj"gKY'b-9'_gcN"ԧ]lE8k:`"۸|>sr`uR"LE8s:gu=,Bӹta}~>srtn EhE99 :`"EO09О9ϹsaZIι͹s 9+'6'܂9pB sn>`sR56q9>N˹9+'+'@B} ʹBPrn=ldxڅIȹ^cs V x7~nM܎onAonBonDon'EonGFongGo!$&* Kn;$&<ɻIOPoW'%&}N ITowpo%&=`IYnҏ%&q I]oү& Z9hbnpS&&=IIogn&&IIkn?&&IOpn[('&ɻItn{p&|ɻI7ynҕt'&]ɻI}nҵ'&w d Bn2X!7ěyHME$&c'xIDlȑ0b8ws d HnVtO i7bͪT܊s+޴(ݬJsO^y72 5f7ó^ unn'b̺YY7̔;uAQ Ya7Y{vswjMFq%&}n7ì MF%~uiɺ_`d[n2 .X79$&#uvɈdQn2/Y7'=&#uQɺHddbn21Y7LMFN&&+uɺ(dSIn"3X7 { MF&ׂuDdkn26Q7,MM&&ruDdpn2L8Q7JۺM$'Iudtn2:a7hNMk'&cvqIdxn2y7OM'&cx ɜdVAo2 7`HMf9$&3!xy7M̸HMfe$&7wɻdHn"$q7gEIM$&sZwy/ܘdLn2&y7\%&yw?ɻ]n2o(y7[?JM(%&wNɻlDTn2*q7sJMn%&w`dYn2,q7YKM%&ɻd ]n2.y7LKM%&w1tEʤvɅ ĄDbn21Y7̆LMfL&&*v ̄dgn23a7LMM&&&vu9ɺɼՄdnkn25a7#̣ M&&qw)ɴdon2=8y7BL2NM&"'&x IɤDdbt"o2yZ̛LNM_'&Syi܉TDd:x"o2e<7TțL7q̛LOM&'&yyEyezzz% zU yyyz5zuzzz5zu!z#z%z(zU*z,z.z1zE3z5z7zMFdDdDd`޸RͼargM^A 6یٮ#!8ΚF G#o:b aƍ&rz] uޠgM^a0`hCoz8CoQ6zr<7PLE1!a% MហNe h >(zLm\i=y3onM\&7PgyE}67.= "F8+FޔL+8m =^Co Iz2z%7**7Ny7P3o _ym55MQ-'@>D@o$eJ@ y3oh6 cpTƩ07PLg0od7P1LMf 5LnU=ZIb7ȃ2ԛ{ٕ$R&oXʥ99,"e%ȼAx6obD\{ yD,m17(d\_CL,27HȉAP7' y@q~4% /⍶G%ޠL7HV77~A0bD$$!/LX0E2ٰ`c% (G7HkX0ENěe3@af7 ^Q "Dg\A*ab {3T3.fl| jao,G GͽAݛج ٽ&`w׮?yܛa. l|{3@7(0{3T.fX {3-7x/P"f؛!؛1{3晽A+(<7cf؛J k|o8Il ~J`(7>|/ 77>)fpմ$1x3 4o*x3FSoxěaM $`x5'19oň70c%oơoajq7-z3?FoiȠ7Ԑ7 D -ByF7nM%&oPZ.fH y7( a0yw7-z3iz3ƉT"fM&z7-z3f]ތ7-z3cޠ0zSX(zLȔؼ@x|F x0y7aKMK9p <6o e޴D Jhh%2olMKd7BCM%6oXR>TFoW$\efǼA% űȼ]@7HdȼAQ< }Yu[$ޠ7c h5`2`6} +oq+a6Z.V%Cߠ c* FƢ=f Q#SȾAW} 1;ohpM Fn}Y|}> .oЧ0~> 4ow ߌ~sMQ AntnH~ >oл! f$誄`FJ f$5#,:*7óBߌMMeo0#AS >ߌ\1e2P]yo^wƧ]ywj]i7/~7/~M˵7/̗]i]I߼Qf7 mԺZ6Soj M-x7\oԲ* \MTyM TM2TMJ"oR5 JT_ߤB&U7ĿI5MEoRSSԦߤ~&58I,NipRq \Tऺ'@8I1Nj1pRe5i8I5N1pRzn |m*Dž8-I9Nj!pRZ cd'e8)Iy>NJ1pR淁6 8iv 8i9 8i]#8. 80f 8i88iH8i&H8i4fH8iXH8i|H8i&H8iĉfH8扃v@8i ![8i,X8iP&X8itfX8 ~P8 P8 P8>1[8$fP8G P8k ޷P83X8鲋NX8֋X8@Z8X8@.X8dfP84X8验X8ΌX8򌅓NPc8nnX88X8\ `8 &X8飍^X8ǍX8n P8iP8C9NZ#ᤝ9NZ-+:N:#᤻:N:CI;N#;[<N:<Ny=N >Nท N#? Nळ? Nx@!]Qp! Nv+DɎ(8';#d-QpK# Nvrn08'Fd_I=)qp%N^88/c';jdM̉;qp'NvN!;8K'dOR[ Mp)NH@8e';dV o =_p/Nv@8ހ'{d[upqpe.Nֻ88ٞ;d_l ͂p0ɶ@8ٺ '#d %쒌p2N6dFɦH8 {?d{h l! mp5NF~H8 'b#dl$쯵-pM7NFvH8 'ۆ#dk%l> ʑp9N6:GfH80 { du lˎۑp;NG6H8In'[d;z lYmp=NYP(8ق'd+lap26 D(8O'# dABq ap2Q! 
.D\(8'dD̉IQp2m" N&RDЊ 8k'/d>F1Ap2#NuH88'AhvY >rpY;8HA".I|' vp$M 0NwF0 /AL;8HDYĀ +H(g9m119V[zsz sx]% _>apap$hgqp/99Lt/e75kÿ` 1M 93-9D1%æI$2pE@8{6RU$cϱsc$V7&`B$'I8GtK89,I96=ICdL aN`xZM??"K81e%tq$1᝷m gp;pz$#P%l X6 c @8Ǣ/dpPSN5;71s,ZsYd e97p@GpTq7X8bRcp0#1,N3 lLT c&`N$P8k(iL*2I(|# %p*R8=G| gDNKdL۶pݚ-iD y^ N08-3:] NK`{18Ni9쭘igspZ"%rp8MlNKi{98As i  L DNKLOYD#8yU8-8S|8-S ) i & NKdD\5>N%6pZ"g_d@NKX1E1pZ"%BpZ"giDNKDNKTb%Bp0f2!!8iD8iJԏhlK4y38s1"d%=mFNi G϶@g["38<18-ٖhlK8{v ).%=ے#wٳXA@g+:ɳI` Z?SV i͜*͜mfVb%9fhl%FpZ"%;(8iζdsӉrzyhlK܁VbsIhlK4x%GN4x18-9r';P8©N%pZ/ 'I(JLTೳI S8©dK|P8©C$ Ssԉ( L$ S)Jt'J?AcWr]Gg~ۯ??>hЁ_~Xxyo(NyϿMg9e>ߺ0J./]TU.ӂMoo98}o|'_m—}%_ʞ-R>y'u…ai}~nc?3g|0wڗ)|Yng}zLO% |Ἁkms|JVc{YϻkwvY(=Q^z~~>aw~ G?g_~ 2KYendstream endobj 115 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 239 >> stream xcd`ab`ddds4f~e C0c3##ˌ|?7!{>׾y}Oўމgwwfi.標={Ťr]Uݝm屿}$sS~׶wWwsft}W|/3$2/#'|U}gh|r\,y8yyy?,]uendstream endobj 116 0 obj << /Filter /FlateDecode /Length 37044 >> stream xͯ%9'ok%^ B7 _Ȁeu^L{]=]SUU5融vxIFUY#xa>uΉ A9Y?_Yo?_Vޤ\-w+ͱ<8}}ןS~^߾K)oO7l?jԄVK_Oweٟ?W|xz|<}xWqcPm[]e7Ҿ/#wSSZ_K,ӷ9?'ԏ<=ǷGWJGwbOo3dݟO>ަ>}޾MMK~_߾;xGgs7߽> nQ0 648u}<>lO2krޟ~x~MP\ 7Orn?'nro߭k3O?n* Wxmÿ6~?9 uYz7Q}Zs.=^7}ζ,gY-Gh{5kGz-KW)[-n/;|biKYk%vԺ/^aekOm{+|efٺHoZo3'v+L&9e~俦5罴[gWW>Y߬|)\s^&ݿǧ6.P6b#|ϧQO7|؎-A. c*緾WIO _6u٣̭5\9oǗv뛶V;2|7֍;?*3<ۏOg0OHmouyzm.H%c}.?,omqY϶"l+릴(~t]O>{msVm|4w^o?e)/?⽴<{Ni?o{\q|㺽_{{>[„ޮ=涚8e㣇PW(-09Rumҧ痶~tהw{(<2G?o󟗧}H鴽Es:}2NIi{[Wz/+ۛw{:}O̎繏'f[f_fizql~[320Gҕ?ݶS{fr}ya[(8p/3G=qs_\+/ݹ.)/WmQ=1tݙs9澫ǥ7ﮍ;۶o{owszqa +~k[hmmכض}x&s:;ɓ?ãpn`O ~eL軏NnG[]S+?&Ǘ[򇗿/.0] :1~<{}M],uڶ]l:'qs<U$r5xZ^<$O?}f磽?O|/Nڿ㺦y?JhU` m)հd~SgmIf_hc ӈ=¶/kq _6RǗv9.fh_jۥOldžeM?LZ`Itdx՜~/s_xIzcEpmė;e+8z .wJR겦R^.< Lk]KU7O=t0?|e$[6ӊ&.w:?V;k[^ {1v-8=iN#ZrU'ux{N|kmv l.]˱=}&O %xԶ 񉯦3-|GewZTj͜r<=^7ʬiuo xض?ɩ=Z3_i [MCi2n\W7HN.`_|j/֥]ͺ3<_k7+?kbYm]^ }ȵ\}S{/a#-UHt) wQK?} !h tS֝ǿOUZm{%GM-?A+啎 zG#"*o>Jm۲>n _Qsi=?]i,4p߭+FW?/g&} zߙϝ??D 8{>b/&裳_amLкwx~\'ivl ;&h:le{Eˏ?OmzyIϹorjLc6.'MY뮀qFkPbҧ^.ȽUw_~W4ӟn:~+IiJ~00 ۘYmyyܦOyx1uy s,_`O~7=wk+o3f1v}%/G-4tǶjұ ~Iå&a6O/;(l+ z%sJ?%1{=}M^ֶ}m?|A$x鲹+_ʿ?ڗ@/[~?n.ﮇ Yӱ7( `(h F)MY(Lv 眥g|`eۆҦCq=6'\甥ݥ y=lJ83BflڮJ3%] .a.C G.R ᛒAT= 3* J{(HN ^ۂk{Xt0@q[jƆnB+f:u6Mpb_+

Jڛ!ڶ]ļ#AHZN~R)İW.#M9bhűg*"nd\J[q!Zv6aᇱ0f #|TgWqś񯖷mfܸnl( .&_h])ۉl(Eȩfǥ1[y!Цým.F>Z1 Pգ-|R1T*hgPڧadR)\/זno޼RgV1 \/.ަ%m-mjvr/j}fPho~#%:^(6Ԧ6^0D^kq\3. 'Vml ?O6ef!,:m+)PvTf4e&TMq{F6MSm~gĸx[>"p"ERh&'0rp$r6jN@Y<(̰g ;MƨyiQPYcJ(LsnC{?( salczkƀTq+lڟ]+!J(K\tvXs= iXN hr;4#u.ԞY~fZ4brf,?y:wMEadz o/_נy7 nxpQmǵ񧧇,61JQ?BjY^Qfqa6Vاmw<%Voam<ۊ+3 3 oC,nC}W.Y-o#B)WZ x BV Z\t7FnVmW;=.ަT,K BMAQV+g`Xwj{E/ɴowS6ǵVX6QW@iB]97ey'VѥD9Ii-eP99Hn+ngM6kk[Wg}۔0+F))  % r҅GR7%Acd=N6e[QZkhIY鸶yˑalA{>MIkECS`䩯ϸO]mm ?ܖ/UMIRVV<+5[O80u}ژ*W*Jw9W'Yʆڔ,s)Yԟv\=zy/xt 5%mʉ?rSF[S2&|Vd_Ȃi f#xEˋY2¤Y ~G>C)zr혫36a;5y:cUpK,ҟqJs43 _|68TC[ejD q)` =xT8Lz`a[ƙ|ȢOaG=Wd`el3M@9FqcqZgT}*Noi'giGhedl-j3FuSN&:W)G(z4u ddg@67;G pm[3mSY1#]ZY)fdP~1*5q·K?!M7|d ax;=IMSXk@h{=` bgFv:"@, _\dNڟK,'wm;mo83zBϋ,F٪vi@0Pqw*_S.2!6B`Rs$&R\ ]Hi?V1Y=8n@ϩB+tr]\SYsG8O3Ni]6`K\g]yJ#@}:OKZjJ\ T)8mMfRn VX6~ .N24ma+?A/ F)̪ ܧL|CLo,VlxJ2?D go} ݧ2 ;45dBѣ2N7Xgq18X+٤9Z^b5,N>@mB9'4Õ6\淌aRa{jh;U-ÊK?S]\<<4o8Hs[UF˓e2$L ,)܉ΆNw?ڙTgVxЁ]8 f x+/,$JeUa@M܁ T4o rxHn+{l_\ݏa|TK<~!-_6Fi"6O m&v'/^AͧzP)Cw:P)hӲ}x?O)mkTv+(NNu6,DЃzQ7_&lJ|˵hfU!N-h c}`Q =ib/!8: q':Pm~w;(gy3=Ӂ:^$>hw ^?}]+;-Yy`}tK-ití{g$ݏ:Z}fwO_9>u^i@i.cUk_p– w}5S3x{tZqZõJڊ#c=?cY5D:~,Xx?gkHNÑA# y\m,~9r6Αt]kpjuXSpsOpn n,7q<|V睋7M$e9QxezljB:>6zP' w>vۓ%~<ϜcuqP |Ӿ8x8P^Lm Or9N>v0ƖN&8yg{=NC;*iŝ ]Nw ӾġsuSmj^yy&P)|=i{Lۼ>32Ǽq.iW~CM;Ѕ:}?N}e:$8X:"+=IÉu>f8#887zPǹDn:978P1Hʹс:]iΝw:;ylw?:JO#넪,ԿNNNXtBOγ脪x?Nx6NJt*OY>NyIb?XP!l<@Ljutڏ1b?۝'Ȳ&ODZfM򟎣O8p&O*lw?dYq[㰶f9Pǁ.Xǡo-srv (q\< ϩJt + q]WyPǙ8wlnɁ:N&8ůꂅx:\UuBUu3vfDz: txJ|Δzn*~q)ljtuy?|?p/P !tM)vG xjݡOӫMvXS?γ{kMʒ?n;+pB}ڝX'b> #;> $C@مYps*nd\M4ܥ69?(t˵rv[Q,mݧe ݧfx޸4HfL ,l.pBi`Nk`ڝ /𼟓 ;"iw#gjwxޱwY=4)F@[_O{ Cp8 `XyFc1EL0`< uyQߣ3 GplQ1F{Gi))X`'J"i:a9ť9Ů4E¨8Sm'Sx?Xȃm"j)=nxO8b{ X{OSA1B[@=` Ac i#1SD<yP#CŸG?UycBjb!~W'!~w0T5ݑ e_6't#ܰ`Id/{"#Mu?l} d1aJ,K^HFEf.Ot2Qɘi<Ze m \#؈A;MD6@Uu#6>ڔMQpq{Bku~[E|mʮG/C}h`FcЅ: 6sKNAΌ_]E.pchЅ:" Q<@n37=a⿙#^țkP;##lvi0?Bߕʑ;##^0{`P-5C#Z%78`oO{^@SnGvc̠SBAEVFG"q:%;0YlFGD f9Csz)y4&) <&fDsSJ48ϑRz町+68EYyJ)>I2BOqykSPshNa6{9ݓz!ӞT֤Ӟ@%$1!5)*1.S:Sķ)-~>N19 *&)9嘒O.F"sғ +oSBX'neJ,c&1%1? R9iRP{ᐂG"w>?R|k4euy0q8(jq6+UR UU T]p:JX T1Κ-@Ŭ#d@E ? 
TC2Wp (.Iтo)so)G훾(B MRP7PQ *xh[|dnFw4fV*<ݍHm_xXA)?#k%ZylSl7?u{ _ONeYNv,dAr0y Zjv"U]*:>+uJ/8H\p p8W^- nC$ו=Ŧ])}AJ9Im UWʛ(N$pNS*4Wd/S.jk#L>K)6e%o:+"LPΊRСq %Yq:+Nʚ_x$q:+,O$>JCt9yN'%u:+rų\R[ˋ|A|"餬UYt;MYɷaZ6NgEY΂|F)ruw:+|rw:+Ί|r܇)TNgaw:+P9J?6L=,ݏᶼ6n|ztV;,鬔8]f`(M{K 0Uv{:+#rp唼(ǽkЕ-ݛ}:+qmmb;`(vP6[?C9l EI9?bC`(uңxt; Ogev@(ǽgڕnO@[7B9 žjCu(+)M w(NJ^nv\vΫ ա73)Q)梔(^W3~t8֫n-_ϛح[yc=nkofPA m֍Pzs} yࡹpiuV@/ɥp\ 7O ΋!v:ׁjEٞoq3W}?+9ݚ!c!@aJo(ajwVt3pHZE9oV.Jګ>(ǽ:mP/JSd_ TͱL $]xo-?*z3)N>0=ϛ*W3Uo-Qu @0}븙2m9_$0>Ud4]_顤ًݬ8-Gg˹^7{>6L~Pgj# @[3suB2vf"j!s:HQgf UuPh6Hf ~M(QI8yl:S$&QW WS]aș%ČOueqƇB>s鮦O-oUDj\ԤqT6n{0gpU> "ܨ[zJOXV *O-̟9S)lѶ<Sxu>eeTMS\>x{OY|>5ħ=us{ ێAa'ɻOm =^[@%F\g¯`[_y\VL2ے8Hf >bAL2K~t˦ EOTJ%EL%C6`f,*%8ł0  %P4ɞ&,Dif,:1,!?H,TCƙV™Q Eoxf Y,!ߔ1Xo0RсfүVgs3Q6,1_(qu(D1όͳ%VѷD3#ƣX4t*ifDfXgPbt4fF/ 4%=fNfӨ4tC43R~ƒP3,1xR.4K_H43Q.AhBƺhf"o&T5̌t#s3KgfF&dd`f &1锰%3Kg,l0RRҮ1U2KeF,tFYl3#e],Yb`..YB},D3KgLD+58`pf`N2,XףHˠ4dbH%YB0]H03̪,أM̌0=;B1,!ZC0t*af+ 3K*<:,!/_dfδ%YBh%pӯf{0K MYPf !7'jYb="]g;͍,(3v,ݣLʌQHXe;ˌw(YbfZ#'Y:"!`f;r2YYf~b,-,ԎnX,t;'6?FB&YB>*2K.,#&s2mYBrZbqZ Nh< I'Z2KKľ1,HQf42<((꓏N2K̒ѕD2jHIfU-d~l'$t0/AfMȌ 4K d/oW#D2KGqc̸> 3.a2 2K2dGGWFq4L S[Y5E-,!J)f XzL1Roc,!cH36AL1K.3S 3Z1hN0KHKaxlN,emY:5|P2M3Rf tb bm[!ČFRhڕ3vFe)bFP)AUebE2}4xJ&`U]<3hԪf{M5,!11#U 3ZFnav%=:Ìf7aFS}3Yb%&sJ8}FaP~y aƝogFQ#CVYbqC,7-`hYbشW:B%^0K Vח0I,!rރB3nJNE3*O!ҾFo!̸ȩs "qb&%8 =5wHx`fi_) J,RU_<vfT%qkpeTq,4է4jQQ`Mu@6ϲ:fT ,Ģ~3 `F]#=̨\ fT= &]C-͞YQ6"W|"͍PPl.!.ŒH#G%߄01f"Ha618,tmnBqH>faFEUJ035&3XMf&a6^V>'i$xeTi0,,emT2NQjeLS] hnieT82NzlH?#~gx#/*J%TKoO2i_%I/K.Kk—qK˸D^e\F ,r˸H'|lۄ/KW—(VX~^_E5`vYCN0b>:G0㡓`FCBaL`C2hi6f44 0㡓id%D7+ 0ivMٕX`C'1D0gffe4hE|YGv+_턙b—qs |;4Ɨ8m&,1„/-˸3K] |3,Q!e<ψ^]ec/2nu%ҡJN/#R^ƣYBC2skzt܁EM/9xxce<$ҫjzYrG@x5 `|Oܧ—eH6 2})˖Σve_l@ v۲^txӨuu˰62)/$eL!GZG˰u2! |2blh1Y- b?/ /It|YrU/KA_2͹ _Q_6HT0+.1fEu ~El`:|6wfa@!ee{p:l`5HY/;|f=`v3,eg2$>/1eE/C_]2TZ jf53.fY098n旡^^!}wg/D3P[t˰2v7 6}2׾~ |Y_8A |YJ _6J/%:A)e2l 3 52 :^!]VSeـ,ː+C3e4 tY.]eȃ524oiekVeq52tqPFZfːҪ"e.2lqZ [ø.`aCceNռ2_6(1e%J}l`^SM/+9˘h^ƭekeH+52VnM/6CSGˊc)eUfzJZ ^ ..x?5wlUcː)[3e W]C; ^_C9e4p2Z^J2lw /a_^ȏ/CFelb xjx1uj7>.CW /`f!R;`y\/keuaVy \2j1e5lluZe.n2淉Z\tP˶%V872羸e: YXĄ,cʥz2M2ŎZc ;S`gj#bZXߤ"jx"j"d3 PA-2(ȭ*Բtژe0lN-/c-eLZMF^P˰ 0h2$wj, *e[ -RZfZ˶5pA,IJXѡA,0e5e8`bW'YȄ,ޫ dY`l`2_(xe zL2 yL2S\n+C&sWO +gm^W0ʶ2(!敱\k:lScʠoʶ 2()R+ʠ%^,W6}ɼYle XEXjݏ,]a^Y+=ye;?+O\;yeޱ]mqׁe1:l+XWm0 jWq+Fʶ2V&g+ëjИyexO-ʎu^ ye[T\뼲-u^:lshgǕaSfʶN' ^~W*pe[;l[qe[/-";W^W2آEWBWEaa peXc WX ˟js/+zeLl"\ٖqeWƔ @NXV(2 V>պ Z2 ݫpA+1Ǵ2Vƚjqe\3 vnE2$!IiWM؃W#NcҼ2%^Ne 3l)beXFre95YXCb, e:XLGԼ˲9 ,l"e<X yt`qeCw=f V6+M=#VWbe^ V&ذ RaetA%V,mрџ*҆eVQdʰ(x`p`e IaJ2Vw%XYFYeSD*e.ZE+ˬiogZcM+,/V1/\X_:c{2mB+?Bqe:5m2KSpetk%\YՎ1`̂:cOǧ,֩ pe ; 04Jrg4xegMe;⬺cʆ)pe%]EL1ʆ+he֍he+aIcz`D+c0a|8S:heIGFZ_[2=&XYf[ua_[tc*-:0݃bV4M"4 /VN?j)Gjut;{W1i2[@f)4It XeLJn.2GeVưV-eF%{LWO fiT2C1E1 mk WǴ ++,٬۹kƾ˴_ZB^rx VF?ƒqee<+9Ȯ#nהժ_%>,cfA>S8M,RF2Y+'X)) V`mXe% <^=JVBbO[J,Tm$*A* 'R E,ʸV6ʲ͏N*mRYFr=lYä2?5,cӮgRcD*c}g-)f(yoIe9wΒQeYGN*c)d"en7GK2/D*c66&Qq_0!@B*c6>QlĘ9 z*8N2 &tUFA/Qe g1IM2&Dh5,.g)߄*c$TYFE}Ȉ*˩'bQq(7Z*S!5\2*zw*cG0%['"ډUFŌ628b1`H_|̨)TSWt"HڼUFş4qPeVRbDJYe'|*cDTaTY13*c~*oTíg& U>}w Ө2q2*<2*yi ,rʔaN0Ɇeʞ0M& VFt5v[rB3LS`e S+U>lVFEieӀ4\Qƴ G8F(=ae{Y3p`eTdɛV@`e$)=jE+cR6,)ͭHJVF%[r ieT秊e`SJ~Ҵlʨh4zۦQ`ZY*ʠOaiXe =ʨ%V_]e8F VF VFEcͰ P`eTVF%fr5Ʀ);RE O59VFEie heLkZMA1U=AZ.ƗqO3D+Eȴ2(SnS^&=Z*c(* }2/%VYYeT6 B0KlQeE^SxȰ1ª_L2 I UF%w*p^S4*UF3L0Yʨ%VYѸ5OQȬ2*%Uƴ`Pew28Qf`Q)MZ1!Y,ʨ%PYgU!)_$QPwL}tPC*Sz~W (@QG)gNZS]+P#^5*r T6bTF*pP*vY?:nY2L.S(SFEp)'N]2#LL3ٍLyL%L)=Ѥ)b0eT.2*F SƤ#ъk S||5K)b8eT<)œ2`7vQ7~W)'(, 0|ƔQY”Q+L\t+”$Q5”QѯRF!cBc,cfSʨVN'koQ,#vĂ&r9e&)Rn3SFEo9e̊GҜ2Ջ=;qFќ2*z)"9eTL! 
QX.l;(eT+B.P+GP,|҄R4> ]ӏA((rB٭nP,|L(_A(([>yK'EM(Ί"BE)]:+rN eAYY=@"w]:)"]KgYtRD((t(~ B٬Pv/P/.J?,].~ke"BEtV/>L.zo.C1-݆ eEI좬^"_:)"]r&]ëP|_Ua:+#-4e%rLrܛ:t("]N~\y&]NL'I"i&<:בjB٬2gNL'7tPj=Z:tRf.L'ir\"BE)"BEvPvQvCQjBE)QjBE9_nv3 rfER?ˋrƨ e%.~3L(.Pam(fPvQ盰ߛ.B٬PvQ}PvQ`B٤PvQ2{:)"]rD(G7WC7C .y"]|&]fP6+*5sQ?6PM(.Jګ&]rfERnԄr^ǩ e%.qL(3Ʃea٫&]|oy]ov e^ e܆ e帙&]|3D(^5lVr S.vL(\.zWM((mPvQ "7+o ee SfjzPvQjBE7[ՄeY&]rL(( 0k(t4N{tp+27ATT **lpRAOA*h@ATz5%iQmS"j"D :ئeG>:5 DEejK,AT%A(f"hPFjSD(AJA,$A*,ATt *-\FAT4(;ѡAP(P0HgPL41PsobaRL 1 "F.%d?'"2|z[GĮ11F"D؞3`w: Vt Rt FUG2(#U]kfQQTWZw02FQѠ62: 2% ֲ\kqA 53A X T"e0кT @`P DlGT]!n}̠Gg3F >̠ TәAu33jb̠Ш 2A5Y tdP-d22h +ݔ##|  dUF91A=ef<.1 󟂑AH" Jy d*ߙdd "dU!]AM8iR'`cg (B<?˅E:A.Ġzt' ET ^ddB4i d9#| rm@A A1 (ց B '%dAd0H1fҏAP bLdbPi xtbP=ہAB "E|1A &Ġz22%= @A$A bYA ~ bv<1 bkL bPAObt-#z@}<#W hf7r HA R 'vhkH3AI fP/ ڙAPd3y6f1K23VL# bJA@ȔdY "0APdK3<̠mc0EH XdK3nf B`A1 (F>4fdAP< h`!ĝ%̠ hP5G ϑ`bPݗŸe4APdM2B"vOBbuUc< b);APt{&ApWF&A- ҕL s:1h NŽed#u BLBA0c ((5APd4FkOo)3XI13n.ՙAPW!Ta`#oA נȝEtP5A|+c$n`RR`4I6#vl lP0d;5C#@AQᱠul1ucU`MbaclelCc BD 6j6p;6hTcP pwAxvs$չA}7 7rؠwlPg:6dl\7[ӝn 58ow ڝ:e,U##OS R2btUv ݟA,P8Cu@ʈUU8L9(գXU >f @.ĈnӪԤA|*frPfO%N*jm&DʈUw29(/7ztpPFlІSb AYZԽ,:Zeʈ tgmN'!9/m tAwg.. vIBSւČDc*ll[:Խu0nnNu< b֐_>AIuAL:,8&A0A lo@M*Q4U69hDmr*&1aҜ#{Cy!&ˆĬTuANu lguaQ5išM bJ*›fsz2?}%lWHҭkpd1O]%S$z1 L b.J k]05T5%tr&bC!h )h"64[{K 0H?,*aAtVƪj8A Vܨ )<[  b!CYth#*mhh".$.A,UXL3]L3h lNo;aXFE "GӰA,cꑸA75Y:ܠ洹A,'(-sXHA,dA, dҗ6Ztf 2_gs( ؠ29l.VI kw%ASis.` pIqIqIqIq.5).5)*5 .52X2XYQX({(]eCtV.%^\KgEYtV.1,. ݥ c:)]yLge7]ࠋRO䠋"4+"]|B]Lg[.tE)qjtEtRΊ*a~f͊AEnYt6.ܦjқtQ]MgeRÃ.bxЬo:+AӃfE~YtV79N'EYX-7>hV8fEYn4AI1>hVVY9o4ARn4AY4+5+m4 ͊BgEݳrܛn|Ф4+>P6P4+0ݚY.f\ b?hΛ)Yɷ@hVvOBPn5Br, JALf yo Bo6BhV2BhRr3!4+ Ь({V֛ Y9of@0fݳ̀ % Bof@@feM9BRnjpf帙sfe} Ф#4+6L#4+ Ф#4+mGhVΛY)7Y8n48BbЬ7s58Bripf% q3#4)͊Bge %@Bo5@BrܛAB$$;$$$؈HoU#4v;]qpGhу#4{ 3ƩBCD F)GAHYYB,%`B%`BL&`B&xBl'xB'xB<~8q8q8q^8qDqDq6f8= 8a P? 8 8 8 8$PhPh,Ph>PhPPaOhsOh'4K'4T'4]'4f'4o'w'` ` 3 s 3 s 3 s 3 @ B-Mh&4> ?G/$B_DSQv*]B-@ Phx(4DꞤNΦNNNN V uW uX uZ#Cӯ/yBu<\@@@@@@4 PxU;O;^;O;g;P;p;P;y;P;;P;((]((]((]( }[?xB@`Ј Ј2ЈDЈVЈhPzЈ0PhMPhVPh_PhhQhqQhzQhă(4BF(4J(4BOL)A,AQ.A0A25Q79;>A1@AqBADAFATDDDD#DGDkeP Ј Ј Ј Sʋbe'ڻ]Ɏ ~sxb6`xBg$4M:~RWD G5"gFħ`LBLO(n/At*R9!5dqU )=ۈCy: [BӾZv~!%KH o*zpzx*0!%aR d`OHIxaOH[Q )^RA)kB2ЄzЄ0 'd+R}}ARU eCjBJMH<фz}z|T"BR&xЄES%*ԄO&Z${DSYėJ|Єq]hBJhЄT5WbjB8" MH 5WhB B&`@ )Zyx^5 ) LHA/ƄT_ LHG LHIxfƄ MgRc>͇ LH3LH PW&Ғ ;(OS߰E`Bz̜$0!%*0!%+0!U, )Y4!% )ɜP'd{2j5'9$8!%+VPt- M'?RH9!%xGpBJNh>hxZRi )A 9xB { i G< q9xBJЌڡ[qBS'|?^&8!%(OpB}G+8Z} Qt+QHwRuA )YAӫ.H>|$ RHQaxҼ$p{;3WqMBJ%(B+A.~&($&% ;q&ԶfWB3daB󧸘8B`@ uBJx4a@ .Ƅ )Sh=4 UHɅ!:8~BJv,+44ٚWO7+DX!Uē߬P߽w^ؙ`zM3LdvAb9POXXWf 3fTfab, mA,Q;YXC,Qe%Hh(cނ)?}XX){1 N n)bXX7)ԓ /m @򄦒>XؘbSyKSiۉ a.XVC1zg:cqb|7O'bqb"],Nl餋ʼn P\[*.S,NlQ m,8v3ĝbqBn#*|W@z:Pu0A8oTi,N. k;=BM;}5 k;P6Soi 5.BQw #`iBKY$46&4]E҄@1Bm>bIħ~BoWuL -?c2qPcSMθ+bC`eBB}9<ܓuPOvĺ&zX8N?,W,KQ,aXk^9[POaX8gބzoBM۶\ kg(]Df $Ε PjPe}KP-X8 zrPn$B}6z\’yYYX(0(ԧPOi(R&8+k*߈&b' $pƚoMB}\;ƚu{ R'=$ċv$.3ޡq0g`jJA R' H xp 6gXP"P1)Դ P9 R' jI>>>, 5PĒDNA\HBM)gTP)ԇ݇b=[B=M"@xBMD=z N4 M ^>pB}~8K3XN(5'Zyޒ#gXP)NpB}> %'6qj^ l|\ 53"0> ޙ}}˔P@ P*SB}|A 5Mv(neJq1':=*AW"ӯbr-%\LU$6Xn& W&_- b/:$4 jЈ{@B}X jوP vVGj*@ G{q`3`&(#Gqkj`X([ޒjG F3`#T 0>-@<<w"'Á.#T-P'u(bwP` =E[ ܺP,;n](BM%&@e|a7FHU'#Iz`T #'>EHu)<(BKE^!D-*UlZn 1QЈ2X0T̂![Ch AH D] !LY/A --#!lh@!1{▂fO!- ]I R Oq/r+AHU4;B5AHe5aA Thsa E m0o 4u2L&4^pԺ N3 \C @h6a!4*#Tу k AH5><)q+AHe?1R҂6n)-\!mJyP  284c :nf@HC:b!rlwzqQ0he@%it*}m@{iGPTB*RBJZ "%.DH3Bu@TȄ"-!6"$#+#B,1RS<'i 4ܽ *-%DJRԣ"4Qɽ\GCQGrV/z3B~3Bk:04$ER)CHO00jr !%'PquCB*H"GDhd[5^ PrK4򙢐[I(3BH[ NcX6"Ζl\QǭEx[B? !%IJ! 
08Re Cʄ:{X !%H)F4x%!%;PTq'{&!w6Bm!}"#G]$׫i !T!Ti !4$OCI{BP%OC!4!IBC5~!Ԧ׆P4_VkE( BڈP7"TҍyB[߆P5 \T0!T&jm„P_5BbCJ0j-ņP%BjcBVvL Z!5$BdC֢l Piu/B6!ҙZ[3 Toj΂PY> BZ0 TjтP-NZLBiBB!jԄP-%WB,kBnM Z6#Tfj%ٌP6i3BjmFVZ!7#Tfj݌Pƛ{3BoF(ր>""T{VjÊPuX+BgbEU"T.VjoƌPߘ=3B dEvnR*BoJD(mDrk+JE(Rm4+BіPnƥ"vyO/KD(SC+BPnB"ff*BᙊPn"VmTJ!inBMB(ʭbBS-r:NAȻ BO@(ʭrނPm[m BU P :BU`@,UMPLX BYza@3 U0 T BU,b@ J U PեAUb?*`U,1T6惪|PAUc?e 22TH惲XzP3Y'AYe<*UuxP_ -AUe<*U53TUeփzuiptP8AU$g; 鰃vxXpP?AUh8pP 2EAYh8 e=ݠ*nFjPd MAYi3?me}(dPZ SAUj4JUU9Ѡ*y5TeAU8k4kUѠ*5TFhP aAUVl4JUyѠ*a6TeFhPUK jAUum5*UՠTUAU(n5U٠*J7TflPֿ[ yAUFo6JU5٠7TUflPu Ae}VUjP3 ʎAa5'eohP5_  Aa3=n0tAZRAJ;4϶)&Zz;TyWR$ބL) :H%ļyYb:HI7RH$]aෂ4\ ."> ,wRrb 1j;b8HIR$@ 3Ӯfo&Y>")9Hv5/Vnl$LCtyd:ɓ.9JoE)A2RB$ "a) %vbTD3p'H$G 8 qǁ$M8HI#Yx&4[Al R$;4G) %6AEA= vIb7D"$>ɨ RrB %)Y&]VҘAJ 꽫[ %TAJh7hL]| Rn`e8ݠ19Ln ) pИq)bI8'= R `g n6hp R?tOH`%b) r1rИ Xcҭ1٘tYA]$L@+Jv͢r0$Ơ19ƒ<'Lj14gPwLIc%ItА@)H;ZvhLYc;!ʞ:hff -JA Acu'Npv>+9Ú jИ,|/(8VCÙ)HV>+^;92A ,`0Acy#g\;r; jИ) ə3ՠ1Yr-@BGcwr0vؠ1Y p{ȵ3 %;疓؝9ɕb nИ,P9s3'I{5nИAC4&#`8hLWd1iAcrŞp*Y^, Ur^pP%$Ar2yAC򔃆) S)U*yAc*yA3hzaq?_{ '923Kыr)$u)U7飾޿褿_?n9ޤ/Iۼ/%m}֖?=\Guݿw uvFn!3|7>#/ӟf:Rzw *4JJ~ovn_Zvd>ݿ8}҅}~Q/Ӵl?Qs{/9ZHy8O*0毗~dӏ~۟?O˿C?j0WyTІ{Ns$W]|]qƩpj}^e?]0_Z_}6uK[߼Wf۟@}\-OI'آO91=mendstream endobj 117 0 obj << /Filter /FlateDecode /Length 21258 >> stream xM%Iu.ȼo$b Т{Tg0+'ZwKi>ݬ \\ 橸q̟q\?߯}s?Y}ۗ՛w5c֚^үhho5=wq<:^WC'1yc_zԙ盯~޾~\w~7w]KӮ~I}yz몳|(tM'[ [__5^3mgil_7r)q|Jc**]鬺\>}zcRSo:~=M_97} 1^ z'v?F/9;a]ǭ:Gџ$w>qtKy_j|u WrrϷ~4=ʅ_sRӣ2_}YԎlfx3~j3h.<4^_ӗGn/HO.Wy Ego~#D6h&OYF]\^:Vs矼K5E}d4FV5"пr=~uDEwdfΜAY~+vi0xGH]CjZ S?m#45]9?˷Z)@Ͽxg - o~xKs]݄J_dJcn.?jXqeގ_NVOֈWKg0K}~Af4T QdC AHw/'nZ~˷A1i'SWkyAGy;/K~ܥ^Ç)5mB߉wt9 b*g<􋭮҅|i2_:ww4e{:G`TSp8Jmz4s es\j$kX̹}TZq߿=s?ڈouq-ZqM~@xdܸN{>i|Y_)#7{Kl==v~Iz7O6wE|wS_h!Wܫ^MWS|1(h6k@9F^Z1.3W,)o U]ў?~3o+?P%k}K;,ο1~}0%k3s<~˔4t.T1F[1&/o<4OCG1d 撾g?1GO :ܙݤm#O?1P+e_iˣm'7/>$:kl|͇O_AOeW:<ï?{ 2>|?pp3:&/T$$r(5땞]tc&jq\wl65a"O&R50/?҈O niϸd;ɦ_kҥxų?Wxfn8cIC[Þ JӃ-SꕄjWf_\U n(t(qiN>hrE%&z9#(giD !76Ԏ0)ɢdj;n @J¶W4b[VڣU(ݮW|9en;fnw&Sġ{-0W~LQz^xdQ.Z$hyC&t{ʒp%fLڍc碡EI)X-Q\JL ( oܧ;!XXihomEKfLh^jCBJKCJԸ41Dhx%VLOr=VJB Go tJHJPJT")I!Z)Q$Pn;r#idi;FtgoéCNW R4T4+tYe$BſtH- n0uԢ)qF'dt V(ȅZ/VCf N _Z|_TKM$%m n5%}mqHXtfBKMJvVtz%F"xSfXAËl3JDtR066tiN4~&&tsC ?rYQ >ݤ A%!묝(>]mwؘ䄩dFijIAv+sώcLGJFԯ* ?)emS}HAobI)28.)*<{< )7K@]l9Lg1 oriΔo_5% L}N6Am-n=RCzPvL~Dixن9C8)YHiA)xNJYy ܍-S . "CP5C W 86 tc3Xi44ZrcSeVʝHL EҘïm &$_qp $QɭN  U'*X&Rvq,?&fKO<[[~bEˆ[ó"N|N=f}A%>oKnW ^* Odrxh ?$Ɠ{n:i~s$>Z~WgJJȌv Q8mGE4D!&ϊtJcPƃ7)pšWS*OnFצoe =SYt7:\8]]𸄍>ܨ\ P:oWNyK^7V 4kG6Ng60cOGķĹ+^ѰVxV.ixY#Aw=Q1mi J&>Q!M^#޴|~zx=96&q)ox7<1<9}Ujq?2ǻ\t=orMnf67 oQB%cJ2(IwA)l e8W)0p(E0g=sFRt=ḟG$)(I▂F+X\ʮ̘A}rf\{jŞ9UvNwA()I^mLJp4cm(=Ͱg9S3fb䨫Nw+)UjO6cyWfsWfw? nIE4iqz*Pd)==*P:u [2:u [Na) 1ANASa؀W;u S' ѭ$~~=6OS<=Sr]9lV<=ʺB Na)]t+T*NSF>u ۧN)Sp5S>uJ-l`))}OLO͚b) h YjH`-qi-xAVrW[!_m<ZMܭ~DN=q|6<@83$0Cg @-$mJ 5<"~]dS</ 7>@9z< 2_rU>V/a)jFIEwT"p>(#$]!-Ŗ>@1MCbGUyJBG y>x03ޫy09ެYCg-Ӏq(ïFq! wi#lC7ާ#,>no۝kHP ڝgHVQ 36B =&#0?x! 
@uDhw!Y!>HQ0ZF!Y!UG5$G),yQB"0F * -|"0U>w"!>Nݧ\Ѽ5Ҁ–ituu4`OPq_͐CK}pfBΚBW>4bħ>_E `,K}̞B0-3|!]#,(!JGYB `t *X\@J> (@#Y)Ň))g|*_GRR |V Jh\FJaZ`vgCKY)g|OVI)>LIifӀg%Rk+RSRO ދOHi> eĦ˧G5> e8 ᏎJz9|&PPNx>Pͻ>(}LM3<]>]%%L?z>(wc)k'G^pP|j>(X^oa{~+**?x+>(?x?wlw>_%[{R~j$RaӀrՂ?> (ݧt೒|wޥK J|&BYj<`l{)_Ʀ`lG)߾۬$~ݷxZXI kIc-XԃJJXKc-FX V8áMc%{~>Ja, kwX } ^+)c-@, _N k"<_9x}sX CKX y<ւU>e˨c-}r=VZP1OV އi)ެ<`ظ|wt`,3xO`02x+xf9;x>cBx1i@Ic-0CsX Vd8T=) zܮY>KjxTWy_)xxౖ*K<ւcԜC*y<ւe<s>TWﱒ<s(d౒Rjc-p}b9|h)xD?x+)b%%XO`V(:JX } b-L-b-qe>h[`,>=`"tݧm,gm(pwWX }žWb-\޼,VgM`PBZ`ca sX ~,V",`2!l,V`RBSf ,VRzlw kx,VMXbY1HGX y ߓ+E؞$AFv[- %b 8ʵcRQ|j06^0j_C?Uq {K8(</aRƠK _PG6t7ZW4?OZ.$Ť}bǥn΍υ_>>ʪpTYgk*o+ &e+N&Z g0++  `l*{ϖ;F_W@ f|%sTe/nXu/`ד_ ls;ze_B]; H~/H _>/W?%/$*%"X%"ܧVHBQ"?+]!IH_9  _I ks)l}O D@~к +]x\ "ƞ Hy!t?;Y{TN~_ _`ig_!/+_p9EʧrKYDž) I.v)lc`~Ay//<˽8T/TL~\=~)N޻Y /g{(+ ˰,~v g[NXXltOs'M9\Ķ /`Kw1?|mm\c.[ˤ ǢrϺ*E_`H/QEh "ƟqaQ˸`4~4~p7 G'aG+]aXr.8\ ,`/8~] Ǐu2*F Dz}>˾eTP?l|~#I֯ (&}cY K]cI_n}\<~8Mgr[c~@yx4ax ˵jm+^ g,XyX}cmtNbK@T0z g湴BXy-#a*0u3ʺ8 ?<(yǏAVñUl_>E?PW Z ; ?KCV@~ɕTπrkԀU|@~e`0 ?ZlR?o% _b3Rh\!ㇱ:K/N`{`D)   lD gDE,}Fc@$ `|~F= F aGրx=?LybހE?Ƌȏ_u"@o@ }뇁(RoDǿaǟΣǟe3V)7?ylȿǏGCzt<~x}|{a go:؆M ̻aw0+ X?E0XCI\&Pff񳣠X}E-$',~,rV*B -%"!l,l/$f e+]a+N0~DOIJV"`_5 Kpm4~19a3[6?L[Š#1n(_PmAE4ξ"PXXUTY=aecmIYbBB3/!XXzcwʿWmȂ2<|0;eh\d}!E UQ9$ǏL:?JPF(=P?8~LG**pJqWBUB@~Aq"4~\ 1_GF/(dXR [Asm#:/@@I*ۦcS# /k=Tо_?v,~~݌u/,q:c<ء0~@`\?V?}b0~GR7wro3C wcƯT?YV?sȵ?,~~+-|Ĕ! 6S4=0]R#[iӭ _n$~iz*b")b )I "驈mz*bvmt\$SS;懢1?w̷R;"2? ӭ4Ħ7pUINi]?d2?uȇA?d~(!Lx~PKKҪ7Ed~(-ĩOC˕!̷S懢2?4KT|SRC*TnBˋ~|+Ug~(=CJa$Si=C(Mg~(=+RC*)=>_ob$~Ӿgt̷b3 mWS{[i!P wOVzhO%̷b3ߊ}|+#b3JM=J?r>.ST%JB*$~ ()nnUIN!PSRH>C$~()Q+Bw~PZSs(USrH%;%4@INiJ?t4@HN! P+JwJ A$~JwJC$S)6GZlz!_USrS%;<@I()-J?q|UIN B?!;D@IN!P2%;L@I"$~z*-DOEHNI>aUSzhOEHN!PUINI!PSzH**$~'J?!;%Ŗ )-$B?!;%8USz!;0)9,qC˅6%;MI"$~ =J?!;$~p|JwJ i2C$~BwiC$~*T%;edUINɱBwJ)OE֕9D@IN!PSRHTH$~JwmWSR_?))3JwJi2B$~JwJ$SSrl8W-;a{$~()-J?&;!,@IN!RSRS%;,@I"$~()=dJwJzVJwY}c$~tr#񟊐R|)gFwJ~(5Sme$~))$~r!;,H"$~Fwi|`$~tF?!;HNBwJi}&`$SS3#;%@US#fLHNɡJwI|&`$~t T3#;e<UINi>0)6>_5)FwJ󯭘SHN>_5SzR%;%<@INh> 0ۧFwJQ:OJwJi2|j$~UIN6O%HN 6O%HN9جF?lV#;۬F?lVָHN jc5+HN OeUINOeHN)3xJ?%;%xFwJX* c5)c5+<HN Oܱ%xJwBXTIN OyHN OHN 8nEߧBT#F*,Eߧc}*Raq(>8ߧb+ؗS쇢Ke85>]~(36]ߧkO8UV}*Raq*Raq*-< 5>ߧ"Ra8¿OE*,N(5p!(TTZlҿEߧCTTflOEJ,NEJ,Ne,ߧC`SS! 0Hũ4P}*)>8؊O%eS!0HũԐTfH ~*GOe<"URb~*-CJy!OE,E৒B*ZH ~(?2BSH!]5?eJ2Ot ৒B`Si!( ߊBOA,N(5̐TJ~ #F?UJiaE0Pb~*#?U〟J78S)!0?@C*`$CQ  TO~*Req*5F?R#J$S!P~*)F?rCO%\@Q0B*`,SI?a O!@5  TRH~*@?eJyOev+ TRhO;4CQ TO~*)$F?[4C_B?Áop৒B`8Si! P!(TrHW~*=?Jy4`b@C!LPzH|+F?tetu!%͐C.$V ~(% ~(#BJ BJBoŐ{??՘c ?J /ENI!P(SzH ~*wJiB2BªTpt;M XpwJyrB \p'?;%Ǜ%\pr;!MNi!iS0)9wBiOEXN)f )#ܬg%;4@N!H TN! P4SrH UeRBlpta;%Ǧ )=l?a;D@N!P6SRV T"pp䐮*)#]NI~YVN!_>]U:SZHW;%@5:S ~*wJ W<WwJy|j|Sa@O [^. )eU@flyU[#o6B do ۔6H6 mo $7H6vo; ۲7H *Jj  :Zz:Zz :!ÄZ"Äz#Ä$Ä%Ä&Ä'Ä)Ä:*ÄZ+Är,z-./01 3*425R6r789:;<>2?R@rABCDEFH*IBJbKbLMNOP _E _e JU$|.H*y]U tvAWyqWℯ" _}(|/R*~=I ?')|+ORV%AeH%.pRi %yJ8)x)(GQ4Z(5# G 'a؁^b^ sD8))F5t}1H^Py IB/OP 2å\{KqJ,B*R@8V+uZ\VEOm>5Ut:>n>8ߊj Grj}9O}|p[IߺtиXKS{7"ppW6`HrJGw%fPtsثցT.[G^Po]ㆃ|Ut:u:2=0 *}+{Rsdc }nr*?R88"9ѯ4Ε Ey+_eV_I_lp,0_â')O4VhpShS)O4Vhp ~(O4<[y '|+O4Y[y?' |)O' |)OR,%: ͍#b}S?7 W_R*x^݅i#1s #s #O>&'(E~vAP'~e0'O~|}w_7GC _e::2o߼~~?`hlOS~I?o4G6^ɱMvo΄YJ|3F D9)Ήnըw+7qόz'))ӣMT_f2zM~o߾t4h_RH Woc&،[UM;8t_c1wui(^M"'S7y`w Mo'O㸻LHendstream endobj 118 0 obj << /Filter /FlateDecode /Length 38963 >> stream xԽ].IvBGvAX:`.5Anܮ{fw]]f |vVD:0Ȩ/哙">-ӂ-G'ɏ?Ӻ}o~'sy]<ױ}ɯ~ӷ:m˯xǺ}jy-[/{ _~o?o5yZuovGMO_}^^۲kvvѿv_ۼv{oo\xoou׿o|s}~եq뫟~ewc|碆Cnyut{o}O ?/=?rl_jGN4~S/nP;^qZb[<[Nki"z{;^N?oU}C?}/<$~pȾ\G~|? 
5}z~0rh>gz^uVw?{>Ķ׾|=lq~<b8Oڵ!|U ?W{s=ʐvGbƴKOǎi h܀v Z}y<{|Pgvg}ӭKŸ#_և^/!zя?_[M a_`/=1<-\+xY!cWyc7cT{l7z˥}}m]9vY^]o>߾+nq}2m׿y^뗖k ޿{֗qԌ_|7o/nmj7L77YU7~o~<7V_ϫeV/^o 9)//^K,5}>,~׵m֯f6:8~1v}ͦ}J$kowķko^W ~>Ӈ>SkOǾƝ ܮG1Ggeaqkt;w_.|w_/km(JpIc|ۯfG:xHhoo6KkqVok{[8k'6nW_&{_/0sMrܿF8Fz#Rs[jv>}k}qV;َ{%{xz@_jLǏMn.۟{崌GٻL4S}r7?_QE2 /_}:\{sђq6Xq=EtMf~K39>um_|2adSF/=q?7Ifoyȿo?;G}@1nO?ݯo8w[_^灤Y]~:uu_ol3򃮟Ǘ>7D /7?G?;oJ.B]^äpDžvjM~^q]۟лeT nY_L+:MηY_VÞn0վ6>3|:bׂnU0$ե}ͧm7>nvRۧ+(W_C\FtҺ/w)z)PvI_%^_+n+V)->?^mM/včRiㆂŋ?P(k )'Օ~[^~st(\ u]{^+}hWKk_kׯbckY꫐Y ڽ_IeǵKզJƵ e[s뫏 >ۆ7Օ}`>teAҗqU ?|~{]q+eRG.8fYqw=/r{|yKx?ߪZ/ypCnz[7ԥjyu7I^hv _hFn_.װ΀ G}tQYMOJܯ]}5p8ћ8aWlxЕ . *qn_<O/`aS{' +YWx:*າi`.cݵ+[~(u}39q/e"{urLrJn>pN\Ζm\cim跊z[,{(y 4¿>o8~;8DDJo5t/z g W(ѳ+ˇ\x]X(0 rW`Iʆ OcEWNP.zфһ/.O.pOĥ%Rqq}/M;p3t|O~COL: %I[ʚv=}ďR$>jv>x|}Ƽ2 ~}*S=wjW|b?l4c缏 kyՅhA_ǂf!8[δxL;93p6us?a$`{_pW_=O_#&5x'.zyc̫'Cʎ^ X6)'IEi4諸6[y>_>kvWDו#Mw:!^+ȕk@Nѣ 7=Iw=IV&}ئ:5a|rbPޥlB{ `FeVjWrPٻ:)fYipOvUZX !djە_ i oSA; U_hlW?>i+kN}$kib;ruljKiׯ]3xSRаA@uߝF QNK΁z`?vV熁QWFl4nVo<4\ϲ45?iL[ƒ͎3&\|XBs\>g 98.; ݤ`Lhva+;{g/JӀ_eWnc(4t[^R6Np\>Z/)>bՃ_WXlDc]܈~Qt` b1/6۲qY7}] 6ńi.Ю\lz<O[Όw{YtZqi:N+MD[jǥSfdW8tXWԡ3cNťw8t6|1:s1ޕsoh{89Tve}pV=+t Y l ~{>4}$Еs?tKܜWف]Y>ǐ4ɉgK-w'Z2EۊOW/-F8ﯻ'ltW8¦ѥ 7n6#Zi""[)C3[Wh4wEq[ ̺z+E!_.6mW t]6?͵[[ >2-?EaWvuhʉvgjB$0Nٝt;Nkq=wm-ئK.#>&}|%ly7 , s4>|?+mT=9'oc{ra-o'W7m-rFU/MnXv[Cڅ>[DR`gN3 hQӕwW 'zh`?[Wnz`oVCYp]@*. F0_4!N#\vxQ݅=4½ڮoZ ^]^Ip0+5dq.maR6}^}@뺳hv?+ ]u;OE YuƵ%tX[fɵ҇7xf•VW‰/oԧ ׎W0>_jE6wۺy5/FfZOwP­f>IXFknD6-k Bas~`M aW6C.QprEWl/PuzK;<6-ǺrbEO&U`2hQ`/HCD 3 , ǨȖ[ؾڬp 8u[9n..xG.9+Z%-_`~x˝O=#~Sފ)Fqeџ7}2;&x+F鮇;{#&PyCY#'+ڃ lynEՑ]~y j6cvXIZA965]1.õФEV-ko9l>:h96vžʰ+ܧw+~W½ ˪WG0Y0 (aڕ]n֮ps>ɢ޾i7ɢO[2q`qK4뢑k_z ]YI`F K03ʦ/C F{)i=UㇼpeiKvͥ0Kvȶ=7Y 9n;=]ٿ48Y[Ai`G=e=^s=' l v''}W6t'=]& 'U\cJZfp*0Y܋nҀs4DnI{LR7?ƀKuד&Jn>>>*I%k/~B7i B7oD41,lmy ]ٽrvu%~&̍Op[7n.^nҀ{-m_3sIr>=%¦fSP^k@6Ɏ6n.چc"<ȿ:0WnKV ^~s?% nY;1KzI߶NEs!6)Ec~p5;.'I>|Chό]H/qע:) [Qj7=MO| us&~_x01HeWU ]0=L 1_)0sЮoavW~'m>LM~wS6mQ=6I/>+Q[&GNi9v.iyc|2ltk0DZcq46FG8{sAa5( #Fb=I?4ü:c^y'㢟t_`a:\1MapcEMqsI><آc=Is-kI>t8կy?WnM0EpNƹ Y,XÚ9z;X Tڢe7N[4M3BhY_g2EBx)Z1xnL2,)ZF]2EbhXo-Dlz6~-[K;i,h ;MѲ˱h)Z;E˾c3X`5` _Ibs$cj,Z2F7 (@gPX׼B-y=:װǔ?tp_G-~ V8iuytN+P8*$EI;.v5Wp~u.k]c}{,hXó)3sǼ60~wsl0v&ȅyRcM|Wׂx\G]՞u9:A<:!dQt6r n>c w܈,Dq#6)aYm]7uo1=wtl 1@ѹ[>LcCq-MGx?6m%TcDT>o2p7IPgk#{يILcC;0HǦ-tsw~ֹΛÈ|?stcƎ4ѱ}#`i?0j!cWuqPu[wF;Bt1zA @T1zp}OʘNuFo> O7:|otAUpJk})-F>7:67:|;A;|"ekHzFp)V,Fӧ\ xyx"-&h9"h9NuFәp55NAk4݆FY McWMe,av{_nήoԮЀK]o4ӵ+vc+v6SE{L߮5.5NCt42\匎k]k]k߁pM?rr5.5ZNx5ZCrǢx ]P7F+ XF+F!V87ZnxNuN~M1]o.]u،OܧOa{FGH;UeH$5ś*lĤ:Oq+]otĶt_sL@$Xtk^c;wBv>uDDUTOW.7:b@  HUF:b2 #N+4F,S'#)SGOk78GGdUnEeU)B+Sg$XFqdDUWc /֕` ( |t֕S`UkGG[4%H S)P.8DLtEk ˀSYF\_n;C 2<+5\٫ÀK~!FfbW]aW!:]a]hګˮp!:J33΁{zvGGhҀK1{vwtvڕ]Ap1(wtvpەC Ϡ܀SQ{c֯ޮJ3+ ᮜ vq~mSqtQ{GGrWa]2BnuЊ ш #ʺ+]tDkG|GGwWG`x : 0x1WzW ]twxaVP|~OqoGGh}W.g}Ķ幭p3f?3 c3ҿ+lJ,Ɍݨ {J;I+5!00$uW):̂֙Vj)rtŴLx1WF`C++'Ć?3DМU$LwMi&:JE #]%*sfCdKδZFC*}^VMOZI8]a::J2+'no(|J 8)(Wg^IΦ|JQMq1uEP]Q>iL&6rJ'l}l#3+VyZ[ѭ vJI+ +'D_n뜒3LM۝]QFiWӃO19ݴާLn8WwRJ+#+J)qb,9e1WvzRZta$w{՗cʔVFiewՙo4P+'?^{ʼ@:gX)V{@~O9U#RJbAWRZU có}r,%uҪ@6XK$C~d˳V]rPCWkICdͺҩkGK.t@tҩDǦ*df zXK"ak7sQ.i$xY+)B ]t gGz"HvUœp.OO|' e]O wRp{ulzܽq>7]"ß ];Y9tvkؿWofc+ǿ#I\2ݱ9.ji)~-w`%߸Lzo~K_&m.Rpx.g;ژqo 0w\+y\v#YQc3Jlvkt97~[]v(} =H [R|}UŚOp؁'O;gw=iރS*~GS˷܈?űq}Hٷ#e-A@7;CnK.ϋ:. 
-E t^Ƌ_^XItP+X绂W'WuQL::zYX7=;rd]wR4"g5˵p0&.Zn5$oVzHǕ?=]pGtv FSQxV=ߔ*P9G!-^ԑGweEͽ=*+NppS\p1hNXT>M8vAݣ]FiWBvKX+|zp6炐.E_,O~USzHô+K'>07ra#0tI)ur:qhU 5)nI_yw-BE3$R^q<] ܑn^TmnLwl=o}{곦BiW 4/ aг{Py^.7Nzw6= Pb씲O v4~+E*w˔F[}1AT吢nf}:s,\e8gr{9g}3Ny8eKE_3LqNWr v6>k\;f/rf_ K[-ܻl):~k޿ +z#倒LE,dZ;虺ҹh؅.zs)jEm8=V -OarNJCEpj\Ѫv_^2(@;ͭ8@QwO͸N76ږ]mdxI``"r ȺeQ9dcg}nTt)#o~-0NlSuZlhKb& ! 6` no[4ڹ`ݡoanc mد RM.7LzO 30'{5a:$\j4ߛ&5[XH8?U-춹a`ıaܦʡi^ƁrUעiٹa{w~K +8Z.o1Jh꿏;ӕKS?܊lmJE vS#c䰰6#v}f.޸ =TV+Rf+]T}O*I)]EC۔aQ ECߔ>Em~=ۜ !_}= G,*apWmM3o\=4R8,pFE=PT.+>M6״ pe:d{B_Je_PmBEsoCBS3f5T,KWh C*+9ΥڝBCQ](եaT.RnKhR*zC?L DD-_:++TJs mB)Y[0}(Q-OCDEs// ;8!]: ZͶ@W,Wo C%( }F[FSncص>sJ PeE[P-}({P.JA!ƧCp'|Rf*:/k(tΊv  ݥ.*hS wҡrz]mg(RIa!lPgrVS=wDQ+^AFSi(tΊJxz^<(ˇ}**lrVS`U:j*-\Re@>f`(ӡ?ok; 0&&x(m 0Na}:PVb0^nt(m}:f `(-[]JAt∎ -G(&6"۸Oea `(WuR+n ݦe:Pn\Pᨦr(^l %Ti3`(w-)pSmr.QS lC_ӖPnCӑ¾\ xEIꧩćuMSlr/e>H(-8 Ti*{eXY-GOY8^MP)MQ)z<-GN4^Me+{5lT9[YV}򽕹VV@*LJn uY!0Xٛy\fuc g_lŒ{]} F|?fXF1&_5 mS"b Ujmp1!@{o$T6@Tl1b?Kc‚*-s'[,½d1A źs,ےŘN!BbDLcO ċPB1qGDRZ,e2b"ZI" -^ 3K1Z,PѼ*E1eQ RB1EheB1KaNM %g3j|"W%ptmŘ#ЋbbGE=qXYA>b-MEI$b&Ld@iKM Y,yjbLZzŘdTbGYPb\FbOb̲2jQɼ0 ,,c ,>&q,Ɯ/3 Xե `5hJ\1& fXrkӉS N,X`܇f^(1bmKpn%΄^,(4+b|L l 6bĈ0Z,!P3Z,B->5@h1&ݺbo}F)5PmZ,Z,\bL:mXoT0/V &)QY,Ec Ř&%Xw'X N!0ţ2ZIbzDcb9g"EK{Ţ52eTŘVY,Z2D Tx6RKd͈(6[F1UbL5Ih1&6Z,Z-ZHAÞb!^ \nbb)iX+X LX%zYQ<# S5-(ZQͼdA7'Xk.b7eE`-rbL~Ţ)^`hlN xNmZc޸`1\,ŢexŢ"VSºr!2=bgiE /3`1˛|'X(FŘdIMBX,:VT:5GI&Yf-,kj*ƢCaT DCT%XƿN+*RT7qJ +M{aX ab yŠE[\`2b,àbK5$DLe+{ + K+,X1V%t)Q ֦$)Yִ%mKLc * oTX4MqyT,P1V0OMP1V0NT'Cvb;#Mcm=QX c=7SP@{b()}ІPisIRT7oU{AXEËbBCCbS`z{b pS`ßDcp*f*0LL IcMߟb;nX*^T1VѤnX NOT1ָGmX{F⨙B/U,ȧ٥pGŠ6@c1 p4X+> !1qTHOK\@f/sTEHW,Vac!^7[k:@+tWU j(?Y8! fq%s # $jCDYIb9p3U, I!V ǀ]*1TV9^n}*Q<_RT:AAE\1*`+˝Hc),U5W,( +FAE F&klX&3 bեzQQ}=s1ǺbVEaŨxHؖbTq,ULtQb\J&bTTT@1hQQjSŨ*JgnbTTT1(SL`nN+SdbĚ*X)Fh71Ũ)FERbT "R,c@%V"ӫD{(: !nx)͗V㖅%rr(fRB)aŨbӫ2V-aŨE6VHeY"jbTT\1Иbc_`M_bWf%+6}k+F`]rLfsЄX1H2 '+6Ċ1AgV̟1F4V a+cZV$>F\1$W-J{`1#RDct*Ed1$YlLxIclAc%+,)X.eS4VyNؚ5<,^d1.-sG$b4$V&YlmkޟbHc]EݱbUZ jf֢biRY F:d`F5Mhunj-SZRXъbv+ba(0[ Cjl14f&[ 5w%[ '[06%6Lh1M4C -feŰvR%d!{D7l1l|PтbXeX -NKEFYb2Zl0_hJFVGZla*uDaҐhUZ _ -ֲFPѿ2Z%, ?Ű!=b-R\ $Z kYbb7&XZȤCˆa7d-H6Y,֢^bǨ_,mˇ-8h1;zFaOCŰ73Z ;be%Z ;-ARlyjb}%Y2bBXu%WBYr0^b.T?b-!+A6W,2IbCAD=Enb!!)CLŘgu DPEs,4q,qf,+[NeCF"!Bh1|F.ƒjbȖ0?|1F0gi0z:Ƅ( c,.]Z$;ݥN&I+p>@Vy]18Ս0o")9L共HSa >c;8-aZR"CN/nƘ;`!@-O[.n yL 2yL `ܕ!cۑcbBId "v&>$!cCZ'd= 1LAg Dir =3g ԬČ-h<(c}K2%Ŕյel}&e V24"XJX$(c[]Eq%elxQvel" ciHS2Ƙ6>!Ǝ&bHc(^fŔQx2U!Ŕ+VR TNWR%e>gɚ1tc3H%S41cO+"@0cp[X3F9cÊ8cj-m/k99c1n蕗6h:F;kpmV4Eq4c ;ұZ1iENSW+rO7̠1DU.V7=jPLp- );1Ƶ;`a ;MK#cjk!ά^1п1$!mc6|1:7)rژ>$ Q!򚲶Qa 8fM8vfihn151 3Fıp"Ji&1ZYcn8F1'Y&WLC 0Ls٬5UqTQAC/cL+ٗ2q 5z}<(%%c,ga|8r>cec4cW 8]AC6ptc(~a8ij]+uC.MyɈ*#XKg6r |f#1x5'r n`12VK1(9F7$)29]&97D $'1x5&q *NuifOuu&q ?Œ!cbB]# 9v/iD$rvA֗1e"$q +rުR^ı;&8FGyN(nV9؀1#PH4#XW2r =1r 5y3rnGbC/C/W$sl`:BPLc7WAHmJ1-jX[KNc4vTUȱM&NL~SmR7qI۔c4،rcWr24XḴa$|ɴ'UV1RW:!En;K9F[V3c4w569:*nkG&j|2rLsu!hڭcyMiDoꛈcOkWM9Oȱ,m %.ٌƧ5J8%QfrUSX 7UegǸ!qɦ pwb0c$ U8[ &q1.D5y8bҦ98Ƃ&8Xuij3q,ӫ$qagϺXۜ(X#0D}&u$1.q}7q%xLcYoݍcBM}@7EQf"b5Ԛ8} }4&TnYıݍ";&_8]$s{8FNLX[]McusCW$1Ҹ5) 98w cp2+L1ryS*;cf6v,o\ ÄDHtBnr7;w l<[Ųk8E`W'rLW) cQszh{"Ǹ i$csKUFho1R̒rl9$.R2s,ˣJfqևqzf_2Ǹk+s12(&1frL~s,<19NO&K 7s,+sбf-X ]L92Ub# Cceh4s,s̱r7Efb+#Cc$|h4s,HS#sw\29Fcqno##X ΏGб8d&:FމRb6"NLX%XYȿ3I1ZN)rYv9ph>DJrub1ȋ +™bDc\s^xfU#19hc=LFU ;F(,R@ dlp4vƎѽ XKG'v,άÙ1d;Fךq[]Y9ctN>;ÉdzåI5c"i}n=jeǩ`1T'I-8Ҵ5v\Dqza#1X%Rv)=Nc8sYjX HՎӫPSD:qz\ u, )1M=1?  
x}%Af#6tݿ) :Fw`Sǂ$qz?бNX SKƘ4h:Z;Ʒ7#X^}/߽%C!dǚ;lX-`%d5<ƞgc: n <1vr=3}\P |ߊn1%XV9NL1Dmy_CǠOyMQ,b1EFcEu,#k1)ncTLu,Ա@ 3DX]MSǨ&p4:1Oc"LHy:M!$?LNCE/ wa[ZWBяD"~8<1MJ*1fd:FEoԱ؏Vd2MIQіc4 uL:cdy/ͪQ$dzŽQеEQԱ`u޿c\hp6uQeLŰcTbXc9:FEƍc\1EDtB"4EUi H1*gMMwMe c Rt,P̰5AǨcTtkGrI8%O bTcoh29MNcTtb*5$1 k ;F-aǨ% va`ŽQiDtmRǸV}SSǨfcT 8vcT J?-G/cTCASǨ|bX+Q1*ɘ:FoA1*";M R0K"#:=cTIuJ26E>D"dcTD^2uNQ`BcZ}:LsbcTLRu[*&m:}ӹbcTtްcTyNY;0t{HF :F83AǨjcyR#DbcTV"\5d:H`!!V1 (fQ٬q E㉙cTuX q&ZK1*9Nh2syfQavJ1ǨV'A OMaԙcTN+B"FRH"ǨhrBXtWeBQITZ&cܭiSOA1*9FA]1*9NH86F7FA޼1*峚%X i5Q9k E` ƨh4plVDYהVL3w:͊cT+HCQܱYwi?I{ul/emV=qAǨlVi(g "$cES(&x :F$0ACбE*SUm[Jti=M/&c~Yб")4=yL3gCi4t!e Esc"l)g) pJZ4?ܨ"$i:)weC/˨r~hPcR;M'N,ݑY4;M'e}8^ӡ5P5MVMSeY9'95{M'^ӡNV4ᐫ+eW'M,+ HO"7 {-&fj ѡAr}0C5Wef@0ĠA j$5bH1XdxAmq"^Р5 Z *Ar40UPOhC6β4K Bi{5Bfq13* T2- (% bܷ~%hKGڡ2VT r ڠNfq}cJ$3Nw2VMfzXf!ݏРmYE%jAlРF%4v)S E#OȠ c|t IbA 73ftgYAwJT"-7I bb*Ġ۵2v gfbPAt9lC&]IՕT" ]{jhB]A1ȴI1-31eK6-hVNӂcZЬlCіĬtbfE; M4.hz: w$l\ &qA{KцI iU (CVEIF?NbI !AdȌIbTtL _s1Nz (4Ҷ ""AD?Ib1TcjXW1041?APd}'1(%1  œd|AU鴀AL^И4 D+a3HҔyA?fYB`,$@HEyP11 ƼHA4,f?YA0Œ^o4 %37gV,Gd {Ӭ*[ ر5mD)YAu%`VkNTLql QA0 1+h>XAc퐬 8t{Fa> ,e4}'*h ¢Ɉ4*hn_QAcᗨ &Wچ"Ss-QAXBFajYAXט)}6-ʬ }fa/{ø &":´ ڕ ʎIZqW-K:D*E VmTV!1VuXYL ڶӼÂ3pư VMфm{D.nX( ֫/*'-^>MZvdɤYJm תpZ_۬(vnۊb5P 8;2+h;1T VpAh ¦jE(zK)}S`Xw(z{3 ʤ JPv;a@A(zoP6En{wbV6;2+ ~b**I&+h_WF$ר :ufa')qB^_Č]C$+&Ҙ7OT*.bP%[4j0Ta BU|[*|PA4]ƨ DcTPF&)㗨 MÉd߭ Blq2v;kT blƨ i blݎQAlG S7+eߋDgMVP+6nYA,IJ}7+h {OV~Ǜ])4 K'+^nYA+EoW&ZDb! N9%?KBsJaLV4$+ͿR6 1+KF )|Km$w(dŤ_-&ӌ 1:f[K.Q]zK`AaY&[1,PV1×YA~$Ǭc/YAH5I l'-cVrv5@',HqR6<O]MX=zZa?Ӱ zfL →~e\(J/\v .踝[ (iL\v4m&.\L,\bkH\f₰.\bGU,6*aS$XIQvU{.EUi5qA,R͢\l BA AN\Щ:tTջM bE –峈=% I BEU·D'dXz0aOwgXti+3/YAUXAH Jsvtv\+Q=yAU0xAi\j<&0Y•ؗg?bKd^u$0yAظ \o$/zasO $O$/J+ kӁ –A՟ lVmvnj7IAcO8IAc8IAco9IAc9IAc:QAc;QAc;QAc?YAc?YAc?qA.H^p)$/hx4< ދ G$yASM!0hx[4<2I *M_'AĠJbPTn"+A*dP TnBkA`A`+dP9 TB3A+dP9TdgA|,dP9( TNBA -dP9LT.B۵A-dPzoT"A(NbPT"KA.bP9T"iA"PL mHZhg҂fp'ͧJ*ZxI ƴzy ʬbURFHVP%@+TT Tbn"UW*1HAAs4$"[M r AtmSPQ#)A 1&^ybh HLv4$&F_Y EaK#1Ad$&h>0At)cdL4&&1A+K^b_iڐ1A`NLMn n[eL7˘ ؎ ¦9Ӵ!h)s&KN3{ ($=PSl `9A[͚1'[T#q qk9AP̾1'ɳ>@ADP]ɜ <8T 'vY8Aq;orƞsrLDcܿrAя bl)Li Io1A,R5&h;s311AjνLccȺJL5&&1Pm|c$ȹg|&&h;(L ^̖.nL|]9&'h+9A,ץA1ʜ ,4%*1FWHqiXE4YAX^i/5YAP̿I:YAPd%,:ItӜEK N*CN'+hS  bfZ$*sMTv.QA۝{ blYAXfcV-YAá (q3,AJ,i[ (%A1Ȱ ,D nwю b7cXÂhhHXvᓬ ~bApGJX)aA˒을zKI,&K[B_F%iA ,hi !JXIX 3JZPjlƥ-hLIXвRŎ Z0`A-h1hAˑ-GbkIZb#lЂ3_҂G. ZP:.h9MIZbaR;M B];iA˕4iA˝С![NZ܉7JZк$)iA!҃TO=iAœ*mVL 6MI Z׏u5pA뚀aɫI\ڲ$.Nx $.h5}. 
ZIIZP3 ZɭIZHA R*D Z$-WbZj> Bn2,hnݯ`A{ԿEwHRT)}1Z};NZz$ɴLhS҂3NI ʸA B}?q҂3x҂3a?I ZM}4-G H\tI Z/w"D ZkĠJR>A $E A VZĠA B`K/-`AͻvԼ DPY1.- ZI^P[ϒ$ż R> jkjJք$/a7YiAmM/₦I5"H\P%.(/9H jN~Iis6[A%ꖘ +(I9MHaAg,(6sncASN ,(9&&`, |@]ɮHrr[Aш˙mEb=(&\d#*E jc,sdE^ j',,h;‚"ZZ4FE[ ȝZPkv悥E2炥MY|.-(\ZPC7@XPEXXP&`/hsչhyI/(Q0Z;yD1hD.,F o\bP$"IbP ;h`P y !L;q`PۊA$0w9z$Eu^?Ġv7 AӢ'5AI1(4,EdŠ2PD\r)@_-F=Ġ(.:H $!AӪAdP$9ɠ}EM:!"Krn1b$@$ GȠv;-2hZ D[OfPordE^"2hZh ֓MAmXt8I 8MA%csmdPiAH'էC1EdP$5(NRxE &?gV]eԠ)sAS^K ΘD.A1;CgAqo\lP$obk0lP$'+٠D٠8IX45٠gXlP D2ؠ &D)cIjP;u淑4 gԠť5IVbڸl/X D?Ԡ-4(Ny7m> jg\Р9Š6r^IJ!안sDE"'GdPZ[ I%)EŠx#1hT4LEO2h$X q쓈LT"z$1( "D"ޟɠHת$,4|əX ܳ,Eb($q4$1(  ˞~Am>" bP04JKGjbR,tkGio {%AudP\% jezMvI^]QZ͠NgSOfP(AQ+'ZAq-O0fPL͠VO59h,GlPIHi]A&HHnPTah&$.C`ܠh74O>= b6$ܠִ3.y g7(:YhEAAI8(*@r;$ E!+=feU2s+>(w9C6i.WzPHLw zI˽LjxPOp[yT'AQ5J lTx2OƒyE,<(Tr1ƒZaXwg'bZ)A;WJxPԎ鳅>[xP5;Z)Ar/vP}kHJvPT匳MҔYIL`Lr(wG,⦃jXAqkeL(r첦 @9;xIv-eAq5E5\[v Ey\v =\N2h8(f@pPץg8h83Eu8#8(.ᠸ/,8ha8hNA5ðH.ᠸ YqrRAir j|bS;ؠ;5mw܈jk ZU^øfUؠ %6(n_ubvp8HNzI^BE}$Aq|krwT ̑w3\z j%0vp44Ti:ŕ{DpФJb >l*pP sһNRm *1Sp|4͎%AgFgIf=@m["*)9h^ cO&L8@eYŧ~pM-g*:htТꁢG.tТ(e@d:h_TP1s Z|\!e7xx \`E\ Z|m8( F)AoY :i r|U,7H]pֽ"Ak>)7h7Aoq=nݠՐ*nz06d7h=L2c7DРU4 ( (ԠAGBA7A(8- rb6M6q6b ϱAl?) 'f2T <=tRRlAysMZnDnD =AaAvA b/F bOG h1qT _ A,Qj$lSjV ZA>q343(hgYРq Yh AڧLd2+dW1 t&Bq u2k3do &h@R 5D2V f- Z76h`F D[ 4hm6 uA4@hф DS4*hMZAzAhz D+4:hy ZF)T 6fP~ [Auc3nul͠eTwUFhPݝ ;8Aug4qh3'=Ѡ/5TFhP[ 6YhPG {mAu?n4٭}ՠTRlP2 AtVX AtyX ^Akb5zVUՠꡱT8flP "Aէd6Uߔ٠2T}\fLlP 4Af6zqg5Р4T]FhPuE JAեi4=UǨѠ<5TF5TݴF+hPuZ aAkl5zUՠ꡶.lAm2MU_ɠO7T}&_d]wbP=T ,ՃAbP=# AT`P=0TOG A`j ճA`P=2s# - 0GTA< Q`xdf0xģ9 /OxPh.GxA4$wASVnKd@_ouh#HLApP`d $?">#9{L4AlXyʲ4V,!h|x.8hʺsAW#Hr*RɯjHHDHh9(A-E2*ɧik.9(OZ54b"ɽlP$y54U\lPX$FrM#ؠ)'$8AI>5D ؠhdS6T[/Amj5Q6٠H O>6AjBS1ʇ\A.a$I>5yb5(X DԠHH EQ=mfܕ٠6rxdjWҞF GlДI>6dU |nIb"Xs>- F_XjP CGa4(RIV$F2(VN#AElP uؠQcx AF%1A YA]"7(g?wnP%vFuܠQ Aq.%F Yp|ASd4$ ";-KuQßႺ@\P uQ yA]"/h<AT.4?.%*4jXYyA]2NE^KD^P F /0A]"0h[1%*1%ƽA]"1K$Ub1hTGjA]"1KR bPH 3%DbPXu%&ƽA] 2KDUb2KD{N&DdP\dP ꒉMeDHdP%&DdP$"Ddи`d2KDuȠ.%^lP3@h͠ FO)*S^P yA]"/A]"1K&ىĠQ%Ub1K$uIA]s*%o%N"J "A bP%*DT͠JlUb3A$A'^A f fP%6*9NlUr ddNv'3kdeNv%AzՙA='ɮ " dd͝p Ub8A"ddNv'G0U2:92'9ٝXs%*^p pP%ɑ9̷ٝDTȜN&dw:1Db;Ub=IAALN6+AȤNU2)JFS'փ*YɉuWTJU^=yٝXd^u JF'B"AzP%փ*T JU2^~8z zP%փ*TrA$փ*QS+=ANJzP%փ*Y.DTH;D; JU2 prb/UT^ĂP%0*h89 p2_9 P%2*h8YG`U2 prXBJ0*!T JmNN`Ur U"dO1*Yh89]m2*9^m2*!D!THt{,4Wh(HCLCzbU0\eUR !'e9vRBN0!d^RBNƫ7io9)Cz;CIBJ:Cx:CIBJ:Cp! !'ݴ7Wio9)CHIg9)C 2Uo96Vo9Yּ3! !'e9YvrRj;CHIg9)CBN5_.3!d^RBNW;ig9XZ!dj􆐓B-? ]~WA!n#=ݫc/}v2Ĕ۞d:Yh%oKm귖\-خAʾ&ݮvާl[ڌK%>ɷo*ċwd$='?WQ?_o^gy&F.o|!_ILlo޽=ݼ7!+fW/wz/3x#K#IzI-u^G+}diq[;EﴵZxۗiݛ_/M̶L<߼~-?yޝimf;Oon^uhD;pؽF/;.^񫗇i޽|ηlygtw/w|ز}u} ݫu+O _=>{ko|aWGx'nңt>m]cK~~r8tQ?[b- 1sE[uc=ٟ0vZv<ſt:2m<{e/ZcGū7gw~bl=ðΟF폨vp?g<\wꦁ˼{{ݯrw|x~oΧq}>z/_5._l{_j]TV͔Gx~ܽ<<<|GG;RݛqDonbv޽)a}}w?EgڶD~ޏۻNiX/o oaO~_6~zT+mϨn^Fmm#p`pm|0oɸG}}}Bt; YԶF6gӶYclc-:n[޿z޼~ww^z׺%vvw~aw~߾϶qot߷zwͷ?)ǍZi;8bz>?}[(_Ƌ:w;NK;"mlڿ/觎ݰ}pԺӷ+awgᶰ1G?&ȿ!v\Q0n;Ӷ?wOuIm߾~=yD3V|纞Dzy<]{ҾgmS]cA9K x _k}~pʱ׹ՖݯҎK~άoV^oe6*Ov ,mTi-/_=4r qgmO]  endstream endobj 119 0 obj << /Type /XRef /Length 155 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 120 /ID [] >> stream xcb&F~0 $8J?P: m/(uA$'8 0vH.1m"(R "7wHVD\r) "σ3I#=DVH`]_@$Ug v*{ "'3 endstream endobj startxref 317442 %%EOF robustbase/inst/doc/aggr_results.Rdata0000644000176200001440000167431512321016053017635 0ustar liggesusersXIM1bBEńHƄ0`E@b΢(DPtsD]s^wg陞0it\\\<\/O>kmpolbeݷ~G~?T_O^6 L,aW_F$~09!>s؄qM= c/>m =wPjMC=#X֭'U-#yT /ӆ_ˌ};.?C_\ u MtkSO5>rTHv+P}x@㰡6i7OԔ>GG,%!) 
ܝ2Sc,u{Ni]%g%LiQ!}y6Eti>4MY/a?"L?6=amo s?Ϛb,mfM{ R!NYOLgTׅ vkml}@35>u`!Zv$ 5$ =$ V(đ▯,`YX͖{FfaݧMq'z~JV6%U _̹xa'=Y͍PmO!]FjlEۯS7wc |"\GyٱEWB"~z;=${0n4g~60EM+".ikK[ߔ0pkMrP;v!7ȏkb8OG"r:Xx|?LQ<3H wP֞0HS ?ˆqk& gnu\ӹn=з嘫Cm٩{-:|UeA~iP˂V+ly7;SGB/U}Ҍ{KP |5ֿNBO_7'kۖʄ GxhvrYSoN~{CGʉ7t 'f=y [gF,X*c@?B5ژ}#~ "YrPBbYA(3(*ւ9%H2s# yIɒ$d$忋]^9s}ezfzU31~ !hx/;O-~|` "b#D1҃1O]g,g}; 5@jw`ڞw1) |B1=7W`€)+Hb6 2KJ+ Iٜ mxSLvG";RZLn_[QoQb?+(0BQox  |vNu6]7DM5k ZeAN">.zI1"W ~b]} e>+6Gک5}Y{DF1ܢöXBy"^(ZFbr)z]P:3჋b){,Y9Ĺc3PhlNniX,xkYyЮq^ݨktgTٱn6P,8<8Lh~P8JEP$ !״(ThXml]r9O\I'΍U@Ie R-$;m)',xIK%a(U_pb˼BBqQL?W&Š׌N(`%}9|=ݬ]N߆@5}9Bz򓐧q o?jܬv0dua3QL\^2-Ō eS6-/sGv&-WRIY_崾^R]+ AQw j|m !]w3Mc݀wVY)&(2MYf/Dp_T]hq-}!"w1բ M,qeLqk(=}|bd}JD]w]2M흐Q]'4x|P "بYG}~8b~h7ms֋);|^+>nI+#)Y5HY)}}0q} ;*=uJ8o 'n j!!0ń g^6'kv%ۉV]=F/8eI1QӢ$ ڃA½M=-!q0k ݵ$]pqN1}۰}.r DcL('O1q <1"mМ#u ] d, aH4NOH3A*|MePL5`J6Ť/*PL{1b)a=(&l3ubҍ'?cd9O Ťz0] Ԃd5q|'}㡡p!]7d QL:AŬ A֖]S@c 2)fE9D1{ThŜsV8RL t a[{sH8ƦI,y^ ƅxY"_aƾzt$wBJJ=gGrn!FI2(e]a&Ťn<)QbOmWm'FXkb\ɍ!.y ~gwH:Ehm7 >IHFUO) vF3X{RoL\bXrbc(F ={vzq_Yt e;R)&|^YNRL\]z3ŘwM2)&<gyϛd$߁nĪeXP?{,Ť{VkMN PB1MiHv0G1i-u(ՖXtkڕcd%$`_} \3k_@1k{WPL+23&qr ; [*DbftQG) PXO1Q8P4Ŵ&@ZV=|>ʶ2p7H|>k&'D&?ˢBnSNYԬC1R.Nqa*|n bQ+@1*:nIvm4/ߜW357z9ć ,$A1p)" i=IR}Wn׾}A1fM+q.ٻ'^s_6ń=λXyxFfap!V^XsUu0HԽ st@7 en_Hb sҒ)F5Nb„#o6S͡>JK8H0y $첛ey<4AI7Xys])M5qxыQL"8ͣm2S(FeK7=Wk=VN< nN9pyL^뛺<_:gLsf}ew |g>\卼2[k?=ZM]6t(*/UpT{5Z:#ƛvf=<_#>ȥqF O:uMnkv_}/vs^mMrT4}}t~;mٱ塠^λ1F]?guaLu}p54Q]r˹~#j]!l,s]WU34,#Ӵl絢Ac$tymy/1%?zvţqE1zVJR{8ߌa Pkij#z,JtC kՍQ|nra$]A+^A} %D(z'o Tne}+7~dl|sjN!Ņٹ:N+6$scREP?n݆ܸQi ԜsL57N7+}j/vs2 n'x64<"l 7UW]pƂmMiŚ+y\ؘi65h\Y%L,{ _CMo+y 5/ʍۦu3Cu\PBNC$ϖ\(P'ږ!5Hp算\(r4cYAB 2KUbϿj[AET;s6qaMA3P#[ F_v.i"qaoy4;rj\e5[ }hAmm}oݩB&f{F+GNw~28 H9yЉ|`ݫs}`:8vTNk{:0Dž={]u/q`![DoNfMώ IzVie;d%": !}|Y'{mDEB2V^㳱 uT ;zo]g VLkld߸^MRHb{F@N雿C0:'{rKZ/ԧ##OOR|Oi!ͩ؟lo~Ra)R'c{P: :cB"?]>r!rʉv"Iq~O5y#=Q7{kLJ 7!1\Sh/rr(a.3A9h@x ~ƼPwD^|..|#A蟜'cwGt07( aL1/ bg9oۜz؇9%_Ѽ4a:qľh`Lsԟ>_xg1 AW/ȧ(vr;gx=o4&Phhߨw~#+8.mot\D)P{ qIc|> ?ʏzBTbq"~$Q_ʊJzy#XS38!"Ƒ`̯ިvchbQF EAG^BD?xNw|ʣ!?@CB=c|@ $ }'q: AN'AdyC8qMoux>7(/pEw8oĮPOvB}^e=+yljt8IzyOIyNy_zg{i}Dt`?I5ht<^{?qi7#_H{8'i?rb<q ul7j\sʧ~NWl:?'qq ^hg=c">s%4^7l'm1Oxƒ#: Ϋ&L&d"o ׾G)Ğ?x"R4!g,x{>ݑh‡H|!@*OIܡ H'~O'|m7b%H\#I ~H.g?!y3݉#٥>'1l5H3(+r}ǿi4.`J%rGÈ?߱}t\Gu] D"93vI9C !#!$~K'̻ *G2?:=$?BL$|$yI& /d~DJ%qہ8w@"GQ8DK",T5XwCQDD(f;|O!q^~OxgZ՗ưv|$#H#vLGD{`҇Kx7ը&Tv#T"''gc64gYلW^3ɼ!^<~M8<#6g6@WQ$>q=CِB#'Nz-9Dߐ< cßȆ@(G(!oc~ވ`p"#ϡ\h?G#9D'{$OAQ>̿`\|yYl_c_J.1{Da^0A~xCNT)u R^Uǭ^>nMRl0KlGզO/ Dtb> ~1^b\p{h‡Q~Sl7q ;QU"{3(a!M$yH.\£yrI; BRGMaBg?'OS@g"{!'WD"E$~;'#zøO{%'7*/y_ y$^$ywOg^. |O%-;E| 7D.I~٤ Q?`zH\9k?W|O?73Ю1<\?/2="aN /0P$J&;='$N)$3W1ߧeIA׃q$I;:5S8CYD9$O%~sO8>C#yjY |N 7|9=EA$N}"#B.!v\Lbe<)1W`82 _>B?#GAy譀WH7\_D"2~- g1NO'gC?@{'"$_C($蹘] +?-$`;qy cIvKD'E6ɻcR aLyQwN} $)i;wP`~EG 0?18os#d <|>Uh)ľpAa:q<g<p|h7 %edeqVD.nGpyk7I?~| y4whwd܁+d]AYFSJ0K%>3 /a`?, ^B[L쮄QUH쩈o)"vPFQ2eD?%)b;ț8~Hx%;Ⱦ11 sB/ZLE>>Ayn?dzIy6.yc;Dydc|G{ 8s|&[y3ةBysB{ƪ6PHcK UTT"VA1Uȏyȷ~:%z'PBO aey/$|B-&?3b_ S"ym.yd~xy]))/%(#<_BxQN촂䥥dUAWIx/&%h#=D/'*e+_R$y%[轒irb8^Bڇt8~rN ˉ?< 8BAhe^KID JϷLt8/;X1>lU MGo@Zs`3i;H 8K=$a^F1z"y^LpܘMat#=pQAOi&X>>'@? y~qAG?o1/v!_c>v2@;B'<:B#qY6k:ͧ>/FQ,$Ϙg\ߩn%r"!ah73,YO( r<y Ǘ8P1߃~dCy-W<.#v[Iyj$>W?B{&q衚[5J`Ec>\N4#,# TjR}>_5jTU|+/988oYJ즂;ƛJbd>-dTEWE])o_ @O # {ϟA>D8ߒEEtj^2I~q 乘7R!|QB-% _2Cq'Η$~UN'q> I.%K P/4(~;e$!w#rbݞ߬áq֌;P} F88q^%J׫+H?!A 5$D?CpS~O _CP(?xVE OK _S$*#ɣď<'/P_U$nTxPMƍQ$.S}ޭ!|_M wX)27 ]*r/f%iOsijHRCm>i7#'Wpޖs~RFd uGUx}>A*4 DdW:&~]Y

SN "l+>-&|QĞҀ*f T/T3H~RAl̯0Ç"`țH*!W~~.']s.pMGzp;,"~ߵw=~ϊ-Dn3ùN ׳uq7w6|w ~u{k-(/~D9ۏI {O]@P\_zs~ώwո :~Jp}=]J>~̱ qL(' *??\Oz8)xMz8ׅr# sw/\q7+\I[XYqCY$@D}I>Y;[{zXJPu(wzHv/K%>+Ўp=s8CDWoʣ8܇G} #b*D~\/|.:pR/ I&a~K ~MWpxyu.ϹOk)S2_w>Oȉ9[R'/B9~^= OßiQ>X/z$>Xgw/ݸ~LU wt A9\!<˹^s}焜~ c(U? CM|\ܟs;|Fyv\W9A= sE ?M'l7' G| (7%Fc:(/hWnI=z4Ύ܇(g^cS^G1/8yD2?)r?E.Y](?pRp ]g@kLE;\̙8Q/K$/=!"?>+ğ9#hG/s% s \¹ M[wi^OqGz)QR?z{oOȣĞQܷs :'170\gס^"DQ"@<8 ~*Q#\@}29sq\N?9գ9ֵY;~8ǣ4/}(Gq"/^yQ"sJ(獼܏G~4_韮某ю7z!{II8tcކs$N\~ܟ vegh/s_Q]'/~0G>ȹO')ʣ5J }$+4$ϡyW+<k~8݇M1;(t:,GQ4WGs}=> /{&\_ʹ*縜s\z'+?Q}so'8<)O7>Nsb>BsO瘉#(լ3/匟y19D9gak1 'p<o:Q/S ~|zgOB@i+N?<#ż/a\78rqzv"b~<"_g"SBq=(O'+~B|3(}D?8~QGq-W羁?6wi\vEɣد_q7BǙʣps/<Ƽ?89ʣD>KY;?N=>z\Fks8?D7&{<4D;&~AQRʍcoPt?|sY7=slewhݿϼu7vc7vc7[P6b< xxz`y~s+tT 5uu1A]5n5}~^#]I~uv7ɶh=U8yX[_wCmqVa¯j~:ǿr =뮳οߜdW{(``۬ٓ'جQ9I翖^ t]Wo @,hz*1d48bOGGЅzhM~i8Uq\qir~/Ӓ~Uƿ{?_~uv7,SP1:l&nk,bO?\3Afr`1?eOgQR4󠪬qj/kz&N=&+KPuv7/0=0b r$k|?I;{pm':77f=l-G_bwX>__o7$'`~ opwu}uqn`.Køcbx8⿍;J d%ȁ1qMCyXp]$+%A ;n*(QPEP>N|g%٬OrϫhײtNs_?+(\eZVOZ'w3Mbyuu-ϑ?igޕ7A$^)޺6 >)jܸՃb!oKu[(ڸ3,HPzDҩP!K?(I\b8_IX\5;FHkmeSfܓB9#3j}8!CA>ԝ`}XAic2̓%3DZs~>E^^-} Jk&V}T>4^>N1\/[tS!~T݄1U~yr> +/fv6 2>;gU#ƣϸ|N; IeNJyIhG")=]>; &KI s)1&lp`~ks}xOJ918'~{@=H.ug2ju D]}.).߼zmA6W+0Ed> aj{-kcQշƎ ½>Ė c쁒1GϞsj(70mzol/~K(ܝBq%ucޭOJ(Wd aq; vk1f($1wh1<(|_sq(*߇yeNn\i'o[,}'qrk-+Wv{KU%7wjHm| "#O]a[&;rז/Pbܫݟfr Y^=uyTz7XVm;|œ]V0&,IXGЈ0%_yZRTsƈGeNף2ۗwkNd40`BߦfL1!Nr =.x1_B'JV:qǦ&"ZNY oSvnjS! ~.|v85 ee |6e_y⨥5](Fv%cdzņ/Qlqwb clƘ˙wR^-3tnOr&ʩ!8οuE? Rcxۺ4Ƙ(4:Ȁo61T{T /5jfV~EFcQ־iCpY_w)?+{Řmoܗ C~r!Q>aF6<>5lrÌiOQ0#!I92^=;/Y$|TcM*\C}Hoߖ14qˇ0FLۧi ~8+bG9JJm{{.)|S>bL!ԾԽy/t%!,t!4}hsYV7HuPtJ\+~v{Sk ɐ;}~F=cIՃOLQ&WҸ΋(~SaA gQ|iy|âcϻ^zɯ'uiakSЩuV{ &7rnyϰDC][=8O!:\O]=/b7U':W,ai JpޢEњ1YkREq/W]e8P[[Nf =2,皷R 0Vh^1\|j{62e ^+U3"آ kz-P?RDD׋"U]^\uSW%<֯Eƽ:oϽL7&›ɻOD9R9D6@T wB7ܬ`C1;%N:6Iġ )K)Fo>V5f+|TD3z>Ev"T\p$izW!Yjr{hFbޮV̦\X,_EBSR듷3f榜g*R9C+l(I⫏xp_;n+ns)5sm v闬ax׾Em>,ǰ8ܱr]o(*:,@2WB; F2UIV)^6ƛhS9?a?CQ JFP1`Θ6iޑY_AYʃzo- km閎yi}{X/ԷX@%~UPU'^sgm`^yc1^Q9gOC5B72FJj@Dy!%Vd#d]>PeSe9T鯵K Ge,?,ڹ($V,5ꯣN+0DKKIJrk$/9y"Ma\dbDTۘ1(fn9R퇮*}[#FPLqf~l $.(4e T|34 gQ"Ƨ@rxU*zF[FftD\PӾ3t4m|nl:Ţ Ӟ9̅́^ f9T|Emw1Ӡtp+IO^ IEU}@AWi<(۞냬NdF行Q6Kͣ nj2f Cv)x 7qG׿5G΂'_iϯQmSC먼̧h/Ri?my\?ToঁxVq,;{{X'>ysngF_IKmE>Q{/6\<> Ƃ{Q&x\v4 = oXCQ)ST~ٗx9;ガHp$k_+x[Qwتڃg?.N>Mqݽ[K7X%Pxq crǨv"g$}"X[C(fs4D(vV)Qe@O;{3Sgy4bA: uƀec52ԨsYFʶ2Gd A(OP6qjz{O=*R,`VD7{.n''?>47r)^SΖKj6mȨ{8u^)r{_g*`z)rQC>`\6瑦K=nS}TRSRϩ9Ca>f銌}d#B$e P0y~ >F6;J25?9e_]@o?xT D6G1+J(iFT 5)>8PzId;chL-&EyneCkdH٥U(]InFϨ7=`2F^²Хb gw2i?70Udq^|ϩG ?z)ŭ[[˜YjlRdR?*6 JLn y]#ǗAK4żKOWU\u?Gg !L;8+,#ʞFwCzG3s\%?=Vg/H,e\]䔫çQ[R:6UDžcLgI "|=>Wж}>ߛ}[ijC9+Wf:)1Mtڇ #<wSeKQN7K߂-\_Ψ*PX)R_:}~ªM vۋ[^4^9Mj5K9GG=nKf5}ƟOT-ǑVMÍ]ۓ~oq?LQ,k$ũr2UF }`0.k?C뼈Ķ3^3?:KJ˾ձ*PZ[n?i.Pg?~E͠,QfxoqQ2<ƇskŹ?[xtyG/"%v-#XP_SuVy@ɀͪ7ҭG\G|!P:VԝZQ) o:_Zpzֱ9PJY΋}> %0.Ns:s֬Zw܊]=s̓Ynqm=i97\oej_Xm_՝'@՘p'l|x y9h>e{(;Mh_Svdf<\3fC OsfmIt;s?wΥRho8Y4=.|eGg)P`?nnu皔P}hT=y jͬ$Lz݅M<k3b#\6'K( ~p! 
?k4jśUAjҪP6lp&5y:T8:[3nXb]ņy;m6oYJ27ņQp~5 (;˂-qbГۄ(oSS&0˜WP7nẼŒ Wm /e)~n<#_5n;ߖ[f8rQ^GF DWea3yEnp(6 M%z@QPSKsnB1BŶo⻍i@chaO͚EP'l>q@e>NM- FBsv+F̞U_B+TQ]]t UK+:_J~ܵ4XhgET~d'w#(>2ZnR ,_C F:[ ~SGE?Y_B݉~ϏnGZ_/NS݂ [f zQPh%/+ַy$_l0|%rxV9؁!UwRl8D`ҏ X _k9D|Sܮ]$^VPparu!?_Occ,5DV-Ybː$b71bԞ9dtbiRLQ7CD^P+Aqr1>0>){mKM+ Fhdb]/8y04tY&W GPt"s~Wc`Jx5JfD 5D0pY4av$n6wxM{\nEEv~oZQ0Ic|}?HjGZF\U)s;0o\{Fa%0twsE)\i/Yߐ _4vʀʱnZS/}$=KJ"K~T(M[ QV jzKĆ^Xsf5P;;~(ֺkC8fi%P}h[.B߹3L`ڮ PvJdP?4< U7^Xw1T w>c.Q?0!2x ]I+0fZ9e5"Oˍa/z34̗)2x?FY0ݾ1 {恩oTnzj >ާX7d^å+e#䅎@)aۊ)6M߸;~MMoW?Slp5siljРb̙,;z7)UGY=|'4:_Em)M56⬕Zp$3%,i.b){KBWFAp#d(~=hTq*gAaK2|45aU{QVCWoƲ jy@޹kx,Xňw(7> aEPUwDB:"ņ+"|y"UM$t"RqϚYn 410fbG= Yi ָEGt%rr'ӟJ_uV&]v[Ekh/qKaF GXO74Y+.O1_ǨM3?ȷ|]SSW!I)\kw`V1)#oM.a.+؁[(2l- a]} p~AL\~$y'0߄fY Ξ/'.s)vaJų^^kHYqVR=C RV - R{L`^GǕ&,oU?<`)p{w;n""WHBuԲ5|a/)+aL7_:"Ы`r("TO9$MC+j9UyيpuYi_@ mׄ[vf…^?)z,,T#1*M@ ^SIF1jڳu/R\8]v>0.K>bTzbN{]f3PY_<Ȅ^JA%Moo烨䃳i% ֐btbmof;LzVAcv,>o>f3۽]4"+F_h7,y͙ CQmiouE1+U~ղ*6nryQwP3m#D2@1&8u|hυ֋!zlg:O&11`sƋb3 /ka3M&ߴj8lt $ tؾ_J_@C'ӞWLXM1o!6Ct51OKx̉V®m97Ի$U-3K`"[d}3LRwp y,ڰ>A_v7q S(.+X@[;.mwdb ڱr.r= <GሉZog> /z<3|+ZHpO0u ~{蛚.d3zO3Z{, b^ut+7}i9tvgEKsAѦ:+C +)rPgS=_1ۃdU+WZn8 U*گpr<чBPz`vݥPx]al1J{Jk nUP w7=r?EǢugSL66kX s<)&~1GH,$0S &j;dY!&3Ė#vh:Hnky!Y]5hjх…fFCՏ[QL)NS/[XbsNV rt<|п}][bN(ECNBPuգgPT]r  ;6k8]),ms{.G2t}|r/EzMuR pY[|~}}5~G^wk $d[gueJE!q Xi/3c x* |ć 8fӌ ?/O z{n nnZhMӤ-~>xc;^SbGw^_Ty͑r#fnU2ALXAXƸYу]ËPgt\o3|ග)z)߱bD|t27e}Nhz.#eA+ma渘čUmeVYKv?fҘ{>{-`-tܾw]1ymc!)zlަ͚Ek)\+;;z8Pt;v 8m]kv\Ыj~ ^sDbVNlR ΀`Y_ڌ-f- D35ӸrbpM[ډ-||uć`Ա(29FD!A:ee]ִfmʐp!85S.u?Rq7Rt/X6UY?F9yԘ 9q$ 'ty~U KM+Lv oFn6 ŝDQ-u 0?i<\ǗLOæz\*\1CҿPx&Ы(]?ȜwW!}Xv ggJE{c̗)7Kco):Dm7byo+L`3N>&2hq Jo)z>%':P2QKj8HvWcvߛധ"lE+U[y4+'l?;G*\g9?y{/upzI>^Al"u-l}'Zgugmf5NODeQN;^&qMx:RpspI(ɻ)}Zs}#p{yJ<\,ybOQ<`}.EWӈZOPtRsM-כCʳY_,;_3oڹ):(fDSpuZm' >^Sy~ƾy8²=sQkv逫;aFK,Z pSh=.la5V./hjc( +_: j_^;?K|ǼGLљa7]aϤ +6a9{h<gٽ% le.烎A(=bpoL/r#):2$WiGܢ&8DIea6õ֨ͬ=X[3:Rg'NoZkGų=NaPp]599v:gN,^,@UpJ!I.bKnO[ŗ#ׯf_L>3{7"7.hᄥ{QW4KuUf|9ஸ63jkNMpQsuImΣТ&"SC`ԞoWX ?{(0gR)Ba{wఊJGp۱-}pebCZ.mNn/V"SF9c zFh /+5V1c]yEiߏBFmeFV*9_spzjrE}_ ;Esrs8pkuk-kp'`yLAv|.-zaq6v>?{=!|㺾ޫj.׶]Yy/v}<שV߫tϻ#u\>Z\ᱲ󈮟ӛ͇{WSǻ>US[1\ҥ~ܱb[Sω;/ްÍJϯY p?u9vZΚTzm]3%]?=Œ9?tq>zx{᧺jbb=\X~Px/*y2.s5_Ye 7V:{kWsaC1"nB[iyrрP&iQuTؼp*.l\o|i*Ξ *~O6e6JAxZ,K 22Ԇtƹk#㠲djtlmo17zvKyr2 Xj=jVoPc81l7JΕ0w0_]1PnU ̋͢1[̗j}DgauJߕDR}qnpr_n >dc ޽54.|(} k`jvL1A똍w@ZiyPU_o(RM,^5J[ 3l>0; f'W>r9͸߅ Pj#J/8@e04.gj8 ߗ]I 3rgM0̈́l|ۓ ye*yEo3xWZȍUg-yӤ;aׅ雴 rU#Y61|-4fKf1%~M*gҬI6xh S*ϸ{dۖgi#/{=b ;/6NY&={)#&}zK͑We6ʣvC.w $}+=|}DJN)?[D Y+;?}˞)IW j^se guSmR\3oVWz%ĆK6-zށJag 3[>+/WBBسׅ[ ճJ`r빋h]+='ΨdޛÒižlw=nT*7Ga%<qe{Sb= i=7V._<$\Hm'퉞mW!{LAoF}sxZz݇{Ɖ p2X <{\JUfmmU(GAsIo&<ϵjPp=eq#b4LyB ui۰wmgӚCs/էZUG>X]y7kbpޢ8 .+DhxAc_!ޮ ^埔3{$$|oZnI2:%[GAk3BowwH5y8K\t%Mn&8FMz˜[@Wp C,+ߑ<ǩ S!!uKf߻|prb-x,lӚYVU6d@!`:T3;eE|%9]&;[g_($v{of5f'\#=dӖ׮煃[L/ayO^j:)GXŘjaUؿ쨀փGݲC0u]r@0W/Q$9l%Sv}xa$7ʓ-ϭmygX 5yYYa73ʣO6L <?o/Yz[b4xk>Gyej:S>_ ưZ|~eKg\z;o }5lÑ4b=wPx]9M:!kkbo=o۳ڻ5g֌- ejЦ3(o;6 4Oq?;ުY|:S8ǰ BaCZ@5'i'rzN{6):zܳ]lCk50ݴQ n҃-gџ02 O#r=a{ qD^NDN7}!0Ŧ/jI"Qju.>zl%WHq%ݡɣUJH(%}ى|(gӸPFˠRrH@ՊOvb PYU;v|;o"TZoKPq#!n4T?*'ovt@(/t⋗PlRyg/|*/J1eKCO7¢R(ҫo^exT`>KFF15^?u)QgTu3P^s 1_^E}X#PD"v= 'ie n;ʀIF24yH0s=$IzyO IlwV $BigGĜ>gLɄwR|Dβ#sԶLˢkK#A%Tě"YK;.9 ksG KBmnjB(~>G(Ѻf.l= WUNf/}zz3&/ԕK A":Uס-(}ATPJ_LB7ߠHn<nț@5*V85y .S\;3 ^PgSzu(@yǰf$/$vP@~Pzq=]0WګI~Pԑ~M^O_;RU("NE&Py.itQ|vZo[ďN۱r9;*lEX,ʢR("V])ˡ]>1,^4?>~ ,#ry?C<4֒yOamnGbm/s=~ tj7gƼӉR?7[?ڎN$D@BZ5j8* >΢z(kl|1Ud\]1 5j"iPJMġlD1pߥ\55,4@͜ TF=U^EPN@wwl6X܉&o,xܵj_gW\C5$אxJıgZ2(EjN!'@횳ZgC&k{A-ZU@E 
;\rHZKDe?/u?$?%y2w)Ihvgq2|~~΃p(.(ꋳ]^.vԌ=v5@q|Z>gUb$^)4whj{w=V0}tYst'=8yN܉r2X9E9tUEjT:Sh1PaydK.PA>34'mvPF%w@7@5/u"|r;2)#E7䉵u~7q67 g=]5g7|;SUc" wkٯ9ư+™4vZ*p/˝4lI[vP#yԔYTIѩ?9y1! :}ܷqGuqOԒ<Nq⯃5w]ߡN(R$|?xx4lYw\?3]}˺_F}a~7'}gysO 1ޫGdٹ(_;jg_<8bK4_$qjԟ?ծx6i9vE^}oţ]_a_)sϧ~_kk}y"ɖяGA)(gaRfN_s̉fč8~ؑq?'1"uY_^A"gry(v>OYJndG@W?ͣZwv)̛w%ɣt>y?9o^۱Lo'NrksE9yj?o~Fa <F|m '1@0/IqoLwe/yGqW"?{O &߽xk ު~C^lQبh :*\Fևzθ~ ױz=O=/7H<%w]R_m ^d}`Ctegлדp}<ŝKUzQ^&t=[ޏQ5"f;]f;r>XULuLtxf/pYIlȺF~\ȹ. c{+"/ϖ //l͞vu~_i< KyT~ d\/s8 .|E[p훼OH|sj/Ɠ$m p:ɚCyq;wS}p}}<(~G;,%gXiJQxJ4#}B/1es/0veC `2}}>q#Dq\@q8PQ}u}eQs풂^}ػ삧J|; ol/Ɨx~^"vlAwg= 쯞 vX'plqfP'%:U=nrQ?"J,gw ru.@yD{L{3܉"N5YT><"_B(}9/)װdKbzᣚ!O̓vv١yť?YIX[˞l;HpQ' SIIMIףݣx̽7/ 0٫_.`Ԙs5:;69k:@^B#\>1~?ȧl )Omx=84ZgΞwR(~?8?țԂ=fzȺPeQܧuMQ%ɧpDt}=;l(Wwd*)}JpoǘPGyThȳ@yKi'S<`xN*hĖ#{>%wPK̋g)G9Y瘉# r!zB)>@9)eGMyC SBQ{k=q(G>rǓuG~d>d>*EQ>EJ쎶+%hP᯦x(ysCq9̋<I˜<:<|œY1qkM I _£oۨWv9GB~Bļ((wV&n%((g>Jy9s\@Qb'G }]R%Vuȣď^[2JBkG<ʜ;\@Q>ȣAd^ |1Whqf. UELO'~th $| !{uH b>rؐ(+8x)9dECH;ػ0(NP2DO}Ae7~paݗ xEC1=9Sgi{QbÇͦ#s?a{p@?r?(g|%Vܧ p "?Kd2Ofԧɋ8vZ} q>Eq՜7QW<0p7 /pWu>r>)d9Cy݅[y c?Iځm xyԝ iQqk߼Փ׹_ 'i\"o~(΋z=ņe }Gd?RlΏ:~A~$Oͣdɺ`SGzH'P~q=gѯ< {ռGg*:>1^q(EPdu ΓџG1rV9pcjhgh7t\H9gQldmF[FƯ A<('Qb_(ϙċW(}x{W<@NʣG==Gqm|?O#(SQy~E;Gyqߜ+.-C9xZR\B/9گw˯::s4>e*F čUuUU0g+p&uv߬_L0g,,>JfT5NMe 5U0T1Udɱd%c)::F6Ad3Q'Igx]gwS?݌оǑ-"r@1lcTa|?}7e~7tuCmuYƝ%óƩ0mlQb&+AUkΓC"Y) Zv[VAӏb*q?+feZ (^-Gsj石W^[Dw,?/˟W~°:N>wlϫèk9~_:9~r93]l׌::)pM#) RR,KZ ˅U%ſWYk%zUY{%V +,4x”qu![.m?Yٿ9w8g ~OFĒ'wMw40PTU}%f5 y].W58˩:,~-;ye+M:j/R]s%ϗ^0?gK]n&KD8Mbt6 ѿп:_|v۫y:\l۩$f]O<1?X2x !80#8$SN#8,%J"8\~brv-69y뷻76PX5vBT9Z߇ZI\=W|/N!]T$ z 'Z/LĻ ,4=<UvC oT hf݁F^Y|5~?S;2T4{Ql}F ,K*1^pͽWvO/wn,hԹwa6ٲņǕ,,E;[@uF3q~oq%xAs7*g󬤆{]MVg[Ffz&]1gʪjh++b 8MaaQRjhX CünPw|T2d78aR(n1p4Im~4&b:-{k&S>mEa')=LaшSRCS}7(%VW܏o RfrHub&{iވsY\4xO9O'$B e{<_Y%bmR6Eo"AkSU& ڬg;^A]{kg^ G=:OrY]Q?e{j u5=W@}+2P-r6zšN;{Efk>B}yHK}k]eڕł6d ܓP\?{ŚRֽ,u#lfvJߊx~Ǘ̠nm^P_bT>ڳyhL~'ܘoi_s"qKDdc +m jWCAvFפԈ>G=1M3X,!0d4ԧڹ_; Z&XK& *PHR@E< "$$d0(bD%@$%)$H9gޙݻw>wC>]][{jag?< қd Q%oϦGvbݑ1L͗x10HD\yXle胫SK+kt\75`L; ԧkT.-/3cV.t[1->ZX19uZ-r} he빆DW6 IY)/_CW?Z67s=s`#fi0hoCOz C pO{QM~y9lcuWđ뽗ٮmM\0"J8|o*؈|ZzX"u8~ɶ\b}l~ Dx}}5^;(C zo_RגʦY}bX(!qe0R7ƀ]1.6_OAg(U:CM5ID\Y,KdwY /$8 v~i8O| C/C]6[ !?UlDD"N?p'lvݹ%E<"I}dBqz}^<D! }[`rrYBa9ac6>Or%'p؟XW36Pڽk= ~55vOj8D:nњ5iQæs'X7Au}G4)sQ-K[QimhQT}NYşqt_sR=_w&'R؇LǰKcG( àMh-.i%&g8o~bFbﷂK?~/uJ0D=)Gk2Bb(K/Vmhc4LRn pd* JMUD.YY@y3smha0rf!&h C;~^>-0_mN&f~|l9za76/ѡ4vDpmon~`z{}~:] ׁZ(#{M>^Ж۫Fٞ{vShe&a @ӹ)ZGgϰ qb](4L>jȧsS֤ 0?o;;:beåVLgL#NAw赡CEQ4ӡQ ݉׳f{hG?K2έmx,,X5o}ZUxTYF^r%ruF|5ӣ̨ >Jrr? gNkqOhL?b˃s_n0 ?n"1XcH]t4%y7$47#nׅX,B.-b9Cksv3JҝV !&YȘaɂ჏!^.n;*H Il{ ͡rg7ӓuf) OqɍEΊXUZ1Lm={nPD 3AQ4$QTCV8i "6hwԕMm^h. }+l4tEqg Ǧ C[b햐<'/Su"F kB:YR>Fg4L~@"."FZ(Ȼ]m!{*9];)uC~Hs M(a٠绪 TU@C_w4\9ReI|2<)m#k1mgyNm\(me7{K8A ϝI _{uJӢf᥻oPO h0#W;Ļ>`ug%HBo- ]0s4j21 cY[9q\?g[PCкW!(mY4u5܁S@cy"s BR{^bA&9yEVZH9xMy ;b*7"L](d>ݧkeK^kAQe^K౛>bcNA\lJrhau,m]9#Pȼm;HG.<Q t5ܴFu}#GD{v]L-jwYGW歞?`SםSĞF9 40̿a7R 7߆16ٸ^%V1[7xC$ˇg|tE6PPZ '.t*ذ*o Ɉa;'2 , KH~/b)+f'd]!j).},y2u9hb ~|{J!6 [Gb!`I5- ZefMDN ,zCFd!eD˫WĈp RHZy;H~ aۜ s0Ǚ3Ԭ>EN~8bcbQكn`C6ĝ@ _tf7}+ bGϿٻV1Q.;g)VM\fus.}ia ))rne|qj'2Yv~۹ $٢wdc1}v~ bwjq AߗȆC1I{C,krא#)u9#&D9"m􍨛.zB|e<9tFY>{M;\upEҁv^m`Nɇ-}eupFjT םJࣰG9s9"LO'7l2K+OR>tC߶8~bk[>3j4΋<^e7YM)Zyr`PK6yFc5/gqXH;pãĬ nHٞW^$6\p#a! 
{kmaujL ~ӫ??o 9F\5ʷ1|L~\>lGa'_S=ɮqO3@V1HS:8J^PsO=,eX[ ^P?Haf}Է&9qdD}0#L9Ha-^|(bdv*e=[[=MVn޳i5E i^ɪ=מc4b?no~dA_V~ b ] /1/#اsZb9 +ӷۃ_ضx|OU}=~%]_~2y7{g)~(-śljLjUlF|tj=%U~oYm6ҹ*u35Þ{WI~OR0xWI6}V0!9=rTFƖDޢĚ] <2ޚh5GC\)5|,7 [WgaU|v0"{W:;afp,k'f"{%߰@:FKdUpA d2;`wtCAı >"3beM^̄q,[<_|8fc@5?Ȇ8Qm8=Xjy/72YLg5"ԍ0: ^o9^sE7Β}k ɽkps 9U!@~Ynf7y %c" |"X@ 0qJ|FZ_"w~(qp%]~CġҘhFA%}2#,9Btf_yqԞ>9<%* C'3<: >=oAQ:wo y^'j}}#ԛ=DK"F;kbZ_^y_E/ēW~_!R IIc_ %{3bB >| 1bq#m.`M2_V#LkMX"o~Q!bhGGa-Np+p#ANv1q|dQ"!F"İ҄GO@ wa:1Kffyw;]bXbppOͲpX咖>R{>Dlwⶻ$ϑQMp/$gcǕivs+qtǸBdS#":>j)Dc";nn1ۂؿxܙq}/3 /.?qbbڀi:n~Գ"7!09J7Zq2zߨQ:yrnǎ/YWR,ƥqD:GG}F|lmKVDm6|h,t1. ;``']g]wWm@o uf9}|ӭc8{BVA1+^aSڊA 98"ܸq~!//e"aހpy\ xj]vRaN=|[@ MMRA譪vCL8mH[;3zSS GVLiÉQ3>J- `b:n2a>LE} {Sei3`5#yβY,  pj/35?RA,%M=՟$1-ו:G.۸F_~Yզ#\FC09{>;~IY = ۄ8VtxRf:YVxpems#އ#Nşv8>!VwqKBI57x'<0^#+ .0oUpAc2U5-{{(;:N!~!58p5sn+Β5g1 6/_hO3~|uKvBWY~w1`fvq0qZ )đeT?.FUxQq~lDƝ嶇!Y mT[D|B% 75IJ[Nև=9~^Ao~ wіZ2>epB<[yџ4=' w}A6_L!DH0'WoC 1ܺr$bpY 2Ġ;t R6:sK_炰/]#*vD iqlD u'kbKc$ 3_# ,B9TFG!l,֟yK=0)x *h>bz< xT!7iňjY?LDq1j+N-":! J4}Ap;bsڱ{ড়_p|b X1_(=n1qnW,u{F2m67T)Dplb*V)l )bޛЧ![= Ajaׯ| b͛M"v#w2p?D{ŷ7,b |0İ6ňyffJ k:b:u$rfE)D.R9 f@-(^2*+?Pvq-fĀlӦY*&[$5u}E z}BKTѿ}gkbZ@L~ Dƒ̽!Hnwx'W8)1L)1K'ў^C[";d#K,:1ϫ{5!+.اE+@0 F,6ma\G 6 VNwNȭQ nj9F^x7 NPaZ ۻ;qN+4ķۗ5֕ގa 厄we#ZOPXhd!DL_* 1rnl5=bwy MDS>)hxAUĨ+/ͣYYF\LFLȼ"WĎi*9l+?ƿkfz;5R׿B]1JƈɈǟ.lGR1Pi -Y:!g1ijx'bWFKs/utt: pF :uL a˔_;%+?oz ;zMW"ֵ><ؼXC~r4#|=b=.c%sXt;69 ի{蝄4ϿBSM` s 1UEKS5bx]U b\8}篈bif  Nd !F5C̴oTB$OIƬn !+֦"sH*z`f$bȳ' L0A̫?ޯt3b]kbh,ӟמC2GDn5}O[v݂4mZR:|c#{.h|j9b+Mt(b.吶;]fGD>iYɌ^Jy)懙nҜs1A(f"J&ڝ!Cw:yYliZ i5.1~\ by=ww"q1COn=>g.7(G,ݽ{1W;G#)nJ<(.6ܴs͇RNG"VUZeX1~K(S/OcDk;y8Xb=e݈풇#DܮsjK pc]рV#pn}p 6T[<5El%XLd0g7ޅSrK7ɻ~c102Gy5[WwnOkz޺M(Ȗv&qC+D̽Eb品/Дr:Y ",9,x댬i~]IňuiuȢ0]qfhr9TiJo9Ův$@LøabG3BOSC`qnG&8t"j{2"Fwm߄ЀHƥk'yd@xK Eag['=?ImV|OXl؎8bC[ |tW83O #{>= GOtLOXȮ\\ +ikAja5$r={['M+KADov9 fTyێ5At<;׋g!}IT+woDO^D?Y ֜*M/.QB{n|탠/Uv#~1һ}e*D+/= >T݁gA+v 1ktғ9+#9* A[iPI@`T)(cVl&>_~0Ɓ5AU0*3Li@ 6j`.ŖrY!K}LAu4/`[4n~a009%8YMXt?iڀ blzH!eK%=y/#FSu3 Ɨ KE4' Fz܈,{t"=g^+zl#Y{VK}46"z^SD񯈞㡼Vy.3?;y06Gb~tS2͌-|B,&|wG&7=U[~W{3 -fy|]ڲO>-JWw!N0C?e9Y 5L˯r~qqEEӹ];~)ŬL&{>cGԚ"W>rѷTmGج!DOx8 >XF 1@?F-RWeS|p9M p ƳtXCoܰA."Nw'_s^De, J,6MDtKyM2`L1:W&.Q}?Z;Rw?rk^NՖ~>nӓ Y=ѯ߆jyDl+~yyFya迪7- }_r<GsruPj3]}1P;6Z,61s#ud??~Eo4B}:4ֈ77]mCvW^ 1h A8 z|4AC㛫[_CpS& :N!^{-[&'1GbR^bPmznAĀ(/k!WoJ` <lnzzK™G JU"1Bz qo&_YN#Ɲ+ 11sU{#۬zl̞})RD7"WmӂR jêt%L_ ߵCey0ҍVDeO ׶O=ډI&PDDOiv1@9oNTnD>eqDߕfAE/=֌3ct wVw)DZ me!sI| T~0knS8y[9D/ Yξ2 Ear?sѿ/YF"뵗7 J4#0ZoހcIJ27A T`hkz ;' `춾jĐwL+"6L6]5ПW7g&ZD%rF$mis=?#)?qQqW0*[SZZ~WJFr3=ޭ<>XL{/zR(9qe[闲G"m/.gn{YEŔ7z3>웚_[!Asۣ^o5={rRޔ ztt.4[єb Y~ngWPG}9lw<5{x:qHjn{WKsq:یğ}tmfR:D>]e ZɝG+y漏t6^?\ǝx9v=vn{Y|miv~YKV-v֫ $綟R_rnە{oZH'Ge;[!rúY-eblN sv%S%vsQytxQv{OיϤѫ)ԫh]y>uEsOqf=: vnO,2Od.s.pN_}?4W uް}ן& m;w{R10fn ש]nO6y?Pϊ'lv|0K\QaNyNNwSv'LAy=_~ůT#̃#K(_Q2q~:xE9?h]OSOw[GA[ϥs z}xGR^v7)P7(p~yGO@yJW$ 6$&CeyߪwB/#>L sQ&ɬ]UC<,h))g, dp2u˔ ii(ט>gt k8 CX4v0А\i=/ZCmƘa*;ۜuu%]|Lc0mतZԆkCv/yw3`I`XLR Ll.㽲3ywtgSM2W%ƬpKueg̬\ PP`\z3;n42y(YxL-`cYHڈY]"XqjϾf=)eq6\[RK7{,I]Ke2t*C(10-7Ml@?>1d|kNI ad/9Ig)Spt ͜GS{)%/JޱF1x}oaDFqSDh}:;݇E\mO,X)sv[OE҅[$Z x~nho͓;ȓ=+f;,>3zfM3 5'xnze6x/nGx|qDfjGud+ѧN:D!'"ޞ$='kVh/Xcf9߮hoψޟLgIjޗmsV[y}ifymn»:Ft~荝MI߾XKucFohHl-#Q< "y!ؿ l%u8Y^ Rwks%mItL8eC^]$]"zCl&=v(UL?p'*oC ZIۺgCA+/'NCVJ҅Vd)->&{Ow7]u?;ڳgf,efaYC(~ m2Lim-bNnoR2g~3\3U}~aNTA{uuh8>Wb=|)RY^Ytz'RxM)eՊjts|f}GH蛹]?J ~,q"WͿ82q? 
Ў6[}:G?@ūcf*Y7 l~tx]SwKI:?]|;IϷv?w?x=ຈHjo||#vO))~}#/B~*ҽ:uדvT8"5OR5IzS86no m-ipj'-n3Ws?eJbv"Q(X{1$ vO(jlMgIvxw/HzX3DEi,/IBQO%OxJVR{;܉:h=x>]˟:[${ˌCsq+%ҡۋ/HyJL3 IoQ~kfIҕnIbY*i,8Zu{ ylLЭ"aϳtU8AuO9-)goa_kS'~I~3$)(_u^l|X4KҸ?Yoo?KƳ\OHKGMm<Gzn%wygs[QyY|?ޢUxw~t[uμ6;:3T:>/ч Ťo_{C?/1[uz>O?Fp?/1[u~WT:+{XB>?/1pi<+|^> {I m2Sq/竒ϳy?`yh>7>7|%͓#q>3yQ}|z|6R{&ͫ'.?t??'Q G>ɉ%'w$ͧwJsh=>Ϟb^(^~h>%YĤ&4_ox%w$qxIN4O|'ZO'ylx"]gO;'v ||||nyoyA?]tK'q֯ *ޞ!k>#~IhĞb>dz'_?P';_㟤h'i-%[߄\P%+=ys͟''yd뇠ydz[;y\POghyu຀ϻ'GqX߄q}S#+ :%dqLtA딠qsjM Zl|XOT}u&]z"oO?v%_W+^mu^vUSd흳Y?Qq亊k !O/rE@3ޮ.瑨}uVt\7׋ Q?|y^@P(Y~JN/ c*_ןu=/ʫ!ۉ~Εx#sB:K7}:?Ebzk#Edu%C-G(t|!d뵐W߸ޑ=ΥxKfpk> 95]I~R(:GhԾup%7Q<CGי!;?s(^H^?Gɞ"Q?d E:xsRQwF:?S%{+!(t4HSkG!skGC<^Qg||NoO(I'w}$Mz8 0)s'%ON> :?\r% KIDQ!Q~{IxoKs?ld߷TN!uߡ_dm<,ŷSGԾ:׋_!nDQ4.xl}\r$7s(DoI8Q=)ttqs(:_Gk=4)"CFozNߛoum<L}.hU 3%%Ld?-{(vC:[L$ɿSGI~Iu{*\'qXw,Ez'Y>J=+s({?2=CHqLGxޟH$%ϗ Gun.%:J::d~C(%(KGtBGID>>w?x?ZCZ3:Jj 4w *UWѯ_E~*UW[D`]\2Ҳ76"m/Rб3Ʒ]27%mg|R46Ǐ淸fCڢ9lfenaE735,$n k,Ԗ6̖Ŭ\d,mbmk5PGgc>ƺ?n~~%h/jS|/uzchbsY|[ ?Y0e^;(A(^{ /Z%aTP¿7Ż: n624F CF™pyM?6~["<8_}?|{Q7D[Bq;qƟN5z/?Iϯ:79ӫzW30XjmjK.[ khY߂̿_uMu!(z|s|\*H\@/wT/y3oC_u}u!hH= h&x9SonzI =mΔ!tfSOD?hc/dgyp_ 7ƿU::hȑ9q7ZƗ5.kߊBqsf8z媄,eÛW0% bdmlom>UTbcQ~1~dc1'똙I3Eل쉏%D(>G9^LߙdΓ6t5 5$_˲Q*we1M20&^.Kwץ%_HTXXLLTTPHtv"_P&XEO6,D___cvLT ,Y$2[gm *AP g ;fN'I'qkId&Dv9I&q $O B$ $_~󗟿'IGUZĚ9-洧7Wd 4'>BXI4| Z]{xe߲DjCtLeVN\; j-q#PQ<4?. SvԪhfV rB10 jtŘW@@pbu3-{.XyycPW̝,hYVE[~hZ|@[󲺟 jΊfd5P988'&yм{rOw+&r(qoYgˢTGLe0&ED[<ʭg݌1WLkd7Z[oáCؖy;-z0V@kӭHzڻ_=1'eʑG nFt }A8Zps#- `]7ϏV8tX-., b|qZ`k=,!-xŗX܈ 4[`!"ۏa~ƿ9߂e{ӂTe69J6ۘx?Ρ0L5n;"k_˅غS1.F +^+2m^~w=!v}FהZXj}(lɪإan>5yk #?Ck "^h .Z)ꞕ&`:&A(_w;^D޷^i`lv3.DmnJ3U1z&٘`j9Km}e:&583LIJIA4Sl뷘MZK-5]1 ]kFO&^u7{։ 3A\vfƸ^lRWH/,DcE0Xݝ1Novkm7 8Z&$ؿCdu.LVhL\}rD+{`Ϸի\pZ`j[1oę9vAqY)l 6ge%UySaLnB؞w :ZIX7Xu* ,u']:X/(\ M_B].{즭INU}ڎ2b9S*P/wKx)4Sw2B}UBumPa0|O ewTk#O|2b"bYBh:nxt:tj*Ozm)ݽkt("J/gz:;BD(g41rVW#os%o'&OD<{Sru{%O%0ț+5wډ%xǜT.nt ~ >4[|\a]XS3Qhh/P NڥYTމ L}rAԞy19Qj睫#I˝TUBv;WJ\tu!ք;=&RWH ӟX4(sR|)vKKag[#rQێx]qrf zLy>D.6Lc#rM=-شz㱃Sxbۻyb& y&9+A+*8%Imo\!jǍ#l+|;R ?9Z'QNPeL`X( gnֽ︍I,iuwa7b6:JN9|8Aau>i.ϩmEap)ISuw_\?# <;_Aڵy08E';keBؚ3~)G/^ު{ZnU?;ϻ3ĜԾ%YMa7~\<يy.>ax¾=ϻI::|oƣ*R#6xH1͌%;nk;FaЗtK70r~8]:hʼnYij4!fÝK8L zok_#dաp*еYP6tnKXNd3H:js v!ڰf(EꐳB }ŵ8l>A)fgBp{tL=YxCp/w鴛m{XHwsqyj? 
Bj^ŌhQ?$;UA x߻n; {؜E|ź f?=!gw6=<'VF*uu| >1BوȓuG.?ڼGyv>Ol{a!B>g*}8aHID ؆+|=sƯUi:g#mVؚiZ2jGaʳU9_&yҟqaɑG '5[0'3$7_ۇexXI y{xaEhkrvO½/#pzG#?EzFԦTi9H7=Q?\*J/H,9HݢO׿I3N_L9~88GL&fa b0~yRvۡӓpΩ0"zS/8_cwx7v,.Fc iUӳ$–ъ1oAj}1գn{\T:g7+Sh-ks8푣pBt}c4cξݱc[gʀcCc F%Z] /s:b^ Ą ]ȡ0rlӉ;?+<0,t;8.K0L o2T[ N/#G17f9 ^9Vҽq/Ofܫib@T*c(d”Vo* -ݚ9އ.uυ3؆ 2^ 1 $0D\&U 6O96[IӤ'tp}֢i<հ^үk`Bhq]AEJj؞ϧ؟yQtBxkD` j`qU:5WS@tPX( {?>,Ն6U<jzO$ j( g4%7 NZ_ 5c{&^+<5^ O IvUbzO -x閉Ո Aă~3}󳜆O)5|e1 yo.“C Gt*汀.Y+ƥi" oGw~J.XcUM9D1[vO%vt1M* co\z;~@MX1ƁM䦕˱uE:azT2"__BzLP3~WU;^ekih6ֳ-h:7ҭp2Q ۬N}HnAg] =!wDtuW)A9[e"vlmW۴1Ö>Y8|}gloܘUOSWl%eN22El(=*hʻj+:}72X ˈ\n <=W7moӰCVXPcG_5/Gdp~CpI–ϻ7-XݪVuݼ,[+^^1Sr׭+C>-9srpO{g{,]UJ V+ص7Z`jD!#3S8qTu"]olўE0YV_|R ۧ.|00,9e=Fg# bVσ R]{Gb9-Ͷ83)aq1c5mC# [0 = ".c΅˸6/&Zg_dghV^M$MW'QrB`UGI\瘺 =c9 P}Ea5} uf9rxT14p.; ;)>;3B]sNs {qs;3Me5Ws `=) ^?]\So/L8u] y/Eal\}` ΖpRZ%V{!gqc٢ٱiX[R*_!(Jǿ8q{wB&,HbZ2뫾O1LQY9ocSsZS,– Ktu-毐$" n]؍e)/5d0J:rd22xUs](vgΊboOdQ7}aӭbehN7e?=x6XylV?>m+`={w:hFإ-p3B{Z6{{ybk\la|VKsn5bˁIBg>*B6%bEQĮřwCIвaǧЦ ?RM} .#:;=rjWަGWh`dG!$B$f%:`) l5 N\ޯP S4.7ϕAGȋcٓmP&Տ(_&uo"J#l<)8pXs8!)Fzޥ0u#_nS-qsD{\Rau0S=]bU~,Hwv>`Q]+dl S&kC!7P6ː«/jVzQ}cؼSk}b}aT֮~ qOVgysD3'ݹRjGk1jfgcAq2bO\YC_n+ +/F,Oi;#4X }rE2t?Rcڄ8\,U\qV/f90|p{ ͺw8M\ue?Cj/K#W>C8vqdekG ۆaͳnk׺e9#`evD܋K`MZ0Z$dP&&\.6A8ΗFWmRuOE1%;ҿ^6Gt9Y={>Y&t4Fdk|JEgtػEο`4U /^J:9W[U2N0s0NFgd}oطh\8Cq]ܲH)#5wTh4蓆IvaNA_tCez_qXw| g5&7܉-# *l!#h'pGw ƪn/p,>qFBC[Jj`ñ_?FC1Ƃ3 B+) ^ uAcBlI굽7>C#NprN<>)KTC$}*bmsH8z/qZzE~1MɴMwFWW">61VA-uœS]\((?b0~NceqC&z^4 mdmE,8ޱb9)YJ PJ.bxz%xPm=M!{Om# <#pvC'ynR>+yoHrKۜ1>$t-EL~6Ĉ{S7Rz}߽ewB-f"ymX b{g`C1]%yi"D-If !> H^hyl 1Ǒ 9Չ@̤dFdL&*${~bh=-, 1!T> ƿВad'[΁jIHXhU1Nv9)W#N\݉w֕C8ݴ/͟ :1S]"$ mϯ'nD^~"';(@Ws_ F1=lstv$$g2pwP0flj<r-9V-vHD±#ʟܲ1u'] ?%H<=ж"F7NH}<ۅg9)Pqoq r8!GG!Apԧ|m1Rk|Gz 6}&"0ts<kйFn1O=!HJL@&Z<NM9>ј.Еy7wxX Z6&*>6m0Ut?yz,bg u={+}oZWhž0bzRbU>~ϵR0<Y\aɈNh%rC1֑gb>Z_ :>ڤ;Fv/I-wC"<+{vLӸ!=i)ā\&*\0<Ӵ=aK_g#]g@ SS#C-iD*eRW!V 0"fV|-|1Wfd^cA5k[. Sc 9"P(^,[6^H+GlmeqP3D^8m8Q d:G!.HRՐU<آ&kq nb:}O^.y6ؓjh#b)bۧm;?nZ7186'>JSZ0C.?"3F`g 0G%82MCShZΜEOkОcJ_(8g#Rrgc7Kn짉1XT^if@|-U͵'f#D˱sҋVRjSҙ֫=DJժfyՄHX(ڣ])cKu &}ĸPfjA8[P-@R{Nj"qd%)!!%~ś´w5w]bvƤXe|?Mw@\ץ*2Kc Z2Ǔ{"^p ʅwP-%{CrMuDO?1/.5EL3[ka諬zٝzb=3V\Q*57].1B| hc"n;JqL w7w@ɑ{QgOB[14d0iĪFAOZ &(~~.}7³F6/ BEzu(ja$*\ Ő= O7T/%XeC@+il\gZCܣͼW p];iVHop< i:c*v p ZcK=ΆD%B* *%yxp ~݆yۣ R@]ݖ'IeUx,A+?\ -:.!*pBNgF{#-kEL]1qR /gcV;vW#F+H݂hEQX7 eK.A=e!DY҅xn 'h+#b՚TU:JؿPR2?x |毚oWGxUǕ _wmwG ;i0nBeQO ]s^!s?t-0w{o~_킽t {T6^­ _XRܶiCּ0psndUcA{AjpO ^aNHVѬ>#iI/y/#y'dO!+sj=1u\sv/ъnBe>zo}'!t7ME ey'D>[qq"* B8PNb-67, %[6XJ@I)k6neJ/4ko_e2-'ڡmk8-bC؏-:m4Ff+{HW2vCδčJGbiqZo: "Nd]r%Tk]H:lV:+9L{I(YZ,Q݇bI(: Lo xzC̲A/YX+qBc&$(p?FHyN'֝]ci k *O.5(8YMxUߙz7rS `S@]{U"qP$/}Vy+m'ltxT}6q9CʊY|k4D9iMSѴ#$~7ތwď-_!߬B4{F[󌐿R"@>/#^6kZ y\J8!{ 풛6@j*ETNr\ jf-&b[B\GNǡ\`9D-K>h4wr<-[1(Ҁeu݌5[Uz履~J@jO&Dl,q#4/?QCd{$WVqڝdh|e-$%frw{<|hԡu2:|͇d˻SM{ŧ'O F[[M"(A!NlM9J>IM gc9pBAGL]8X5bTށv"5;BFL=+jC!Nfac_md%TVmqsbmF"U. 
ROf^2HPEL&B-|!1^_i> Z"~ܿO" џNCgvf7kvZ)Ro'@*Off QnlkUIB6KO_Z!YW܁ ~Ү#]Pk5vl DˍH%7:qBj;t$na@xOI'|i^9Sh|i<.F`KvtN ľJVq̿ o9gCx40uJ cO|}׾ɞnӶ~dFѯrbrrS4$Q j4Zd)vPgq]xr=_rT:V-#"[!}x ODym?휀8XSJb Fo21ۻLE>1XVk.P7lh.WPu;J\A*!ՑΉ* rTS˝-8c%"!c CL1 t"K)e~bp nrd +.XW̽1svDy+ ?)A;S]O@J|ܪPZ5^GLup>QhD1 q_!rWV sP.O%Cr%JP[1ݽ̀V֓D~r2;[ Q%4<(sn ~IЪL&tkR?bjz\1򕑛$:- +z+A2G#F.&t ",Zx 'nG W[abMA]1\ ^:HG#jJ?sNg!猪6b媈V+2UO!&mt,t;!'"F풛 ¿.nQW5k#v0͵7/.1N'͆2 ֲ m}IJ=v"A-Z12Bid 1J{$Y]dl93_؅r"B8 ֱ/HHk,X <ފz*dC$VZOby'a#x~_cSgVbLJ Fm]NrE/!fU![* 'z?c",u:0QvR%tkrXN׽ Cm_C4m$柇\]z!bSCZڐP 1YPմ$문Xљ O9pіBn-;ݻ툡SЖ7̩1sjdۊ#*r8Ǿb ]$ꥉjHnȽqϗBL=}.`Ę ^{ҕ']!)1Y*ĚŴɱ}2l7=Wk'$c 1A4XX|h9ّ; x3xYy|E:-rS^i,u>^6ʱ?^pyPf rT黮1ԳPH8@ ju]@x=LYU^Sn!F1EAyĨ/ljö硐<ȃ~;DRoy_ksbH X D`Lv[t16IZ&!*CȻ1NNAYȍ>6=Q|8/WB%ي숩I/AypUbrh4Qb)A; Q<m"q|{ q=Ǥޏݞ=: R5|ڼOU9qG>Ԕ12%yzۜ";+(Crgֻn s_GN|̟SriXZFQyNK|퟿%ߙ)^Mc=l+?+Qއ*4My?_w=L/ iwW,U kW0Eme*ޯ,vRf霿vc&lr֕QL702i*ez0=S;uP{ֵ|;؜Aɦ /Kn_Ja/wÐҚ^ NhĿ ]ʺX)^yFr a|pG 4Edg|ˑؖA&vrMj|xKF}v B- ]FOLݮ|׍  -H6Շ//Ak']q },!ZL79A:e?8گP;Hl7c3% U@*/;J)ؙq=4{2gRɽPh WەP[Yu۶Bs.}r\V Ij;:lG:X([u@*mKѱJ&Er/PPؼɵ |/] j~~XHlxLi|ޕF%!%g=/!-rN.ڽa~8f(~?)Z yy|"!n*PwzHi(Hpk JJ&R6lut?9\ҐI R7zɞzi[ߚ!?R7d+z >KC0%ŭ¢!Z4|zuva-Tp_{Sm!v=zS4T 5~ŵ.P/ Wn Ql+?-E3_>* zE/A|d 97o9'HLbC7 čS@̑Q/ Of}'R8-0,_:ێ;@EmWKbJ[8]:fBRlTP~˘u ^io",7݆hE]kqib.5D;xkJ u'?MԐڿXx fFRz<~ٸKu1ABQHfJx}qCi/W#ܺޒ)a2eywL}8*D*[HMi tM;w5ՠ'X葘~(A;("@0GߟR1\R?;'39H6vP~ x^vG҃ 1/x7\7:umw9q>Z,+@8xӸ{ҶB$)?eI2BxZ}pGYv@%3Ws ֤0A>wn͐񠮱;dE.hE-0/;!.lu7W\ kGvg ⺘t*1 +9$Js,ft̷;1e'~^| v~Iqlڏu48HS{-< ʐ]<^,r M{ . p& ryb/ +"[ iͭt?e*'ݨK'oC/pڍtAJ1PH*opQ 3KBrQIN;/n$fyB㛭Ee!Vp!ILqf(:`b)D&*gB޽~rS?ӊbf,9GfSYk[ P/L   '=} i pc26ȅSM7tB~ʴT קؗ{fAI㚞lz ]OO9Ն^Qo<{߉w{W̨Z9Ue>v„7GeTBKkUUsAꤟon{}toyx3~SgjH&M-i \ I6~f45QSf&,siBG>DH%JI_!dF.ԮqwxC][W!$myWoB(^4Kjff4~Thz\HªZh_"ԋz6tҜ3Рc[ ;6uqA]\O(  *|^_./|t?#i\H#yes趖#($_rJNr(CьlN 6FkQP7Pio_eY =q8KE}h$=Os?|QzR^_vFHA{LGTFN5ƷFY'Z4~ *OϜh{Z9w@UeW-:Pe340~Ãwۏwax#[s4Τu>킆JY4Skb1ĥz14Ϥuw\mt2{P{$/j>:- ebX߄JIg_nP w98 Hy^L3DLi5gMU'vRxs`3}(/+253j$a:K1hD~b"Mq4I45~Sen_r†th4H6K,mJ6u>SGd8 m{ޯc,\Obb+" &~WQDZ@LL.QRNڥ;83s={ٝg|gcKVOUV4T/BXU%H &x씲%%UP~Η{<~A;k磡2#}b童gtɩ7n *C]r=Ogq0tdBo" oEa޽ J~q ?/}8{AS#6 ?fN?^d~~`"|3LD 6-aPTFIt#;l8w( 6 G§]Oǵ8ޢE+XT_󭆜S3 vfw[]R0zk)}û$кIq dk}B Hޣ<{0VFAM gЭՓÒsGbw9 叠q}}|OirC1Z;ˠB̬m TX'6}Ttunt}eL{#7}D։>0(vWP8r[&FF7Ȧ虜{#R/Ww;!Fa J3VN%hgFP7V= m;KԻ"(*z^^{!N-}>hhGYFrB^KHӯ_TdkA}_Qf:Y2OqZktFyډ4^Hugꏴ3}TdҐ=BX6:Ե|J3[ \i[ he&2̧2Mр쳪eCAuXaN#ޥ=ܨqj:9𵪛IOTͯz:QxjMwF퇾ՄPM!wgwHt]wRu&T \Z7t֘ [ Ōֱ,ަCM eEGX_Ѻ{yʿ𱹯>a_WrZBRk;f}oz3i$TOCmVB)Ժlvy#]$KH-8am$qqS`{ UO({) 0&luk4.Pg̠ۚemgoY#QIȊ(74Wk΄@=㉃t333G*gQKh4gK-~sęHag5P&3z!hb뿻N`(OľSGQ6c89P]CuPC?y 5v(ZƮҺ]LDDGW$jG&:^_擩=LMܜݍ@L e/.y(m*>f*S?-מA赫}܄R Kt7;kKQ9yD9i(W\qT Wr[ȁjh&Ӎ}ϵwWv C6۫ -LYmUE^QľRzҊmAʝVFKU/m87ߙ{-P кxZ/Cv(f=9w(wlej6>O[*4xAW4'F;osrPx|f =}5h_saor ԼyX!m "џG}źQy֜i2i}J&d/P7| a߆l;wtˣәu4=pI|f}lU/&:J׋MǿQ$g-'բu>YZ hnt;rcaz~64 zKrd֬1Zē@m'0}wNm7z8"E+y[]h}x0O#?1ukL1N?}47޲mepd׿M,ʯW9b5&]~Øu3w3#ah?*.V桕h?>tO)aD}E)T0t%kaW|w4Bq5*zEϧ~&:}[T_OcXQKO7\j_^^}ߨzF$’Wz1*5b-Z_m2i:ЖRjguPRnz1VsD8ʬ|UP$qI=mRn'LU7_!$t?k{fV:sy=4QI_~<z`Hxʧ3qn>~\OGԽ,N}:Zѷ:j׮dq(>/$h:TqN]~ckT f{%NBɐ}:PҬk/5}gk*սϿ֠u-l_R{U]?s(}#&ED{^jWogߺzFՓb+zom} 6ۭy Lb'yȏcn7_+{U C{k#K'ao51nj9;ۅ_!Yv"[Q4uԙ?gh>o|?俚4`j6<]Ux4?OFTxݖSg8x?ˌHt"c#j/R<ꓓAߋ@mcq֋h ~)4CC71<{NMˬuHGGz?GegEoÿg|:u(9ul?^\UdOνz?Eڧ7 -=ܘZa:w|p0)ڌQ>2b/ |'c~wwogS4BOGQ?٣轗~GGk .O=(~}S;^R6{5Vdߋ &m]{wҵ3e#6E|9(Gt߳l/F?/ۣ6{e˧焣27h2z>8Dtbdy+}<ǬXO]glwwcZ/)点/n> ݞ m3U\(zϵs#{/)NC75BڨXKE -Xyٛ%;^p=>g˫s}˸:rq@|9w ݙw X} {Dh?4HF*)d=r HxP1hn$ѹz[t>3~s x4ga"O%"G21u^ 9?r.t9HV}ĉ36 7E{C~ mg9WEEic{BjS'ⷑzPIa&-cClӦ#[x9{rO,)Np)P6}/j\u P|:_#] <&ڛmW~=> B\{|{*z-c{|zOڑv{H>ht!xYyHt^:oO :7: \>__C:I6zOHE\XvB-boM}ne ay\NYOeeO.hwBO*/5U3imljح|?m9 
b,.G-R_Hί9Ruw;<|y\R|p괹?z/$U^_?_N;A1UoR {Ry> , Ϭ Γc|Q2srwx1y{"?qt5;|-=DG:?f2:1tKo}ȹ\K{:8M/ a+Z? 04~gzsn E9tƋd°NOB'Ƌgg+ȏ:G ] yW }q Dilx6ڿz4g,4g#]H~1?.bcfwh,zSq` ᳅0 F\$fywyuF9!I$a%Hb?߷01  q\Sk5EB)qi,8Q\x< 䛦b?گu47{'7 8}PaHtgQ430J<2f\m67SRv<Se%sNK!֓yu::m?Rftqw5}6"%H0U_'> nJk~Wl4^f[O:aJ'"MN2CGF 0<O~ `=%}J;E*~%մ'e݅'DG~ow tD/c'D3$~<7ÕDH4`N 8rJ'Y-?z''dpgWGZޜ(>Db]oBQOt^]ۆL`1M:dt8\t]vEsѹ%KBto>nW3l(^@`%:[gBx`="~?~uvPm3EyQ"Jb-TixC={o~^MZױhWؘWRteXA+$]~:Q|7Q07~[d98YvxA"8WйPρ3b{??(R,;4^GJ:=!uZ{Qf?$~p={ ?Z#r^1\{&dZ+=2%:Jϱ"?I|$FhC)b?%XG~b?%%:80=\˚=S{luMq%(Ә g3x H\W-ߺ+xUv3;Wu@᪕ϲ(ѱ(BL)}ڞ-;u6Dz^vIz?%h":Q]mՈ?W,_E8٣yWV/YBGݐ b}\6wR"?nˀWw7 1udM$Gs"O&Qt bݕd>'߲l:=azQ>ΪYy{fl7}#zGXGѹZ7_w?deI h$( ێ]{]أ }L¾#DGG3 l:gEan":m/g3uۣ:Z{%ǝZ)RNuT޾H~bϛ7 >RM֦y}r&Db Z:S~1'A:8Otq{[.O+G{ڧBQ~Px9=oz?O<ޣ%G/ *klxDlcY-^ItW4>^Y xg>lO]?蘂3WƲc^xT|^$rabE.Oo3isN3G듲%FAD#L2!~WveZvz3_Pj'f>_*\4ӏ/79~ۥ8b?d(1|IDOl;^ia?XGT8nhI8ʦ^QluR><öP6s}ܨS38-\:3MYs Z\A1aO-n:ʺо=b{4`uA6=hB-'š7F{/5taɗPY":}̄{f&oFd^QlwUǜ=mwo#u4R2./~D]1V2uT)۪#xk!ƹ \"n,y[?"{P=b(*Wh^C=_L{#;dc}" 39 (HZDGcyZܞ 1 /E߱٣hAtx21dm4kG c<_fݧ}%}TtN,ځ<=ǠsGEp n=(ٯEX?Rĸ+WcDG5[ є˴(/o\qlѫL hUO(3MQ/u:Dt)uE~IҐ'7V\œL{;vA2(ڏH71QNJDdg:Zp4Q.JGQ~Xu4(@<E'C~=ɾ{EG?H%GQ~􉎢x퓅xBQƬ^ _u@Ex~鉎>K,> 'bbE%|<6}[ûMSXGg4::qA~XuGLDGdĸ/$B%:ûYaB-ͣ /{u;X2RpD5\}GdvN9>8#hx1&G4d$$Ԥ$$ z9~̗dw5?agt]*ji+U#zGtjca?~j fvX ] jbajfBĄ1~e/xxWj$`ƕ3Ta,??PKTOP\r=PKH_i0#sbfQ"̜khSc976:e&&!$ dؿ%-9o-f%k al?_|!pikE#? Fwal'pYw׏p^܍f с6WѾō'Mu3u1muC-Uc4uq2FHs Ϳ(MF­j.C,&FqWqin Ax5Ï:̿{=3@QtA%sqK* O8KEO֎*<:{4? F-hzFfzd3=jnBf0 c\kMGbk1%(~Oђbr\5pHs Ϳ _:79 Zf0:&~L݄LcǺ:@arbǜQ.3š}073Чan.ehGd ''L6\i/0\[VWm4xL5~F^Hs Ϳ(_/d͋S\GU_kp11:3Ę fEpgǏ ƋP3K9_/BҵL͎jɓqҷ0,*nc*ܼ/TKKS^]3 y) `ط-w~8A3u3y}͉ʥ=+X>~VZKﴐ^xws;\;mo/cw+;7$/_bn /;5X0oטK-Eǵtt$Y3#X5D]CK~*vZvZn|jZ?[iLi4N1EE1z+8?) D& 9/0Ms"_% 5fH"?eo$Q(W6_a]+>Ňu g_E69|r mCHE#ݘ|f N kglNZ!j~0W<=7^Wa!cP+zodP𣈍T0agc%OAס!;fISUvRnkKa(_j/!lXPr!m󻀶zǭr3_<9g%IkIc5M@]h:GjTl\A٦xLxALl\L0.~+P/Z]h:s"z5g9EBǼ/b_)7+9"j_a*ZF8[QAA52wOCMSB=:/ χz=2A/YTVIgP$3N\jE"@G! )ϖGC(!lljYZ|KNdh*;74Zg˸nۛ4~&"̋BO4T̹-*v6Yym44-޸}y4]a: {CslJ[h1YV[LN=H`38=2',L9h~}8NH?؅hϻ9t<ۡc~iz-_u0ȫΜ*WClA3Mc2~)MٹkKD ¶AC:mܓOؼ{²=Q͏w>)8Cذogа7 4[V|Վ~ wցC 5)ku6-w:(jMzoc&B|=|:$l)Ѣ-*9BKZM ;)MLt\ҍ0]@X~yO4 ݺ_hT_ tz+J$)m.ގat!աͳ >pNcԡq詇NrXt PZgOzڎ k?1j[Y,u<<گ^ (PˡZ:.ǜ? 6`SNjֽhZ*u6<83 itOX}}3 _9wua7@ I3~5 m{"F&cn# ^6I'G vBGœ}*:4z͂@ϳOee\&6{탎*xz@wYEՄYAҒYxZhAޟsqa3ʡ8MRoSΨ sI9OMB~f GSL7_Ps}> ge)=^mͤwŀpѢ#oR쓴{K߿s;i|ޙ&PfIy:ؤ{eS}H+Emc_^"Vf3?J4ͅ9\c<=klܕnc*<;\kwhnL2Z]s=b6O3FO> ~6{ә>U"/jyPCc667jea|`m&~Wzq2akCkgh'h??ڗ,07#^ϰz ݗz;6/&t~9LtNhXNUDu1whkP_ؤJبtyM=:e C+M lX*:WtAx$m3tp>pp+cyU1uKV^͆[)^am l>0eżЮ[\>N۟mVQ}(t\6u NtFˍGCC{Ҡ >N nr:J˶}Jo Q`7| ԝ:~pX'{ɋqg}ȋ| m_)C­3F C4z8͖ oU5eLd] {>OBiY]IU§5+@њMl8Η]˕mSI0d':͞_Jc+hjU\O窽c7^P#`5! `rVۛ{2P{yX'%BN4%"hh1!k==.n]hZ&^OYɁt|,̈́>/[/󜫶 K'5L/g"{y`i}|n>p/tC*n\e pU+\Ix鼈hXU6ܷ=hIxk*|xn ܯe9mMhuF[s^H9u!m~C51IE!.Eӵ"Crx!\iR&HLt1Ղ]J2%)0ON'Ȗyi=e'ebEI¹%uO)s$O~nD~?ĢqS&;P ef9ل|n*)M"L(d<ńI3ag<ʸ L\hճ7V PfdLL3Al1/̪k;FjûdGF/|ql D:t{7DOO=N% >!oJ:9HF[|<ĮP2gW 1cx!aj/7I>ӷq;^IzGn~W.$XL'Q]Q#bveTh񛿖~|Fyѷl9D\vs /ٻ'NkyiQn͊bi%u-H颮$|oiWm!̼:oD>aN5H^EJHt1vLeN A/{! !cߙ@杹k!Z{@4a$5q]2oG*O fҬir$pk<Y \Ǔ3Uyeϭ|O݁ mܗON$ډOHXA:" K!$0E2aqnj'~ x-lKH=<"o.y_E Rz.i+TZz}9f/x <҂F>!4kU) G̓vUI]I`n\+-cV>)wBYkot^Zա^u1i)gݪm oEҟ@@F!vެ=}gXϿR'rT/!@0'QzdBZ^\,铨xO[ WWlYYԎLK]/$%?(QwEz tRxm4n [Hn5{ 9γ*^c)$m>Q?$:jnVllQ)縌}CSOzBmiVW'Qreɍ AGde$WA4jp6[C1aSH~-GY"eo)osm>*)㷭Yz$ )eקl! ؛x ƯjWߗGvqr;ܞiv$Mt7[Ovё7]s"AK4$SԿB\և!!k}ɤ'7!sz6#ΏJbgY0.bYD%k D!4j˜crQo/d8Bs1ar $g_oşXV_ˁh)ez=Ⱥ>At! np#|)iokܤ}. 
~.Xɼvn7gHW)B+ٮ͏ܽKfg=ss TܛDOgzwcHZ\$j\+>7/KH,;knWs< {.KCӖpL:9H,@0Bj;gWxx÷ sU 1nzW BӷwykCߥes!)f1,p*4yGsZlW fT8l[+]5hTNI/GpwaH-ӛgS0IƼ:J厦ݚú;<_dY1a 8|}qW -Џ,dOzKs- جS(@K$Jd-GOÛkB6E5l>);/?pNNqhvL,IN~-~71Sx->.NLvi)NduvMyjxGxm_})ANO]YnsgdyϨ;sğ.e`bh,2b%4f<nS^J(,@Y>i_Ao~n'؂#^ a9> " n+/E>$ ʝsaR8Ok ]]g cBdzCCijB{R&@ӯ>&lU| nTWߴ6yi{eh^uc]֜8ޢIؼZts 4]<^cÒ=MFzx,dyFд^qs1?h&7L("J?ZDua2YLFZ/LVh&Ϲ-_mLGؾ.3g԰ msx&FWZ$~ĶQKL06b]k]Gn$ٲ#KWf![;vQ}( !뾋U|YJ6.DM<΁E}q\ *k< mJ& v]BgOٸлkG{BsqlD>?< =#%n.Qu%n|zyvOaڣB_4CMe|Vi*!-Eki7oZ [<*p-<ºSyyvsuoPfl"a݈ǻ't:SncZܬs>5-; BƖs{nn:8R&>p[ȑBº8;AƄ 6t5}pJ`Aija6tT+^ֽPG~wb޽P5yZX)g?<7t^4,ի*6z0ݞ3˥{K;J? &l1۽8aõCNw8Xͪmc$tCcA½Р[nMin1R([G Ovk{ %Ӊtޅg%-ℝZ7o`RbXpKaO;7}¤pZn1RF:p]E {w*= -w+ l?*F|Ϡ^6Otn]jMk?B@ԶxKg8y|?|E9rl|ezZ2rQƀ0dB#_0TH*+C:3*bC-O<0*p[y̹ Dzp4 ^ؤC6/""vb~Xf x\vPy)-*MYYG8< z\>.䇅'<~.5?<1?0ߧ;r1WQ MpZ;kEPSAG޹o"%sQ`"bbԷ́/6㋙T⏻'3+|]'PٰF>YU|'֎! T(MԕG O܄s H㾳F$&J\R]5(3wwn,-+BkƄJMW+ J2;@NQxKiMXTm1OlJC'N ,@LZ$ K[oY I8EyryjIfKH2ta !{sZVc0%i*@a!ck"0%RuVL銾y;/DAFW!@-(Q{ mMt`ÃELJM4!Me7˭̂]AuNit*^XQ։laA[Ǣj(hݼFC ƈ)s=~4 >Za鏦{swʜ$x<42ʐe:4*aņG ˰gk_A>Z!lSg ߶ z;0ZEۉnapsݰl[Vǽc w_a )H&L-P6atfaƇV[ cw儚=yq8Q 0ZK Ÿ[^BTwJx '9 o;0@{Eƌ9abLP h*{ ;wKAenΕ1AFg\h%ІfHρ]KGE.4@=?RRި0 ǝ| {!s赇b wa7{0%t90&]L=B%OGQ/FMz&S_H}N )mၶlKp(#sk&K= 臞4 Ss5jԜj vFaXW̙E\%}faT0'J't)a9à2po>(5S,g*|LXv[>nt5] u%Xi=>E->$t:VYE:t͈FmFۑdFHBb >j#WGES΂{Kz.8K*h;5,P`*8 rj>Nu?Dnη0mΣFWD'o7 >a*˜{uy}\u(9R˜/_x7<-[bN貾y[=s<%tlңC>~ g&굷ֆû/njKρo&gmuf⣄^ֹG W1bՊ>gzi y;TB/E'cbHձt x1IAHK=]/AE3f\>gW[~$[e* +997B%s2%[AY%Kvƍ%w7'&1^s]uTE|G cLSܷ|* K^Rf54]VaJz6ς-_ҦqC v;à~VSî+pl>aД`1'@Wo@Qd[AOA4HA8M==nZ/>y7}cLB^(҆i:zrX\GqCn,sT'tLl t+$!"BE؎3J Y)Wݏ]†a?_Ȍ7 ёw*{2qB𝉩:SGLkj]FL%t1»$B|Dg$BBNOwZLZ6DFQǝ}y4p_;K~tp:AYjQ-?Y vϩlXMYЫjLNey$>vu/&t=`i1U ]KB8{'zʉ^*G*GnQ/u^6qyfvq]BbE0]rGQл;C]wj|XxFc ՞ބ>KI%j:K5x-zm&9y_^1y"|[, ԀSǿx An $ K۝/(a"/@2=={f&asGOR A/O !5 LE0! CCzKb;=_YE#z;+Ɉ5ox,.?nO8gYL ^px^κB証|p\]eyf'Z$t _>~=qwѵk*~k"+`n]Fw@jŻv)%{)]ܒL ].-O{.GO\ŲOn*uOo {;.5vKVsoS&Cu8=|K/Zh,5rͤW\zq:e7oT$PzVS7(w Nnܻu-K@Cnׄǎ|6hH'P6BkW,?ޟS[&..~_q&_ۈ׹#?)"rwNw?O7T?W/o?{ |ҚcJ}.F?<"zݷ̇g݆ύcx>?={vW>qO?>d<}@=~lǥ<-Nܶx#u6^ rwvd=_?wUvd׹wv?hs:q^)k8} :n],o8#_԰SzJ+p'oJ5A~o{MϡrdY{Ð;aF8^O41sA-N%æ,{dHtlҭ}N)לj.8os<#h\vψ]UW_jϜo?>}͞ϑ!ni~%K_'JͰZkv]wk(]sp@+VeIW:/MYIw)6R^Uj+Q+H=*`Pl̅N4s&|Ksj\Po$>j,UL|;FYoqbA;'iJC#Zv\4 8U+b(ogPN/cvH8IO?r[??VrD g!<4sbiz`K;;u(YCݽgʕsSCЩߐ{*ݡNu\/csOvOrBxjG} jl<.3n Q9xh,4i)ѓNf5JE(G~nN])g 'NJ9y|b^RlIy8[sCyPkY/(X3'J +3ifI|,I%όN)VGzel ŊUêM I;7<ͦ@YO c" H>[sWFe>Shzw՞unտ]yo(ܼV3a#r*z[{hn/A̢S+cH>۝{T^3p*;PqbcD+Kwfר0Ԟmc %~KupNx8f_!Vu_#>y7ۀW49{8kzMx*uk^shpbv{ɧO܏IU:߰}tXjm8x,;w8] #E&81B3;Fo.xI,X͖ohIin<ru׈ i?ppu]E*qk //L7Mt,N~vHnO[8=}?uŖwUǜW{m3sQؤ J0!~WzѫْwWpWׄ^HxfuB'`n[e!B8s=xO?qi} `!x\4y %scιo_wTv}7H׃_ʑ|zpz&+\Fi[\j5y~n]"ޞ]"G$swϱ҆PS@x=/.O8]ܮ񵇷T$8߸=Ƽq #n.S?w[\1<F3:tmvG%kKd>n_X't˛3zn|ө1d͋pg>ptyW ? DNpĺӑceD@0p,qtaxѮLNo'/K~zK~Kxy1/#?[t:Ugipjx9XN%^W7d&ZnxyE1Puz -޼Ǔa1'. 
hjH0zqF~v|r~i/S渘]Gq?C#O~q}t Wc5&-S{S-~݀;.tqBî߼*Y͸ֆ7;ItM***s&P'UhnL }nZe @5/zKWTUQwPiP'}VQgAhk@kPޙ]3+ ׮ϫW6j4%h9y= ڦW] hK{xMp&fe*ФzB%)k偶qs4%ҧ0jl_ACM;{@e^DϧP Tԏ>^j UeNH@10d@5]T&Bۍ95@:,1e?Pn^I5T!hWqP22,eJI;4Yu|Ta |͠Oo1v`uBz6uH{ٻtP){rMjPyZr*#MJFqt7im,#qP蹪Q{F*hZ+vt5kj[6PZ9~۲`Qm߱A&QezVTl㖭P6'>ΖQP&wTq?J4ɂ2ԢEUW]6-P̠M.&[@}?`+J֬"׸=x:;+ YPgp̢%@I9 On8^M5P`?W *dPd S꫞Z [w*n>;2 KtnҋK?~v2ׇvmnIS&kےx#/=q]V&7=9I4^~_c}BwӿϬ/| |v5WtT]e@oO^jH]&F-r׸rZAuby }Ш=CՍk -b&5[׻(Aխ 7 I{zI&[=tUh$ *Vգ*h7BAw@U_]UQuՁAsȸIS/9"E/ϰŠvLN]K*< !榭 T 3f /4r+ZBڂ}cF3^g~.څ֠u4N0FYk cڞ T&7N䀯ULeXT3\j~zKI"]>fڃh}W-ሗcXP嘕;)T>;*N~ŶDC_FoWj|?T>OGv>:(5Z?xg0#8=p@Gt}r LG:p@Gf L<>k|ua~0Epa])L7v~K[,<_Č#yYr΅ss|<㔨@'9oő=p|.g;W#KzYے|(:w@s3#Y<>z$;|_C#֐sp?$=QG8ɹu|0&=wr;9Aȹl<\(.'?|_sׁ\gv qs2y]_|3=:G΅亻c+]~E"p$POcsQ;z.jD3?ʙ(Nտ?:'Ǘv`:Ae7i /x}qv8oq ͦWq:Tş 2P{ rgKsvz ?OX'XLK*d 'ˁb=@09@Tb!~vP;b?OG1YuG#tsIRX7fV9/.5'Ox}I>eQ?Y'ɺ8V{ 7$Dx|Xg݄u#xt w;(߱}J^E:nb]fp=Cy2~za]QoY;|5RX8Q #d^Q8(zhq(/"#K,GY;;UȺ%K>q}dqx=4Tl8XQ< Xbb xzg}XuDa=,8}엖kX8s]CON\#wZ`(j_5=.'"+X?iV &,:](q`v*YEIj g8_Ŀ/^Aze߈?t^Y׵Yp:ˣ_xY;Qo:[^z$~sL=K&g"daQELƦd^ϲ΅ :N(]UG9N՟/QcٯC`DG(z%QlnQP:ʲ:G(g :2aQV{[VE<$ﴰ2ѝ~G}Qz:-XCd%:QI=(J]GgQ}~&__k(xŞfQ4bQ%:Ӱ؟xI:w>n>&/m:1&G4d$$蟁61A_tͫekT͏AWܫ?5rIc-t5x f ٪]JJLgx/׃WfhS_j`oC-MS=CurpC-#}?kw t~a?g{#![G[ZzzZG4N2sehluLZ 踖K{[2ߒaؿ[8h(R$`ɯR?_H|!r?_,%:1>bhI:*.݅LeA]?yq75F_F7n^B4ŴM W=opnh}xg ́44 jԪ @3]ǥ 3@bnjOV0[?:ǿ83 @iEi)m`U.EjK kjDDOC18~B^3pq 9_濜pmYG_ !,7PEF2yg ́44o5/NsU%]П~c6??v7/xB?g,#8@]t AɁt Xm)ª'[1 *c#4}ۄ_ߩJ+_'@ݗc>6C@^=^S`UK_{uKQPViP'$ENIKZ| aՍ*5jP2ikΜpj)GTڡRVZABBv'.vnW{r +NJJV}N SʪՃ.r'[_p>)ClރkGAG;R46͘HȡyyG)ڭ>R8ۣ>ZiflNò=ݟ -3j-'R8QgPO.2|f@Ɋ=.|^0Z\G;Hx#W,Ňr"e뱇f]T!T,js= ]Ωg'ܺ:bnjϨp鄲| ,*U}&8]%9oNJY *(k\M}V:V&mZ5o˸ [pѺWLh\rIRZ渥Apt|# z_?%F톞.P"l;w ]BkX2гT4c 5(\cbo(1P5҂:Ŭb Ň(ufXQc$)\;VS:`BNa%S89a+ odB Myf#'Hu9 V;Rg92S2\|v`@BiOCZ}ɽ uS:ʭH٩űkЮ|*Cоl坰 ( aB ed2ΖZ'} ]N[[w>U.!}$#G>Hr&Zyaй23G.w2ٙa/d ccB,!WCˈ3&Rx4 <-„֎L]B;Ɂ$[]ݜ)*|zl;$lK=C6j@XY̝{.e%VLuyj4,KXs{h|T,v6t >@ץNОCNʻPwlfh(+y?3CKՋbRܾm +vACF.n*/Ɯ腪1M)M +/>%< xߧw_~fj; +DÞJq;\tr:x|4ԯZuSgy.D8QsUcRk ?>DRe kܙ2)KϾńΈ:Y3mݔ  p:A{hN/) |ÿgRΊ<_E7j>/6"r,;u}ϕOvLudo\w%l[̘cK:73z0%Anña{ =V^8̱ _[UCHEYƒ [J.Y\MSS'RG-K|{]3>O좤Hy8|5ۋSɒS3ٳL3){42Ւ˜Щ$$ӓ:w5ϩI&))d O߬XӫGQj@sW rxx ͒"$6ޟ¯T~PƧ]e</^~Y?>;{)ԍlT'լ4^ |Res PF?| [1{Alzb-t5XO֦\缷/c2h Wړ6}8~Jl a[O:M'ӈnOUy٬pԫiU Y߲u k_䅗*WjFăRvsv6Ca21S¹av^6d^[J g\P>\hѺ<@isv*;Mc I: |0/mU=2<ۣ-xNx~ƹq6ᬹAnYkgڻ&u^ "V-BGWEt^56 ^.T~Of[%>Ek6aкGVBփJq+%Cs w!LҺw*HһkȑSm^aJ H01e9|Al="PٻJKVIypNFf%Y@y\'7܎~NJLr8IAPXUO\lvsZ^M?Z aKɱΔ+Nʳ#&LM=`VK2DC/eA Fj ByhǷw YW}#Yb Yn'|˿oW.8]V^>^ =çB뗕6H>klZ7HsGAܵfPʩ_}kHCid،u+BA܄Rg+e )W7uVIZfqUy(PY,#gՌY6 P7CNw&T~H8s靳+];0AK !$w|{5r<60.abu$NR$Y^FlH5ErW$@jBz!_TlV*8Au>e8(`k@&̏F/|V.xgfkJ@+|Bv^dH$X!|J;L[dJnů͟h`kSz;'>+Y,8l6u܊'0=rz+ҝ[HhaOd>XɶXd,k9 ߹Q%X::΍8 ˷,~jkS)e> 7_DM C ӂ2w#RRm ^Z..0}֝t(Ո5[/(|+=s}6iknǎ%Ofw5;k,<tƦSȜfrS0ӹ'\(tzc|#tO~5}h(=m,K6VD55:7[s—w$i# zL]@i/L"9`1hܠ)DD.|luy0aI`z f.t>0v;)coŕ(' i8JC-KqJa w?]tōrH7U>QkykŇU'6yUQiyw~ Yz L(M}F;u#R%Cۑֱ>Q6_lj>ل9]KiBЩ#bC)O=K /G/tqRYVJފ,, gBO%I]ZОov<m tUt)z뻣 CWh(kд!$n4a ՚wm-յq:s]NiZTzaT KOy'5 hVLmB9*&t|NrZj+bPwdFtA!-19㌯\4َq_O Ca9"NẺA֜iՇeg[M oA}C''T-r9oF?K MO{F[ˉslH(̍>9.0uyαMzj3!Ą0;Y>7k uA]εQӡb+/DyuXVe%UP/bܻl/(+lwoƏ9u_/GpnM__H-<)[ژ۞VWS*Ǐ_<2'C/[ a] ʁct>ۆOtDh9lfj0k]J*wdG9=N;!|R-8A@ atSe=%dAlώ%t /}Qa-~1o~ʊ7;ty%AB-gnVLHo`7TC"J>p9->Ź*x"KHQa6?zg{3dO@U-U!Dj$k&_2aҳ%S/"!-k̃/u|>DJAK\ilY}~jL[+MrUHs/t.!kqeN?>Kx,ޏ>l2}#'<)`Xt2=p~| [{sd WɎk}۔uP{)BjjP{$O|z2Jkq.Ue~h΅u#:D߸BX1}U:΅ytn{hx--FF9zPe-l9h}uk*=~T5VAH[?UFp]i\'vIo6ȧ96D 4 t.:!>il9GX{*2ITn;êW: 6X9vIoHppF2!_@i))Nx^8Y2Ut=..i}&ly(Vr\ iv_NM;}4Aʢ\awFС"x=3|Λtyp,t~Yda ՏLZ?~rZL 4131HL 䎶k(\_KvS8؜co^= OƞͶ>O#lҬړXüc06DoJ6/͟7d\+aqk8f.oݩBNjYn3&\hz=vJhhn. 
2r?!tu\#ͣougrT_GWuQ9[ \Z@P=Y\ M47VN#J.S;x*oAy^Zk3}s}.<*th~ը^ʡsܡ+@ӝ$a]h%>:Vh6=2>7s$whbM_1٠uBN)w a'MSU;Bݼg)m O> j|:ih' u.: :㿛Γ4͔~7_^F¦\'և?_;+e/ m&l^9]x uFU%hxB׭#u }1j1c7@#AX9h K5^]wy_-O '@ĺH癟՜0h\$p%MWRq' t}T: }zOzk&}^>ď,`\YQIՂ9_Jk/K#5Q=^dqN1l?aܤB/A+N@\'] \DdZ w>ݠTv ̱_vCBXH~#R6-i^'w^,r05z9x? 8ޑ`r3o3v>_X9>MRICGO#9*eBb!wdǓtUϫH-|F 25@9oOIR 4Iԥ~gVfm3㿧@.bLIL;N[Lf]Axjo >.&W, /s]a Rϴ̻!m X&@ \ƥ !Ƚ;ǴYkk.6ҌnB=2b#&n<*rݣ <Ӛ_ڃ$[&zGZj >'3Pdx&B?#.ݽ9Yyi=< c>'×N)d0@HF+S״/Uo% }`BdFvܞdϺNq*x7 \O+3Bݏ0|ߪGRzѤ)r ZM\T>L 9Es@怀Jb3*fP@rΠ$(sF@=sݽ}ه}Nꪧ{{ǂ;/'Pi:o5{[c̅jGCq[Yv)V n974~4JK]T\LRh1(=A|Ruy1׊bE*aqH"Pox (:e;}cůT*<>GCLg1iˆ4kO|a]UO@g']qӚb|cQ(?\D2rIe63Z(Vm!~n=v5N#FO&Um)wWI ݊ UL0cÕltzr8%4>ZXeU[>|] *?u.ܙY&xn%YOѱ0mp̝m w]dH`gzUzsl}h v® Q]o&]:UC4H~dA'㶂/n+xcz3B%PqVpYPkx͏!Ŏ&IѩOSt7s/U nz -\0*^rB! 9 Ζ{VaF%t.kc0'f%g*Ӊu7!lYe4#+ߋ/w[RNJ)šg(z%Y!fip 2S%(39 2朗ԗWVKx$n~/~!T2bgrwߙSCroXm3hd.~sb>eMxDzJmx9 ģA<7 E泮L Jyŷ/6Q]q:8dseXnM1yx}٘g\~"6znU;:b5S̼ٸVH=D/w?Oro?tۑ'>StOueq:Dt )RVޤq*4z"Dߐ1Z>D_4pw&{'W6M1Gȣ^CN9H _/ZLZ~llȸ@IPc6|QLl{+Pm>AҐbn/W}*!UKF[6Q{?~8Xw2_+B[S1C=EN*EwLcI*|Jz3C|֙US"ppAa6pa%!r:_M.!šB]`؄-}s=ߖ⵵rBgM.yrܱM52$tޛ=uWD}*<$O&$J-:YԹ#')dĐk1C-nlM~e?7{I}.ni oڕvu/,{PP> ~WY)6م[djӸ4c%ǚ!|e=]":Cb5>Zka1ʟ&쥘S<3gn .J]}`1b-M&P9wD5qJ}z9rFl>- N6d˳Qkg#H7HFu{ [۷Gm|ol !:'n쬠,4vipw{n`wj6"Oby l<i;/bZU=52ןXrXW<nʗ@]ݽ<*yqvLLӤq4W VyA i_^_=aN۾ow  5|-@" YwC Ў[dCO~w~j˷L|{ldr K"O6g\Bkیm@C؇7JtBoFcJ@9Bت<`u8zF-!},e~>a}F]ͅ9)z;^sO}=^ PL:ӆbf#SL=㹐j*9b?>a/5ŏ˝㧡kW B|Ewjm|n^ Tp>::i4 8f<ȟލۍы?!o\Gp[QAc:*6 Ffp4c,zd8})d 8[c[˞wxzN' \^ I婶_):W)۫M7ͻE5"9'LY.feW$x]Zpvp :KktOe~qi=eYKM>b 4p7')Y|x,xd<ҪY''3;Q"kep 类nur[KGStɑ96W\>/k'0D1xcb]sIA6bP[5|!7G|*M\>v`#<$18_8lz{WeM(:m髪y6ٲ CE{GN܄,7Ε+/4R5|am5⓾o 5}&SuV`,Z%=ߨ2w\7897G\pQww3ŵ%\ުE6˞_{$_g,)i(:m/KakA/PhQ٤-\㎁SQ[(7ʖh}\t:pΞWz8_Xcp5~ }wVnFp閹E>7X.œ-%saU9} R=V-\5YM)ߕjfˡtȸ{k;GiX{'ǞW/]3k$ !E52]Vl9X3j)䫖OxMG瘦:ŀ*o89ѥ+ʅY>CF/Wel:7nA(NXs|" 5zʹ'|U=ѪfTE/?Yg<5b"‹74?/禅Or!;^?a jr)B3ԟk3\˦_o>eRUJr]f4NC~T {+>Tzr {}Iq!ToZdSRulX4w9rZUPzG OnWL;Qݾ\97?-3Qzs݈uM~/fmH~;J2N)SR|PMqHa(]pńYzJ<֜2 Ԙ~PXUxI ]^^X4ݫ=@aMPx]jh9< j(M,|a΁Eo_)0|n,ʉ>|# JNjoklP|G$l }6hUlcZQ j͠^B)Xb9[쫛^s`%'r6,:E-&.d4}ֵ<ŁE{擆GoJ!6B}AdpDH>O苫 Afo@ٸs οw2n4׏WX^\0:*Sz \3ܧi)zβeEcа')\UnpgwpQ#9*EF'wls6 ?TZv #zoJ}t8ZvsX_ԅ<<{Ȉ˧Mk"xjzGonMJƃ#YUGL^D^9[(9y/[n/;Ru׷JC%iW!`qʬS{A:tM%pSm ^gF=u%af @h|ռ]'9~'V~\tbV}%LgC=lVo)i~1rrMrsx|rk.p?~<+p z&GkC65#ob?^~&v>Yg}sȳןgQe;UrD(#iMxOY?{N>Gkbs¤1mćb& Q};/*RwB%'=' vq;,0抡vz_OyenӷtƃTͫwSEy]?]\ik;r?^;<*z<{y'cI68^a'ooBy՞گӢ׃ͬ3:y LE8fG2z?xdA"x?&ot[w %f~a_Q9~p2>1Heua.:;(G&Am^N% By='r?V,z ؉;"~!OEp?Tڿ8_^*gLJOY| w}6ִ!ܝw7,)#!<:p']OvS%%~QzGyy}R-H@ѣq<5P1# @1œ;bl18p/f-EcO ^zs.s5mR6ֈ+(0S. z^az-HNS H^(7Nte8.[ه\܆ ڠ}ϒ7NaE (P@챠C>ڻVRH!~G?uWr}Ǜ0*7;xdȾaPD%e$n^mXvgGoxńO3ؚ|屚)PcZ\y(`U T2YH)(iwqP} yƔ4!-nPJgM7gџ)evn 8] Cb'-˔cA(`)PUJ(,'w*8oξ5X6n,bkO&[f_Qaۏo?=pt@Q;1JCoqHyxWS)R^Eeڡƈ5P֬17N^@>E^z2enc , U% :Byą_|7׵kCZJ!;_=U֪;ԠIQΦE:I^W]lwJ&%hԯZtngBׇ@Z\2 ;^֣[YRPfs\>#!AD;O~{eelK: C;(.=kJAoN \e >[gM#JO-UUx }3FoZ8 d|2$3Sx\2=wd|>t>/qw8k)bg_ZOR#Cɏ{u!PJggP^3'2NYGǵR(H7BaR@TrXۛW=ɗ =Hgm>,ɗ$>gW fZWZktک* "mmXHCx#ą_ :yq͢ȵjw8mbj?4+&|>׫Ž4 gl>s C;EQǼv4BY^wCjG4Uf7 ẗ́jp@<*|⇲vu[=p|T̋VH'* bgkO##A |;A(!岹 H>][R rͧī:5ټ. 
/~SV4|gϠ1*,S|S٬Ǧ>(HG2ٌ_زNO]m̏Fk;#^/T b?ո_T|X d鵲%; vRqĖ;Ur|ᵭ\_a7P _??7\v(olƩ~߇bwɦ;l;Ծ҉ {rv:`97·<ДqU2yRU1z>x|*4{8{6-/'v4o.X eGvLJՠ9?n\Fx!!ĥEzqcy-N$9Wluamؼy$bw'q µ# qG ?z=[ҏ(Y%q?i9ʇN<%Y7Z#c:`f,C|{}-O@_8'y&yJ_f('\GI\/^ai9aޕ(y 5~]VE~  p~+k^*ߕB3>ur;kZ 'C@ǶmZ$[Ƴ u $8Yޑ&P}>'n_ؚƘLvm={H 9߃v5?xn'sm'_@[\nuf}R o9x^_KcO;ܿg8vWW=ܔu'LBQgy'sI|_{E>Hܗ Ƒ [zAaST^<G>}d?K`D2wd_߀`<@f(GG}!_!(@zOW x=㸻,ir=7"yd/~@ K'WGrk^M7z;~p:_葞2sxb;˭y$t=C'NcU v4h7<~r0ޙx^COϞs6:60clcnI"Þqъ'?f(_:x<>k}֜=)_yEv56t ۝瞐_zEmߥx=^zD{Iyz^b>ōG1CDRs59S2xM&<7(Ǹ<݃3w>(G] "e>)'O&h/4q0(Yw8Oyk\z(s̟G(Oʣ 9><w< }?'bG R%|:ǝZsL#O'LeKQf>jM k>3|F1u9^؞q."|\z#( H09ȗ8nɹx."c?i>X~y Rgؒ8y"|jV㋼|DIy$Ǽ y퓞GI׻EDR红_y?*>(Ky7Hy3?鼞\3S<ɣ\2uRg[<9e@ɣhy39''74CQ_QCehHVPx)hONCd QW3g}G\Oވ"(b/6iQ\o@#佪y=g>JxϝFd< |@3>'Sy+z)wx(2Pʣ(9Q~wI΃qpx8QGXPy2(\(/g2Qem| z>E^P\<(#O|~<ʜ ɣq&}QKpQ yc};ʋG1'xQ;ʫ&<9ef~Ky|$_])jק_8ۅ]؅]؅9O;tgcO#u}dew̔************{x؟6inM5Lt ɵMZgM `){>PP\ huV B@U]9_^FVZʤXƚG4uo=; ~m^39_~q_z~Cڽ[GCGK_ìSr-=}Zv0QrCZF;v09y4λ8Rk&]X@j~!_/z1 _/fFy Z_P:B-Ptvwi_;O[[ƟBF]6ev?/n#u=-Ï!O>fWSV͇V-@o>=#]2j?/ԍ: d%&\Dh ;{)PZjͿߜBX2>c|*Xת,$ b*$-3OjͿAX2Mьu Mta^Kxmۏ.ԣr>4ׇ&Ԑi&HUK2kjͿ=gnu}&o24 ȨP7؟EcLPj濜`e,>H&P4Rsw)heangþl'Rl6S0buc$kI7{p}"677Z|ѿ֯ٺ?"'Ӌ?%eqӿͮ66s8K26WRqcaxHu#mc%ZZJBOtY,^$/K :VRQ!:;Au#}F}柕d?-\?e\aTQ 9c]vOZoMG0.ϙ?i_7@$^)ƿnDqbj:7"8eTTd+Xf_R6vHgtHotHgjF:{SEޔqua6]?Uٿ\;>\a;>BGt?F7bɣ :f~i6aj;16kVΥ|*rlgL{_&zzJkQN7.츱;Ӥ}~-_4UFNA^~ Y9Sl[~iꮋ(Q$'نtTWljjy|{G΂.&T]+D:+O8_XnGp GMP8R'DP,A9FpO쒳K.9oh[#b~R,Qyt ՚::Te+~XScO](ڤwF(?{,$N";m=Q,,+$_žo7 ]<{;(6| ZSr't۸FbQnjIϵP4Si#0-zba e@d߁ 7B1!]f ?!% }׹B 4(D]zi((M`W-9Oܓʝ%cVC9Yº.78Dl_7q#ֱvf?>?~|4{7`Y=DR.}f%S^A(S<&!E{L֙? ewق, 5jTi3#X2gJxUɳfm3ukܓFy( x4vyW J>='D*.’SoO-kP`L:žY%?cU"k߲Ɩ.2b}sv#Kw4-TnZ/f7qS;0_6孈6UCKzD݇#_$,e"|WYtG.|.lhR4'{3(;>81<+uierʞ"ߜHGx?\0h\y/Dh- ZN4m~^KdzMkC]h7`(}6nlK{n6LQs)-foY.2`4DsP9V}Z?@5!jK(6lq:JV V)ˬCDȊoPweOiK>sh". [3Z TZyfy%3{:B*@tOuhçs%X*c}o8 '(O&],^S&݄&X?FT?FxjX=ӳY+x5IH3gDmgQ,꺉Yʲ$nz(k<zLxͫ:Ǡ+ʞ@]5C)պG w4:~T.dƳy3ΕCC9gbT7AڪLPy-d!k}wP)gr=X*wly4S*>MI1/? ϟ|x*oͽj[CRP̿߾fT^úHh&x"JoxƮ۵sQ隵VP|i5T7-hؐ- R췔W%Y] C)N^WT͑}LF$G2Y/EOZ;ZAۊ? `Bz"2R9%GJQ4VzXG.nuKgg /^,>_v#BӎXqa$-o/8d'eD)qrḀ̇̄T!B%1lTo@9YV;oUl9 x>[OQlm3q{ǩU`C5y5μQa}RzKDV]' .CȇS6^pM-)%,lOy٫nieN?}%f=u{˱h-b񙘖Gf;?fbf#Oșx$T%4gZ>-:UJ^}_~ky\v?/Ꟛ=jkQTKSjc;A$aQ׍]JQH3@/%8-%bٹS<}cbJ=;G&+"Bs U<~,M#nDѓ%6Q5ք5y52yP8eQR!TB|7~mWrcyc̷r *78lP_%79J8r"TV<#KmCmBh͂c;&?/jfzeWdrV> G)G^/{O߳8?h{WޯY߲佫ktB^ C{J0g^lGw>Rt;i%ζ3-BUfCJ_C䤴^WoZrxs?GƑg^3Sx~Ė/Q,-$3iW(1¿7G@|UP8ץ#(fdM:s"2 _:M{Yg]eoP˝*,,15t~{,X?5j>gue۝(JWNjjeˮ%Ոb[\1: -.,vCbxd#usOQKQ6)ő/}lXNj8%u"B"SOԡ>,K y"B?+B=TG ֠el+# /%ؿ$pg~TA kҁ!WE9'xJWCG Ύ3ǥ_~VwΩ蚘l?d*md+ O%[l1Kbjk7GW<)2ܞPm)r%mY/LD&Oe[y,k/m/bEeP4:.wV!SF1*o{5^{þIAES/"_Go΃ hW#uٓY[_~)[~QQ.YL=Á~:n| YUxc4)ΓX:bDD0PXaX&2*sn"}DvZTA=Q외\zVo߷F-NW[3%EgvUm/ z%# +\:cTd/R^O\ m ϰ{& 79j%/-͒dcVж; ό5AT)}qn'?jKXx{CIcu^|Mߛ,2.vJ,iǣXsz]y!(ɣL?9^s5V2sF.G0iI.["v r!O[P_%Z#W݋%R>ni_ԇ=o;4}pnuq9s EN[ja DFnҾgh-kj^PQ7B!7%7/ G}^T9P#YNeaE]6~{CѬk_8Q_:T<;?ݫW_S<ŸU,9UZt\I ctZpSh#k^mYp7țbO{,/שa ygsNxG%q$Ճǖ8[4XpuVPr$""Ri(%`<Ŋ-(.nB#Ŋf'^Q,,ui@l|) $sޝbEjlhA݋+MRl8m|''E Js)6|h&,dQl7~˭ SIɓ{Im* .Kl^P?XJFW;xyGP* ΅bj0tPws&|j DORܘT}fR~0 -,+F /r fǺA:P.nh>XBzsN|<ơş(~z+obL=(՞%Hz. ->:ڝ< AUyc&_3wh~8aUʣ/su~3[YubnRt6?Őуf<1J#*Ő]mQ ! ŗ\P^m[5u Dqovʇ#[IB}c6KgȧƟo(kwᮭ`W3 5ubjТN v{f'j?I#bkL"W( QOLQf7Is .TwԞAVQDlG1.7i &^͢Zuy'ӶFhUwʲxoNo'h.xgKO,kY#3s}[;U)+>~finB=)ݍ!-`E1T顒)Y=ʳb^7QR6KAŊCT{ށ>5A= j{<_N:j;NsyMi/x5bbu۔)F\!=y ŨG<.?@.oJ!Jo~ ĄR{p b*=: b7otbENZ~V'ztp.qko6Z֟qZ)@S;~DA|큝{!Rj\HI@q4D(bt8OLj 7b}s;qg􎶾#;܉cCV^D1:mc߈:;"tnk0M7|ϑIz! 
$RCEq]׳hdN(3DO!:d0,cBG*?8ZNMUz Z:Y2VVC+ڰ,R;j[(I\!a|b #)<)XsvY1&X*"ԉ.Cѣu\y]{kcĚy_Zslņ;(~YO+S&Pl,p2/OJشU~.ņ6DhPlJ>i5RAcP_1l9e=:~mp5Xi׸Z_(qX#a{J1u]1(v7wM Xӽ:d\w[HVCrӎm/̇ f|G%l+!6`NTy*Z&DMX{I3X{ܼꝥ* )6L ?)bxq QP?˗4Z]:eEg>,z.o-]J15ѦRNԊ貱bHq yÍ+g~E1DiȹßƺU?(F]JzgIz _ӷf ᖫ3eO*Ad"c|nQF(O1zn(F]&׉^$JiwbhubP8rya=е/+b)D}|Y[j;fjC!>(&l7Cjcqv{OϺqbQ|>bѩw5T))=\v \uˑC) ~j q' K[eB܇5g?S~ޔC1cQT?s k>)=Ó*(f前aӫ}/򶧘k6ⷓoN   "J;qΞϐv׈=Fλ\nYB1z1$v„bEKSnkňl}UbdSj-ײ"wl? 1 ڻ>=XOAa]Yh4hhޝ/QUJ ~+/x6b܏6? N/8WkAPL1aHx:w H02HPiHi"|_C1zm^YQ˟nR?GwM,>#/_щw|ZҦ]X{ya16} 2V42q*Df] ۾6BԱ 2ngPLi/إF1zJb]:wwvWs(F߮#2ɖb/)YN{WHY./aϟ?? y4䬲oB"hW:aPt;{RS(]-- Y|7S U%nOvEn󕊄S=$8 {(k"oG>7M 5"^,x(b!/#BbâcB(ic,q1#sx_tE#6Vuj(\j8k3SHx|DW^v~2}xb͗ 5(Իx ~+$f+g"͒bfm &, "(;ïz!83_B Ϸ\ \=5?dKR=.%zUnΝIӀy;7S|Vd<+[]vo_^ﷹ+ƽG B ;UYO}`9dS5e*UC\#z4Aj#[s#\hl" :4^vguO9;bԶgXj]_z^ ·(J y}cJyA s[N]S CX}bi)R)PjO1E\r&qI[O3&.A :Mb(ՠ0kgHįK&~hy:tOCLYKQn~)/YVs+);ooC  Oϭ{4iǦCC6H0:7t0r}1`kgcP=+{sDk4yX= `6HSQ<~a:%Gҩg>UyP[Ҏ=~(ڧS v[n].Eő)GLM%_v$DoˋVvR,׻,wVY++): U~cpt\)+!t&_Wv/)5&{P4ŌCEyP̺j0љb=uQ͘vvW߃bsS,z4ZVYl}FeY. {?h%q Bnf{C /(F >7g wG7,\惣Ek){7잕*Ŕ8GBV fYoZ!oU~܊_ K{;6C̉תQt˄9^RMW8l ŏbk >O,v8RaK _wMX]Lznw˂ҷnUFT/sCL^~hݢVWCl7yoQuF>w7m2\! XrKAo Zs2ŰATjTS]J1im){I%:Jn,ܱzZ]ȝy7&׸l߻="=ڞ4Q TRA2[M!AW_j)Ŕw3~dS gjqi'ŏ7=c?bŖaW+՜GU<@ؼ9}yqy=oYie'no}4оQs W%\-RUecW%E1ӓlR%v"D& Q;bfA>KB]g (l?Ũ'"'1sNQ#'d ^ÎNxb#PQ72Jvw+GCTIy9\P5kg+7SqW$S-D]>)wmech)#-^=%8f8uZzj-VPqbZB4Y'?Cyo.DJ<4 ?9b %75_5/䉏;Q|/E`)'(I gM}u$~]p^N\QTDܲ^pO'JݥkI'nNbٙ ')7MV~ O#FVDQ(F"^~f Dn3 1vP}?|Y>BKbDlyt#g)Sz;Ƚ'Q xC#[P|uj4NHAd| lS9kkeuunkC1je SJS8T/QC L4OGe$(1;YI1\c;y'QR.{Q?υ3uCs¥!AI?k>_bܰu׷ B1f‰'(F[n>?,CIQ%86/ά)٨e\F1`$BQ2<ͯ O_暉Cեs̀ĕݤ\C]\~#z8t]GQ i2zA_!l_uV09Dz\]dC儕&:@D9[(\f0Ű/ߊ̡qhIbQ!w]C炛CD口E!i_]>QVnDN9؉J^mD֙K+RqevQ2Č)?7ňl) 0G%(UWJow[EW%3CS'șJ13}h8ň|1B_xK?~2HwjCNG1e^fA1R]0Suqw]1׋ֻ,{;O\띶DQ{?|qRI:\=$yԥޏ;/롐~|7GK_"i9_[˵Mh zwaYF8fQZg/-_j~7dwq†p7^>+?sg^sWQW7F-W5=r7rsI\))s g[(ͽkNNµM0uC)9wxO5%w9lwr1Isw|ݒ>_Z|i9kE/Zoҭѳl3̶k]vn嵜]gyn< <T}_>{TsG/gwG{Fmv7k/ˉ_3KD|7AmғC=8ndكlhllUw%jjvF9Yˊ(rM/g-[)=G]t#f`^x؃ AwͶe@F]0$NTL2^r5%3DXO-oz)wf? :H'Zo6_ &s,E0N,}9ϕ,ď@l.h 'o}dxgُO}u [$O뵜gCss+pե꿺/u<GF>YN9k~/E֯Cπs7qEf>I-d5Ac%Cx/Op D*nK%ܾ͋-w$W9#(<^5sIEN llQhϝSB_ )Nf׀rǞZ/90ñ>G!h/q28D&Ӯ G7`(p㔣-Кi]ƪO/3xڇg9ͦ8|% Wt(]{{b7~# ]}9KՅ_*MkOYK\בr}'r>PhgLD{~HCP>3iC0or_inި>ZvɴCcp9MG q!zvI@yQ~o"Qly47RܞPc/³8<ʃ`9/rQ~B^%vE=b?џPo_᲎K&a[7"I;KPN/f PfԾ.3UX;} +y=gɃ"c_^e^9ڞ(|?O2L0yt}bv0.!oR= oѼyz״8(/!Oprzԣ=R>'vsf=? e;[ǻGEE $v~;8 OhD#^{3o"DR5# 'Ñ~ğ^_΂S^zꐃxvcg$J ~ M{32 t)ķkk:Bv$F$`y$Ϥv󒪇wngyK&#w^ I>;؀7etO_AOGyJ8?wQ*P;K"b8Q S=֭(qD[J,Mq)T`ICH}]x٬17_xulUAFxE!ȅסdyD__ Ŀ~|_@Op;2I܇Y$$b_}}! sv;n_ w|3 ڰAf8y(o$>bX@4;vq`:\8w_7xHtH| vU=IN%LɄ>B<>~|"㋘FEߧ M) ?˟ɼ#O/Į~H'vE3oi/{3HHJ)E^Bx=?ɺG_~ %~1$|C;~\2|ү\d ^"M7%!|O&AlD/ecy:/Dl/8K?=#Ie?ʣ9$Cg$relw?lw$"f|O6;I6ɿ:c?'Ko |ϹD/xy/O  Hy<y&{&?O$΢#{Bx:P.wrd#gbȫid|z'f8AxHx3S'>_IހFD·SH^B‹4.Q..\2yDnʣ ]qE''|IaIoIܥKg"A#$ aL'|:C=%z@I#<-t'?eʣEDb"w /,#VF]FYB{(#(!yFG922e$/%~[x?v)-%^N+'GXHx3 ?8#'XHELJ b%8/!DYs哼#?R}vQ$gJodCH=M3Dh$NR!.%F9 /I9_m)s#dPN햐~?=;+!O)$PHF#*%|ZJWH ǥ<乔GOI|)&q- 賄ėR2ʈ_`{ed~[J-%q yH/#vAy1ʣĞˉzbo9qI}q,"~B3_ a;.BOKH~Lf!׸D/=t^Kʿ>ՔW,{%z=8:=٘g?yGm|DvA%\ HQڑRE?"E#~P@t]ĉk?Fׅp]U@]?"༸A}<,-9pHob*8J|HNQ»?=Q@$Q> .%&Oȓ$#O?KDd^q*)'p0. 
v~F%G?%AG+0O$z YAS9 b_Nˉb΂y3!굔/x"t>HZB=%dމ㼪K1>HWĞ.PПo#z "wo9'l_J<=!O78PF/tH*N@Ďi$Oż䭝|?(Y\x{t0$ץ$3gO8seݥ+a]M\?"\'_bCXfSkd@q;|u'y$cYg?h<%J;S$u'O9YOqO)LE$} d=o< ן|wk$zQ\"z![1|x\o@ 'Ǩf>7Џq>FABc[=W=!O#8dh?\:>AƝ]bĞ9|%4?kGȗ\IV 1cW(b?0k&qgK$S{aŘ1~Pg;7=ʿ֬ ~Ӓ<-{RX (3npض&CK:K1Η0`8BdQ=83/l<ĜcJj'\ /{*#߃2/]gh00++W&ZyF/<$w= Y̛wg>How2xʃ~GO8qj,Kqc8e'G8G0vq}(:/=zcщ-?Cqv 7!(yE(OY>{q>O277Y,&y$ߥ=ȣ̼ Nʹ7&x͈8:Dt~Hx- GN~2,_}_bp&i/23kvdIdp<ژqF}ksZeshω>F0}7K4& 99q#?]ܿY[`/y>_; bG? ociKҙIa '2v$Gvq-?~lâxvv~cK2؎%A_KܧAbK03]s#wbOz}7 d!G~p)AG; &pq_0#ǡ OR9(S|Ϙ=<M֫>wCq|ў^1^0 Z[1 0.'o]2z?J?zy =h`!c_;+qg'd//#y9>=/l։|Tl0aBϩ`G7}D^ySz c\v }}W=7q*ܷ8= cQzG#>gsjq,;gvɴO&` {OfG bȣ< <*[;ż{hX`C{n|s1a vL^{8'y//FG}1y*F>ec(=G'#IF~vBdoCBbkL>E;C3C y.=7  8 |'%r`>Q#^Dxk7Yf9s=1ʣ~#ƒ~nQ}21nG\.WQQszYWsdĕNb><<8qhOh4o"y3?e`9ֽp= #s<2QK4c~ ^&2ϥC>?ˣ4%!0Ϗy(CxŬg(ד~ ~~DʰSr1O0y>MO< '(9h|R̛ʃ \#?'(0?>y y3>qK}oy2=7Oԫ{(t>=H\@{ d(20g$s\u=nguGsɣy#|y3``iԾ)?B%\uas漞"c}XnϴS9sw7ތ8\/f39z3΃0E^̟9u|r>UcPro h|DFyw8v8뢸3֕|RCQo_3q\qQ!* pGGq0Q8_Bkw:c̸r2P} &>q7y2!ȣ32f/Q.Gq?;@IAD^w#h|#c ?[FE#{.*?=0ϝ}Gɼ =GPvfGϿg(ͻ"p~ȣ7#?3{"~X3gGIhNIQ\|I+1ף7=-{{&1y9]iO3/-"ysvava[><#ӽW5wL!'UUUUUUUUUUUGiO#wt[lacOTLh:S}}mrſ6SJUc|jV52061-/22d_66 4Os ,۵pIgo=5ty/iihk߮{iwgs"*^%. ޥ{YZz`^ˣv퐕as5iw)pޥyLλ;%{<-04B׋^Lb ^@b&uZ.Ҿw?˅Ѝ$lFN0/~4/^MuuFzZ;;8m7凟|ȧͮ66ۭZX5>=$}zFdL~^uȰKLM8nu v9S0 fWS9 d}WT0UYH`|kU?0IZfz#fW_0d(th~Su#:l/۷]Gu=C-|hM!~=MFA=X-'#WefWC9?zBLdhQ?nD?\v91#6w%N?Y|Mܣ$7i0R21DTIKP}N<$KP]mv7/0|+a=aH;5!o(D>]mv7oo5/޵N-B12_juc:fEOOo78-JZ?O?W]m}mqelD@gIF2;J4dɅ!XHI^u|Buwb&F&Jp?+ZZ(^-ҹ0*¨-;r+ƈ0󟴎O̭'9)8a]3"N~97-o߱BIREOo#.Η0NWBur[oxde҃Z:{LB,gkh)-b-N믽{/:9~r.35jͿfJ}8DqʨȰẀm,,_aHgt4x)vmwLJw|LJw|?~nĒGk)ulXS S>wblܭ4KT.Θr 㽿T-M.ԕײ8o/o\qc{70w`I [KCi r 3{ٶֿ]Qt%Z)%;IN ί>+@};\Lݩd'"VtVLkp,!؏@C $8$qN 8 AYr N%8%g]rv&'ѶF"wrAsc}ziyv~ |2)n}(;|)%S(=kcK5;.u.QyUp)^qr}{_8 y r]6o8dR(9pJ5xg%VS9 Ny74p Mo٣J%lb4m+y!pHߡ3bN̞/@ۏL"_(UPZO1`׹G@{B@nBKS2Z](Cd(/8{R5hZ^u]%KTgܻO{9؛ս4ljVǎOZ),rX"=&JQ{,IUg֘7 lnc]ͳK\ەbQ9O3cwkldNh=ZcKYg5w1_[M*o>=DQ䱐+sYr|8˚jv@1ʏ:o`IOңXŒQZT?"B#c 7;5 ]~4bY%\m94`KT)?/lNqe1 XBoWKQ˴Im,Dg9~tt Y7{~Y=:8E!qWa Os\"4TDQd5pgS9*[Ч(8HVuKdfɬ>^+OS<4}|"K:D}KݵȝeriEbJ4쒲Lcu=[rV< %k&`%[>?#ձ,a 4ߋ8ѩf\X@q 欁WETT˶9b@0"(" *V11" H0'̘0 I$(&$9$k5g}w{ Z֬Y3Uuuv0i6H{_Nf^|dS{ƠK/߯&\6cY:xxs}@q4䜝4Q," S߽ dLU |z'V.1rdɄ/>zReIKH.)_owYL"e><p|c)}6RS53}w`.Qp()?eKRUu1ԎXo4LY)K*> Yʼn iG9LF^ﭢ{:L*r]ԛ__iYvIYcHE:7L&&ʍ;H*!KMP@Jnj?&e:Hu짺ďIwKdI,pOY<9h:dV0mptg(>ԯ`ā `/ݔ&\#n٢rn!|K󮸡IڳN-؉;=qUST)(edØujg<G>xz2F|sdyΉ[B +/#9^ו1n蟉h26ZWʽ Q~,<4{;Tvg 㔝n@8r{N{Gr^..b&2a$:]>QDۮnX?+v(5Frʘ+P{q;rXkwTuWo[̨L:CJۃ%.935öOҌa*sOdHM8sKv]ƶ rbOPΈ ;|_wyuk{;;Le{颵*)5eN/a +?iG >rnrcbn{J!EIaJvcJl)Cq:!!Όij9֯fݳ[[O~}ruѵ{"qU`L7,Zqk7ƈ~ QV'0)%vZU! :1i.[?뷊^[QW!$%bV1Ħt_xݎ>Szlkch ȣCoSi'c:4E)r̩vG:}Y?l-!NHWx![Ҵ=1&t6HyG\)"$fŒQlgb{2VqQny,IpO{]T746ms0\g}[[Y805MyO5oxf8g<[sT?z9[44gON ZwK}},Gӈ฿i[9N=b|:[YV}}̤1 (yڅw/8۔o˹E:M]-cc3cX wXyG`i`x!5:](Y?T\~o9O]yg!VK ƘN <SxxcZAϼM^wscЄ5Ar2hzacivV^?o稺1Xyc 8iC-$.6Tz S&w8zXȞ) VqD9 ~bo|X$nt;n>D}Uߒz|9I'beqMu=. $xiG$uJv$f{ .NuG=;gLEkbJ;??THBmmJ-o%ռOқGG~ұ_dlIB\Vot|xyﵦ^) Z$WAW<:MBV㛫o%ɘ*?uR=rݰC?=d|wl@G2e;6Ld\]k4HN1Wu'CXNI YKi  T\9)c#>xLP䊩(QG.yU4qUS%Z . 
B&**OR~Uv#秋|{.!?d+ ʼns~쌜a^~9ch(c:1 7_+U>"5I,;QN;[݃k#o|3˙1r-?[rC>/H{4~XPUed >&2uиƤ9y0+r5H3ϓ,2h&<C0FL-pF9fpRdA *'[1c>>"HA%#;1tB@BxΎF4زH{h;LI:FV1m~ƮT#.̝zqK2rhRf0)?gj첀CTQ)!F~~]4Yfd)0N/!]5Byt1{k:}TU6>@o} K+\;%).Uf*$- ;ZI7+ 0Wv6NDLd|kƜo?GCnGiHH0tuX>9Y;9C|!r[ij/eƒe͌a½ 0f|xsaO <Zå+'L.͎"1cc"H7/a+]E6yUxs-]Iˎ؏p@JQg?#~=*qho*k0\F d0x}C'͌{J1 :Dп0x%b-jfcLG˄ _fp=[9a"] U)a(hښů" :mZ_)c̻M7r1Q/W1F0 ưҏN֑c萖a |#7kf}Hi4 `S[2$ &_͘=_&ь+aƊf( 1M[Q0grpc޹\z.Cvec<UBrVe_Ilcdt@9ƫHk'acO[͐}aOƲUfk3S(7DQI$Rv1OkDiyy 엣 {Mql~HN#gNڍI81u;ywO!￙ݹH*[F;;BV֏V Y7C&Ye1W>d}S[Ȫ<'"뙚Z984[c%ڬp?D)OՃT=;=!4ep)p8.aQTu>\:2ɚ|<5 YU_.~zZ,d$g/rxb]h /)R4xb>g 'r8\%q@.?^ɺfhPD9owL^~vExC&Wvۖ W[I![ZMB^+N@L\~uH#[&}x#xg!z#Eš=Ș4/?thU$ٱgt"[?m79Ơa~gj*d9CőT=#G/js2lxW ӗG{NZ$;%-i&sǒ~#-G/_&u=, k,.BiLߏilLuiȺN1,7[|mD[ 5:$ڵ$$t+:ya΀`H_9} wK "֎w"9ƵZkUg]PO=ZC_eO,C=?ղٺٮ=9XidшU"zMgxmP -Rq4px͢Pban3+2RixY&2tC7˃!|s&^G^'R#2]NE:24H+2h.GqH#I_=ǩЬ/^nI+&fW1y^ .G2KX&qV%E,FKu8Lϝ^C$!̜G)NgHבãn/|O"מeI$1D'M{ToE&mhŠȄr!u2,GƸE\0i[wd|<[bBV_ '2IR|$VGC֘?# |/pF ~Z%uda$~#,_\dQ˹/Y<<`zdmaLdDգÃjacF"cG"zGDSM"ck!)~dI F;I1׼ۑq7C%n U$B rUM.6!y%pajM g6f-2%5a2^9OY$NϨį"@' Lѭˑ1]fLG2-!&ɜ"8[528Ӑ+| ႌzcȨ| 4~B|w\}N|G_KO@?np}427~j$KEvMds8q²h/,]AYx$;:=cG|JE>4vvEIUOBFlڛVO\ۓ!e$l`N Xl|T;uy啾MDF{9Hۭ2amN=;t nr㖧=:>!GΏD2_ݟfcN"g y5O]z7DP8,pO8_J#<2a dUhdmurdL6x"_z1|n\uK|\bːS=RxرQ*җ>Qk§N%lG{NQEh֜"%+Rj:zan ?,/gK=])ψkfD?F"nxd=2mjdtڋE 3  / TMvܻߺ +dwɦARSrD= ILgDgێ n^hDb V }2Ö!.v=ز%>4"c2+xX/WݡwHl!ȸCoL]$sIo/@OM_}{3IPS8S)" ?TMc ogC%#Û*AFܼi{<*oqdwxpĢ.AQt}12Zb@zlB"mtQjI$[]7<;xqdE/Ks ɂϷZF)o%s[pxFOrN\[I~ _%r-'m=pdTY3X!+'Z볤_mp2a&"XcDscd_!Kqr"Rn?ycmeX"osd򅏹RU`hZPo`CskJ藒X̸@w&kC!O*_t+fHKU*V#U$!%M퀁6F*\Wl#aϕ⌝ 'vgs*3t>ziϞ6xe ^(Dsx$v92zm laL˔矲Sr˒dk M~,VJ!Ss2Y(CϴV{R|QyN>d\GJ4dq8\%*r/Y4a30RX}\Id:pz5jM8Rڒ$wmޙP[=G r.'£A%l0_Lt8*Ko~]@#2ɰG;]Bvdm7Q5dbD!CHa$`OռKM R' wY$^y|a:2$hFf\0@m9&2W5ISNa7yg+k@lVAR]Mru$YcUy:w@@cN^/.9Vt}, zz+.^$A9u~>?ݼ@”jռDNj|ڠ;y~3 8tKc`ruq|d4u䣇onzz SJ]_oYe`B̗9"S}_Y: V]]ǩ琉7N3"A W.&>ZvWWn%R2:50^ښ}c`~QwߙP (|F~Ũѷ7җF Ɉ#5Hfַ5rȫ1Rh  ֌5;:NY@f՞{D?=?_覼D%\,jܣU<.GYvS$&)arY=N`N>JlrCIiƇGxba;[ۄI$)ٻ//JΝ? /5kfǓEg?!f^ o]ksdMއKgC&9eN >SlU% Y8w)MR"ǭE9=8: ~026٬Q$%ku+e$n$9Hvо!"3]ې=l].:gǨ0+}hk~H wZ0ܒȏ@,MNjWrSNשV)NsUNjmxٻ|VGS,Qtnv`2N}О/vwe<&c+2m_O!ENtC9MdbޝfOs]v B88m κ~o5OÊZD& #9MjVtj4a=2(L֣aoA*vO;~XC=?*&ⷀD![)01/76f!gX~M9(d*p\jd װH٣#C3Sr8yÚhvR$>0DqkyC+_(D_tdB&j}{,|ZzX7ų/@Fn9;9tH oCF_Tɬu9\%-*{qKY=[Wa0SaX=S@ꭒF!SΆzGsAj[c'VEFfz9xh_s9D'IEOגۢ3>ݧEH@ƪ>9>r`dd%;moWTK@7?rr42RGswdC\Nd$NtGC$$ڛI> km:ԳIܧc󓨍7ۄhtFF9>oCyT),QF2.Q@F)":i2Zu?2Z@2[gjsy@DU:yI6]HCqw!o@&&>Nd:ÐI)_E,)n$ITupA$ibY#ds6 ]7K$Z%gڐirzqJuL{ G*S* NJFc9f=dxkeAn|-XDG$ǽ6Cؠ?4k 07^'o!fʐQck;̸n3O wV <  $!"E 3 SV&VdȔw-EFT;!LTy*i5cp`6qh♱w~h䦛X5ώ=AF|Y{.ˮK:$6M3 qTQG/5ߕr4 f,ﳽaJہ}J~k'gԮ97rsNWߔwR;nqt>Q e(9e97}l^Hp۫^K޷ʪ嬯YΚMq^}V}>ȜPwM׽ʿȐyjп>Jǿշ>+#F;^;1ea=RϳAFjA}?}[%>|H?yɷ7ѧyb)˙7mEٱ̴qu}ײַr+_&]Sc-L?HtU`|~R?,ysrSfy.Fr/[@f힯N_-gi⮎fszu~rsBȚ+q}^d|Jynnuu9]͗gCם[ي#Rl.u";+ؗ))NZ ݞg{N|4讬>({ɯ$r@e_y%?}۾PƤ}ۧ]_êRMIef)6<3P w2anԦ8ҨY˛G7xV" ?f% /3 cVCMw.%أ<,^z³?Fe3Y4{j񞯭b@Aʧ\tQӃ4>0ӻa/Bj4<9dUyyit(`6,0#4ml^6㻘=4%>U_w*m$͓^dS08j0%aSc;i|׵,옷Z9^-Og.xagi=ߣ]w~<Ҙ(`|i7I{9{Iת7WH/w Mw>݄%!T]q$%rV>gK$->sMi"3dGĿ\ΊzSzosbm ݲxGz)vxGz6k۔Gcө]Hc/ɛ҃,6NoA~;z $,7gPzFo3DTNUvAWGt< uϿP^'VTqOģwGkZ&Jxh|TP4lH!}uЏ7S'3,#2w<[Q(=7!6ëf=j *o[c $G{ψWW[/sAܹw~ky_P~OO:Ot_ڇGLP.nyEx+{Hbj h`7G a0:Ol^y|f- H[jWpGz kO' uT ~5kY-q]%Yr|qӖP=Od p?>D`q1#UwPp? C{c QW%?Q\L7K7= >?&Cw[n{vq =8'Lzu{Nqo~_wIfa\D9P}0; C\Nׁg>Ԏ@ᆭ\L0QQ?9!ރ|氋y2|;j :':'=Q\8 #rRmƿAC=B<8}T_"\ywQ|*%Gi~~CY"?h;{Oz=ȁq?üy7*;3=gX'A<  jxy%5+(7m4i;!u쇒hx^uȑMM"{::R<3W2C8Ƌp^ˉ7tch= n9:Q0{MzӚ=}wHccUhE]!=xU&踙RDNrjA)-.x¨]\g_`#z'YTOZaEbzT.. $$#"!4N[ 9/H?߁_:._Z_Ay%r?Ոz_8(c\Rp?H~ q>]D$zoa@"zMi;{G±+*WXnaB}Zwq5ܙfF>Ch> 0B{.gΡ~E? 
{v'K^C,IDю#iqy?¤Ny,+L*gop?&C"i~yB|!8>*a7G``4`q %Q:~TrPϣ^i|= 7U`4Ohބ C0j.$'۟NϠ7ky̟;`_=n‘Dz8o`H_{9j!^Bu w. 7;=*·x r@|bA矻>񂾠 ;2S?H}.}?@/^8 y֛h4>`IBi~;< q%?b^# zu& 'S >A>O j __xAߒ0% u6c58 zjhTJ"N^\zEt~ P WHw1#<{͝N)TD罐ƱbW{ )rT5aV8)S_>Q))>кhSDA}uti|=P{A.,4Q?qPfPh#qlx3E d:I1ItF$OC4.$"c;:4$(- f$P9b CIozSdq(s?M]xTLqbZ7}iyɢW4.gR=BE[INMyYʟJF~ c>g%2 _pBZ@̠ jW9=ő<w1.R{AS?)~RH q' ~G!C|Z7| vM`> {̵K"/hPLSPh~$a)f]]0}z6pv )ug >kit K&j?0iT/ߦSi?SJ,Z/Sɠuso!>l"''G ֹP㺈ƥ× !77[+l9l4@y!G EtFL:Y|S_4|oh;?-E\94~ h5P*})-cE$~OC Nz!7sh*h]RDRʨ>hWNU5jZwB\NWAt>z+!-t*g{T#3MP+OXSy^s|CLP:4_i|Rʩ}끰(vSJJjG54oXBD;.rT:_jo:ߕ4Bus#U4oCWKHg9%ď gi;A9]xTC R_N穂m_5o ͻ .0Oiu/SH/Jg謉($ 9#rgVϊr9rjTxal4RF jUT*w9?q/2쟕S8JQAR9]oU;IePk?C^VA8z ԉ4VQ}@8Z#{0_C^;5Tu4!ިG}Z*\_Ov89VK-ޟ_k1+;x|'v5#럝njzg3Ԯ NҼvFN|qQ:uta4z휮/1^R8 c|-~?SJ(A ~R}R2W6"á)^J1z5i*W6 BPC}:'_,q)~/NѾ ^R?OCDc\u>::/멝B\8 e]o@*pCb]~_`gsiO/|N 9)a>ޭƸ #y^jb>}!'WPuI%4Q9a]ݬYW^JN9rU'Hg Iُ>@P/:m'u;/%?onv@5_C=W\Iwzz 7sjPeWgh+$:`=}AzʏvM xwO ~ u Խp?X¾!~BfEɱ ȏBJi=]W\zi]JN;, u%e}a_ 曻mPq]qH+ 8W<}R]&bvO3a%s C0a}mԎa}:y&a\1{Xc= tb݆Bs=Re0{ R7H%{+9ZW:khO=4.;Xq3{?kv߃)?`)p o|Ou1+9@t#_d@\~x>ן:E-POqY&-ov)`'vhax]Q|/+ ɒ+JWyC՟+xn~.4|Σ?<'D;Toh8~'rVƲ]zĭҫg 9k@x2yk` N\=|>H R;}s!_h1'7 |=s[5~xW;W.Gy9=Ia ?Q"o>>H _?}9 #: xy> x.}/}NOr;)ob;C|~.T;~S@opf!?|}9k[z ߇\os8g/S3^A;]]cx? ϳ:ߟ@q);%'|=\Ͻs_ [s5+ g {9R^P)nz"S > n σq)S|G׎ ȍIO<=0[|@m?!ޣy}sPP;u1üs_ߧ@=.3D:x ωsU4Gx? C=|{pHT;HyKQ7S >?o/q$-/) ~q(8e_P_A.}?R8 x; sPc\U@&8 <qC3ߨB ~qwBs@nVq/}`}O;osu3+Sp?|`yY1r' x;w^}cT4@ 'C;v(#K|ޛ=9킿~=$T?mkx=oO| OȐwAPsכ1'q (·}3w|(8J$ϋ08q?s>:|G!/~輁Gi\ O aúQ\'{53^(8 8UBy|X|ɢ/ys4/q]O/S,w vQ*?֡ǰõqM({n%s|~(+QQ\̝[R"g:-rfK@OuO=ʵ~w ORqQoTGpG!o~xޛH 3|okߑ=Z q\8OӨ<=mG}yT>A]U7{&S|z:M[/)|!E{M`^!.{[7AxkC=𽎔'_v ~?.ϕ~9}kAG='?C8 A|y~qʍyԟ` ~#]OA=q f\i?`0KtmuQ\ ASQ?A|G:֝Wb>a?\x ra RA;سz$rGqL}rg^G>%7~s({Y@3W^sqg=J|oQsgqIzz;gΣ;@ ~Aߏ{`ŕgxG?3mT;>)q>0=fa߁;{/E/3`^a>O'B=rB1'&_ܟu W>~/p=^%O9q;rջ ;Sr8exO*=Q i?OwyqfX/s ພkݎ+`? uGz_ s_ X`}gG83 ?a?R?Hc{|14>0?g9ϙs3?g9ϙߜG}^!PT!=ܨcz6m333ԥG+F5=JY3F:HH1^{_eli>:4ueʁ˱Pϑ)D_EL>0+m=]M?d>,LThCsꐞ 3Mc\m?.$sxDH>$͙%s@VQ}YNvr?+z O_M?]oZ_ 6abMS "dy/7a>? y =mU"zل" (npz(}8oo$oo?}qѰJ1154QA89ӻ1Bƍlk8ˎ:=O|;as_;s' s_;s':N8cx1={g?ǽ;aO / / %lSKclbxܘ4{~Մ zܬ}XE}~${kSZ@W@S~-+{¿pe=Àځ]&gCVOӸTeWRRT'P~ޫWZ_(  fΛ#7 S<58ggx|S {]lfS͛0Ӱ]zcQ SE)F)N9r*tʙ)PRΣO)OR#?r#?rMN>G#מCuNXZ6ԕV OtG DWOx_6iXDgkd}\nH}ReAL"<&Y!ެ~% %R/)s[sN^(-00# -Ovɡ7 d; .'-sFuM=\dm%FZ~%863b?vkyy.PfqY&6pD6献dwk@! 6arS9ŭej`EfӬ\/ERiAҼodq6'HK+Ȇ&l}4o,- ٠\:4}g:ºs ?i>6oi}.m {Eڤ/u UApء`ψ!c*1E(Q)5①!4k!*'I~Amֹn)nO䛑-;5(8MZO|9Zґ$Prt/A}~c3Vt:ϱϰ~&K#/m ;2юZHc[HցA~ aW߿"*Gr$]9Y^G3Džʈn' g9y[i|$?p_spᧂ}-!ɼ%>L;$ i;O:j̇d,8ܽ񒰶oϼ¡Ye!".dAKu/[{=}w4VMq';Msnz ɪҐnЛGXϛ.&w~MJl6n'%"s-lS4NR<aY)l7`#iMxvzi4;ՀIulƒaڿQV9Ñ-*Oe.n{ H@`e>_zKɢ!IȆqo$ ?ob:Ok#fU4Rw_Pš*i33$neW1oV>GO;!ꂬ0}RkM#ix?5n5;}5MU, <Ƕ7̂gI)'bO#Ր37Ic]UߺǶ :@e3ZFB6Nua>@$Oޓ/@6:Ń4 6=e9N4sY3kvQx{YݭhDɾWy͝.! ".LXN]M} 8M.VTC9x-Vl=o+v\ i{"47:ObD,Ώgi癯LIQΎҤqmIsNE~ȆА~x:V4 9xۤeV~'f R>/z/8*U:irУ-eq6& ُ_sl~S̈́tF_iٖ:RƯV/ ӾOAy!vlYq jF7JuIҩUnQ[+g}vs:HcMө8q."RϘCt uۭ9ST񦉥 kX;nZUViEM_KF>EX. j Fs*mQ1>8-w3m)w/) h78yEX>Fi!׌ 09Ѹa.(tg&i<- %!0#vAo8v|R,>O^m%,f;1W[N|vqYd놝ѧHpcoqiŊUMSkOOܟ͢mgVs" bJ$e. M7Ll8F9!iuM{"x[ۦ'Ikvˎ4;GYA9Puy@W)C:sq:iKxd%}ɴ|,`vHMac6VҪ֒ [T_ږ_':+\䎣$ G*eǽV#GS7U2L:t03O];"u[R%GaI zq.>mqC"I}Ţ%6i&Ol9e06γ<4.",G+yYl{ryXQaB iA3 A奻 mg|s.g#n{+]uyjFʦQm®Wgh;۶oB6Yi'7*4W02gobhB6aܮ7B]a=\k-g+I%UŢiT`s]Xm^~}vgH}B6~Z] j/h.rL~ [1K"]uG&4o0ͪa|3Ҷ)rui{뚖ezaHa'uZϲZ<|4^='ŖXȯ>lL˟I%u>xDZ%~{e@m Z2^tQfBɛ%'6폜N g:Cz4X|ێx,q1 1!"('`/|ٿH$.|7ʙʷS +C,#!'G.7~N^x6LB٭Z|J")$yk9mmg:e^-3)V$ >$D&Ƴk? 
'vIyn,, 652u|s{ԥ HʯV :dx@zA+M_$vE:}1jpǚ$ǟ|H9n}9oXv}8g!$i߼UlHҒɫ .,raPg 6:ǎ&]Ƥgʻo⁦׻8Cȼ^ !ye +A[cJ'MV 9IύF7ߒ&azY.d')a[ $< k2JJݍWHpBqp^JdԸX$j5-V$q9ن_$:J.$~&˧&zzA%97ǣMH% ,%I)U!qE./e]Hq%cHxOc3*ȤUJV Y+(Hξyxt02Vqگ$əIb_t9O;Ϟdž_I֥[&wF9#C\yx>=IRZ~bЯȈ ?}uN&CO^IH_cuU"sGl|OKxG[?r0'+T D#S?8ُN "[$+qPΌs˜)7;=")Ku.yLxCFW\DQs8dŋ? ʬb6nYE&Ip?MjU~$!Ёo`VcK_ϸ6T ^\۴y,W$rXٚ#g^XJ#a7SrFz69\xx z|e4.oLB~e6w4G!S_D{ץ? >jFG>qw8%҅զw>]/W'& [q6v<-r:2Ӎ]_ސPSw  iJ#F呌omT !sMtpM1$&鷚y3;x؄p蓆ϑȰ㋧>Dd YHʀ3R)(HZ[IAE<@U9\8)% dH~'9`xW|7o"- s#kv.=uSmRH[fY;Kn)7c񰘄,3u )1 o0x%-3GH?+mH~7$>3>ߔn鏌|bF$Z&JxRo1衱W>HN&Q>+eƑDc5O>VEj#VNDKk;W;"bk$^QSI|/NVXa$I9>ڧ[\F_{zuI8Ȩik7RmHEkcOժ'ç9t;'4!}QC/]l0B>o6)$~7fP7`IrfbmUdR̽3HGY={B~L2ru$rqGe۞Y[ [%XM/ϩ !WIY1{ q2!cX٠:L}'I6hGV먏=A)C ۡ,ۭC$bfN?/HM$ G:rC޻xomt=-ì俏XE-ELO{kHNi/ؗWR$ޛ3cޭpJ`Qw^?9*Ry<5{_HYei NE+w[]'sy_}X2ޱ/ȆN !_6l5zZqD>9r|d3J59|?=aU+'fH}~cH|DjovP݉/P~Hț"jI6d6-E{!9h$i*it}6" 5*H[U&_32^Ows'7MN7^CN߀l8KɺrlW>dWoA6[bӼ62̊}7?~j"ǐ,Qvkp..ٶ)" ,ڀSTi 7Cv}]y>qFOi sr n1~^|'O=;}ӼIN!- |A6(Ób\C a9c$V5Ҿ9&c@Fٚ77_qraJW?~U~ FF:<<V!N9"Bο^gWG9^t@0~پ&ӥn:"fB9}x1S9t5E^+5XR9i_X^ibn{# _laX/l\vOU<6Yz54>!Y+|l4O2A|}ȡϳfl[6:4i&% .7"6D NZ.G {HͅտN#KMd=vkbٺֲ@)dӆqBoUT0;^4{.aNٽh_Q٧.v$&EZ~#%l ;}5?v CȠ;#K/H")Mi?c ٥?wmÁ3IP*HPI#ߐ`FP$Rau`w3 -6n|o 3 }^U2yd(ǐp!dXeDfdJo#/=H:Gn C>ϸ$B X.jԉa[%IW#c 򺈌*Q) [92ʎI3## F9䁌*r/ڑC}fHԘp{$jDy__r\LHwY#rlTői?!s Bzj2of fȴtl%Bz!I:K$m $mϓeH3]C"̉K/}DKY }Gx<4r薝Yp%{đAk\>A?!2ȪŇYK-/*MI;S#ᇓ_T ڝO#QdtI;R nuJ $ScdЈ;W E{_:4+C+qw/If+5~Dm9[]GyJI[A$$O_nol> Yg3m3ٷC'񉴴C\0Ǒ(}d~I#II[6yvSHHǭH/Iؘųv 6[K< -ՀUS8Vr.dZRdL]B]%g.Wj y-Ў ye,fv';{=9A߷D=^ĩGڍ>-'[0姭GX/dÈk#7"cZG6R~V8 Ywd$cusi0i"MtV\vm S݅H̥nC_!wilzv(uص= xPb|\$+09ad1>6cIٲ/izckkzBxfީ8ܛ$Oq9c#Yvk%N!ٴ2AEڤ-BP'ф%v󈁣8C wn,.LEEl<[Zo]_ˤmpz3|y +N 12:UDěZH{GKԌ$^*Ɔx{Rlor^-i1k}iO~ pfX9q(c^᫭+lpcLЪZdkzs{]rQtHCWcM4>=Qޖt=C2i0m ##]!۷[|cal_?n-nơc8*DPvM'ˬ1g'vw.zc*iyn"ʶ߬+W rª*U9|1ep;sUm\ր.HVtܸ+ 7*^-{J45:?Cx]f&mjNXG>Ȇ s&!Y_զ!,ξzTɲ||jK9cֈH֣W8^lbia^|+LXZogք_"fb{|Yf/ u ]GX"6sOيo&A\ltcn<DmP,vT*2Dg6M$oA HPiGWO<[d2dƮܻlZ͡]Cԃ9wi߇ H|pmul&*xs(8~b> (vP4٫HՈ/7"ლ,D >S؟7Y6 IQcס6"kϭ 17Tg4ne-!1;p0a]JT5AF;}nw^!9jI$Խ+;8[!W4rY]\iaC / s}Yv.kwJ*g|uM{,SANdXqqmPU͟OHڜDzd]#2ƹm ӑ#Պ3g_ )JGK+Z ?W,m5dCFw9 {h* ]5eZ8d湑y w rL4䀣; ҟDO3+߭noϮNIe$oГ"d,_`E;2M1zdpaԹ:dF-iCn: [|-OLzRT8 HBrd] #e*_`׻-m!#:0t2v6t#R߽Cv4i OmA7#/KB?czAk v٨>K: ub灶%6lތh.T0s OmOܼ_͔B>=7XJ&!3f ![}yMDx@gМ q7Oyuj>:?md?8`ILgMtL[d?4+N" ۾mEX9o;g@r<-$ɫ,E5wLx8=kKdaC"qC&[LŐdrGp{] w!ov76G#"KI蹗{瑜C=\[2wdvѯΟG/ s| 8!Q 2we-) <]aK^vP#9q7l?DrlL9艞u{:IAdq3>\bG!=* u"?O\K/a}K9JjHymoI,EӐ<wxLzԘ;x:4)-$n\gM"$O{W]D~8s_dPջgGBF߹C|!kLܰڨkȴAFLhmI  a@k`%I#}L?فj/o}S:Ts[x5XxSesf"k/h; \Sݒ=2`eE ;Mg'"CcZoC<&cqc ɎOe?%H91V$ے  C-TC8m9z5yodpsϣ|qf!Cv+~4F&ort!p:$jDTXYx1ǺÐy&f#?\yCŹ|FÐB/A濈f.,1)dZ=wDX;ngSde"q{REi:G i)D=ߎC&Gx]kE&\id12k@,d[͚:~dς .{~'#gH|7I~7sci)c`2sɼᛓrڻBfg/r2;z9널96s?ک sOUD{ci,>w{Y{-aɝ"9&^Fv/FjZI܏d￈tv5!%~Z1$"f"^Hp-)_Ҩr f} z+1e4Tg!A Q/Ie5HK >ft7&2FyvL5Vq%g=Lf6Ԣd󕪧E`A%H0:k[. aW}g]o}弐. 3dqaTtM/\xsw 6`m%)TcE'2xɮ_L?Y<}}dȚCj}4I! <ВKI[aZcV_y&OX/\sc|vu%d蚁*z`~}#CvK{6T>pdն+IX}'A'H[хH9}A2+Ɔuc"lW-ުdI1~ ؙ ]ײcL92L[];Psū 忑S*0S}]hC箽zZF/$*Ob,݆L*T90h<2Iʉ#*Df˰]1K|G& +2P|ӿH6FrKI3۷Gfo'.$I&/ߎ-gcd`@bBHWӲB>?h;^ H_KuN@}|gA$p"U$hM#HD֯ZMKH3{u0b9l/c]FL:lG̤q@ar~~akdg~ϳh}6a]ZJ'BF(Į:^4G. 
CG6aPԹ?8 Q|>֡V ~2ץ@[!7uc?\6oc" 8)Ks8|pc*u[a$s{td.uɌ%y3gY~Feh*"'e==`OZScA$DDc2 1Ju‰NKHN#Yߐ/k3KGŦ&mD7'ҟ nY2?fA]F}?!2G2Єyߎа|I# mI U.ε}ЦWBSЭw{f*DnP}(ZK5V̌tavEkٴ>;4M-OȈ'$ViJ- ׯ8zZ2ի岚AIU d/氾amPsKYa__` 2Ep%BK?}3˝UWɍߌ!UGD {r#Iy!cg60ciod[n ȇlZvPw 2\_2:$(߮v8t|$7Fy^5H{"'m\f 35oyV|D{\:h mFxQ+& ]o|O->xl l*yE$h]JxȀȠFt@~} Z'}RCƭ϶F0槩 |15p<2Z߶ݿO{?Px`vcF#̃UR6m!Ey=͡wnȐ^S z79dpS!QA"1[AkK~>Ei_3FM=xyx/De1v]54.2d$̒Y ^l8qȐm6CȠ;'/G~ j'j +V9t!;UODB,/8qR])Iv:n /T=2Y,)sao?_/:wS2u}qsx)z.t(8G,~mKnj5aQ:]n_v$#n_Sk/.r>PWU?R7/?Ob~u,p%?~my4ȴN{?wI߄%pfc/F.xwdzUn)_~_ޣ&Cn׾}A~=M&jw wq%l8bXl;=Tޞ9~YD8=՜kY}<4}OH]yKD/n&l8ڀcy;gq`^9ܥ|ex~lY(혵;|TqLSpOnLμgK>K/|F(:=G~m UvwN7~IF+esS:jDž.^ x2]/l1}cW7}[|R;>=ՖZ[|5/wt} {>wd}=y D9kZEeqhlJ?igdKOK5&0J4̌UadIgա[^`iꈥ쯴uot) 'cM痛׌y6kIzh5uLFYX2 %lx[sd dW6=$`_<albxƶn>ZZ ue5>2yskʅ={?yXķrQ RkWwxy8?)p^&yHEIaιZƤNetvRn}('k}uor:G7}>>53:Geӆ4=N* y3yr6հ?a-tw8aY/D9[67ߙsN ڏY{?7sCjO*owoV0/d&i2{VVRҲ#yh *p@^ ?2Myq_tv\6 *N1@D<ek_`he6Փ/ÚX_C=5>c791IXnz$b6C|9(@\ž{{$ʋQ~ la*cГ]O[jp|}J.PڀrG10NQ@_wch;OM쑀msSq~W} ?5U B;c+#Ky z.sg U]Gy0P}@ߞxWtɶ蠫q>SD9}@N vS=(/?ErzǠo c ycn=|r_R=y'wj/@c}|¶Yxsʇq>ǖ??z~Qw}/}\>\x򁝡?a8?\mߠ' ׁ~Kcsv\籎u:pÏUAO Syy7 W}q4w/Ğ/Ir$ʑA{HKH4Bi=ө$g{RIwHROs+<Oϳgl1dZ!{APDnuXB"j )tywH OIjϧ&!|Jɑ{Ӝ U0FqD0T]ID(?V'X3"{3Pyb=X[0d='^sQG{ AԞh+ Ls֟^Qoc$M(o r; cII"g7Dz=uJ7Pjyz0j' ^%.1$l)ID{'(D"t^èyd\N|NAjWpޣCt~=q7 s=Nh܊5Qh/tATL$$Ѹ7$At߃iIvK{cL-;9O .Ѹ{]`z q0'7M1qT 4O@&=z HB~yK4"Lq>vKād:=Oj=Qt'~ҸB'Om $T:4/p C!@QN?~s*{c"|꩒JpJJJ:/4n`4xz_H[N~kྠ2Z'i<ҼYLb_1C4˨:94:KhˤE5C`;~>;t dE*)Y9͋ԯi>(L:Oy4~G3ֽy4o!iϦẓ=!迌']HS"~,/YDXL1XGdA8.UDU`Ŵ)yƻ*gUtA02`)+ʨRj`e4oQBTʞ0L2UL/WA=4A\xI(%IC\_I室JW0Rt94_zW5rjp\F8M/uCo1;./ɴ}o2hDR}[gP`@SD+TBP{.q9ĭgTHgTh7=E4O:1gߨ^boSzh=[O uW=z:_4|@hy s]:j4OxWG uu4Q!'S;CvXO]O3u4UC~uCK5 >kxӵ4*fM:~yc X~U?"O Lop\Q"ϡl~)v+D柰[|WOCO0?[t=_?|4V/C~`;!ID`y?dc #;|f̎1gz|y2S>7+0}g!s E,dBʣc~H?q^_DVo(@GE¸z] <|Tm!O<^c<#'Џ!"!ݱ DwEL߉qyTFeb;#蘏}/ljP>ﲸCH~gy!8>P>c d_|=b-tG;b~&;ʍg4.h'xy|Gy?cSǏ/ymgiGdw<%mj<>ÎwN1>oj^3:3>c7=c[>|.QȮ8&gsL~\˞0(`|y~6'o_13gXb#O?d|Nօxu1X|CBV?,bAqY;=<=!3 ;(''X}x|M:CօM-DcM^OFY;??߃zn{NUꟅ'iOc^f[ ՏS]g'YIu/cǃ│"v> @&f&qz^?5w4ag6?&^_!Bq3~a̓1׋Y=[X'P𾚺?#|ˏo4e œ|OxA~[];7IhQxM=>:/\_g~uyMub^'~qpLh=˓q=;\7?cMywq]+7q%3j.bli=><оxqH>OY~__zY<7b1_lEi"{Nxcz{Lp}+mj]:QcGh](}(돿c篓'F`QPNW4|l^~u}Oxl ߧǝ7xz?7&7 >&S`<#oQ&?x ̞<#>^=='w_8?si>"|}>썿O{d<}#C}6Ͼgxx#W i ~hO}Oߏϻ G1?yO<ϏC|2ȿ~@x8lQ) xL@9/bL.OU8|~>P/~쒗_1^$e]! '/"/?< yܯHSIq/>]W^?NChBƋ5a~ɋkg~'ģ8nؼ~9}$b;xGq$S!|y}rA;&I)k 턿oO)aI_Q 3&7ߎqQ|wh*Vڧ-O]Ӿi'~ ƣǏGQNQ\ūW>MO7yG}FvujB!E>Fy 0_q<"?6{ zJ|1Jϙ2$D7O{DX&8Y섷o(?᩾<ʫG&l9SS#o*~8GYꌼ}d<ʋp^Q~]ϓ('sF^zp@zŋϙG(yy/N|AiG{ȣM_7Gy((kB|hy6.:2OSyQsx?oB<{GQv.ģ8''uΟ/<Q^݉8~$7g!ж<*(:o 8TGEH PTXWSINS'~}'~}'~Pi&&ԉ7+ ZNjcnhb̎%4כƆH|O_ⷦʚo߬ǎZ75137ԩi G&xlv ;YAVq#f3_[ ?}HOH~9nmep>~=ѦM&?~w]3tC713(Mzc֛M,L55dyO_(|UW) _5N*<%WzBN4lc~>P@∢nfcͺdSSa47>-:[GImP/qT/Q]-s-Y}S-#=8\Dyӌ}~zCnƴZl&#Sm٭hji6jfބHk ?_ 7}~_9dMfW4б/Ҙl&O-7T> 42uO}{h4YB43CUFZ0ܬ?mhe)e6mAjU^h~_WGXQVYwwY^VNoσG>LںMBBVB˔?NY?@>}}N{ AӖԈ:#7FZ[U6 6mk!>/Sj1~ Z5@Mi^o=J>mHmPKtؖ44So,P}RiȊւO?nԿܞ.-t|: >2O}Y*E㙆ȾZf gtU؁6YCsU%9<0jxuw-Cs-SsMX>dԿ,ʡ(}X,ӏbVS?7fѐyY6- OjǞ_nwtbT>[?Q/? 
ȿ3A1U#-8U~ Ɵfeɪ(zBqZTKGOuڜEsz~,~=w2Q.?G?G?/P#NY Y=` R?;4Ƃ/4?w"pӿvx-O%'Gߥï~eć_a'>O?6Z2Mgjjlb4hLWCU+7*5q~L籟u6Ԥn Tm~—* 6\X;¤M&t~/1 c*++*վt(~OD!1wHam+_cZo ɆSJ'LlhQɍ7ElhؠX~r" [2b(ð+Þ 2p 2p$CYr *0P[rakkN<#!{e^[ q*T܅wJ ˢ]7&>Jգ R%s^w{~4.0c`k`H'pKF Ѳ7O#(Oz\sN>l;=?0яPJXu ;]VsI.SfiʍrsOP{ʍO9/qeS-v';õUmN;3h)q2SAʾCĞW-_O) =}|*e\a78հY%E”cOt׀"ju9+'4~ݶϝ0}vO|ӋɺX+F^~lWsv[6mHhVWrh8gܺKă֍#v~{Jm":W1SdÃՖ Q9'PmWjRQմZEH7B5zgզhn\#myo{.R*r84%g5n#,Wx@l]7+j"[ٜ;_xޢ Q9lMȹpbÍKgRG.ZM[u:;Yn^┯ͲP0*yVEm\W&ӬzNk?L쩱ڴno.K?PcIѧň\Z7%%ةzZr>qG hl/~r2Xiå/GvW/㑰;|UTL=~֋0}q\M.Z"–ݍ"ǂi\gr[ D \׫ǫo f[t:amz]5\!͙89z'7sC, U^4+FY<[Ap3Ԛ/|Ggg{ꠒjb7 >A䲻 h^ޗ4?x 9~s!pk\d}!Vgr:tuS.Vj/ܲ(m=w3VfiAܜQrVWJg8QX5ddrCAA_" afwbۈ[q/҅5 U][pJܻ +-n$6'WDŽ~;ōn7ȩߖSk#3I#mNEB*~aj {[;{[wQBg{^%ʝ2uXߖsqBUއܒ5,j_*-\%([tQ7B3赻G^/jZyr\oyGn}>7 o61Ef|v4I9Mm-j]^C]' ]34gJsdr7_d@EIȠ5Br6zܨCSj,d9nLU+1rj?y,Z%Yt↬ Qfc, v~ƆXn 7}H[@u6<"G6u3GYܮ| 89$U5DS`^2}O1'fUׯ\7\;c<%P~Od| ǖmǧNUmXx/*.pw}gl9+b͌n~];ޫB'«J40G*z>e|npwlI]pP*| +}$\9ק}mcqc:hC8߆=,vs#8r`6Y,aowI[Jؾhp>)*?^*R(sOƳc;npKrN16~g -| ܥP9iv8\Շ}^{闆qz:js[OǏ^%+)!ܫ@nDvX.PhmK3Mop:v"9Ui}pr4Ez8rX1ȯ)E\6zP&fNzЏqRo=_^xjUI=+,ҙ`ōnuH|nBov$s>{Hcu޻Gq:K?ғpXgFnff%f DS܀YO*>#m ƜZ_ aDʊbqڵlVE4T;wyPcزsqt,og^s0XcE\:ON|mZpbCs}GG$l~1\W4$¸{w<u;\nNu3Hť utsi\\n.FnpC׈[MnJ_Н\Ocov$}"N$:ܤzgp J7൮ȇ*3!KvF7U=puc{9E7uwf]W >&7\ri%1OͽW51-ošԝWqcQԉwka62$&43»O/G8^sn^6,Z{ƣ x|)"1 l]J{wŖOY0r!g[e-; LXn+[=?ڃÙ PNN+'\V)^qm܇IWޘ9Sucw'5>_'$qcBNqQ]͜XOgn"mgPrnB赎 "QQ?G'tp{7&r(t3jj3Y9W]Nݕh]#<&[u6|8!cܩp{hϖ{ iE}9 -d ޤۜF J[8k276O.pĩ[vշ-D3gpێMb坹EΗZ)&W;zSËOzKߖspȴS ?Wy\+Ծ٣TsfhK}އ% rj~e6'Rbߖs_޹ÿ:QxxPn|}iH=ڷt3ݲbG1T )ya:[Nu6gN ןO8JX?یٹtu/NoN5]G/U/!>jQ0Vn _U>v;pߟ0PaDrU,e|j׸oe8|gpLmg{O%q uF@`θa7$ys̯K>a!k!W{nq߾t"a#v%,і23M> պw^ %^ o<>J<ߪ} a G KvsQ0obHf3DWM%,2y[WkĝO:y ݝ{ՎVO@eϒK# ͯ]ϋzPBXbWy4庋_$,u?+#J *fշi ߜ/Iw'2Jh\2'm1ƤCiU.7ATǚ̓+V,xѓPY=1+Ix{V >8ѥVt3'GPy{ mk'[C(1l\9Y8ѽf?8V+$@մᚗ%CŨ =w","t$a:3;'=Pc3 ʓAϒYP:,ao'M[JŶPH' UCPzȤUThL|waɏUȩ'Xu`ձL1y-K@eWgXֶTtTG6J׫F|շfO9<۟閷 *{ԎOA~QPSUQd]c7Fh*QKW<0Ho#g?_mW7_=is$!#L0\V' ~s$LZ~ aU2Wz+춼% Xiכ0Ϫɢ_4WfBN ggf@p;q;D'O,{^Fy|W}OwDuj.aw?(s7 0[wѡ5o 里9BIJAr Wx^{A̡dJl0gy,n W_} kT<9ݴu28Bg2Mdc쳕6W4 k;zlDXްFlW0Ϋ:}k.ѹd> ~w _Fxmu0j~ k~85a {Gޢ}ߺCi#j2E[j]1f0jyO#gG@ͱpG'!L~Zu[[t+1 -֛ ?W90vˆ{>yqm.tq"l, jUK:F~$hU""?uS҈0dF ]q= o  Gt<ީBL4k3.Yvľ#X^r0J&`soؒ00av~AWFd\xO0fmZ ~CP%!n&{ C:UAx{W" q6omC Ik@:EV/Ok0Maa#J;*hE^>x8lUu}x@~)^?02|bS{:m!{ 9YcNxmqIנ_oS$7  z yӵAXkAfr}tN%,Y7 Qɷ[sxYRB[DG<*",R[~GZmt#vvɑ?KX4Z°Z;FLXtݧS\rZ7[ވ} ʧ,[3<;mJ/l0j93T|ݵ(aY^R)ң]UudOX*69ae>PYm٬G-uJINXuiEP?e耣P&󭡼ԃ܈I"ոv'A(rX:Uvs{YON(q '|JBqi}+ K_ aq_M"rZ,'Jy^NBKɜx+{Uڏ\[ ŝ[1#,{S6vC~NE^6>NNmW\(#}TI.?F^s+ 6'&6|>b(Ϯng蝅9*ӲkU3,oLXqPsY%: WT.JVNX+/9{P0Qw@HElk*/j {5TN}&TRÏ$8J ",Yt^¢+Q#K?toJX1m9a=\ e_%:Ve;׌9(?k g#J{u3jWޛZ _t',?%e/{DXqDo67iC7NʽUAy-"ѕ(KXaKTˮnݠsڈ"P1$q݉G=D=)r_Vۆ[{4w2av_vB˴G<+k!ע7Bq }/)Ǐ;j}g vуo D_.. iƧ;d^?a2RV #(9.rIX=9;kn0N/j+;m0'W, t՛C!_@T&JFWT'|:a* >*Fzˋ #DN~Q\U|ނ<ˆEγZFGxZTC0oEAIwJw-gP6&a8\ [̟ Qo"?ކ6_SZ G $,Pozf:a`uaK|ZhU=2UhEGv;r^p78 &na~a[NP402D5Ͽ"2%:p!x#7)&?i@&rGK?8y0C!"N|p[';t\m΃[mkѶٿe6L%fW/hMf YY¸*+{mK=P/nĖĝE\>a'GjDφ!۷3ӒY7|۱CzB㧫kC^m~3;ŭ>Vx4HPU n{ & 77>i)FԮYB^w9axMq̽>6N?~;X+A֤Q er<>aDnPa~} C>Z0x2A3G(x qB\tF<;fv' (y:^0ҩxGI>׎S!df{!EK yռ(ΗcEF% \УلQWhkK\xwkD[ $|DŽᓕ˟$N*a #!o+^ݔ/rG^U-WEf$tl|&-6zd:Vs< \={qD}ZDO+Ynw6烳f29)W#7]~F6$: x=Lp8']pKoӯ#t1IMsk̓u\zI_z7"QpfD݀"p$ 7 ' Ѯﻛ pZ^ݏU}H?p({#>,؟o@qо#ց$!-0f77|m.e O.]D](~KZ+NOX2bR8H,v>;ua3;K&xm$J)f 󪺭>> 5_GϜ/Dt}d0Bg> \ۄb6 r9L=]ms<ޕs/>PΫTmF jǂ;Cwu:ɭ2k.pWL\X/AufwU?\lsMf atlv'YV|8ljAAF]f~:t%8j}x?,b^0~l~n13Vq\нk Ǭޝ/m9Μ0,! 
Vhm-/*]7z쏩`2 Vok]_45qtǵSFtp .1{}]ʞ_CO6'to=s唲eQW_y WɃ^/[ 52 ʎg>]Bx>27}cVUZ'$ {an:=Ѻh<6H~I1҈-g?|"u,n o*mഅ ߲ 4[;a}LK!|%kN* cd--ja~O {¼{  ;/cIXV(r <5d#M 6e6gӒ|ߘniAh?dRǞjbչsV'`^\x:Bo>x=xPpB@Qv֟:~nDҚmZA;Y)GI•uFG/WAkAs=;lk(ӱiQ?&^z*i&. L3w״͐t`6BHf Sn/h&a2RH)jSb'c>ϕ<4bKV>'y:Enp1!B殸]|_%tg3ǍoYf"a#?sanR0t( 8#j~tg #jm8eJu|;Gƍmo,naڜ TO) bP!K}K!8ka'5bJ䷍ r0!1lCv 9)!]!c({AUڡ! @ #TWc=@=% ħGxBӄCt8"L0r!r5nV-VUj?(o>RQE!dn !oƙCNH^O*!*Bȗ7+!ls-xyGX;FLV% ~u,ˆw7LR^IN" ~&qɇY~kǞVUP5!q{v{!x GbX6p^ja5p'ݦ!Oa~iS5_l'cu€TS!%ل~ '~Rzz]Lp՗Ы`]GڹɋnT}!2=ytmCfE7 õ{=vA/v-a.:!tۃ!D~^SJm$ PrC2r9Bfԍ ӹS^B_9[m-LT'}~BHm'ߤ?/s:Ck3#’ =ޝ05q ;rAv,o-pJ&Gz);U-a!ҙ!h59'!pQO^yto){P^SXWݜ\l/Ƥro nʉ}+nQ{ݴC-?|d{Iյ |n}̤eJ_t}il?NN&ۋWxg]tۼcħ G_3]׶6ٞ?:`&vkt{-jG yThJ˿>K ܧbΞkM*ˣn!m"˪O_Ɲڡ.S: Mۄ\̰ sa[=u9{;xqb|sSr Kϛ_0ޅYVYUxx@&ۋv?ҹe/w|?/g<4?sE9acaj*M;oWXNefII~>5riMi_1#F -o9/1j'M{lMy[4 ީ lZoOj/y\97ۖsIdkj+t^^ap"i~HH̛4,xZz@yS7ijǻ˸u=˚s ;?sxUX.wJS< ""y$qdu,~Fz"'>zWk a˔ӡHBXzg(|s5¡9=m%]̚6]Ilk_v.тIon(^(sNBX:|[P*TG\⺮\|_$z= {$]\;GrM20>P$2 j\'2+aZ!r}{K!3@| |KܔqS'-LjҎMu4O~S;CUY;ZL+Dsxu]g!̘RjP4}QLP>³  ƙ[.Ґ5ew ]3w[n Y;GB~dqӔo1߳񱮯2QZ_r1¯$f=J*^-=7-qqu YAҾ}Xka2ldq a15BQaBm-Ap]+ k[˷\2%>c[(|'޷xLw#CWgBc tÐUv g×qbZ =qfNP2‷5(oi% lt#oaOm"n:9~ !FZiJlYnśs窪xM:eJ.`l}z>uf<.i6tzh)?8\*M6=ۂý/(G:'Umen=7¶:oo@z@T!@ |,HJfGj_b`sH|#rpninv7}'AԎ&Z)mHpZqv0A2z؃key1ӫAL(jG|z(g=OC}NޏK Nvlʞ~t;ޱP7}?XL4A\ry^;xjOPQoNSݭT&|̓AsGdM`\-Τ 8/l|}d` Iw#W"Z,G g>f)4 7JQ7 e4;DZqqcE-فOɲUǫA0Af4̾p_5\ޭ>j_A§Ep<<<Q΄xԓнY~g(;|f'|kԎro`bۇ Hvv nsLn3T/нF!A/,Za>b݋#q:#?[ &؃sz[ʋ%g?%p#Gf@[ z7}!ݿðO'^gͅOVJ!м3CegӀT} 8+;G_88t^^u_ߟͻx%G|}|7{iJ=|VALnv >Cg`Cɭ "Mr>=Bpk֖=ݢ?F{(Y}+ՠCHߐ]:8޶#8?Sxpa\~QП}`]G?:miއh^O/^7h<߭{Wt4<]҂N1GIwL7J=ma?o}Q"}-׮ a!`'`cp0A$av 501p}2zhgO,plx)xMbW&Q4 1zsᕛt0f68>īG_I~w; y󊏌_QOq0n@ɮ>U[nH<6;Ⰻ LQ_Q1/B6]S pѾ0~1{$\M= x6~0w'CÀ5͵)>b纫ayhO;'jr {:Tr;794K6^ȃo~0GS+[ ՛鉋v'̨^'SʛMq;ާނ<S.G w'1OC~${evO7wu7]&8nxP|LgˏLOܦsl CtYE^I=:N)NY9hO4ެ? 1 '?g<#6v= 7/K<0Ls!qp;, [] gG1~g~9џF8i R'z̗_aE-L]҅dwc{$X|7 ?q$3^BIfB o&1~L?>| D W᪟:&Mj3&iA˧Ø1`[!ċ۹n6SR>9~L߱?EkX)Gn,Ÿ _i'00%̟1fXz롺 5޻~N@V>gΐ b8Tg(ICXirMܨA;`|' ֵYVgb!GW>&6\N|YnT4O0?Bѳ~΂wz>>Ï'?~Ju?g'iP)Wp ֺэd/ ԫ6QD0?Kd5TWA%z wϓMr&6gt*Yuv"!diR.q4c>,\8 ꫥ!?b^b?58N&2hz7Ͷ`3#{Vz2x=~81LO'#Y>IԃhƇģ}@xT7Je2?|oG(uxQIQIhR}ɁGP{q̲Wu9qLX0D;î6a QlI~k-Hy91c ka!6Tf&#X'>euj>j[| ?{n|Gb=y$v[{ g o1~b~2#M}uD%Qtb8vp^1j\Uuxũ~WݪE߀``;9E糑,/AA{b|?@lޅ3cX>K:\)_s% 歟U}D21wOțX|Y+Sl#4<셯NSЏ _2>@{ź=-m~kKwG=%1~L8=Me%Uux2}Ig]VOc;oy̯Ĥ͞ rXk_!l64;l*Sl3gq`!=G9s~eex?Ź9O,BNĤͺ&x8}3SY~'-)VQGS!Jd1c}b~4i糘e~ +~9P??Ubyks?3#sh~NV?}:K:'Qo1â=y![<qLx~bl _wnvDzN14KaRZovۢ!GY永$aZAOwqSOCDI2WM5ʓ?y¸8= <]VY<Þ3{Ib k_cX<x4@e}* L&G)+UË ?UO. \{'?Hc '݊7geC.ϒƒLO‘gY^{/"^HטG2?!Q0i 7 C }02f18WSCF>͂;>KJ<ƥi,LcjHWa0mӺzOd$ŋ9^&O0~eq^vw^avbz6زCKҙ}d"t^x7^30= g/gs|TZz f(1Qӧ.a (6QLX&#[C{3>3bןu*2}'훣 #^գzB™1<3C~q ٮ !~Րzv_5jm7Hr'd0 &wm"T=OɎ=ny} 2^;xaU x h b9=VŲ&3x&8NJUJ>Oztfi4Ǡ2<1t[xfyB6eV/]!Ճ3ΐ 7LdhALM7 o1b|L092/տYih -)y$S-HvyrxEm߇ZϢsGx0=c8t wiNGCn}@9}BD!1>.buNb'iB!Mv_ YXXݒGGr0Ec|W"E=Z{eQI~fE~VZdDYv,by|~|ͷBk_G8^>a_?=L`~8懰upʇ PGR}}c9VOQ!Z)vj¨f!FT͌|&6|h=aq+%Ɠ8ٌ_s*N+)R<"F?~W I>3+웯ji;;c=akG|]}Wl9|ʀ4tMq`& w?f7LsVc[yueqA><$M>|l~ ~33Er+*c{]2~+duV!{6[ 5pų/$&Qf,3|UZȳ %V%Br4fϞ0{yƼ7 Wjw3D.IJ<6s?'b~.CuzyEﲺM?Oˏps"v\Ef(6?Vi ^Y3{cvJ37٬?|3?|ubW#|kflSǂlvyv,Meyj&Cܘ?ʶ=_so?,zXt:ODC~3Nv t-lLa}<ɇ?#6D0> gƸ1_ $6KbywNq D9QaJ1=x+L NZ9OϜ(R0_Ozn Yz1JOW )dy&Yia}&tfϘO'?~r3 % :W e)(@uX1a`y+X\Kx@}a4~; ȟdW,*b9Hgq']LXOk=$&G\|}4 mQBvqB>'>Y܇7=y G{{` ס`Bv^P }9=:5'S|n7dGY^uV):zƞxʻ1ļKf7/6*8GY;Ƒ0( "Lz8z? 
zx{~r3;zVZ]m7~{O9>pzz?Bq@,/'=<7dionL/i}{z1>w/t9I#=e { :\GCx0`=]'˺k ?PNZGV^׿>K,ޤrq8M>ܯ[~C\Gl;u~<οm}C@'#{] tch{"YY}4\c;)FwoIO[_^l4gѺ>7h'n-ۿ-gi5ҸoOȣ_\Oq')/ox@a0Fe<xޫ%|H:KOlaL ^ \Փ,藐'Q~\?r:'wO a}@G\o<-9i{bCpoboL1O+9~T^S Cu3W{('(%ƧNnCH`'*v ;i7;v>}JD\ﻺ kI?yrҺ6׸1++icq?<q /W!NOq{uPh=*Nr`+?S?G'U 5=j\w@ƙZد/'q'Sz&f:7S¾zzIb ]PGW#|!8$ɸ>l=:K%\gN뺑G6 lz;~O1{8k1Dpo3\ۢZ3Gq_"|>S \D=ֽ0縞㢃J3~ףbUOWnwn#|=?&jؼ2vEpڇϣqxx4gKmȟkվt !c:i_L?ȏc^^ڋ"?ԣ}3deu/?#o ^~.@S֬j}+GN^G=%DWx?kaαǝeGQ^ :6]zi^>JܑG3}02N8|QѾywy}X گQ}W!z,rl Y0yB$e<Ɵ b{xx3s w|h?Q/1xǗ\q<紻{a \T=Gr,Q}>8+z.GQ7$q==ev~'/y=grް 1p."=Bu'Q`^_O|>BqPs^?Grq<`Զq}+w?GcPyt!:>G; fq:?ː@8ȣ/gw^{ {xV1}jqV%`݁W#TguG ڧ':5<%?͞o}1楨_sa:%#vpHlYrGqY7ɉ.λ2-33)AFy=[Mkri_YύɅţ,x;O',ú%~*S\~"?1hd'_vm9L=g(Xú/{~D0x_H'RQ"/i[L^uI(_8/2b:ţ ɏ8#/oxutKGߦ|Ǻ9#yڇ,2ml|}φP< )c1}k]qUQ߄x ף=Ӿx=C}ds&z?qK_oȌzƼ3zƣXQ $LO2QQ;LG' 4+җţϠ|XǛQ6>=>D}4H?q-p2fWXw L>SbvO'ȣX'}}V|s:ʒyGC|#s6{a^e?/Qbo;ȣO׫&nmZy"(v?a47Ho&")VZƺJr J>3>3>7gD >H >슖ӷc =&xx;bb q!~kz|S3sSNzfj\pdk`jdQ ?2ol6. \Wt XFzZ '~mdb|gx׵iWG}>Yb͘Vm2Bddj-MC#-3[̛0 }-sAq+@a>Q4g, :VESٟiE'fZfNBi}s}b-&PffjHk Ͷl2lܦ-hBkm/uK#+*.?6)-Cy0c>>14qzZ[7ZXjYhR)5Si/PwUa;hxڒ^GU~d_]0U575(\kfCa;fMU-d>>e #6W =Ư[K^&I ދcG >> y.?0۲&bj㭟Z > YZi#Ӎ۳ޥOGGF}s}b0K3372S%h(fbe׿JqOcba:U9Y%vR`<_?N+OjNWNܯگgX9Ghw%S*c)!+XYtZ?_s'W4?wXӿvxN_uxNϷ%I8dhOUï~u+G?i?FKLMUMLf{Mb*U%f]}U&Ώi<3Ɵԍف*-ORyԆ ocADp?ݖO:FV^YAaXee9yqc^ڗO}~o($)? s%k \`CZ pJt 7* /ߥT'°%C)2 2ɰ/ 2p( G2e(P1 *-o9MNƣu5(^6nuz(P:K8}" h[kR_(ʙ+/ EǗ{BnvC{֚ >tSu~<JUGɎRYc5Hβ=NAXأ@(*]J_:$= ,̛<3V(f杉P:w儅sVCy- ӔnɄ wr8 vkͼҡ$Rb.Bfw} e=j,Br֥צݗ#zYi-|}i7|'5GSaEH֍K"tGI:*wprŻCyؖP驖ieuPzX(P7v('שٗ \Pyg)F݌;oūKZk;;Jx]"%mfz/im,nTj"YS=q-;=PւkSd24KwN~*iˋAEш)kWHA: Ptʩc^̳q/~!N̈́c~r]ܘҡ͠BoxPR-N#9¯ZZX _)=?L;twšy7I7= 5ډgVW||@Sw,IMuVY*tޭOm,0jk,TI%9?WY;V7;ʸŅJ~U+eMqʣ:~#n@T ozgBe'&,3IIZS4ʶ(ocʭTȌg6u9jΥǟr}ځ#&fa*P[5^ >m T\}F`2 F(r> jmW²[g:_ȄM/2@m *˄xhZ|m8!-{0ESu.ⓟ:AK^u E_qe*.? +nLFX{8'(h2v;Fa4aWt1v+3/CZ0uw( -(J|bd(ڱY$g+W? ,m0IBcWNXT~蜊&-y:6HٙH}嘮x?w{pnpΖP4j_@ٍ{߇yLo.?I3|/P4qbն̂=BWVm6sN޽j<OXݹًPQ}[P3͂ÄW@쨗Cՠ1Q߂/;˝~g95hrɩyFsu7Ss95_w=OV\{3r~2&}{qNY~,;Iڽq0A'Nj˔qVՙ\'–q;F혬׳}4Pj/bfC>~NVwrz TUܻ q74@Xqً-ȷWa{J'\vҌ4uu B[AbۅkbDjH'<0OsVD(:|y u,73O!"OO͑oxi&9>W|߱|^]?P{9kAIWNNQr9cNjw)_;Gߩ˻6#T{P{i_C; +w ԌJ>m9u?R=qMBɵt{u; ͦ K&XT?;͵a{eHkuǵHaaO=Ont;:Qlc\FaTvRPsCZt1YwTٞW#_]*`=}Npn[I 7;Z3|{/jrqtz#aPo!gGJK+"rR: J YJn>qۓs xPxU[xŌΉ8\xxCr#|pŃBd_-ʵm4|Go>Am17^*g|]o.+_,U:|Z"ǽlsC/:u,+p|'qzÐ% olfݻ*17ݎ΂Z.n2|^n׍&cߑ5mfIIڄ 9q{/#I b;Bk$gqUs'r@] 2#n"R9p⯏lsh}VgjI/o,ISC!SZV;bAw3-[=* xߘagC9Oo{1$z؇gK?Stcv1 1OL\^0Cv֏_'?) 'Woof~ yn?ܧ>6WK+@FJE3cgfj[\25Y҃6Hڢ"-JQ 7׈GF VܵiM#z P|#$}6aoT)$b1K=Q8r-ᢝe:*G%}?YXyÍuS8rm|0x3pwY,ge:R0(R;vNUk?+!DykL㦧d=?Aeaҝ9]kt^fMs೷?oIq U jOv|dH{ aNۯk@ fC^DB i_!qvBِ8{g]P 5Tw=Qyˍj@ 3||m͝0s邮>~1|]}j'Zu- }$s3Z-ON"CUO՝^[u*}Q&\kǐ+ސjVMmc\j+ #^! ])iq;a͘!q~ķpZiÝ,"1_#[nպg='[9KS`೐~6`A¿j< k ~O-I /V2 +DpY%]@DA{&m9K'&~ : tNF kF퍀 K2v }T&I,,<1{*73yD𽧒뢅"ϡhoA%6;JXlAd7MXڙ 1#K[29>J8tRIAiזּnRoAgZAS@Q٠PF%'>&aiK-YcFX.NOa;',.zkxK=?]x/2[!g+&Mp $rL0 w>g?=&uPJX~ü|My#ZGŠ D&.",_ u2a%QHXko2!z0ŠOJϤ[Byj }yBOӱ7nj~+/GX3>á>?/[ZrT%*2Dq2KmU[~7ĿPDN [εjfMuP^%j۾1}UӫLr}7;Zln ڇ\{-ae#RtZ'nMXP5,=da^}A御זoʬ7']S"7CЇ㗫gͻ?߈7l_ԟfv\ȳF{T6rk8}ڈ6◳=-G@MOkZCNĥ~rV=ȓ)uxnnqXUe67'=^# / ,$]o WS7͉a};>,|o&m =+5캿mx_).R' zhoCtٛ1.G :vO'Qwr'?ֶ|€4oDnMuͨ=6f9'9z=^Da&ń'E&n:i;a ~Zn*1N$nTL"L^3]"f!aL愔X;lЯv.c yWp5x$;jO >dUuC g*>$tWrYDm^}r ~*o9;}-jc{Xw t!~)_~Vjjو}5buބ9nG~8zC3Agz{EKmzw&CfCpG{85 Nm sg;z=5M% =->HHzj>^+]gWwL{WISbot p=pO*-EϺԥ8倸B;MzOt~/׳ݶAަfN',j'c^*8@X&mZ7¶a~ֶub6ay%0hɥӂG.4)LˈCf/ Cٳi_w8?JUoDوˇm$RC\'}.au˷gΩEXVQaE|fx[e|;e~\=QSiDXKDȵ_>H^jw*g"?5l$lQ¯[N:ȉrG/D __[{Ⱦ j0?TOR*ݾѵ/4 . 
"&u+ʈOev)VpU׈~W`8PʒNUO:gl{u#tM|VB63 t{eUk~/~9 L2>R)a˰eRJW(NY|S~ƊiDvO&mwћ3zmhӇbϫgjKƻs3JO 1Ү&Q=N068;$Ya򹕧B*zhv}aD:kcMrm*d3no0>͵ sv- K3c*f8xk}"xwy5BB!^Mt蘳&[UL~8^$G!{?űww־3$Ls| Mʲȥ׺ۂ6] |*% nqQXX [46Pc1=v*{51$"`i KQ *v+]wV 4W3Lhb/9^Gh 2voR/&ke~Tv1㍰A"1wu c6r&{\0Ơ?f E 琉>?>z]~w|_i"7w/yƾ}S?)`s+f[>tIBoѵ_֯zŸY=mU<Ε OO5qS ',`chw@[j\hzM.Ls../m3q]xᴵB3G'WVr`<3cUIzpÂ1zO1LFxkkzWxdqsYGOMqz:'ՎsnXq8ִUa"p2uzV2(Ao8yxc/vYsׅm7m qQOc0a; ʊ0?MvMuz[lkr. {}Tt1֜(jcS)d]AEc~Ō˽1s֤LּczTAFVD&WmV?6P$ix:ѭ"|QȚM}^bTZT1fUG2F ;"1&jK> cN7g݁/""?VDO1 \f%"&n8c-?צyCYUR5]3>IwcbK1&ؓwDvvz=c^̘۵j17jݏ~&F;XFW52i;H C7a<;ϛW3rK޽EM]np`\`tU4 Su0 CN\pU}߫8>"ٴm٢w-etR~;Φwݓ攁Q1pÝgeyqg_Neܫ%TO-N/*mZ\O,Ŕϫ>yt}&<.e>㖺 ]-^2WWŜF :x`!}ezctgJ~VHDտw9k"jV˴m"хc+{-x;|8CGİ.CĘlwTk:}ɘ8ag./y_zcj)kX͛] ,.*=f:\TfΌ,50~7G-]Ng X>+F'lOe Z(>at)Y[O 4!lj6sɍ$qW̎7ͩđ˙V2q{*1&FւQmi?}c ;]c ˿c-WUyBݴlJ7g7JͅbNCb FoZZhϨlqՏ :]aT7_=jSDqM q zV-55BfB3 &3Ku$+ncpʵzUզ[Ԋ1 Z2{ _T?Kqm|ZAFv'y-u}&AnW}?=-n"X{8chګ3o9hhccKqਢ8FͻB3wZ<AV[ga N$B#|*L.^VšS+vB=ݎ"Ga8~ cboC?a~s8cL<ܕ1]]1Z9]_gjĈE!įf"\"ճz=ԥF;mO۵IzU1cT =_2B挪[.ek\U u m~Yn~m:rnsf-xȽwބ 1/kQ;l3r|BFߞZw0l]^#e;wuİ[FXܙQyeQzkCwGof!4łsBJq^&{5Wv10n.e \٢[֎Se cq׺˒Og tFw5̨ڛYFu}WnZ̪XϿP|N4ɶ>BW{EZ@uveT >Gwc3gTYtvovT7ֶ*b =u,w#K+~u>&xhJg׬;¨jxn+? f|1c`̜kS` `jv)wk!B}t_Bf=/۶/YN[%4-:Z1i+w5aTi}[Uowmͨ>5D5CCfճw nxd FM#ڗz{ZFh>S/'9nuߕJ( 6 ܾiv{xo_wżw4Ry-6_7|1.xMB*ޫݫ-&ģ<1~rY3ӍO:ohV*{"Όs>rԜ3}hТY[zq9gq^XAQ_ɷm?lTq}"͜}PsYsKgLvisǑnׯ=gǹt@qߣ׻Ggj>l8O (Kb5oht!e^9 gU;'NW|0>2Z~cq[>u}/9*ݮw8|pr׃2nS]mA19"2k_=w`ƿ#NtꚃFCuJ\[T"-.wqU7u~ZUZf~fxaQZ^ff{Qb;ӕ4V6HS72rhGGn=zI|ע(z~b=mMExG1Q6C &c6z`w֧iՊ;*/n,?$W]}coM>HCG~YxXG(vó[E Viеԥ9;̶x-%WlWxĉvsEƉS)iG71[S9%i-xx]W}sxRc PnXǹv(^\>.H_:cmS=B0bS~$0iIxBpCŕ\/|8qS՝S]<wM{->)O>>'[=Ug9(\[=WZ~=ӣDCW8x|tO8 vG7$'E0B]QxCNCA^;|W^3a="9ޠ_%wy#r׎z0hF\zH??ꡍ^%,/ṭ!zv&>_W)Sk¿=Mk8ߢy ҃?wȉ$9GIO?쎸= ) '>yF뫔$ᵥ/rh7RSv!;|ooј{TD ?eH@"}A("hцό"K}贈(1-J1|PDѸE|,ؒD'T+^/aT]"#E ?d6DLSĐqKDxD^O87P6]lV{5ڣ;%%ɮh~4[^5K7,qFBM]#^''=7%ӐQߨN*m?~ɮb5گDG%|_Cr|hzG>f0_Qj"/&w$T"$Z!E r4*&`g9?KN_M=F__^D(e?x;RTD "jV/q;C.)G| )q'7;qw:츈5Wf5W3yoa"RṂķ "OAoц0@T?P!ATπTQ`OL oBg%GInc%C~{ȥw% [E6Mٔ^C#̢M̦qgSA\[.\,3\RҸf>rΞo<:;7\F?DV/DG" ωd$C|.{S99,w}C\{Gu{ +#?Ss]7ÄE!'C]r]iq=;3ȗ~:{&$^ˤzg )$_ t^ɟEMk }'x/a&ə"pG{=;}/ާyڡ$T{գi x/x3MMp|PKqAA%;Q=OqGCupEyTOfeSܧQ^ˤ5"EJA$g&]}?IiįiO!\ZT+Ox+Qodw/" |E^YT'3SoTȺsL"qz 28 )gṢ+琏o9N;=wnσhܙ;#J2P`"oްxhFBTX!b~P? 9hIw<#C]r8zDzG8?t #3(^IT%Qwx!Q;#kR Q7O;&Nx҆۹_B%r(>ZZё)kGudGnvy[4<1xK6O4O#(8;ˈKIdϧxu=<+G:Cu\O{[4^I/w)daˠg,Z?Ȣ ?d>~%yɯiGR<"{Rށ}귔G>~^KO:ė9ϊZ\?8;*(<*4?j)^t'2jp9n=[Y4/dDt]@s!^OZʏ_؍?H_#=KN /~y:L:4ʣ7niOTny(+}T!os%^*Kr>!XO|O8<:+2y^?:87^72/#c~y{xd:_~) ^i诀ƣz6ċ's3|xF ?`} =ˈx3GA7Yo# !87߀h~+_緲oTCnz;eQ}vY){>/3C FInw/0W'Ov O\/o ;!&>C~Rzy,C^E.uZD=x~o(Gk2OBo$O!Ϳ`O^w54Oz ~"cG*|q:QOP_S}OR]/; w?zבW\GoI>C h]/ygdG~N`иrRryxģ|~z?և/_?OQ_I6i?|naOyѺUC|~B~/)G8SR,+CNǐ ;>@x}s@\Cȋ?}9Sއǰ|b?~]x5N3g<'f<'u"!>Qb<̣8E:H|!?.ȍ׈K?yBǼ9[4>^E. 
robustbase/inst/doc/fastMcd-kmini.R0000644000176200001440000000134014555212577016771 0ustar liggesusers### R code from vignette source 'fastMcd-kmini.Rnw'

###################################################
### code chunk number 1: init
###################################################
# set margins for plots
options(SweaveHooks=list(fig=function() par(mar=c(3,3,1.4,0.7), mgp=c(1.5, 0.5, 0))),
        width = 75)


###################################################
### code chunk number 2: h.alpha.ex
###################################################
require(robustbase)
n <- c(5, 10, 20, 30, 50, 100, 200, 500)
hmat <- function(alpha, p)
    cbind(n, h.alpha = h.alpha.n (alpha, n,p),
          h. = floor(alpha * (n + p + 1)),
          alpha.n = round(alpha * n))
hmat(alpha = 1/2, p = 3)
hmat(alpha = 3/4, p = 4)
robustbase/inst/doc/lmrob_simulation.Rnw0000644000176200001440000016447413774561564020246 0ustar liggesusers\documentclass[11pt, a4paper]{article}
\usepackage[a4paper, text={16cm,25cm}]{geometry}
%\VignetteIndexEntry{Simulations for Robust Regression Inference in Small Samples}
%\VignettePackage{robustbase}
%\VignetteDepends{xtable,ggplot2,GGally,RColorBrewer,grid,reshape2}
\usepackage{amsmath}
\usepackage{natbib}
\usepackage[utf8]{inputenc}
\newcommand{\makeright}[2]{\ifx#1\left\right#2\else#1#2\fi}
\newcommand{\Norm}[2][\left]{\mathcal N #1( #2 \makeright{#1}{)}}
\newcommand{\norm}[1] {\| #1 \|}
\newcommand{\bld}[1]{\boldsymbol{#1}} % shortcut for bold symbol
\newcommand{\T}[1] {\texttt{#1}}
\DeclareMathOperator{\wgt}{w}
\DeclareMathOperator{\var}{var}
\DeclareMathOperator{\diag}{diag}
\DeclareMathOperator{\median}{median}
\DeclareMathOperator{\mad}{mad}
\DeclareMathOperator{\Erw}{\mathbf{E}}
\SweaveOpts{prefix.string=plot, eps = FALSE, pdf = TRUE, strip.white=true}
\SweaveOpts{width=6, height=4}
\usepackage[noae]{Sweave}
\begin{document}
\setkeys{Gin}{width=\textwidth}
\setlength{\abovecaptionskip}{-5pt}
<>=
## set options
options(width=60, warn=1) # see warnings where they happen (should eliminate)
## number of workers to start
if(FALSE) {## good for pkg developers
    options(cores= max(1, parallel::detectCores() - 2))
} else { ## CRAN allows maximum of 2:
    options(cores= min(2, parallel::detectCores()))
}
## Number of Repetitions:
N <- 1000
## get path (= ../inst/doc/ in source pkg)
robustDoc <- system.file('doc', package='robustbase')
robustDta <- robustDoc
## initialize (packages, data, ...):
source(file.path(robustDoc, 'simulation.init.R')) # 'xtable'
## set the amount of trimming used in calculation of average results
trim <- 0.1
<>=
## load required packages for graphics
stopifnot(require(ggplot2),
          require(GGally),# for ggpairs() which replaces ggplot2::plotmatrix()
          require(grid),
          require(reshape2))
source(file.path(robustDoc, 'graphics.functions.R'))
`%||%` <- function (x, orElse) if (!is.null(x)) x else orElse
## set ggplot theme
theme <- theme_bw(base_size = 10)
theme$legend.key.size <- unit(1, "lines")# was 0.9 in pre-v.3 ggplot2
theme$plot.margin <- unit(c(1/2, 1/8, 1/8, 1/8), "lines")# was (1/2, 0,0,0)
theme_set(theme) ## old and new ggplot2: stopifnot(is.list(theme_G <- theme$panel.grid.major %||% theme$panel.grid)) ## set default sizes for lines and points update_geom_defaults("point", list(size = 4/3)) update_geom_defaults("line", list(size = 1/4)) update_geom_defaults("hline", list(size = 1/4)) update_geom_defaults("smooth", list(size = 1/4)) ## alpha value for plots with many points alpha.error <- 0.3 alpha.n <- 0.4 ## set truncation limits used by f.truncate() & g.truncate.*: trunc <- c(0.02, 0.14) trunc.plot <- c(0.0185, 0.155) f.truncate <- function(x, up = trunc.plot[2], low = trunc.plot[1]) { x[x > up] <- up x[x < low] <- low x } g.truncate.lines <- geom_hline(yintercept = trunc, color = theme$panel.border$colour) g.truncate.line <- geom_hline(yintercept = trunc[2], color = theme$panel.border$colour) g.truncate.areas <- annotate("rect", xmin=rep(-Inf,2), xmax=rep(Inf,2), ymin=c(0,Inf), ymax=trunc, fill = theme_G$colour) g.truncate.area <- annotate("rect", xmin=-Inf, xmax=Inf, ymin=trunc[2], ymax=Inf, fill = theme_G$colour) legend.mod <- list(`SMD.Wtau` = quote('SMD.W'~tau), `SMDM.Wtau` = quote('SMDM.W'~tau), `MM.Avar1` = quote('MM.'~Avar[1]), `MMqT` = quote('MM'~~q[T]), `MMqT.Wssc` = quote('MM'~~q[T]*'.Wssc'), `MMqE` = quote('MM'~~q[E]), `MMqE.Wssc` = quote('MM'~~q[E]*'.Wssc'), `sigma_S` = quote(hat(sigma)[S]), `sigma_D` = quote(hat(sigma)[D]), `sigma_S*qE` = quote(q[E]*hat(sigma)[S]), `sigma_S*qT` = quote(q[T]*hat(sigma)[S]), `sigma_robust` = quote(hat(sigma)[robust]), `sigma_OLS` = quote(hat(sigma)[OLS]), `t1` = quote(t[1]), `t3` = quote(t[3]), `t5` = quote(t[5]), `cskt(Inf,2)` = quote(cskt(infinity,2)) ) @% end{graphics-setup} \title{Simulations for Sharpening Wald-type Inference in Robust Regression for Small Samples} \author{Manuel Koller} \maketitle \tableofcontents \section{Introduction} In this vignette, we recreate the simulation study of \citet{KS2011}. This vignette is supposed to complement the results presented in the above cited reference and render its results reproducible. Another goal is to provide simulation functions, that, with small changes, could also be used for other simulation studies. Additionally, in Section~\ref{sec:maximum-asymptotic-bias}, we calculate the maximum asymptotic bias curves of the $\psi$-functions used in the simulation. \section{Setting} The simulation setting used here is similar to the one in \citet{maronna2009correcting}. We simulate $N = \Sexpr{N}$ repetitions. To repeat the simulation, we recommend using a small value of $N$ here, since for large $n$ and $p$, computing all the replicates will take days. \subsection{Methods} We compare the methods \begin{itemize} \item MM, SMD, SMDM as described in \citet{KS2011}. These methods are available in the package \T{robustbase} (\T{lmrob}). \item MM as implemented in the package \T{robust} (\T{lmRob}). This method will be denoted as \emph{MMrobust} later on. \item MM using S-scale correction by $q_{\rm T}$ and $q_{\rm E}$ as proposed by \citet{maronna2009correcting}. $q_{\rm T}$ and $q_{\rm E}$ are defined as follows. 
\begin{equation*} q_{\rm E} = \frac{1}{1 - (1.29 - 6.02/n)p/n}, \end{equation*} \begin{equation*} \hat q_{\rm T} = 1 + \frac{p}{2n}\frac{\hat a}{\hat b\hat c}, \end{equation*} where \begin{equation*} \hat a = \frac{1}{n}\sum_{i=1}^n \psi\left(\frac{r_i}{\hat\sigma_{\rm S}}\right)^2, \hat b = \frac{1}{n} \sum_{i=1}^n\psi'\left(\frac{r_i}{\hat\sigma_{\rm S}}\right),%' \hat c = \frac{1}{n}\sum_{i=1}^n \psi\left(\frac{r_i}{\hat\sigma_{\rm S}}\right) \frac{r_i}{\hat\sigma_{\rm S}}, \end{equation*} with $\psi = \rho'$,%' $n$ the number of observations, $p$ the number of predictor variables, $\hat\sigma_{\rm S}$ is the S-scale estimate and $r_i$ is the residual of the $i$-th observation. When using $q_{\rm E}$ it is necessary to adjust the tuning constants of $\chi$ to account for the dependence of $\kappa$ on $p$. For $q_{\rm T}$ no change is required. This method is implemented as \T{lmrob.mar()} in the source file \T{estimating.functions.R}. \end{itemize} \subsection{$\psi$-functions} We compare \emph{bisquare}, \emph{optimal}, \emph{lqq} and \emph{Hampel} $\psi$-functions. They are illustrated in Fig.~\ref{fig:psi.functions}. The tuning constants used in the simulation are compiled in Table~\ref{tab:psi-functions}. Note that the \emph{Hampel} $\psi$-function is tuned to have a downward slope of $-1/3$ instead of the originally proposed $-1/2$. This was set to allow for a comparison to an even slower descending $\psi$-function. %% generate table of tuning constants used for \psi functions \begin{table}[ht] \begin{center} <>= ## get list of psi functions lst <- lapply(estlist$procedures, function(x) { if (is.null(x$args)) return(list(NULL, NULL, NULL)) if (!is.null(x$args$weight)) return(list(x$args$weight[2], round(f.psi2c.chi(x$args$weight[1]),3), round(f.eff2c.psi(x$args$efficiency, x$args$weight[2]),3))) return(list(x$args$psi, round(if (is.null(x$args$tuning.chi)) lmrob.control(psi=x$args$psi)$tuning.chi else x$args$tuning.chi,3), round(if (is.null(x$args$tuning.psi)) lmrob.control(psi=x$args$psi)$tuning.psi else x$args$tuning.psi,3))) }) lst <- unique(lst) ## because of rounding, down from 21 to 5 ! lst <- lst[sapply(lst, function(x) !is.null(x[[1]]))] # 5 --> 4 ## convert to table tbl <- do.call(rbind, lst) tbl[,2:3] <- apply(tbl[,2:3], 1:2, function(x) { gsub('\\$NA\\$', '\\\\texttt{NA}', paste('$', unlist(x), collapse=', ', '$', sep='')) }) tbl[,1] <- paste('\\texttt{', tbl[,1], '}', sep='') colnames(tbl) <- paste('\\texttt{', c('psi', 'tuning.chi', 'tuning.psi'), '}', sep='') require("xtable") # need also print() method: print(xtable(tbl), sanitize.text.function=identity, include.rownames = FALSE, floating=FALSE) @ %def \vspace{15pt} \caption{Tuning constants of $\psi$-functions used in the simulation.} \label{tab:psi-functions} \end{center} \end{table} \begin{figure} \begin{center} <>= d.x_psi <- function(x, psi) { cc <- lmrob.control(psi = psi)$tuning.psi data.frame(x=x, value=Mpsi(x, cc, psi), psi = psi) } x <- seq(0, 10, length.out = 1000) tmp <- rbind(d.x_psi(x, 'optimal'), d.x_psi(x, 'bisquare'), d.x_psi(x, 'lqq'), d.x_psi(x, 'hampel')) print( ggplot(tmp, aes(x, value, color = psi)) + geom_line(lwd=1.25) + ylab(quote(psi(x))) + scale_color_discrete(name = quote(psi ~ '-function'))) @ \end{center} \caption{$\psi$-functions used in the simulation.} \label{fig:psi.functions} \end{figure} \subsection{Designs} Two types of designs are used in the simulation: fixed and random designs. One design with $n=20$ observations, $p=1+3$ predictors and strong leverage points. 
This design also includes an intercept column. It is shown in Fig.~\ref{fig:design-predict}. The other designs are random, i.e., regenerated for every repetition, and the models are fitted without an intercept. We use the same distribution to generate the designs as for the errors. The number of observations simulated are $n = 25, 50, 100, 400$ and the ratio to the number of parameters are $p/n = 1/20, 1/10, 1/5, 1/3, 1/2$. We round $p$ to the nearest smaller integer if necessary. The random datasets are generated using the following code. <>= f.gen <- function(n, p, rep, err) { ## get function name and parameters lerrfun <- f.errname(err$err) lerrpar <- err$args ## generate random predictors ret <- replicate(rep, matrix(do.call(lerrfun, c(n = n*p, lerrpar)), n, p), simplify=FALSE) attr(ret[[1]], 'gen') <- f.gen ret } ratios <- c(1/20, 1/10, 1/5, 1/3, 1/2)## p/n lsit <- expand.grid(n = c(25, 50, 100, 400), p = ratios) lsit <- within(lsit, p <- as.integer(n*p)) .errs.normal.1 <- list(err = 'normal', args = list(mean = 0, sd = 1)) for (i in 1:NROW(lsit)) assign(paste('rand',lsit[i,1],lsit[i,2],sep='_'), f.gen(lsit[i,1], lsit[i,2], rep = 1, err = .errs.normal.1)[[1]]) @ An example design is shown in Fig.~\ref{fig:example.design}. \begin{figure} \begin{center} <>= require(GGally) colnames(rand_25_5) <- paste0("X", 1:5) # workaround new (2014-12) change in GGally ## and the 2016-11-* change needs data frames: df.r_25_5 <- as.data.frame(rand_25_5) try( ## fails with old GGally and new packageVersion("ggplot2") >= "2.2.1.9000" print(ggpairs(df.r_25_5, axisLabels="show", title = "rand_25_5: n=25, p=5")) ) @ \end{center} \caption{Example random design.} \label{fig:example.design} \end{figure} \subsection{Error Distributions} We simulate the following error distributions \begin{itemize} \item standard normal distribution, \item $t_5$, $t_3$, $t_1$, \item centered skewed t with $df = \infty, 5$ and $\gamma = 2$ (denoted by \emph{cskt$(\infty,2)$} and \emph{cskt}$(5,2)$, respectively); as introduced by \citet{fernandez1998bayesian} using the \T{R} package \T{skewt}, \item contaminated normal, $\Norm{0,1}$ contaminated with $10\%$ $\Norm{0, 10}$ (symmetric, \emph{cnorm}$(0.1,0,3.16)$) or $\Norm{4, 1}$ (asymmetric, \emph{cnorm}$(0.1,4,1)$). \end{itemize} \subsection{Covariance Matrix Estimators} For the standard MM estimator, we compare ${\rm Avar}_1$ of \citet{croux03} and the empirical weighted covariance matrix estimate corrected by Huber's small sample correction as described in \citet{HubPR09} (denoted by \emph{Wssc}). The latter is also used for the variation of the MM estimate proposed by \citet{maronna2009correcting}. For the SMD and SMDM variants we use the covariance matrix estimate as described in \citet{KS2011} (\emph{W$\tau$}). The covariance matrix estimate consists of three parts: \begin{equation*} {\rm cov}(\hat\beta) = \sigma^2\gamma\bld V_{\bld X}^{-1}. \end{equation*} The SMD and SMDM methods of \T{lmrob} use the following defaults. \begin{equation} \label{eq:gammatau} \hat\gamma = \frac{\frac{1}{n}\sum_{i=1}^n\tau_i^2 \psi\left(\frac{r_i}{\tau_i\hat\sigma}\right)^2} {\frac{1}{n}\sum_{i=1}^n\psi'\left(\frac{r_i}{\tau_i\hat\sigma}\right)} \end{equation} where $\tau_i$ is the rescaling factor used for the D-scale estimate (see \citet{KS2011}). \noindent\textbf{Remark: } Equation \eqref{eq:gammatau} is a corrected version of $\gamma$. It was changed in \texttt{robustbase} version \texttt{0.91} (April 2014) to ensure that the equation reduces to $1$ in the classical case ($\psi(x) = x$). 
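Indeed, for $\psi(x) = x$ (and hence $\psi'(x) = 1$) the rescaling factors $\tau_i$ cancel in \eqref{eq:gammatau}:
\begin{equation*}
  \hat\gamma = \frac{\frac{1}{n}\sum_{i=1}^n \tau_i^2\, r_i^2/(\tau_i\hat\sigma)^2}{\frac{1}{n}\sum_{i=1}^n 1}
  = \frac{1}{n}\sum_{i=1}^n \frac{r_i^2}{\hat\sigma^2},
\end{equation*}
which equals $1$ as soon as $\hat\sigma^2$ equals the mean of the squared residuals.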
If the former (incorrect) version is needed for compatibility reasons, it can be obtained by adding the argument \texttt{cov.corrfact = "tauold"}. \begin{equation*} \bld{\widehat V}_{\bld X} = \frac{1}{\frac{1}{n}\sum_{i=1}^n\wgt_{ii}}\bld X^T\bld W\bld X \end{equation*} where $\bld W = \diag\left(\wgt\left(\frac{r_1}{\hat\sigma}\right), \dots, \wgt\left(\frac{r_n}{\hat\sigma}\right)\right)$. The function $\wgt(r) = \psi(r)/r$ produces the robustness weights. \section{Simulation} The main loop of the simulation is fairly simple. (This code is only run if there are no aggregate results available.) %% set eval to TRUE for chunks simulation-run and simulation-aggr %% if you really want to run the simulations again. %% (better fail with an error than run for weeks) <>= aggrResultsFile <- file.path(robustDta, "aggr_results.Rdata") <>= if (!file.exists(aggrResultsFile)) { ## load packages required only for simulation stopifnot(require(robust), require(skewt), require(foreach)) if (!is.null(getOption("cores"))) { if (getOption("cores") == 1) registerDoSEQ() ## no not use parallel processing else { stopifnot(require(doParallel)) if (.Platform$OS.type == "windows") { cl <- makeCluster(getOption("cores")) clusterExport(cl, c("N", "robustDoc")) clusterEvalQ(cl, slave <- TRUE) clusterEvalQ(cl, source(file.path(robustDoc, 'simulation.init.R'))) registerDoParallel(cl) } else registerDoParallel() } } else registerDoSEQ() ## no not use parallel processing for (design in c("dd", ls(pattern = 'rand_\\d+_\\d+'))) { print(design) ## set design estlist$design <- get(design) estlist$use.intercept <- !grepl('^rand', design) ## add design.predict: pc estlist$design.predict <- if (is.null(attr(estlist$design, 'gen'))) f.prediction.points(estlist$design) else f.prediction.points(estlist$design, max.pc = 2) filename <- file.path(robustDta, sprintf('r.test.final.%s.Rdata',design)) if (!file.exists(filename)) { ## run print(system.time(r.test <- f.sim(estlist, silent = TRUE))) ## save save(r.test, file=filename) ## delete output rm(r.test) ## run garbage collection gc() } } } @ The variable \T{estlist} is a list containing all the necessary settings required to run the simulation as outlined above. Most of its elements are self-explanatory. <>= str(estlist, 1) @ \T{errs} is a list containing all the error distributions to be simulated. The entry for the standard normal looks as follows. <>= estlist$errs[[1]] @ \T{err} is translated internally to the corresponding random generation or quantile function, e.g., in this case \T{rnorm} or \T{qnorm}. \T{args} is a list containing all the required arguments to call the function. The errors are then generated internally with the following call. <>= set.seed(estlist$seed) errs <- c(sapply(1:nrep, function(x) do.call(fun, c(n = nobs, args)))) @ All required random numbers are generated at once instead of during the simulation. Like this, it is certain, that all the compared methods run on exactly the same data. The entry \T{procedures} follows a similar convention. \T{design.predict} contains the design used for the prediction of observations and calculation of confidence or prediction intervals. The objects returned by the procedures are processed by the functions contained in the \T{estlist\$output} list. <<>>= str(estlist$output[1:3], 2) @ The results are stored in a 4-dimensional array. The dimensions are: repetition number, type of value, procedure id, error id. Using \T{apply} it is very easy and fast to generate summary statistics. 
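For instance, a trimmed mean of the scale estimates for every combination of procedure and error distribution can be obtained with a single call. The following is only an illustrative sketch; it assumes, as in the aggregation code below, that the array is called \T{r.test} and that \T{sigma} is one of the stored values.
<<summary-sketch, eval=FALSE>>=
## illustrative sketch, not part of the original simulation code:
## the dimensions of r.test are (repetition, value, procedure, error)
apply(r.test[, "sigma", , ], c(2, 3), mean, trim = trim, na.rm = TRUE)
@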
The raw results are stored on the hard disk, because typically it takes much longer to execute all the procedures than to calculate the summary statistics. The variables saved take up a lot of space quite quickly, so only the necessary data is stored. These are $\sigma$, $\bld\beta$ as well as the corresponding standard errors. To speed up the simulation routine \T{f.sim}, the simulations are carried out in parallel, as long as this is possible. This is accomplished with the help of the \T{R}-package \T{foreach}. This is most easily done on a machine with multiple processors or cores. The \T{multicore} package provides the methods to do so easily. The worker processes are just forked from the main \T{R} process. After all the methods have been simulated, the simulation output is processed. The code is quite lengthy and thus not displayed here (check the Sweave source file \T{lmrob\_simulation.Rnw}). The residuals, robustness weights, leverages and $\tau$ values have to be recalculated. Using vectorized operations and some specialized \T{C} code, this is quite cheap. The summary statistics generated are discussed in the next section. <>= if (!file.exists(aggrResultsFile)) { files <- list.files(robustDta, pattern = 'r.test.final\\.') res <- foreach(file = files) %dopar% { ## get design, load r.test, initialize other stuff design <- substr(basename(file), 14, nchar(basename(file)) - 6) cat(design, ' ') load(file.path(robustDta, file)) estlist <- attr(r.test, 'estlist') use.intercept <- if (!is.null(estlist$use.intercept)) estlist$use.intercept else TRUE sel <- dimnames(r.test)[[3]] ## [dimnames(r.test)[[3]] != "estname=lm"] n.betas <- paste('beta',1:(NCOL(estlist$design)+use.intercept),sep='_') ## get design lX <- if (use.intercept) as.matrix(cbind(1, get(design))) else as.matrix(get(design)) n <- NROW(lX) p <- NCOL(lX) ## prepare arrays for variable designs and leverages if (is.function(attr(estlist$design, 'gen'))) { lXs <- array(NA, c(n, NCOL(lX), dim(r.test)[c(1, 4)]), list(Obs = NULL, Pred = colnames(lX), Data = NULL, Errstr = dimnames(r.test)[[4]])) } ## generate errors lerrs <- array(NA, c(n, dim(r.test)[c(1,4)]) , list(Obs = NULL, Data = NULL, Errstr = dimnames(r.test)[[4]])) for (i in 1:dim(lerrs)[3]) { lerrstr <- f.list2str(estlist$errs[[i]]) lerr <- f.errs(estlist, estlist$errs[[i]], gen = attr(estlist$design, 'gen'), nobs = n, npar = NCOL(lX)) lerrs[,,lerrstr] <- lerr if (!is.null(attr(lerr, 'designs'))) { ## retrieve generated designs: this returns a list of designs lXs[,,,i] <- unlist(attr(lerr, 'designs')) if (use.intercept) stop('intercept not implemented for random desings') } rm(lerr) } if (is.function(attr(estlist$design, 'gen'))) { ## calculate leverages lXlevs <- apply(lXs, 3:4, .lmrob.hat) } ## calculate fitted values from betas if (!is.function(attr(estlist$design, 'gen'))) { ## fixed design case lfitted <- apply(r.test[,n.betas,sel,,drop=FALSE],c(3:4), function(bhat) { lX %*% t(bhat) } ) } else { ## variable design case lfitted <- array(NA, n*prod(dim(r.test)[c(1,4)])*length(sel)) lfitted <- .C('R_calc_fitted', as.double(lXs), ## designs as.double(r.test[,n.betas,sel,,drop=FALSE]), ## betas as.double(lfitted), ## result as.integer(n), ## n as.integer(p), ## p as.integer(dim(r.test)[1]), ## nrep as.integer(length(sel)), ## n procstr as.integer(dim(r.test)[4]), ## n errstr DUP=FALSE, NAOK=TRUE, PACKAGE="robustbase")[[3]] } tdim <- dim(lfitted) <- c(n, dim(r.test)[1], length(sel),dim(r.test)[4]) lfitted <- aperm(lfitted, c(1,2,4,3)) ## calculate residuals = y - 
fitted.values lfitted <- as.vector(lerrs) - as.vector(lfitted) dim(lfitted) <- tdim[c(1,2,4,3)] lfitted <- aperm(lfitted, c(1,2,4,3)) dimnames(lfitted) <- c(list(Obs = NULL), dimnames(r.test[,,sel,,drop=FALSE])[c(1,3,4)]) lresids <- lfitted rm(lfitted) ## calculate lm MSE and trim trimmed MSE of betas tf.MSE <- function(lbetas) { lnrm <- rowSums(lbetas^2) c(MSE=mean(lnrm,na.rm=TRUE),MSE.1=mean(lnrm,trim=trim,na.rm=TRUE)) } MSEs <- apply(r.test[,n.betas,,,drop=FALSE],3:4,tf.MSE) li <- 1 ## so we can reconstruct where we are lres <- apply(lresids,3:4,f.aggregate.results <- { function(lresid) { ## the counter li tells us, where we are ## we walk dimensions from left to right lcdn <- f.get.current.dimnames(li, dimnames(lresids), 3:4) lr <- r.test[,,lcdn[1],lcdn[2]] ## update counter li <<- li + 1 ## transpose and normalize residuals with sigma lresid <- t(lresid) / lr[,'sigma'] if (lcdn[1] != 'estname=lm') { ## convert procstr to proclst and get control list largs <- f.str2list(lcdn[1])[[1]]$args if (grepl('lm.robust', lcdn[1])) { lctrl <- list() lctrl$psi <- toupper(largs$weight2) lctrl$tuning.psi <- f.eff2c.psi(largs$efficiency, lctrl$psi) lctrl$method <- 'MM' } else { lctrl <- do.call('lmrob.control',largs) } ## calculate correction factors ## A lsp2 <- rowSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi)^2) ## B lspp <- rowSums(lpp <- Mpsi(lresid,lctrl$tuning.psi, lctrl$psi,1)) ## calculate Huber\'s small sample correction factor lK <- 1 + rowSums((lpp - lspp/n)^2)*NCOL(lX)/lspp^2 ## 1/n cancels } else { lK <- lspp <- lsp2 <- NA } ## only calculate tau variants if possible if (grepl('args.method=\\w*(D|T)\\w*\\b', lcdn[1])) { ## SMD or SMDM ## calculate robustness weights lwgts <- Mwgt(lresid, lctrl$tuning.psi, lctrl$psi) ## function to calculate robustified leverages tfun <- if (is.function(attr(estlist$design, 'gen'))) function(i) { if (all(is.na(wi <- lwgts[i,]))) wi else .lmrob.hat(lXs[,,i,lcdn[2]],wi) } else function(i) { if (all(is.na(wi <- lwgts[i,]))) wi else .lmrob.hat(lX, wi) } llev <- sapply(1:dim(r.test)[1], tfun) ## calculate unique leverages lt <- robustbase:::lmrob.tau(list(),h=llev,control=lctrl) ## normalize residuals with tau (transpose lresid) lresid <- t(lresid) / lt ## A lsp2t <- colSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi)^2) ## B lsppt <- colSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi,1)) } else { lsp2t <- lsppt <- NA } ## calculate raw scales based on the errors lproc <- f.str2list(lcdn[1])[[1]] q <- NA M <- NA if (lproc$estname == 'lmrob.mar' && lproc$args$type == 'qE') { ## for lmrob_mar, qE variant lctrl <- lmrob.control(psi = 'bisquare', tuning.chi=uniroot(function(c) robustbase:::lmrob.bp('bisquare', c) - (1-p/n)/2, c(1, 3))$root) se <- apply(lerrs[,,lcdn[2]],2,lmrob.mscale,control=lctrl,p=p) ltmp <- se/lr[,'sigma'] q <- median(ltmp, na.rm = TRUE) M <- mad(ltmp, na.rm = TRUE) } else if (!is.null(lproc$args$method) && lproc$args$method == 'SMD') { ## for D-scales se <- apply(lerrs[,,lcdn[2]],2,lmrob.dscale,control=lctrl, kappa=robustbase:::lmrob.kappa(control=lctrl)) ltmp <- se/lr[,'sigma'] q <- median(ltmp, na.rm = TRUE) M <- mad(ltmp, na.rm = TRUE) } ## calculate empirical correct test value (to yield 5% level) t.val_2 <- t.val_1 <- quantile(abs(lr[,'beta_1']/lr[,'se_1']), 0.95, na.rm = TRUE) if (p > 1) t.val_2 <- quantile(abs(lr[,'beta_2']/lr[,'se_2']), 0.95, na.rm = TRUE) ## return output: summary statistics: c(## gamma AdB2.1 = mean(lsp2/lspp^2,trim=trim,na.rm=TRUE)*n, K2AdB2.1 = mean(lK^2*lsp2/lspp^2,trim=trim,na.rm=TRUE)*n, AdB2t.1 = 
mean(lsp2t/lsppt^2,trim=trim,na.rm=TRUE)*n, sdAdB2.1 = sd.trim(lsp2/lspp^2*n,trim=trim,na.rm=TRUE), sdK2AdB2.1 = sd.trim(lK^2*lsp2/lspp^2*n,trim=trim,na.rm=TRUE), sdAdB2t.1 = sd.trim(lsp2t/lsppt^2*n,trim=trim,na.rm=TRUE), ## sigma medsigma = median(lr[,'sigma'],na.rm=TRUE), madsigma = mad(lr[,'sigma'],na.rm=TRUE), meansigma.1 = mean(lr[,'sigma'],trim=trim,na.rm=TRUE), sdsigma.1 = sd.trim(lr[,'sigma'],trim=trim,na.rm=TRUE), meanlogsigma = mean(log(lr[,'sigma']),na.rm=TRUE), meanlogsigma.1 = mean(log(lr[,'sigma']),trim=trim,na.rm=TRUE), sdlogsigma = sd(log(lr[,'sigma']),na.rm=TRUE), sdlogsigma.1 = sd.trim(log(lr[,'sigma']),trim=trim,na.rm=TRUE), q = q, M = M, ## beta efficiency.1 = MSEs['MSE.1','estname=lm',lcdn[2]] / MSEs['MSE.1',lcdn[1],lcdn[2]], ## t-value: level emplev_1 = mean(abs(lr[,'beta_1']/lr[,'se_1']) > qt(0.975, n - p), na.rm = TRUE), emplev_2 = if (p>1) { mean(abs(lr[,'beta_2']/lr[,'se_2']) > qt(0.975, n - p), na.rm = TRUE) } else NA, ## t-value: power power_1_0.2 = mean(abs(lr[,'beta_1']-0.2)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.2 = if (p>1) { mean(abs(lr[,'beta_2']-0.2)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.4 = mean(abs(lr[,'beta_1']-0.4)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.4 = if (p>1) { mean(abs(lr[,'beta_2']-0.4)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.6 = mean(abs(lr[,'beta_1']-0.6)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.6 = if (p>1) { mean(abs(lr[,'beta_2']-0.6)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.8 = mean(abs(lr[,'beta_1']-0.8)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.8 = if (p>1) { mean(abs(lr[,'beta_2']-0.8)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_1 = mean(abs(lr[,'beta_1']-1)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_1 = if (p>1) { mean(abs(lr[,'beta_2']-1)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, ## coverage probability: calculate empirically ## the evaluation points are constant, but the designs change ## therefore this makes only sense for fixed designs cpr_1 = mean(lr[,'upr_1'] < 0 | lr[,'lwr_1'] > 0, na.rm = TRUE), cpr_2 = mean(lr[,'upr_2'] < 0 | lr[,'lwr_2'] > 0, na.rm = TRUE), cpr_3 = mean(lr[,'upr_3'] < 0 | lr[,'lwr_3'] > 0, na.rm = TRUE), cpr_4 = mean(lr[,'upr_4'] < 0 | lr[,'lwr_4'] > 0, na.rm = TRUE), cpr_5 = if (any(colnames(lr) == 'upr_5')) { mean(lr[,'upr_5'] < 0 | lr[,'lwr_5'] > 0, na.rm = TRUE) } else NA, cpr_6 = if (any(colnames(lr) == 'upr_6')) { mean(lr[,'upr_6'] < 0 | lr[,'lwr_6'] > 0, na.rm = TRUE) } else NA, cpr_7 = if (any(colnames(lr) == 'upr_7')) { mean(lr[,'upr_7'] < 0 | lr[,'lwr_7'] > 0, na.rm = TRUE) } else NA ) }}) ## convert to data.frame lres <- f.a2df.2(lres, split = '___NO___') ## add additional info lres$n <- NROW(lX) lres$p <- NCOL(lX) lres$nmpdn <- with(lres, (n-p)/n) lres$Design <- design ## clean up rm(r.test, lXs, lXlevs, lresids, lerrs) gc() ## return lres lres } save(res, trim, file = aggrResultsFile) ## stop cluster if (exists("cl")) stopCluster(cl) } <>= load(aggrResultsFile) ## this will fail if the file is not found (for a reason) ## set eval to TRUE for chunks simulation-run and simulation-aggr ## if you really want to run the simulations again. 
## (better fail with an error than run for weeks) ## combine list elements to data.frame test.1 <- do.call('rbind', res) test.1 <- within(test.1, { Method[Method == "SM"] <- "MM" Method <- Method[, drop = TRUE] Estimator <- interaction(Method, D.type, drop = TRUE) Estimator <- f.rename.level(Estimator, 'MM.S', 'MM') Estimator <- f.rename.level(Estimator, 'SMD.D', 'SMD') Estimator <- f.rename.level(Estimator, 'SMDM.D', 'SMDM') Estimator <- f.rename.level(Estimator, 'MM.qT', 'MMqT') Estimator <- f.rename.level(Estimator, 'MM.qE', 'MMqE') Estimator <- f.rename.level(Estimator, 'MM.rob', 'MMrobust') Estimator <- f.rename.level(Estimator, 'lsq.lm', 'OLS') Est.Scale <- f.rename.level(Estimator, 'MM', 'sigma_S') Est.Scale <- f.rename.level(Est.Scale, 'MMrobust', 'sigma_robust') Est.Scale <- f.rename.level(Est.Scale, 'MMqE', 'sigma_S*qE') Est.Scale <- f.rename.level(Est.Scale, 'MMqT', 'sigma_S*qT') Est.Scale <- f.rename.level(Est.Scale, 'SMDM', 'sigma_D') Est.Scale <- f.rename.level(Est.Scale, 'SMD', 'sigma_D') Est.Scale <- f.rename.level(Est.Scale, 'OLS', 'sigma_OLS') Psi <- f.rename.level(Psi, 'hampel', 'Hampel') }) ## add interaction of Method and Cov test.1 <- within(test.1, { method.cov <- interaction(Estimator, Cov, drop=TRUE) levels(method.cov) <- sub('\\.+vcov\\.(a?)[wacrv1]*', '\\1', levels(method.cov)) method.cov <- f.rename.level(method.cov, "MMa", "MM.Avar1") method.cov <- f.rename.level(method.cov, "MMrobust.Default", "MMrobust.Wssc") method.cov <- f.rename.level(method.cov, "MM", "MM.Wssc") method.cov <- f.rename.level(method.cov, "SMD", "SMD.Wtau") method.cov <- f.rename.level(method.cov, "SMDM", "SMDM.Wtau") method.cov <- f.rename.level(method.cov, "MMqT", "MMqT.Wssc") method.cov <- f.rename.level(method.cov, "MMqE", "MMqE.Wssc") method.cov <- f.rename.level(method.cov, "OLS.Default", "OLS") ## ratio: the closest 'desired ratios' instead of exact p/n; ## needed in plots only for stat_*(): median over "close" p/n's: ratio <- ratios[apply(abs(as.matrix(1/ratios) %*% t(as.matrix(p / n)) - 1), 2, which.min)] }) ## calculate expected values of psi^2 and psi' test.1$Ep2 <- test.1$Epp <- NA for(Procstr in levels(test.1$Procstr)) { args <- f.str2list(Procstr)[[1]]$args if (is.null(args)) next lctrl <- do.call('lmrob.control',args) test.1$Ep2[test.1$Procstr == Procstr] <- robustbase:::lmrob.E(psi(r)^2, lctrl, use.integrate = TRUE) test.1$Epp[test.1$Procstr == Procstr] <- robustbase:::lmrob.E(psi(r,1), lctrl, use.integrate = TRUE) } ## drop some observations, separate fixed and random designs test.fixed <- droplevels(subset(test.1, n == 20)) ## n = 20 -- fixed design test.1 <- droplevels(subset(test.1, n != 20)) ## n !=20 -- random designs test.lm <- droplevels(subset(test.1, Function == 'lm')) # lm = OLS test.1 <- droplevels(subset(test.1, Function != 'lm')) # Rob := all "robust" test.lm$Psi <- NULL test.lm.2 <- droplevels(subset(test.lm, Error == 'N(0,1)')) # OLS for N(*) test.2 <- droplevels(subset(test.1, Error == 'N(0,1)' & Function != 'lm'))# Rob for N(*) ## subsets test.3 <- droplevels(subset(test.2, Method != 'SMDM'))# Rob, not SMDM for N(*) test.4 <- droplevels(subset(test.1, Method != 'SMDM'))# Rob, not SMDM for all @ \section{Simulation Results} \subsection{Criteria} The simulated methods are compared using the following criteria. \textbf{Scale estimates.} The criteria for scale estimates are all calculated on the log-scale. The bias of the estimators is measured by the $\Sexpr{trim*100}\%$ trimmed mean. To recover a meaningful scale, the results are exponentiated before plotting. 
It is easy to see that this is equivalent to calculating geometric means. Since the methods are all tuned at the central model, ${\mathcal N}(0,1)$, a meaningful comparison of biases can only be made for ${\mathcal N}(0,1)$ distributed errors. The variability of the estimators, on the other hand, can be compared over all simulated error distributions. It is measured by the $\Sexpr{trim*100}\%$ trimmed standard deviation, rescaled by the square root of the number of observations. For completeness, the statistics used to compare scale estimates in \citet{maronna2009correcting} are also calculated. They are defined as \begin{equation} \label{eq:def.q.and.M} q = \median\left(\frac{S(\bld e)}{\hat\sigma_S}\right), \quad M = \mad\left(\frac{S(\bld e)}{\hat\sigma_S}\right), \end{equation} where $S(e)$ stands for the S-scale estimate evaluated for the actual errors $\bld e$. For the D-scale estimate, the definition is analogue. Since there is no design to correct for, we set $\tau_i = 1\ \forall i$. \textbf{Coefficients.} The efficiency of estimated regression coefficients $\bld{\hat\beta}$ is characterized by their mean squared error (\emph{MSE}). Since we simulate under $H_0: \bld\beta = 0$, this is determined by the covariance matrix of $\bld{\hat\beta}$. We use $\Erw\left[\norm{\bld{\hat\beta}}_2^2\right] = \sum_{j=1}^p \var(\hat\beta_j)$ as a summary. When comparing to the MSE of the ordinary least squares estimate (\emph{OLS}), this gives the efficiency, which, by the choice of tuning constants of $\psi$, should yield \begin{equation*} \frac{{\rm MSE}(\bld{\hat\beta}_{\rm OLS})}{{\rm MSE}(\bld{\hat\beta})} \approx 0.95 \end{equation*} for standard normally distributed errors. The simulation mean of $\sum_{j=1}^p \var(\hat\beta_j)$ is calculated with $\Sexpr{trim*100}\%$ trimming. For other error distributions, this ratio should be larger than $1$, since by using robust procedures we expect to gain efficiency at other error distributions (relative to the least squares estimate). $\bld\gamma$\textbf{.} We compare the behavior of the various estimators of $\gamma$ by calculating the trimmed mean and the trimmed standard deviation for standard normal distributed errors. \textbf{Covariance matrix estimate.} The covariance matrix estimates are compared indirectly over the performance of the resulting test statistics. We compare the empirical level of the hypothesis tests $H_0: \beta_j = 0$ for some $j \in \{1,\dots, p\}$. The power of the tests is compared by testing for $H_0: \beta_j = b$ for several values of $b>0$. The formal power of a more liberal test is generally higher. Therefore, in order for this comparison to be meaningful, the critical value for each test statistic was corrected such that all tests have the same simulated level of $5\%$. The simple hypothesis tests give only limited insights. To investigate the effects of other error distributions, e.g., asymmetric error distributions, we compare the confidence intervals for the prediction of some fixed points. Since it was not clear how to assess the quality prediction intervals, either at the central or the simulated model, we do not calculate them here. A small number of prediction points is already enough, if they are chosen properly. We chose to use seven points lying on the first two principal components, spaced evenly from the center of the design used to the extended range of the design. 
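A rough sketch of this construction is shown below; it is for illustration only (the simulation itself uses the helper \T{f.prediction.points()} defined in \T{simulation.init.R}, and the function and variable names here are hypothetical).
<<pred-points-sketch, eval=FALSE>>=
## illustrative sketch only -- not the implementation used in the simulation
f.pred.sketch <- function(X, npoints = 7, fraction = 0.5) {
  cv <- covMcd(X)                          # robust center and scatter of the design X
  pc <- eigen(cv$cov, symmetric = TRUE)$vectors[, 1:2]      # first two robust PCs
  sc <- scale(X, center = cv$center, scale = FALSE) %*% pc  # scores on the PCs
  rng <- apply(abs(sc), 2, max) * (1 + fraction)   # extended range of the scores
  d <- seq(0, 1, length.out = ceiling(npoints / 2))# even spacing from the center
  pts <- rbind(outer(d * rng[1], pc[, 1]), outer(d * rng[2], pc[, 2]))
  unique(sweep(pts, 2, cv$center, "+"))    # translate back; the center appears once
}
@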
The principal components were calculated robustly (using \T{covMcd} of the \T{robustbase} package) and the range was extended by a fraction of $0.5$. An example is shown in Figure~\ref{fig:design-predict}. \subsection{Results} The results are given here as plots (Fig.~\ref{fig:meanscale-1} to Fig.~\ref{fig:cpr}). For a complete discussion of the results, we refer to \citet{KS2011}. The different $\psi$-functions are each plotted in a different facet, except for Fig.~\ref{fig:qscale-all}, Fig.~\ref{fig:Mscale-all} and Fig.~\ref{fig:lqq-level}, where the facets show the results for various error distributions. The plots are augmented with auxiliary lines to ease the comparison of the methods. The lines connect the median values over the values of $n$ for each simulated ratio $p/n$. In many plots the y-axis has been truncated. Points in the grey shaded area represent truncated values using a different scale. \begin{figure} \begin{center} <>= ## ## exp(mean(log(sigma))): this looks almost identical to mean(sigma) print(ggplot(test.3, aes(p/n, exp(meanlogsigma.1), color = Est.Scale)) + stat_summary(aes(x=ratio), # <- "rounded p/n": --> median over "neighborhood" fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Psi) + ylab(quote('geometric ' ~ mean(hat(sigma)))) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(test.3$Est.Scale))) @ \end{center} \caption{Mean of scale estimates for normal errors. The mean is calculated with $\Sexpr{trim*100}\%$ trimming. The lines connect the median values for each simulated ratio $p/n$. Results for random designs only. } \label{fig:meanscale-1} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(test.3, aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + ylab(quote(sd(log(hat(sigma)))*sqrt(n))) + facet_wrap(~ Psi) + geom_point (data=test.lm.2, alpha=alpha.n, aes(color = Est.Scale)) + stat_summary(data=test.lm.2, aes(x=ratio, color = Est.Scale), fun.y=median, geom='line') + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels= lab(test.3 $Est.Scale, test.lm.2$Est.Scale))) @ \end{center} \caption{Variability of the scale estimates for normal errors. The standard deviation is calculated with $\Sexpr{trim*100}\%$ trimming. } \label{fig:sdscale-1} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(test.4, aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) + ylim(with(test.4, range(sdlogsigma.1*sqrt(n)))) + ylab(quote(sd(log(hat(sigma)))*sqrt(n))) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = Error), alpha = alpha.error) + facet_wrap(~ Psi) + ## "FIXME" (?): the next 'test.lm' one give warnings geom_point (data=test.lm, aes(color = Est.Scale), alpha=alpha.n) + ##-> Warning: Removed 108 rows containing missing values (geom_point). stat_summary(data=test.lm, aes(x = ratio, color = Est.Scale), fun.y=median, geom='line') + ##-> Warning: Removed 108 rows containing non-finite values (stat_summary). 
g.scale_shape(labels=lab(test.4$Error)) + scale_colour_discrete("Scale Est.", labels=lab(test.4 $Est.Scale, test.lm$Est.Scale))) @ \end{center} \caption{Variability of the scale estimates for all simulated error distributions.} \label{fig:sdscale-all} \end{figure} \begin{figure} \begin{center} <>= t3est2 <- droplevels(subset(test.3, Estimator %in% c("SMD", "MMqE"))) print(ggplot(t3est2, aes(p/n, q, color = Est.Scale)) + ylab(quote(q)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Psi) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale))) @ \end{center} \caption{$q$ statistic for normal errors. $q$ is defined in \eqref{eq:def.q.and.M}.} \label{fig:qscale-1} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t3est2, aes(p/n, M/q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + g.scale_y_log10_0.05() + facet_wrap(~ Psi) + ylab(quote(M/q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale))) @ \end{center} \caption{$M/q$ statistic for normal errors. $M$ and $q$ are defined in \eqref{eq:def.q.and.M}.} \label{fig:Mscale-1} \end{figure} \begin{figure} \begin{center} <>= t1.bi <- droplevels(subset(test.1, Estimator %in% c("SMD", "MMqE") & Psi == 'bisquare')) print(ggplot(t1.bi, aes(p/n, q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Error) + ## labeller missing! ylab(quote(q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(tmp$Est.Scale)), legend.mod = legend.mod) @ \end{center} \caption{$q$ statistic for \emph{bisquare} $\psi$. } \label{fig:qscale-all} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t1.bi, aes(p/n, M/q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + g.scale_y_log10_0.05() + facet_wrap(~ Error) + ylab(quote(M/q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(tmp$Est.Scale)), legend.mod = legend.mod) @ \end{center} \caption{$M/q$ statistic for \emph{bisquare} $\psi$. } \label{fig:Mscale-all} \end{figure} \clearpage% not nice, but needed against LaTeX Error: Too many unprocessed floats. \begin{figure} \begin{center} <>= print(ggplot(test.2, aes(p/n, efficiency.1, color = Estimator)) + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 0.95) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + facet_wrap(~ Psi) + ylab(quote('efficiency of' ~~ hat(beta))) + g.scale_shape(quote(n)) + scale_colour_discrete(name = "Estimator", labels = lab(test.2$Estimator))) @ \end{center} \caption{Efficiency for normal errors. The efficiency is calculated by comparing to an OLS estimate and averaging with $\Sexpr{trim*100}\%$ trimming. 
} \label{fig:efficiency} \end{figure} \begin{figure} \begin{center} <>= t.1xt1 <- droplevels(subset(test.1, Error != 't1')) print(ggplot(t.1xt1, aes(p/n, efficiency.1, color = Estimator)) + ylab(quote('efficiency of '~hat(beta))) + geom_point(aes(shape = Error), alpha = alpha.error) + geom_hline(yintercept = 0.95) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + g.scale_shape(values=c(16,17,15,3,7,8,9,1,2,4)[-4], labels=lab(t.1xt1$Error)) + facet_wrap(~ Psi) + scale_colour_discrete(name = "Estimator", labels = lab(t.1xt1$Estimator))) @ \end{center} \caption{Efficiency for all simulated error distributions except $t_1$. } \label{fig:efficiency-all} \end{figure} \begin{figure} \begin{center} <>= t.2o. <- droplevels(subset(test.2, !is.na(AdB2t.1))) print(ggplot(t.2o., aes(p/n, AdB2.1/(1-p/n), color = Estimator)) + geom_point(aes(shape=factor(n)), alpha = alpha.n) + geom_point(aes(y=K2AdB2.1/(1-p/n)), alpha = alpha.n) + geom_point(aes(y=AdB2t.1), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + stat_summary(aes(x=ratio, y=K2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype=2) + stat_summary(aes(x=ratio, y=AdB2t.1), fun.y=median, geom='line', linetype=3) + geom_hline(yintercept = 1/0.95) + g.scale_y_log10_1() + scale_shape_discrete(quote(n)) + scale_colour_discrete(name = "Estimator", labels = lab(t.2o.$Estimator)) + ylab(quote(mean(hat(gamma)))) + facet_wrap(~ Psi)) @ \end{center} \caption{Comparing the estimates of $\gamma$. The solid line connects the uncorrected estimate, dotted the $\tau$ corrected estimate and dashed Huber's small sample correction. } \label{fig:AdB2-1} \end{figure} \begin{figure} \begin{center} <>= t.2ok <- droplevels(subset(test.2, !is.na(sdAdB2t.1))) print(ggplot(t.2ok, aes(p/n, sdAdB2.1/(1-p/n), color = Estimator)) + geom_point(aes(shape=factor(n)), alpha = alpha.n) + geom_point(aes(y=sdK2AdB2.1/(1-p/n)), alpha = alpha.n) + geom_point(aes(y=sdAdB2t.1), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + stat_summary(aes(x=ratio, y=sdK2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype= 2) + stat_summary(aes(x=ratio, y=sdAdB2t.1), fun.y=median, geom='line', linetype= 3) + g.scale_y_log10_0.05() + scale_shape_discrete(quote(n)) + scale_colour_discrete(name = "Estimator", labels=lab(t.2ok$Estimator)) + ylab(quote(sd(hat(gamma)))) + facet_wrap(~ Psi)) @ \end{center} \caption{Comparing the estimates of $\gamma$. The solid line connects the uncorrected estimate, dotted the $\tau$ corrected estimate and dashed Huber's small sample correction. } \label{fig:sdAdB2-1} \end{figure} \begin{figure} \begin{center} <>= t.2en0 <- droplevels(subset(test.2, emplev_1 != 0)) print(ggplot(t.2en0, aes(p/n, f.truncate(emplev_1), color = method.cov)) + g.truncate.lines + g.truncate.areas + geom_point(aes(shape = factor(n)), alpha = alpha.n) + scale_shape_discrete(quote(n)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + scale_colour_discrete(name = "Estimator", labels=lab(t.2en0$method.cov)) + ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) + facet_wrap(~ Psi)) @ \end{center} \caption{Empirical levels of test $H_0: \beta_1 = 0$ for normal errors. The y-values are truncated at $\Sexpr{trunc[1]}$ and $\Sexpr{trunc[2]}$. 
} \label{fig:emp-level} \end{figure} \begin{figure} \begin{center} <>= tmp <- droplevels(subset(test.1, Psi == 'lqq' & emplev_1 != 0)) print(ggplot(tmp, aes(p/n, f.truncate(emplev_1), color = method.cov)) + ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) + g.truncate.line + g.truncate.area + geom_point(aes(shape = factor(n)), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + g.scale_shape(quote(n)) + scale_colour_discrete(name = "Estimator", labels=lab(tmp$method.cov)) + facet_wrap(~ Error) , legend.mod = legend.mod ) @ \end{center} \caption{Empirical levels of test $H_0: \beta_1 = 0$ for \emph{lqq} $\psi$-function and different error distributions. } \label{fig:lqq-level} \end{figure} \begin{figure} \begin{center} <>= t2.25 <- droplevels(subset(test.2, n == 25))# <-- fixed n ==> no need for 'ratio' tL2.25 <- droplevels(subset(test.lm.2, n == 25)) scale_col_D2.25 <- scale_colour_discrete(name = "Estimator (Cov. Est.)", labels=lab(t2.25 $method.cov, tL2.25$method.cov)) print(ggplot(t2.25, aes(p/n, power_1_0.2, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.2) )) + geom_point(# aes(shape = Error), alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.2$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-0_2} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_0.4, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.4) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.4$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-0_4} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_0.6, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.6) )) + geom_point(# aes(shape = Error), alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.6$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-0_6} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_0.8, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.8) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.8$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. 
} \label{fig:power-1-0_8} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_1, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 1) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 1$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-1} \end{figure} %\clearpage \begin{figure} \begin{center} %% now (2016-11 GGally) works --- but fails with new 2018-05 ggplot2: <>= pp <- f.prediction.points(dd)[1:7,] ## Worked in older ggplot2 -- now plotmatrix() is gone, to be replaced by GGally::ggpairs): ## tmp <- plotmatrix(pp)$data ## tmp$label <- as.character(1:7) ## print(plotmatrix(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5)) if(FALSE) { tmp <- ggpairs(pp)$data tmp$label <- as.character(1:7) # and now? } ## ggpairs() + geom_text() does *NOT* work {ggpairs has own class} ## print(ggpairs(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5)) try( ## fails with old GGally and new packageVersion("ggplot2") >= "2.2.1.9000" print( ggpairs(dd) )## now (2016-11) fine ) @ \end{center} \caption{Prediction points for fixed design. The black points are the points of the original design. The red digits indicate the numbers and locations of the points where predictions are taken.} \label{fig:design-predict} \end{figure} \begin{figure} \begin{center} <>= n.cprs <- names(test.fixed)[grep('cpr', names(test.fixed))] # test.fixed: n=20 => no 'x=ratio' test.5 <- melt(test.fixed[,c('method.cov', 'Error', 'Psi', n.cprs)]) test.5 <- within(test.5, { Point <- as.numeric(do.call('rbind', strsplit(levels(variable), '_'))[,2])[variable] }) print(ggplot(test.5, aes(Point, f.truncate(value), color = method.cov)) + geom_point(aes(shape = Error), alpha = alpha.error) + g.truncate.line + g.truncate.area + stat_summary(fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + g.scale_shape(labels=lab(test.5$Error)) + scale_colour_discrete(name = "Estimator (Cov. Est.)", labels=lab(test.5$method.cov)) + ylab("empirical level of confidence intervals") + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical coverage probabilities. Results for fixed design. The y-values are truncated at $\Sexpr{trunc[2]}$. } \label{fig:cpr} \end{figure} \clearpage \section{Maximum Asymptotic Bias} \label{sec:maximum-asymptotic-bias} The slower redescending $\psi$-functions come with higher asymptotic bias as illustrated in Fig.~\ref{fig:max-asymptotic-bias}. We calculate the asymptotic bias as in \citet{berrendero2007maximum}. <>= ## Henning (1994) eq 33: g <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) rho <- function(x) Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0) integrate(function(x) rho(((1 + theta^2)/s^2*x)^2)*dchisq(x, 1, mu^2/(1 + theta^2)), -Inf, Inf)$value }) ## Martin et al 1989 Section 3.2: for mu = 0 g.2 <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) lctrl$tuning.psi <- lctrl$tuning.chi robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)}) g.2.MM <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) 
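## MM variant of g.2() above: identical except that tuning.psi is left at its
## efficiency-tuned default instead of being overwritten by tuning.chi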
robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)}) ## Henning (1994) eq 30, one parameter case g.3 <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) rho <- function(x) Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0) int.x <- Vectorize(function(y) { integrate(function(x) rho((y - x*theta - mu)/s)*dnorm(x)*dnorm(y),-Inf, Inf)$value }) integrate(int.x,-Inf, Inf)$value }) inv.g1 <- function(value, theta, mu, ...) { g <- if (mu == 0) g.2 else g.3 uniroot(function(s) g(s, theta, mu, ...) - value, c(0.1, 100))$root } inv.g1.MM <- function(value, theta, mu, ...) { g <- if (mu == 0) g.2.MM else g.3.MM ret <- tryCatch(uniroot(function(s) g(s, theta, mu, ...) - value, c(0.01, 100)), error = function(e)e) if (inherits(ret, 'error')) { warning('inv.g1.MM: ', value, ' ', theta, ' ', mu,' -> Error: ', ret$message) NA } else { ret$root } } s.min <- function(epsilon, ...) inv.g1(0.5/(1 - epsilon), 0, 0, ...) s.max <- function(epsilon, ...) inv.g1((0.5-epsilon)/(1-epsilon), 0, 0, ...) BS <- Vectorize(function(epsilon, ...) { sqrt(s.max(epsilon, ...)/s.min(epsilon, ...)^2 - 1) }) l <- Vectorize(function(epsilon, ...) { sigma_be <- s.max(epsilon, ...) sqrt((sigma_be/inv.g1.MM(g.2.MM(sigma_be,0,0,...) + epsilon/(1-epsilon),0,0,...))^2 - 1) }) u <- Vectorize(function(epsilon, ...) { gamma_be <- s.min(epsilon, ...) max(l(epsilon, ...), sqrt((gamma_be/inv.g1.MM(g.2.MM(gamma_be,0,0,...) + epsilon/(1-epsilon),0,0,...))^2 - 1)) }) @ \begin{figure}[h!] \begin{center} <>= asymptMBFile <- file.path(robustDta, 'asymptotic.max.bias.Rdata') if (!file.exists(asymptMBFile)) { x <- seq(0, 0.35, length.out = 100) rmb <- rbind(data.frame(l=l(x, psi = 'hampel'), u=u(x, psi = 'hampel'), psi = 'Hampel'), data.frame(l=l(x, psi = 'lqq'), u=u(x, psi = 'lqq'), psi = 'lqq'), data.frame(l=l(x, psi = 'bisquare'), u=u(x, psi = 'bisquare'), psi = 'bisquare'), data.frame(l=l(x, psi = 'optimal'), u=u(x, psi = 'optimal'), psi = 'optimal')) rmb$x <- x save(rmb, file=asymptMBFile) } else load(asymptMBFile) <>= print(ggplot(rmb, aes(x, l, color=psi)) + geom_line() + geom_line(aes(x, u, color=psi), linetype = 2) + xlab(quote("amount of contamination" ~~ epsilon)) + ylab("maximum asymptotic bias bounds") + coord_cartesian(ylim = c(0,10)) + scale_y_continuous(breaks = 1:10) + scale_colour_hue(quote(psi ~ '-function'))) @ \end{center} \caption{Maximum asymptotic bias bound for the $\psi$-functions used in the simulation. Solid line: lower bound. Dashed line: upper bound.} \label{fig:max-asymptotic-bias} \end{figure} \bibliographystyle{chicago} \bibliography{robustbase} \end{document} robustbase/inst/doc/psi_functions.R0000644000176200001440000001026714555212625017170 0ustar liggesusers### R code from vignette source 'psi_functions.Rnw' ################################################### ### code chunk number 1: init ################################################### # set margins for plots options(SweaveHooks=list(fig=function() par(mar=c(3,3,1.4,0.7), mgp=c(1.5, 0.5, 0)))) ## x axis for plots: x. 
<- seq(-5, 10, length=1501) require(robustbase) ################################################### ### code chunk number 2: source-p-psiFun ################################################### source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) ################################################### ### code chunk number 3: Huber ################################################### getOption("SweaveHooks")[["fig"]]() plot(huberPsi, x., ylim=c(-1.4, 5), leg.loc="topright", main=FALSE) ################################################### ### code chunk number 4: lmrob-psi ################################################### names(.Mpsi.tuning.defaults) ################################################### ### code chunk number 5: tuning-defaults ################################################### print(c(k.M = .Mpsi.tuning.default("bisquare"), k.S = .Mchi.tuning.default("bisquare")), digits = 10) ################################################### ### code chunk number 6: note-p-psiFun (eval = FALSE) ################################################### getOption("SweaveHooks")[["fig"]]() ## source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) ################################################### ### code chunk number 7: bisquare ################################################### getOption("SweaveHooks")[["fig"]]() p.psiFun(x., "biweight", par = 4.685) ################################################### ### code chunk number 8: Hampel ################################################### getOption("SweaveHooks")[["fig"]]() ## see also hampelPsi p.psiFun(x., "Hampel", par = ## Default, but rounded: round(c(1.5, 3.5, 8) * 0.9016085, 1)) ################################################### ### code chunk number 9: GGW-const ################################################### cT <- rbind(cc1 = .psi.ggw.findc(ms = -0.5, b = 1.5, eff = 0.95 ), cc2 = .psi.ggw.findc(ms = -0.5, b = 1.5, bp = 0.50)); cT ################################################### ### code chunk number 10: rhoInf-ggw ################################################### ipsi.ggw <- .psi2ipsi("GGW") # = 5 ccc <- c(0, cT[1, 2:4], 1) integrate(.Mpsi, 0, Inf, ccc=ccc, ipsi=ipsi.ggw)$value # = rho(Inf) ################################################### ### code chunk number 11: GGW ################################################### getOption("SweaveHooks")[["fig"]]() p.psiFun(x., "GGW", par = c(-.5, 1, .95, NA)) ################################################### ### code chunk number 12: lqq-const ################################################### cT <- rbind(cc1 = .psi.lqq.findc(ms= -0.5, b.c = 1.5, eff=0.95, bp=NA ), cc2 = .psi.lqq.findc(ms= -0.5, b.c = 1.5, eff=NA , bp=0.50)) colnames(cT) <- c("b", "c", "s"); cT ################################################### ### code chunk number 13: LQQ ################################################### getOption("SweaveHooks")[["fig"]]() p.psiFun(x., "LQQ", par = c(-.5,1.5,.95,NA)) ################################################### ### code chunk number 14: optimal ################################################### getOption("SweaveHooks")[["fig"]]() p.psiFun(x., "optimal", par = 1.06, leg.loc="bottomright") ################################################### ### code chunk number 15: Welsh-GGW ################################################### ccc <- c(0, a = 2.11^2, b = 2, c = 0, 1) (ccc[5] <- integrate(.Mpsi, 0, Inf, ccc=ccc, ipsi = 5)$value) # = rho(Inf) stopifnot(all.equal(Mpsi(x., ccc, "GGW"), ## psi[ GGW ](x; a=k^2, b=2, c=0) == 
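## (check: the GGW psi with constants (a, b, c) = (k^2, 2, 0) coincides with
##  Welsh's psi for k = 2.11, evaluated on the whole grid x. defined above)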
Mpsi(x., 2.11, "Welsh")))## psi[Welsh](x; k) ################################################### ### code chunk number 16: Welsh ################################################### getOption("SweaveHooks")[["fig"]]() p.psiFun(x., "Welsh", par = 2.11) robustbase/inst/doc/simulation.init.R0000644000176200001440000000530212321016045017410 0ustar liggesusers## initialize R simulations (also parallel workers) ## need to export the variables N, robustDoc and slave if (!exists("N")) N <- 1000 if (!exists("robustDoc")) robustDoc <- system.file('doc', package='robustbase') ## load required packages stopifnot(require(xtable), require(robustbase)) ## load more packages if this is a worker if (exists("slave")) stopifnot(require(robust), require(skewt), require(foreach)) ## default data set dd <- data.frame(X1 = c(0.0707996949791054, 0.0347546309449992, 1.30548268152542, 0.866041511462982, 0.275764343116733, 0.670798705161399, -0.549345193993536, -1.00640134962924, -1.22061169833477, -0.905619374719898, -0.678473241822565, 0.607011706444643, 0.304237114526011, -2.14562816298790, 2.34057395639167, 0.310752185537814, -0.972658170945796, 0.362012836241727, 0.925888071796771, -0.595380245695561), X2 = c(0.119970864158429, -0.738808741221796, 5.49659158913364, 3.52149647048925, 2.02079730735676, 3.82735326206246, -1.24025420267206, -4.37015614526438, -5.00575484838141, -3.56682651298729, -2.82581432351811, 0.0456819251791285, -0.93949674689997, -8.08282316242221, 9.76283850058346, 0.866426786132133, -2.90670860898916, 2.95555226542630, 4.50904028657548, -3.44910596474065), X3 = c(1.11332914932289, 3.55583356836222, 10.4937363250789, 0.548517298224424, 1.67062103214174, 0.124224367717813, 6.86425894634543, 1.14254475111985, 0.612987848127285, 0.85062803777296, 0.881141283379239, 0.650457856125926, 0.641015255931405, 1.51667982973630, 0.764725309853834, 1.61169179152476, 0.596312457754167, 0.262270854360470, 1.24686336241, 0.386112727548389)) ## load functions source(file.path(robustDoc, 'simulation.functions.R')) source(file.path(robustDoc, 'estimating.functions.R')) source(file.path(robustDoc, 'error.distributions.R')) ## set estlist and parameters estlist <- .estlist.confint ## nr. of repetitions estlist$nrep <- N estlist$seed <- 13082010 ## set errors estlist$errs <- c(estlist$errs, list(.errs.skt.Inf.2, .errs.skt.5.2, .errs.cnorm..1.0.10, .errs.cnorm..1.4.1)) robustbase/inst/doc/lmrob_simulation.pdf0000644000176200001440000217514714555212627020251 0ustar liggesusers%PDF-1.5 % 1 0 obj << /Type /ObjStm /Length 3972 /Filter /FlateDecode /N 60 /First 489 >> stream x[kSܶ~~L#˺> !@ȒB;`v t٥ݔן|.1zf1ndyC0ɴ`8dI#a*XbeX̄ KPu&0D EDB.Yk+&E1AH4"LH#:0h0FĘQBLhbgF(IXhX$$:d):%ƅ*!_ J$1%&@fIPC  \bLHP KW(a&Rx & Rg/ i xIPHeb`4(1BSFXPVhF4A21& UӠbՠ̭e-L(k&XMROe:I)>b, ,M|z1 {6ye>ˌ0U AOe1|gd51{s;/x/YRzlII~Hiݰ([2#W%r-q Ӎ@ w],^͛zȳe6[VNI6ODvc%^ 9KhP-(+g%[tz U{ 8v"OF>?'|3+& _׷邠lܺ`uݜ㴬BWWK3/an/L¾2<TD5(hu*?q8N4m+hJݛmvcZ\s7Kf 8M-ҏ-s~S~|3~o-zxΧ/W099$ Yfgu-LL4]A>H(9ڬ.-<n*(v s>Pjg s CCY8m#m6%[? '>JN&Q֑qُUPa}|Xve5;~tww) r"DO`lלP'A:u׃CCZ>ñK2!e"Ҫ,4iZ dC1j|"'(g1  nAj$Dt5H'0pld BcU ?] 
)™ G$5Y&Gors0DHN Ē$n?3dNL'苲Ok1-9% B$TЩÕA1SAe]@0y2\$`B4Ãbr PB_׷$ !ۖ|N7(irZ`Â;x.uPSDXu "7Vp Tҧ4-K$"!Sgɠ53ؒ6B7㩤oAᘁ#nDoNM "";?ea J u}Ez59`zׁG"oVO?C:a??%=6BӴ5xu/:C ް}s)Q4hM>1F`OIė ͡v Ol^lWĽg8]ȕZ]m54Ψ46O`btE*Zb dS!2[Pxv2Ҵj6(ɦJ^%a.uͯ[r3>aZ;,TmaܨwN}߼^76$;g{~_?uO6} ;OU?qO7= F"w0/O<O7} cq"pLt86mIJ$Sw0 EgQRЩq~+TfγRRnsB@"$ˍV'˃H60x Ƥ[wo|6įdl1 {[!J6 xkt&WY-x)1+nl.oŀK),RJ9J:^J)G).{)|CǓ#d2қpc=uL G.R k}TEy4Q WJK)(՘"t+O!jcy]sLVΑw{rKN9RQ*o+o+%A m/#8"|N4ܛ"OYeʙ5Ӻ*$׾rSkJ u;p,v-\oPKvlYv- s`9KMYj뜥&X,uO?EBjw_O!Sɫ_y]/89D?7S[|eLٹ._~2{?yp]x(1Uke5+=$!?_7}ۚ~V+-A#@Oo嫽\!mMdL/msy8bx:rc|流! .y\ʗQ_=~kM|y+;ղ2}0;R/?x7h|46SZZ!YϜBhl '1ԫ`6Ƕ X<;lDT5g;NpW<| O٨fwqG5\68Ļ׈w9%!n1 k5 l _ 2'q*x@1npϮ;ӽ),ADF"Cq+sw,!9Yo":o1Z8amגI֋lqqco;/t}߆ǮnR;Vܞsbz5O~i&sm*TߎٯN`Idևz?:љm>{!mlok=/tt_|x/uM8n.LNtin0xǯu]/waܪG7I[m\5ϗ}߈%\]@q#->x5Oa3 q:t;nngAZ߯q)}6>vMy|nc8{Կ֘oe{}߽{-X;ެ0=]5/8i×Ԙuiؙ߷% h/Ŷuj}~L3 nla|ԅs z9p5BDov&zFS:"(|ltv-#`glcE=/ϵcNX8)Cؖ9}`qU$烄)ulxyvs<-ލid'dʿ{ʔ?FlE&sgίj^k9{N>* HFQҽ7ŔȾgK߭?$__ɲEHd!r-'S/Z|32otؓ ~/;oڸާoX3Ɖ7; W/u{9^Y<{W~*( O_ҹ1XwxUTfp=?CS@|23pHnQݹxzǧpy1B-w\E7Rμr/҃r+:LcYDi<덍4`Wh_Czϛ!6lEypq+!@ߗ[Z[߬pGQģc;4~S#{G_mW+z&d>ĺI%:bQpO!i(U~Uo@N*B+DVXV 10>an ի=ҽp;"ɪ9N.wEk;ѮJUU|7q9 au]72Q='A5q.0=N=F7JD}?nߢ!;vѷoSJpA,LT^+% '+!ݱᆝݓFf0 Wع/KP4`{N6{<&% ˧Z,5֥Uˣf/\ u仫?G۷=ː LǦ< 2~ut!ZC/W=LRql^H{֩JNgՄlo%-y1# Ú r?ܿ7-ݰ'U1w{ #XGS2z>7駨w U#@PJWZݍQ}oTlgfjZB>.L]>}'+}yۗNܗΟ=7+;vWtt}!yI!)ėQƾs{>OE+ P껖}W)7j$/߁ PA-DzKŽmgԃ`ֹp+xmY umG Is.qK|sC]sw,F> ^åv&]?on4٨עrĞb/MvTuO~fXmE7# 乜m$_mC?ʧwWv`P^n_o߲nC8O6|!sw1{:v6޼{߼}7 J|_~7w U|?x\>E={D˚ ֮v8o.:\qgO^RR d`"WpU!=Hd@Fi@DG$AM)o1Q?XhUC5{9X߄0Q]Nx+ֻ۞{H@bmݳz|Uύ=~=8Ob\zcKɘc4\g/(}wƎq[b ήw߾[ddW%;x5gX<ܷ25cPjV6@d;!H$22hAyZq&13)W y5vg[;7f6}8Sj-v=Q +*۫/o.p.E.(li:pP;ɐ9."Ӄ] bR?z~4x(f4WK 4f SfFQ]$,.][ 'wy (VHXˋqm!O݈}>?|O_hKsvG/%Gqlrј+,Ϗ~vӤS^wҝooIE~oVqqN^kfl.endstream endobj 147 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1097 >> stream xEmlSU +B ՠb4#81Falvݺ7ֵ>nmחu+ne"| EqfbON?ϓs"yi˖ͯy fY;;U>G[Y}1r]b/o/WsvTZ**9am+q+`97ۊV<[ y&i)}cdJp&G+6V\ү4ޕ#IqմhT0 S4Pu>pH=nA}K~I f6QʂLr:/&W+PWr9|=Z#r,9KDa;LX}R{faaba`pCjvɓyjHcFƛů4h̖l5wmZ:tK4_JTa5WolqJ:Qt\䍩Ǔ@ma90U֚Zto@MޒB.|D8諃{3=Rrpem( dr b![66юپ &8Ιʭ21QQQV:wP}i@7&zx[èY[O-4WjTBop ]CV`,IH:k6P#8$9 xg$E1&VكP( $!PYd5: xf#%gA?la7"jNhKMp9Eaj%ۚkN>> stream x/aWN&mLb`  uTq~iXJc3`p3IJMR2OhľCvޒx~"9BIޟѐPw)7 LΈ3Pf섥Α {]Yҍof(نeu Z^m/-jX|.S!nZA/ yVv/{ݢk+{#( jdXYx}FKP,i1Kʼn:nLֶu%S6q-peSNbv`q! 
~wso>@/|endstream endobj 149 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 425 >> stream xcd`ab`ddds4Fa2Q{@)/cďSW_cM;m:/;2(>aJ=fO{뻓[[ J[Z [Z#[9J4̝;gr0SIKN\4e6̟mn qU-U;}]baoڮV갤nY O3eM;v>һ[G E]Ʋ?d]\9 0hnKV.QSOq8rUjoҶ3&N['Օtȇ˔mʈ$++/?q֌U|r\,y8yyyh鿆endstream endobj 150 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 162 >> stream xcd`ab`dddu 21e2A}@ѐAGߏݛ~ߴ}>/;W[;#}i9guש%}ovjˣ)]{y>9.<<\<> stream xX[۶~_lc(Y!čz}h<4MV6(5E$e{O{I R$7 "lp-WXvIcl$2oa Q 5b}zCrWT1:_"HpAKcKNzgE\2Ңxkw(6Z᠚omj5ymVkNۈ cAZcbt´5V>,a4)[Ѐ9#FV :~(Q4"e%A"Kڇ'6Vo^<؃^7dDCnyI{] ^WaP11oyu3d&]p,dE!f-ƳG}h-E`g9&MlΕ,y~"t89r/yz޴$_/U$Ǥx5ys"<#^"VG\eȃjU7}&UQHKE<f/BUPEc Z@2orVz8,>#t%( 4&Ә 4.\|/&Ur8k}U^qݎ_ 8 @2Xs;N:ޅ/|H<:)o\ φ}CB=6 RHRcXe;p3uxM<ӳy>kM { *(OHc1jpj_gGy0H{~ɿW=v,ԓC!(&f8䴒id=c#tf8b;lՎSlH 8aU קi9;P R{w1+ܸ I_fj3`n!Luz…/N29d9OwE\Uݥ,b ״./:j\؞TfFZ!44 @4& B|3qLM 9ORQsB!q at8j> OTgf^#G hTx7Sn'#< O=,Ȧ8oEb)؈pB2ns+u-fkyvmtͯyj & |݈Ma7ns1+ +?Ãq].B\-nN= n e"K~aRf^$ e7Hr.D/rPFB)$ RT#x sX~<'('3jk ZTNC6cjh#VhەA +_c1endstream endobj 152 0 obj << /Filter /FlateDecode /Length 2739 >> stream xZoGAJߋ-p/ž>ŋBob;FK@0 93ߌ`9_07+]^qtm]I㋻ǫp/ ˭ cLnu\BYsY՜m|L绿b@姫Op 9ڊ*7vj"W-oiwXd<>FFPBX^|JFka8my*ǺW˻_G #e \ w$w./ώMjByMp&aD{[}-j5W\+dcH=zE8[61Or!TuCvSf< MUm ``ȃ{j{+Tq>mNWn6,O&W=˦^/cpO@#r1|";m5}їMT/^FTS_s spbH8\UaZ=gz:+qi@f=^ qnfdLA;l=7?, ܁0c6ayrΘ"y999I1Blo T.M~iN$dT IKYuר-F1W$kn} 3HI!'PkpLk|GEn( c/&T"7$Ύ1Ai`(\AڛKI9RҤ[9nlG9 /5UQҠB, \W2πN!]*8lU\mQ֭aI!%MA B_"mH$,*ȉ;mO(F!( ([kOXTq4p/ !J>$L&*@D[Yc@f7M.1Mf;P܄&Ot4}" i7ZU@"n/ixκaFfV\Ow0sg$Sy炊2sr8$7,3ZGX%IԻd&͛r0NgC?cNǛ3؜ 91܎;)A:]DiTB0y(ݮ#EZŒ{9=y4*΍i-h9Ӭ9/b uC<>DV$}!OZbt<:fZVpn[uqҍn_˩ O Nh:kʗĉm|؜u 8c)n꧑ҡG?1'$)$f4d!~V W{q&%WΌ1DCA{6=`09eK M &Un@Me"L˭LΔnUvLEzIN Fֿ'/>'WѸrB8ߘ~C܁;eN T=+uu)d@Ĩ1eml*[i@ IÅXf~mEŹ`h!`/$IɜD͡ Y/]Q[[;7b$2Q$터CF:ѷ9PJ 讒,/\J|vzvwɎE1m~@ 2m祿w0)ҁGKEut_+D 1_7ƨ@fREYK)UZ*Z]FDeJn/z7XB5":&5BhU"t}cć:1"$ǣXij0;)0><}x=Ox2N? P4v`PȜtwԓp=|ey8ٴV&!0 Wp0:wRHL|)4) }Q2EGfBcC/)Ǘl^y<O5g aw.wU/'Ξ:*?xNe,&p۱/es5r&&-h8txwis/hi!t*#=7m>1&ζOn _6U/Oӷn ^ Ɣ_moendstream endobj 153 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 404 >> stream xcd`ab`dd v 144q$-faaCP= ʐ ?~>0Wsn9Sg8Qv#nnwk&wNnnj?9u~>'uXXPdȣrk9Kϛ3c|97 dfMgq|OeԾ|FrOgw66u7p-_,˶gւ%_ZNžvjuU]]?En?1;($.G)+tؼn)S{q@|cB߳;~'hyGmEw9^ ^^emwn]!`z&vOtsYe '<͞GZendstream endobj 154 0 obj << /Filter /FlateDecode /Length 7687 >> stream x]Yu~~Hhڵ/6;rHq,209HiLKi(S9Om=fuu-g2"/+qJC'w_=g“%(JȃKp_9AYqFxx)&zU'!p<ϰ,GsC1/}+e;6&(hpu<_$R<%ik\qAZҭVz 岂j/k}\m?+m:{ ^]qFj]T|N%ibEzw_?I9(l;=؎a˧ޔHX6|X]㟯Y %WbZ!ϴ zw|O,_Q:Wp1 #W҆avDD@>Zxynf2g)Y|"op.DU}UZe*񩄳PpYpĀI3'F<==Mq@_s.UY`C^z,/2\>*g4LuG:nk܄+CӠD6jV upA:=Mc\ϡ=G) D>]P=E&*2)倥oFA .;La* nώ7! V7Wy#$ωi߉FK r<_-F.)(o*¡Tb;t,IjvQ] u#+amnc,P?zO/cKfV27A@Q9j0NTՎen* i~vuKU50 6 sp-A',4#gE@"pks۵ d'lnN gn,34?C&Av܂KDmO ey)l/ orkobQE,䏣Eu6u8uؐ1ĮJVxn]!6'WIw߹82BE75A0ۆg}f׀m"`d\O$v* hA م6\ܗ/%{ш >~f"ESIc޵D&۳ǻz~vwzl &FovٝgVKgnoiM(cH.Q<]g`s Uՠ}OV9=⪹*11ugj͓jXp2DrwB_ZhyܼO⺍݌͸!+ l]lj?Ϸ& qтhJ` E;S(.7.q\H8G=MHnĒ5]&j Fqe[nV7V#?[oQQI"AxB}xv0P :M`&\N<4 17K B FR~X I9UpDB.F,!UB:49ǾIбvA }MIZrK# VS ,]B< Yɝ WRƀ 9IBf0J#04}8xh }pd 8Ga#5N -=8NRoN~D%YHM8!O %H(o_Ц92Z[0NT]h+8WF`M\h@D㵝Y$W"cF ̓PG+88R J}VIsqctЀӣ"-8ebu0ƅAZUrTs$3{S ]&F)'.բ*[$͙Z?1SL-$&+3} ﺲe& Ak{*0<4sƊp^kv=J5!g:I!GԊ4_QY 7Zo!ǧgUzf)K?G2YkLwĵ[OmG,;lGpr0Z棠YQڞ~~?c_Y,Ns>qd&($ DLA]0ãWijrqYH߳no"W˄>٢QX{ń,KRL!{F "1Vg[+袼;u$&У°ҧIcS,-]%Y"ȩpBo>_A^1Ml1J E^P9Ag#6+jhSx3"yi5;m{>` "a=X0tmX,5g/$9s,)q)\.e-H *c 5.44s' 3ŗO1K)#4kEVcvV.}q"IES,K}ϛЇ]Z/U5Ic6(K6rM!4̯,+h4}:a5.DhEX4 E*!XEL~j=~(B[\N?J@A Ǿ%!y7ViK 1#  ijK6cH, j9]j@r" ;L' ۧ·N k: Dh$#ĩC M{i! 
zƄ3z?^ ASџ uMz|HLyyQkh6Tt;.u11⽇L r#0yQ@+A  rQ#PŴ%Ml&ҊxoFf8>J_(S)mbY7|8Gɋ#'ur<,yΆ׋0s]g,+'n-Ŷ Z3够.ɔ[ b]Lqr}TL*%lXض_AV "U]ewiw_m1rr")3@jVh(!fe@\m J0Np&xK`ekƃΌM 0MrF80%AyuŁԍ.TaU()lF-(YiqNQNv`cD2US Dd2i^E>0aPjZN.+;OWDp/}NeYKM-e^2iŮ0qn;zƪI.[?EHMw\WSЋd8mPXQW gN^3eM $It ezfzbtYDJ 4KDRi9+޼#UVܵ~ &8^5~{8v*gy4(Yg' 8a1mӔӦئ'>;q N1/QTMIϼyGIVYfH%z6fZwd33Cǽq'ObR2`e5^ L}B "UQ5^ L,o'gQ%ip5w H&h9T.Ѫs8"xM:MmNn;J*…}r':VY4[AV:(\/~'5'n94Hgm99(*p2Ɛ۱Wdكh ^W#?H*FE,o,mzN ~e ݵ5Lg@ᠬu1?vuwhYs|"}#qT~o`YJl> 83J ȇm(\"d/uVP9iuj,xU 7b?i)@W-a_`GᚃatR-8X1\h vF 0@~ھ]#P!:eXhw- #?ރ_|[Cܧ%Hb.RL s*->^:J0n*hZa׺Ga2]Nފ "n!v :sܧw7J(!G],vSUbo%5NҔՌ zGWEZdW|weAb8V-:TRz5f.4K[VƼsWV~ݽHjy2k1|k_z6/Zu_K@wRyQr RDtdR@ ͙|]}d_OLmbk%C#a F UZԙjyUL)p^_F)|қᄴRj‹ XUS=3d]#hEiRGyp\v^X%X)9T0O^MX8jIH)8E%IMhJݼ1NXdI6ɵb~35"B0Cgu#4X n#AHgR&D6Λgt~*v;AɻAI? |z)*0P %G﵎Fp647ZLĚ`5]2=Abtl$bdTG"Q"5yϚ0S]܍ѪR֠[xI6pFj I ȼE-_|%2YxOE`iy8*{ e ,ȇ.S￴G"Ӗn-aHrCTp* (aת@9grL|GQvzq/gZQQnx4+P2ތZ VcChkFc``΂k$&5v N9J9Ʋ?^0)_= `j`䳐zml"JVj ե"];"1F&c OK~Bd&%1 z2`gB9G!2_BLp䭤Ot:Cbޯ4.;NƖ:, h_tvX,IU-LJͿvx>:6uݴ (]΋HO;h4*<X,vNXo+]ϤUCJvzH`HzQ`|ڢx$#[Nu9􇐫Tr鎠͛t;<C_gC>oϾ= y0DD L̬i)2 &e[I@y8࿤]t8#fA`|W`! )}J?季 vξagg)̤#hzݎM=QjqQD5S%m]@m|2ug%x?|?51768?cL"dmiOx!,Z,~mP9Ho9&ٙ\`/e6r]‚[bRyoОOf"˓.tbc\YkmyeL-z068q옦/M+D*mh f8|V VMu/WS *F1+B\]+>wer06~ZN#o h<` 1,zZûL6YfhtM~x + R'QN-pnbJbfd-uQص-NYDoV̬~J ڄw6ؤc45}Heuٱ^!IGПg`O`_tDq@.p{r&%&'y{@|@7;H,*"  V.Phww(sB%Aa*[NZ)@ݱm2cu)1@dmz̙;gB̮7 t:p%94\ D}YRnC׍kA{l7uj;z^SLjY )BE?߇vJGI:F4q`xǞu،?&JaC8 SA6|Hۑd;V}!{X="Ebu4(4-7-yaEؕ,#%ŒLj\Y Ӵo(2py,O?ouy%QSȌIΕmY[W[M@l+ӆKww`?|; AhT T/e4`fZ*]GF{F8}*n*۞R}UӋZ;OώF;5eyH lTG@I"l̟o/֪mCi:Vm+sZϺtacsǍ>OIr8V9<;Xcy;\g8Uo!bAcZȂL9(yU'[٢#ZXs_ݵEjI@ʖ<%#q;jendstream endobj 155 0 obj << /Filter /FlateDecode /Length 14703 >> stream x}ݓ];ļle2>-jlU*?IhġIlOq9m4ЍF8,pw/{-TMe=7)|nSn޿7o^(TS ߞ(7/^o",[Kj)䛰e !/Z|"ؠҿQ+S[WhIu=`EYVvj/^.R= T5XSIZC+:*rL< 5vJ-mCBFar&jʭHrѶzji'JrQ%d($gDzn2CrX)Мj[)GO%9C@HE}liYp+6`b,mҐAiInU/0jaPUꗱ2XRj\*-AyW`sȧ\ ~p"bm~:l06:VN WÄ|@<1v[-&Ќ nPjB;E ;ƂkiY$\-mR@-`mzI Y]у¯":EKm0O۬й,jQ JiRp|v yh-RcmW#W0kTaJ.hp"\Cs-Rpǹvrk帲nir22xrUރMMөYyF{4W%4g8D1!I o?k(=QD0ZĂ\3oFSV!/ kYb&Ca 8Ĵ(t:F$mC)֡:T~6ɤu(U0th;I0OC%%՟C(|m qg\޹'a%Y F;4-XJB֦dו$W 4JàXci(6P"rYu Ѷxprnh٥.mIw9 `994 m2EMJ+`isߴuQߺHj,$WEm39s^gI'.Mc˧1xA;\(zj6 XVi$3jma/ciu+6M9s|ST"ܺ !ڇK27NJ]ŐҖdN΄N''n)i'Qh(tWYO,ЧLѻ9~:~pƹ~5~ЯBa*F9NRa+Ep48DQ9ȰܖKk1gȰ^p٥E!ɠ$t Lh^[L]Z{C$2"$2F`h2tv >8P֡TSj9m}(?kIiMwׅ X#vM  .5v.i<~be28 [r g`Xj, b5p X@( F0<:7LA@&'t I#$եԟE-lIޅu)b$,Ο]N;f)#C+!oN0a`iwZ089@WIߥJ`@i{0|xqNY;.rhN@iRDtVKGC`/5C9HN[)T\90599 ̷uImIw-oN9Ӯ*(AXP#FrŶ7>0OE1yJTz!ba'Ox5>J7ռ)*ylmkC#@sV2[=$O|4+)U\repuqi>/9ca*nah<. .l_ۊjb/YVc76?~:^Z'XK\\>pWéx眱pHG!7.E TvӲYnK ߩ.X^c׹yݫT\r8)$m9 !|LodsTqAw"F;+lUhձ04"s]4Qga/3)C=/8s6k,gjͳ`|AXqԗS^0A9-v`25фm:˘Y{ {i`KyN3 K?"Nc*s!&`3+,͘I1EwXk̊7F2fC+X씕2+p(lHΕrð `a :Lk`JsfCxHI751G!<<B71E(#˙A,Qy]AyŐ<#N>!th`I$m9ͅ~'!is'!iZE1 4G 5O%4G 5GNe 5O%4G 5_:Cs,CrDَ3"඀m%ydsT RӇ<<J-f߮ i5yc,`Њb՜ନ\`kmAYeZ‰l_W)-[*tCp/1{t "Fm3\مx@aSZD[(CqGi]OSLZ0Ȉ樫˨DSuR4GOeBp8F&=}%;S鈭莲4֑.?3v4W%x6̟\I94@< pO+I8!gHM N 7.%gDJNx)g)>J$,('8m91-r_!"4m`N@`^R-2K-r?u,;Sh(}fbaA ly,WA4zpXIw0K+rN~ Ŕ(D,-? 
$d 5J( 9%(fzm_ȁq4A e~n:)xM4Zs5gc"Ox1@-p -Um  ^9Ƚ ^kpz@m'`pm!o9.7 ` (1R39ΰ2$XKiM`myxnYgkt!xm9b ֮Za+Dj*s+qv1٨D3vY꒸hNzE{óMc/a($APesV˄z8<<40[0|Xes>VT/@9H+xtT|@AArmͿ9UYiuT3PLe8*TimptDqʇ)cW{w>ʽT}ԍʅ#z A@s-º U&9/{Ds>"x?w]Xxi&ByFXdĊ1,IMegm8Vj:'_Yvlni5YMhKX]=UaWe*[`r2A2Z` o^W*<^[1WՇ8:L[A^ MG!P1,Qc~C*uJPY}kc%#rs$+O`h/YDȗ,։ޮ9WJ`P6Ȣgj625pQ kuL"%#`vH h(VrV wrɧ2a06piFp{pXS3])c}PVwΗӾZ& ƦOo^_'s8`(M86gAJp\"\FοgD7Gcxfk4Ԙ&JSSQϩ R)֊lpkFj{Xnr)~F8XS*&SNC kRh+Ns|k"0p+ëz:ֲ A6D]XQ.,N-*tSh+NuՈb,|O &Y.H *rt䇮RA6nx%iHIiD$.6Scs|$-i/RTNt n/dm`{/BaKp嘯t_n `"NJ0 n4P$ q`+'_^Q Nx $k@ Fh<:P ~.@%K]NI:9 ZzrrEqR/I'H(BkD E/FQ:kb*ȥ%I.V:쓮Vh k=u%RbcOV dPjyIrnЦH_129جu超=L$Ń6HIRbNM%7m>V!c?sű1H05?5vU/=h<)yp9XB2Ϝ\ ;u1+^( W<…lyXb%g`׌Wˆeqj:fJ9e)=OM=xJŇ':V(-Ѻysce@f8ꗗQYQ530 ]qdo3<4svZ)S 1]93@k]HR$aZY2AdpynbρN𪟒Q`$~QTkaFXc-kĭ PЌ""Gd>3q\#U1]jMq/]ە} lLn9]dN>|]$1cM'֠קu>0yVZsk[mQ:Ɯ!_9 &UNc39ӂg‰k?:x35A8dC6P3آyxvā!c,/>$7QFьCv ?fl֕pҎSA ^1w [xhNڒyxv؋Wc[CCA=Vc9`XB'(ȩ3ҜJX !p=gؙSW.C[er YE!}YђgF&>FhӱYѪE) L= Ks.]m+6%xi'KH{T/Gv*@r J.G:*@vd'/К ZQFޠ5*{֨#֍v]֮^:zkD&G(=ޏt$t{kL.G:JO%˖҅gm3kb-`Gy/e|73{w9!De憓_ЊY,m5Oof_?ޮ]whz +_&]C42ज़i`ʹDӛiD5ʂbj`5DK_?Pg׷.ݨ'nT]$ި* KrA'+M - !b'虲_t]`2a06V<^UW|GtI9(鄓Q-I3_ m:8eufk@2D}%{D}Π5qF!j`i#MQs,Q鎼Jᰓe7z~A jvI;R(@^9Xpe;ЩEJ;93vCi#8BL];EE,,-iCXI V0\[ktv mrN)Hk0!_=_"i'Q(AE:>AHNu} [9r< D-r1pEzLzIw``_isS(ʡ6`?6p;R9rx (.Uitj hS)$M,Ji [&Tn:Xv ;tQ'y+,YAodn˦%* 1[Ye|#5AV@ F<3k 'Ԟ N{0`&`굼$@l+s `y9V Js<*gLt{M9[VcӜ >rH"zoAiG0ay\rF;9:ׄ"f2~n@hkMA >%)=3@{pThn Dr7YhUme*KX-Oi)$+¬5cx)Z^+j/@Py a'd$\z>z| /.ew.mĨTp(@KE@P=Vn #KyN0U am`{3K` 0,eS4ᕚGCX~=X\%Uj\<<4 i )>*s眣b&9p3gF)z"G/11|"ǖDζg69.7ê;0#͹&z9v)S֭僱#UWS-)yP7?o-/n|Dro'"݋۷}˿ i6ϗ_59'ƿܽAzcg})hywK~$O9]DxA XGB09o[/wWK4yF+U컫F+QWj%V6HsNJg\Qռ|2$V-N|9c FQ9`$H0"d% F GF.Wd{Vs`d~ qÁP/х3dwl |{3iybRܫܺGV"&7urOrvxcDFE?' Q8/C{:ge08/C6;a=C/f|ŧnpȽA|ϒWHVDLa&w嫀$x J~B A%Bl`cH`3" 1GT4SNzpT+tbl<=Qp[7@_y?N%Uc.CP?'8R s>~46`W~ | 1 M-_/H/h=4ӆ;=;̀[ueQp֤뙚zX]h{giKhF܌N|zM4F4Q"ܣ dw4@-k=:{ǒh4Xh]sF!Gw4#Z- rC4mhkS>;Ms ]㥭sÃ{6wiDj| 伹FBq 0Z5.9HCwVvJ ~ONF!47Xc`0b%ZZƍ@υzje.U㈃c\j\r ч4#TX,ƟƍBi 458cisupkZ~aГW*QUq+]rv|Pf4șI;hhkpzNjvB(C]F M/ȤD;)6Q͑6ni q`׵Ҁހ6lI ! ?Ȗf8\`xņʺ3t&շu㇨OuaCc̗ (a9< Niz©D>^м$P ?FaT#@ lO*AqF2S-pVm)z0i%>Tz#悽g?73=3b2ef3Jhf?s `DO+Gn76/i,mƀZC_c>k7M Z~A"='zˏGpg] _$Jp [ۏq||fb'ya[r7)Ιd9 RsXL9% C7)!5E6//۰?|*ڄw8c,~~CSY 'H_bvhZ?Vu eWu]6D؊ro"?J]o1-kRi]oF ~,Q[RP_0h"F?kn}-2 [7ҽ)uЅ} T+76Vac qo'H*m(gȩ< V5גz7X,/1T ]ar?NZH'R6P &N5cs@0|~N=7(˰aQ4%A_Sk-~I1hIyx`kXni5mgX̵Ô%a5kXg`zZaa.6$~1r;| IU8x?~z ZVDlU'}xď-zmK{0}*߆ٸVӿcY#Yk0<_Tڊ U)e^+~MG|z, k#73?5Au6,&zc3ܢ׷ڶ}ਢxdj"`=7o]G ͆^:݀ 43A^.(|{k~e/dnȾfO۰˹~}p3t{ݻn֚pY~Ҏ s5>£o-3 d qrVO/l aҍ{鬀kDẂA}D; kF{-7 ->@L?cU31tu gv7lxS W]*d_!zm7Rl RBn*!Y !V8^e~eCQiZz05|nBLo_x LڔM%Wυ~fO5@0)/{Q5.tzmf#^op+GDf?t vB].OٽGwxuEuۑH9s 0/'&ͩ̓A;]přgX"f;+J;T;[e~oM$`+Mjg{A1Qh>yopv.-6*Y`*Y+01\` f(syz⭻F=Ā̯jZZMj6 j 0bV{c c'̣韡q:;;ʥ'#y0)VǑ}7)jtj.(84 ?v^/7lF|},;9zܟ;afM4LiW.`D\e g7~-9rV9Bw|,mX`;ٶof֒[ySJtawt6Ɔ;=P_@C=q<z@gp)]pÁg-ݝPjGţH?߄G B}_3qYؔiEwOt_%kA-3ȵc0k!MsL*~c3"[ &!+(MzX|Ar0&P=<)VVzfSwvF[m3=ocךq75TSC@~VȠ󻅋I%rwfe ,/ԯ v{~y _Gդ`H. 
c121WdszZ|,gDxyz4YR0h|]DWsO^SVGtaD{ 82Ⱦτ-9ݵ0̈oC}A}ӈ=Kewڠ/&3RS(61mX'G@P׺ GR/ZN]ʞLk(gG*U3wA]66mqHpS_;<v:ua'fqo?BV`AO9'6z)~@vh2`[ucx{;Ag |"NG ~?=wa養' :zn49e|B ZӰ>U"_0_L l>QoDfwHK;Ry-n?C-y?{p{j"LnŸ?/N|榦 Q5־se[H%,Cx!tZ4Kdr~FO@t]q]N I00~gEJFKyeDu~-ׄKjip|%!CUw9(Pѷ]ПUƈ8 7.;_ݭ:0b=~7b?Ym鈟7,M}tf{j߱x'GzwE H7 cV{aeB+$z=jc}HTy3/Mq%Rm|Z~ OAvtl+}jջA>-r-@'g,&?qIm5*3nw3\B5;>>A URXs+h81 +5':><;2Jk>^%ж5.4CF Ydz騱F؆qwb9`r|xP sk mK1 ^چihWOW:evaqOt0 X:1 mw.nW.te725X 7[^Xtс (ga>ȿݏnC&a,3j5Xs{ǽݓ/}\䢿8]g^ fMR5mT|oP:iO܌bg@&^sqmGsuu0C 0ڭrF>}1wF4?aq l> stream xܽ]%; ^"_ }gB5 j6S;-~:'f-'CQ$"_}ޟޏ[[ilӟ~>Oi<,Ǘj>ﳑ閟zꭦOj[~JmQb3/`~zn[9fll#ɊuS,+ۧ/6aCڷnpHHyoR<Cj%YsrwI "8#,앛'ΐ@ί;/~YH_ݓfH a0K ;`ۦ- mÝ[Cz?ZSYOqDXYr^)# 8HyTtͰ*iB @MdETލ}'+J Ԗ[vzRxBr3(p1=Jps~ owrM~NWBmkY#$p4[9B@H xTY 7( =cVd)zx*c.{ }с l1z`G _tQp_UMuNnFlWaEbV@t9٧qRϏ60A+Ǻ&9T+#׈b-"Lmi[ uzf :XT6 .BL;.P0m "rZu2g`#X)-cPH +pV(ٚ21!L (_z6d-aR7u.^^KpawpgOG xAB@Ԅ|Btg>f䢳|*)d(NF40}YF<*ᖙ+K@L'Qv }@`] XE-`hp F[:v[:ެ5j֯A1tb Au*lkl8 lNӮHTv)1i|[q:/PXS *z /щ"Ų-#j;U>ɶ n[?6&Al΍xڥ*m,I($ӂ"*+^f"lP0D1-SNQCkN W͗ѳȇOF01|h4`DR^'@T\ T _,QKȸGOW9mf>Ѫݣ(O gmعY5K b&O5 < Uqzfѥ9X1<1́gaCCŏ<4tXаGKgs)JF{!\<!i 6G[ &|^ |"?yjÒ?/%u n0y_&S|ezkm7Gw 3vߑm[M~ཝ6s|V x7) ϣ~p._5zs~58Έ2\ X[iH咽%ݬt ]kTXo4/{s&<~bXH\9ѫ7{RݤL{E.7jmеY CQ냉ul< 7W@j.3Flr*ɡH %R*9XtBWܿ>j7C܎gDR+`Rrψψ& ψl7!LHx]ݺtn67*)\v "M8IpL!L ŋφ+ ] w.v̶?l{Z[D^ms[~Ff߅1p5z?4Z<$3Զ4Ϩ Rgf_jKțx>Y0x0FU_|Cb7 s9hc)JFnlUZ&n;WVr+j&hv[C"C$|C{ MLFSivMWDDhp&p´B->UG!H%Q@ жH unsxX.AIi3$ h6_~1|YyrZ{:;# ^TOHz2ʚE |g=MH%(g 6i0muF+Gxb.o,0[0ͭ._P483$ރE]׋0pJD! E[ L@7T2P߄۟kNVD z/ж J0yB ypUpi~~ ovx`ΏN&GoGG!'tS> *.ݐ ;r ?&W^.М zs-ft%TR#塄6l9.穡{ G(B yw|c8^# M29(OTqào ejîr~`+(Qn T Q^3rCf ٩`0 Je=zLzHcuj49 i*`s\}a?x޹KNbn0Џ] %o?۠`_``os19s3 {*_vVlGe/T vT R6sm;lYKP [(D9cuXͿ;H%UviAfR6 ejkZHl\0r93ҏdXs<(2/gz 27=܀$LXn@\hգGw!^Ag96Zd \Df,-x"fqdG^Ÿi6jvzvU&2jZΤZ(TLŇ^2 U^dMSJկ=@BRV?rڪ0bʽ@;Q;UW^MWdmSdT >PљoA3L@Ԅ7t4 #v3qћoAxd;2uw'8qOG+QP*F x# ej\'g*F NZhx QD)5,_a ]aS pٯaakQ5,Qg5lCO9[2S05)D95lГQqC]0N[:O[C0`k 'I֭a> 5쐇CaDa#8b/F]tKn_tfrߍu~CwLAF&ثw~w_X<:hNA֌ev>/g+y ¸.2ƚ^A 0/r;yvC? \[e`vFe]2֡J//J ]ݝVlT6{FԄW{qPY+)z/䋏{eM=bE$#npZ{DYb)9ōf 1fZ n?G 7^%)#8N]Q٫DMx8u斝F,c׈DPt +=QHeCP2 &*;" 9ސ 6|[& 93GnuS;wr ^eKyx*Wǜ'Gq4"lMdX3}B,x>!똟c wuxw5ǣXG\B7ȣ2(Q$KS7'*)CBԆaC~l`=| $`A8 $)1^ /LS -T byWQf99nxu~A/1gm 7A}ۋ{'m}[&UWͤRJjI^)P~uRAwr̵ط~5Ax}K#<w6?^ 9W!Y<ߎZ~p?/W+@E)  .ykt^x0Bz85_tiR WѮzLk;8k{ls8ĺ!N3 ǤJeuRk2mjAg&* 8RY9 tQ~T0!iN(}CU!LqQk2kjgi?LkA^b~b뛤W!aW&k}Z`o44RR MA!q `GBzVe!3!U(os۠QSJLA`Ȁ.I) 6p(\ E#v3ҾͩC&ܭI3V%0"eV1XejÄEQ2a%vAe06N%!t~āEú lX*e Xej ,\ ,2$QX8մN4tB'+dqbKJTF^΄y>#S1T.po]簟@ /cJ^=|| *Je?_+QXTR Lxpj\`kA,.[0: X,yߙE*؉ N 02RV({!MO9 ')~$DyX\v*-gJ:dr00Hŧ6mD:T*2BAʅ q%(/䲓U1t1(ǩSΎa%}h?4U-qH%c3.`xm`` R6@ 4}2 sںkPC R(D9Ǔ푩1 B!=vcNX`Q , љ(PEk#;q!¥Prj^FνԺtpԃ!&t ?@ۍ+4ZU]jGΕx|+O^ \N x nei=jl~P{UPa ;vY!A;IwjG(n_%w  d.n'` Iqr2 "5 {F/ 0(p?X'T;2HD .:9\LΞzXOE'"B pf/]мp '#pr}A0l8nm8n<(NFdQs5kHݴE7!'g۳6d닜:mZ909d D5"5NkrfZSXioGQTr " Nz\j2 A2LyyB^k~p8+8CqG$[nZ'Hd|Nh.P}li @HΫhD6A7%텏ՔMec=9gWHp;΅ ,nN6*p} ami՗͐^R(ܩvxNA)TLDO}\}-z^gF82 SQP끹~쇩yߓ{J'TRgs6?̙)q!- t1?8$&gHz"CjaF'Hd< Ae2wrdDgTl0vvL2G~YaҜH3 ς_|?~(Ӝ˜>3 ^̑)aH2(meup&tSq3a-~7RjC@'2l̻sD6/I~RmH(A_͐@L0dlB.&F@6jH'Jh#d!YħJqDsG LB %:gm7;BKjغ'3K01<Ј܌aKeJbT s21Ԁzxi=ÐK L+?,A2~E'<։}O4E Tpvx7V | Hv ‘SӨqG(\$_Q:;.>1:%[ڊQpJG!g޷K;9}Xwlwt_q O;RW(pQN<{acdG gξ< 90_h8#RLF(s #$S`ݞEB4cbv6s*<# zZNV$<[ M 3D!:lcQ _Eq#+t/@~I+{é ϶b{jOK9o(UF_RC1(r?I mJlwv1C'%B8óR RPrD+\*( 'G4GA(D]) tr>Ym*Hg.RgGdCXc \Hh;gpvAYLR!:#SEIݬ)ƀ`B r ImJ\kHY&.10 Q2gnVvVYʄb0J!HA#fڹcvg϶ݑ-kӳEJ묯 ȶd $/2` e ,Xk6uVDNZC %qB8Rچ&V. 
2'C;4ט~6/!ImnDsLA=8qA(QgK'¥PrʞC;]靝nNs7Cu=SP]/lm@,Ѧw\=j4KA(ɳpqh9ih#SB\ V2@+}Im 0(," y9ERD?rRM)IfPrgI䆶C2p]-]-]-]ږ.FEYm¬ζragrqZhr1\L .\ڭ\\u+WGr1\LAW"t[_Z\_^d513)D%-UC7LA< (PgkCB\2E5-S0T ;u6XN m*P0T1)KfPvgӁٔ6-(¥Pr,]C;Էœ\އ=8sig4С^^1tZ-QSl0y6KyU Kh҂J\Φi\:%V%UL %Kti4Jm۸C_^>OhKȵiC.H6’ ښf e ,/*QD;JϦ%AD(,ɛoB?[ ڸ /."TV:Kbi[5&įׄ]_%ׂ~-7ye<)?W*G*1M ltBdxCyl/Y#~2<2OKj}0GrEz ekɾ f %ٖvv϶n* l68o*@64[-K i15&0|.c h3X()P6W D\$LH=Ύ$ e ,̿sgCHlNJaejlǘ y۟†Ǝƅ`La]K2[ SRH*"P26D;~FgزYTv*0/Id 7CsWnv%wgqm)y*#IQq P. C Q"჉u Ï g#J$F%ʧbHm)J`POI ^b0(/-oStG6I݆C 0gLmpNn*>6lF@.Bk~<m;;$*uf nfd);aEQ R6(ޖK ¥Pr>w.u%5Ajmkh\‹xYf5G%\<楄sa9 E'"ZԀI !\T雾SA!Ex#GL$'xZ[OH]>q}dr O{0$ ~P|@Omi'Hmw>'ܮ1%?ރ峵8C2[aRAOsܞ S!oVvP/Ho-E[+6tk iZ]D]Ow$u$Aֳ$ІmWSLK,OE|:ֻ|0!MW'oU~KVf0*]T\eŽKW7JF!/44 `U ov]\= \l<SKy~s$2'ywL*I z`WBR[-A(+bq*i7 &7 R4 aCRDNs*iB0Khw%!RJ8ϳܕXQh#,yiư"0|5(tdoGuoќj-g ZLTFRE[Lژ a)D)C%v-,LY^7oRN.N%zvvWL쮘*]92]BdU$کB>5agd4ޙw;vUکFB#8ZZVigfJjkf1p}1pgPFhN6AQMo) Bū]2FDRJ:E?˼֖zV%溔)HK.ik\0t- Ź)/mLѶzI.i]AV// @Iiz1(ºzqizahzuvctn鉑^$ڭ^wz1^L!\H-CbŴniΗ1t,)3 ΗDWHΗ1lK9(iQJ'Ԗ6&N _z#m([! .BcNrYJ#OX a](HY>-{]X[3yX$v6|z `6|T6| el6|F>חe@B@j6|z6| cl6|FP=fJ;L 69fqAY[Hkf>f18`hX c af1W"Pڴ 'v~bh3DMqIBiSm!O(H=e)]5! p|eK(m*EJN)]3rBiS!P(HY=ߛ 6$qb.P5 ,>%xPbϙ{X:lx؃xe"'z ̬;aFoSw@ܣ֧Aq0H-E%^I#oX 1 ~8,8IB$7,v+oy癳ʭC7?!,f!BB? ئU^fgy'҃)oY nL msK 6q$aFhy=vewH*_:;u6ĢooAT~ >\MVl?sow4Je(Y ^" >a2-TONK"ظce)wR@# ejjOFϵE@ggf$_ ~ әw^;(*"SHg-sPbc^ZK6"AmL-؈t[d64[TA1fQDhb=m烟m@o0݆*%NPd'@kK<өt<6L˽'e7$ K/ۈ@]TQP{Hk=-#Hm/>gw C5Nָ/g4c?L Ĝ2*{u.R2itϥR%foJ3i-m~R+Qv*б F/(P8;'p+[?P n1>W3y z_b{l[/AR52G _,gU~.&R1R6sdl0HM)S 3xZTVEn̢"MG]>=wxIH{ 4HM+\ ZA;יWA@^ _>bN)p&j$]+R+EPŤ&2YxFB@OsO?|"[ ms ݜ/mܿu;}Ϥq~B\~~Oc*Q6cosokן ?<ӿ m" nsH>WGZS!’{}R?;llh{?iNI1sst*nOAS'j8w\:FSCꮣ?Hy.h#nRd]By~G9'|\#\2T,35!0S@߁}vO0=x,ݛٖ1J ~[Hu{a%sc~U-[ 8ݕg#:l$yqu}6$N92zrEȤRsFY|2f _D*S+FG*"{$x61=OD )sSj9 vDσ!+o K  \aॎa1$hs; S߯񽑓$ԕ`G)C<8D*A&CbHZGY> Z\1$`BX ->Jf~^d6|j2Swo 0|5]Soխ)z-~"ks{jSyqDI7 jbAN5Ouuofa"mjA" $n~dsVvMҥ$8)SP ~"a/˳Px0D)`q%Jmw'kBj"25En&M&DUQD)uҏj6eSG~TAFQH9N7†)Mezqa<<& pAp$|)s4dS[L,(~ѡ8Ȟ@NGO4ttr%w4.qLr{PrJvItTufGW'ЃַO*kuEU}QHWx՜^g;tS6*ELr쒐yr@HB4F :%Z84/'{ o<]<^[XG)-Ң~\v]BN(-Hq]W0hl+ZYp/dp)Dۦ|>xQfaငw;wKGAE$֥J. r39!`_Cr))K-$ i(̙6AEwRQD)>RrC:ۏ=C}B oCܡ7jr>wcwt?|YD W3hQQ=N} sm Ƀ!֣C(s}."}2=Rۨ[nk1?&@k8`AJgJNJwJ,;)Ő\[3&FE\L\61CdFE ĄWMHe\ Gf6cyX3/v*p7fZ~m^̩4CC6GTYśi.;gV&NꢐtWm!2g6U ďN7 iK8?qee{<(IȿD.Cl *}[휍 NCҟ:x GYÚir 34tD{0a|ZݗLRnEJ/ kdtÓF@ }4 6>z SRLd*mq8*DbP5MXl3sH m"Ҥ3 QR}%'}HbØўŭ|4CbYI;e;jV{ i6\]%6ٜGf8dMLT4Z+=YD)),nd5N3Z+=#OnJׄ6*FJkn+Ǒ0؝;oM-[{(C[KԗVIRrwV7av`ܺ\h#DG#=,c9CWOn/0'׃\vGYWv`ںj-+dF)dCSOB'3̧5+)>M:;VȾMavgg-ۭ0<=:;Ej"L$zIx@~h$g 'M`F/Hf:}汨^2gSk\]9W=T*;ޟ:SjGv3<]]X 0};7x2`W'u(ӆp r8\bLU.og$^;`\4Y嵁M1[l,>4Kg`S!8Cu=yPqm_d٥̣>QH:j~3KC#G/i_>#$lvNhs|9"^)_:ht=ymnJ'{ N0s$Lm,uh{i)-E"7U~HXiX(D/u1V0IJe/`pBAYY@0@Qí *N0z/kHoAXqc zVayud;͸ũ`` O׌+$)׊)D%iE-~p:<> H64&?E["T`AxQr3RGAmXTX% el 0X2A[ , Å .2C%gQAR&c$2S *:IAxQk> b[ATd>>%LHCDK oBNg!'/j]M2fqKIw:T_/ߝcU םo`YTnUxyjXz _4ڄ UYήOJ&.d&_pV汨Ř %a}@H^>ӺaQ }lߋv2s\mTy{Y?K(Dnj$f)"|2r8\+0Y| Ly&q甁9;$葩dWtMx63D)cF ! pE1+BV^~,~PDzi H&z~a+[)-H—/"=šȐЏu/h"=C9&-A߃AxT1(8UA2L@Ԅ? BEQD)oI`Y%҆9=D% ="0Ap*25D0Mpщ(Ņn :au9{5-AX&h2E@*Aw:¢2\7مoW#"Lex#еuV@1򔪣@eijddV L`в!eA\-eWM+$Qm&+hc&a(BAԆcA06U R)D9]_|lO~d4)hq@H8ڽLB!UQ)_(B\dV B&B91s㨀S!"]7{{5HQvLj=p}JyUIy>KtCx%=!2@_{/B'5PR*p{ڵypӸ>ʶ|kOW>H$"%jvndTn!i[\@c,ݠxтzʃ#2 [Z=}r%5Qוe*Tmdzp/[6mu.XT/pIMD>K3Aư?׳H:֛FMC#hoJrNm\^֟B\ky p~ HCUO%&=JZ9\]?$3Z]rv۞o7|GjE',G23JC#I>nZTJ =#o)74u͵z g]6\[MN]6 yP%$CƗ*֥~;]@( SB^Tᄂ SB8TPC6lnǙBzhpfgG9u:>9CG)8Nl ގjNc;(;1.L!Ήv!J.QeGb\\1ѻ8\'nJK2(@qdz9BE) Y*dMQȱyL bEojNDA^KNu~MB/*_aPc֯ K!L! 
$u;9Ut8ӤΉﻤ y93nbARqZ_y}s^2H !1Cp%UgduN,3A9lBZ&a#|N(ȥ]*,d c,7pVAz){ 6LЂ(7&hٵ Y os]U \60OC⇹Μys·PrPn+IZF#a16a]0e'b(JAԆ6mn‚a3TKTv*p)yes' )vn<W,aeT^RJ8n5@e'bB!pmyQu\SvY[A\df1!INDWJH!&!IDWhMH ݢI ;ߒW!= ~vv|8'x?170̕;?*+Q܌(@b\wrdٱ̆LR'M@gun.}pv cr0 2[/CBȸ)58- <.SKnh;GIXXTjTe!˒VJJ^!YF<FZT/|\$ _C,QnAgT$q!(ü8T/02F{`t+enka, c Tj1JQЌ$2 ~ =(!<2ks jSХiG[XeJHwWʒGzbGǥw #^ #v;RXҗ9j8*p5 7z'gHкǺ~CZb _wLIKScR[]SJDvSWrScA_h=Xz G]p'?r :O!^(֠I7OM];$? QB" [yۃtP pc T=Sp1uvQ=CF~TCSr<0!n (~p逕(C9J)PL DX0ڋ ejj0C@!察 (LaL4Ws%#z)@"i.#.;aEQ R6eF3a<ڠ!%QX0hnJ}Q76B1+uwmK+\VDa:1^0b ;̂6*<}XźQc%$),RrsF#ȥF?fl®HS>x9|&iSNga6Ysq @ ɶHlvkLh3 nE(d v0+mDA\ R?/wHG_;V,y,֯hU#D$G;OD= <ֶ٫1[t,<{gIU{E0ENg),h{=YjhnxF.U" < |f?r>'nW.v6 r16%4.19t25vE] ѻ$8` 3ҭ5 JZ 4?"WKX)g  nI!ssҗƑZ/M4I&@}X;J"9ZhTNan#Ƙ*=ĭzqMe&[K5.)EjL#A+Mñw{wc~D I;k~D b Ui$UK9 ɬ ,ob-6-ahBm ejç Ja>\T)C QJqe;Hl_-p-Qwif; 1Be/`(B܆vnŽ j\v*-i\#,F: #FYNFFB8֑~`1aϕl` lZxIbBA;[L-S58)?oC hPC28k.#>vx0!g[uqy)mt\I .H$?`/d %%Oc߸'b't7Γ^+qtc~k mN, w/X$yү`3?hջQ6?sdz/wHvK]ɀ b pm Ǣz`+@)(Eciz"Y.nmatGGƨZml(AFUhXp`hV/$cTՓ2F%,1jѝXHo!TB:bS_iKrY(ѹqW -'V{+I dW }U,= I|vj֥Cw2^>mWmyܰ VrfU nBk?`oXu醶q#uSrRً*"P2aS:(2)Ae0KHRD븒ߏ6á] +Sq-=2VVe Xx(IR^!;ysrϱ|v*Y;kZEM0,֯P݋$pH<0UL0.ԭW1W$G{5GbGg/ kk$-aOObemM2=0v͞2lQV|%6odv6=TŽ,[Ln9gLB7gX —qrXb٭P{1VHc׻L}?nd sGʥY ]Ics[ ekV swuFfAZ.T/${缜$%cg[[gVJa$]T4O@j326tS9G0 qc!J!ap@k%SX'ۃezl14gANXŐu[(HpfBڠV0Kq*Izd`QؒiƿT#+2nBAԆ=u2r +¥PX)F暑Z!R&&^-~2@P0255ƓPP'jBPx0QD)#JvHI. OL|PD5DHn;:qiNTE@0ow NT6T*$2=ȳE d}IE? ]a]痊! qJ6`<ɬ:$utr:dLgHS9eHu Hty GT(|kWNL%BA:Θ0C;.髗F$:?-5]ixtt{\I{tzmpmLk罫\C1һ^)@ڷ{Jrk$p{ɓH,w>Zczsnrk7}`q@g/pqMkabM1ļE<4=':'nDG9mo&/HgE"6 ,tSLe)DC$lƙؔM6!{3Ë*8H̑dHnT^\{J =酰Hx\t$C: -0G0 $\> ݓ^_GWWqaWm!#ЂtO9}b OSѶ]Yn}o=G吝ŵLeiR5OZ$هD铻E'"J@D&LԹo?0گvuٶ$AQs7\6,*Qnc:L(䋍*Q1lTAG D)^54O} <-X.c <<8}bxZ?@Fʍ\<8,^wESl рߌH 8_(1,XDx!=RfM'.6wѴ۷46#H4m/Ѵߖpژzi6{q %n+$md(]H o IR%?'IףQ$I9k gl`$I{%G5ʃIS Twd<$i/ 4x>Qe9&H]Z:ˏ1Wi%(yxɁc#0{ۗK_sXKʪ ,-_Fg%[$Gz͇`$ӕsC>k?KZ6K]WjMo<@6H@8 1/ A¼z$<6eAn/ZՐ6u5͝kz"[dˍdmO 3\η 7/]$_X'6{IDmb^^QMcU K38JnZ׼H?_"u (iTx3Ul JŜH+VF,<r;Z+="iTPaw 毐emAS׷ [8BVT֝h)$"}dFʝWlpy%lIhVߒO0lC_aRiЉc:A('( r}ug;1J6K<3J]Cȸ9T;8=kE݄9K#m̩,}C>IC jɵG "zi+T #,y)2y^Mg./"J_HtN` < DY!zcӶa:m!C*[%r hkn`}=s!4 澟,tWM?/({tBo4 JE[JVfX\b#֊uO}*[ $%궨VZ7\XȌ`/\bțEN಍{2"y[nec2hbBppw΢ )?rh;c M >ޘXɘ! Mғ&Rv"5㘪ƎV~M3as,"kM}&27!sS鸄fnK:.q>oC&]6[H?X&Tqr(ŕQ B)HIj˰Bϼ,)D9^0]w^2za!~PCL(HZcI)J^X*{aC2(+LC9άəF;fK,7׻N3 oMBL<2 Q8. 
y14 Cue84xyp Nܑi!Кcؖ4@:g@ x'-Ge=8݃e<8w/&Ӝ3^ .gRhMT`Qo'~/qQV$G;E6 Lg6 %؃>8$-.O.݇d[՞Ґ/N|~5 n=^r:':!/ ?O)?G y_gȏףf#w,̑r5WV[\ZتmetZoG08^dt(A?eَW Ng2IGx殁?@j=_:VWU+4LŅF%7knh@\皋^"_Xzz~7SMiM9#u™b>sFnԣ֣wsN_nKZ#^%Ozz\<씙gׂ.ySb{>cvH\5/ee(GKVYBvaI8+&u427|`olsB _ ~AIk"[T96 I8]v><0崾7 0 >uRpߥnԡ2/zxHēVYQ$̎XeN˓wRhʯ,@,%z89j8A&B{:~Idc1.5oy%{ *Ѐ`A~v~v5H~h=]xЈ^ }ܗZSYヶ勶Oږo_V?ޤ4RymS.eN%~'C%X5d ~mϛj>O G'NFyٍhQj4~((r5\93û{C:+)u'tfV'aJ*w;Wzzoۨn_- Nےm8N?W j̑^T_,Jd#I8} _;^^wqE6WP:󊨡z$Xsi[3D4л`'ڳX_hb=a}4rXOBko/lفU3wp'V*pvX N`&p64nX~Hl72~htrVऴLκ\|GlWPMY@˪_f!F[2ǰegwq $[1lŝc;ǰwavŏuV_v)&.9Eɜ,xVKQcQ]#LɠĚE5MuM T#èŏ).fkL=:;b{wg֧#\䇏H9^#RC)jGT nt#@BTS"/Ͻntӳ_XV[2Jwٍ$oq]D%|7$x~3:o:RGbɟ?CfeF E #w"vzzlH䙓ZdyhKw2۬b<>aIھ^Qw,ȏL3*S Yxs#q{qqhUT1#{A3wɍwlBahΧXpJ>\Ucl&;eÑ1-Ɂ~F è(_ v i"S3N/~'^´,uc4G)Q1Qf =dIIvT.M*fc!t8YqUk1^,-zkSM41ft)o=3j-WMz4F|QsU\C|fhZL?Hx&~~tcgrdhYάgԴ)_3LS晲iP?Rң_зף9֠-Jʤ,j;-L&g]\q r.e,ZdNjJ>QծxəS+5u |3:&cX(3=hԡ2YVŏ&=ŒnIwtRz.)9?*r#(x3ppOQ&[+1nCdO5@;ƘqozL&HUo |CU0󷦢4XyNGNL!XD#ayFsyP퇞y\ع$yc%osҙrYud%u4o_=|Jur?:\GG9GKr=EߔkI;&:6} 僳+^o FkIn%FEg1%ޱ7;UCʇIo}ݦĆ(1w5'z`\E L[0En𴿿K ]7Lό;D(ZB!91;r̃ukzHV9:s{#ewzZ)+7DFVwYt /f?h&c}3Oma3SYO]HXEHLqp@szm$=p+97Y W gB+MyϿdosYZ^#vs"-ϡk93uK~i)g @A?gG]>p ӄ3(=\hd*0Z-;e-})] nWGW\jY$ܥĸG\|=U=-ȝ*xLVQv)*q2$5WZ&i/~g" h# d/k#`+ =oD#Ǖ6 H;I]@gS,ȱ,7I?>WiX>;/Ύg"P  VEv<\ӫHM/s/55(ȴ h}GG d/5Xu?9`U2U2FL.S8`b2SF\f~ MqIpǨdL\Aϛ_2mA7L~AhrʝђT? FM ( ڨaG*sFt5WДx2C)`ԃxD S&D+~ڼ`j( TbZ>PԄ!vޠb!fFfb"ĠR!}zk:LD*gk>GDzMNCp.4JX~dعC~pNf՝*6s k[us27B_r9@+a!eKFt{zD;"C 8&iQS{g ىلZ6n&u;{&Ws &nr^0[*Th$,$/vMUh)X**Z зnnLыަ26hݰb7@'*@zmkZ*   yjTt oYXHVFzW rt A#\eW  oYX<囙c1|3SeB~XK-̔*nv:TˊRGvN7;'y V.ER9װZ9NQ)#NQ2Gix#Ev̌c {0G}́u$li86,86(_4w"hnɀN9AsU#j!{t7md@Ń[_T]Z \Zw1\d&Men6 @YsNw' eMw mt7xn0d- v N (.tdbp Yԝ:wf h$,$/g v[qzIbM' #P2h'P3/lfPfabԡ23/lV",d?s`TD ZsRL+ Jd9r1bKԡ29h%,d?$XĞ G1i,7@ "Le-A$*rH=ro<''b|׍ 7XupD*h:,9G:T戄2%Ub;9?9@a#SF(#EpB~45L'Q'ZzS2AsbZ<ɏ, \ѪPQ@6@"6ɇϓ\ýkĮT 1\ mxUk3ܵ ̪01\ ٵ2Fصz̖1k" p7Y ׄ^9c%#ޞ+X@bj~WD.xpM8lJU$\ kw<r,X[8ψ><]~٭}.x[סVu|& poeNu1&STb *8(GCʫJ9(],$/d%'x( )'h NFx?>~w 9B`uˮWb]c׃cYpGϮŵ{q~p^\kkaGP,Eۉ(88("pLƏh+ RI`/ ggUA2p- ܋1Ż3p'=hD2=}?!5~yZJ,+NyԽT=7*H*\OK.2\*_92jxP1zOeBAF:<0@nzo7<)z 3` xKL4hԑWO(.(,@:% zB!{&BVB#97/~)5>(5N(\hx&BYN %̯Ju΅l)5m^:\$ޖ{c@'XusrL9Tw%4h^,AyxzL2JIuSPA ,@:8(6ad!U9Ut :f!ܥI1y[>.ϝ/kuG-\l9V!)i( ;VCqBbL'5鯽^;w"`|< 754H3)wmsDZ1)繉xrmhc{'id y%Buu7oD+Grnɲ[-q]vhY]Ohï)vŦ[ۄ^Jjau!uvO{kT@,dr5nr?EIdX8/OF&sVT KDFԁ<ReHh ` ą*Ulç@ElENlR xdla-@:ⳕ23YIR-~dgM});۠ g, vNULYI4R-,^Ȯa /ia k2Z [PH-VՏ#b8awV V1pYpnۮ xQcċCk7 LjǂZqIx\" [ksE'z#pȞȺ%&~ҭ$kXd|;OCB=@d5JX~,1Xk~ȢAҒ9&Aec:,n}VBcl($tPviO70@RdQcJ 3Jh%,d?vbyE0cE*3Lh:,@:9ʐsJa{p0Y~0 &^>N?ޒ.Nwӎ/:0B| #ďy+ jB\-,.,UA0cKpIhK?=MMxTX<`tk^ X5@E˂}B`:6E"MkXd|=aefw7hx+_&ctl2ápp`י"&ǸZdו\܌&N1h?xz@C ׷`cAXYYeQCyVC%!roGD=-ԧy~IQ4jXpxLQx W{ix+2@e~9>G^ ,8\Ig_|2Gi\-,~,:rdz'ʹ61sʛ۶Ԃ3 8fЋwڔf`xÓ)8Ó^,3&Nr(L'$YrPL }#270rTJ3PpJ'3|~d*QrfEm^lB&c`v +LɕX&g6@bexDIDtAtLzx:,Y`P6 %T4Q3=/M\Aϛ_(>rd^e @3C,CTC&!8:i zt^ wt=,:[MQ@Ņd6yBZU^ß7Q+USaY5h*34f`qaA)667j$z(;@GeϛJiԄ;7 e4s6N7A @i}TB84rR +mhw"\\rR?sA0Gw/:wⷣP3~]T#j`i2ՠOeÓqڨc!;Atl4 **nH H(1~k 1~hUIjuGe, yd2y oXZM^1Z9`|dƛʞc`a|w&nNO9T5bVf)6w)9E^ uHe +AL< ]A8•Sӿf%*g;y ;_c^>}*4|i ي-^_7[ݘZԚT{ :i&<~ :teR ⤋hiϋX\X`TIU/0)=Y@@W (h8l,8hbB݄%9aLf4˗~#F$JL; Tf47U`$ &NP-l GDy vK9UMh kXd\+MC 5˽7@+a!bv#ql>1O1O"۷]3 83oT gXF _Kҟ;O@5} (*e;~uqt @+a!8W6w`sT nI%LMwv}p }4#wۧ i3f!8h1Z|6y1-/m&%Q~A(2H<m?07ġߙ`yhX: 4%,8m?^"Gv27IuML· L1c^2;BmNAhl p~ ǃ0ѩ)gw2)_V_^q@~2 5Z*'?^kXd*if4h5@+a!l6m"#4,xn6%ݔ=#rhG2*Hxz%֏X;2#4lYp= BaCdHh8Y~,NfeV X+ҁus}9~/Pevh7,d?{S MGNԅLuh YsiD{2eu84@uŏUBP1x]+h8X:5ь$?4Q%]3yO,~,@:XߌȌ0 5>'ay.rf@`\4 g+w?RnR1nac 
˱dSۻe`p+حC3)-[p4ҡ[G*Κ$&sXFJJud窞s6gq_8sΙwΝ3MwTP;wDi˻gۙbpMۏ>E㲫sH;^|>w4g*T8ȿ~Fu4\rZr EsgCz'G ]eWd!v)kilZfu5Nmimڔ磻CޚoGs?K׆e>>f^9r@K!DU5Z7t+]dfIqp(=jL2Cv.qMvPܶ3g~xKbr*Z.(IXj JS.ލlmaNS(&J$EooffUP"6EOE;HJZFJO,={y#΢s2OcPP{{ZFJǍm>5$Euݐ}?M|Kf -|}*񼻮D-H-<[0Lw^{_M[)tK҃ղ-v~6h喰J/sW㋹F۞Z-yb FLxZ90Ų1t r 5QzN| 4 _+f4C&gmǍUM牤4&ZM$ǒDŽY(dH7 ,.^ܢJ8* l/iEB7z$$\c 1!Ii%2O] kJu<h#Oϥ/OeGl޵ϕWm2x3'>7݂l^warg^}:5mʉc9o$.>/=xqrC2;Wk}=پ Qҝv0oݍwkïØg{߮cVR}^Mk[.B^"6׻v]>?Oa?O?\ HW̿c<{_w[ݎr݆&&,[;JRx5٣nzE/CU9&TD~\77JR7DY{rw.#ށw 7R·їG۫e[z9ͭ(:ZyfQ@makz~zƨl]hm_ے"X*$ ޝ>2{"Fc-W9fV`CNx߰e3.0lLn{&{>yJ(.JuLSB-|:5XGn%L5SPOΦ=MeoQf@ ')Z ُ`}{Bޖ:y[bi!W`d1-^[gZlkݰx}J҃\v͕m&| ۏ=];V= X9)0/0>[sx>: d㼟ZHDY3 ޿剂떼o}^ҚextٔC/lIh.2^f"w4} !~b(.?>[{O)\8Sb۲:ee} gWKۗ <(s28"JRrjx uWյ:MZ)Hz55eXI6K%cD[֣ն-}~vgܔFRk$CIPLj%c[irG*1hXкZPrV?)+Ihs/t'SO'UiϓZXxsZ[Y=VxjY6rP3/f܅ H҃wrP3 =-u}{r8Ѱ=wjfy07ySW`AֻYN0u k[~9-·j`c3w{Y{U(JsO޸؎* v3L?'6`ݙrP~m?yG/mR_f֕پ%ζ$پmzkHo6gxy[alIKXK㉭'})%Ń!CċWi])FMZbOޯJR|Bi.woZ }–~-c-}.7jn7Rr_?(2V,[I>E/Qk5}-%[֣6~v䖄fӰ˔~k_:O[֣ۣ"(4;͒T䉘0{2armWabb:~Uz3SAfT#9nwj<ZO%kP%g).0s\@+a!h,$IJ|3߽/]XUޤЎyB _8JΪ$ðPO%!*F6@B~UϞCmW#7'ODr7`UA ÂD&V@ѐY-5}Z4G|ggzr #^sU6oSz Pꩧ{e9\6>PQ]6 Zi,=ٸ2ee-`E3Bn _cFV?ӷVkNtIhsꆼ/gIm>:(=|SKb\^E*d$-t;^2v:SO^}ƽVl߭~Jw<%s޵Eh8>xt?X{638z])ұȊVjisxc_fs{r CVCuc7~.Lfݯ՛hq;OaIJr;v\zנ%ټdj:kPFԪh|5 럱U/>۾~m_͆nP0LJWS%YCa@ad0ٍ{gHyY~|r1ƑsM$ut4|t2 /v{lT,4JX~|zA7ӃVA2A}<@(8?W¼_#7 ,.d_ʭ7ףr٘V7\E3TdBspZ!mf*`*l3Y0N91F0>Nw=9o?ڝr_Q^X/>:ɓR^ ]DWwovj,פ^[ /x]g\KKZ;(ސPҟ{dVTsn?29un9)VxRPVVF]~)}V "Z9>?Oޱ^YA6[y+͗}{}dzgY@::TU_ ZW+ծk1J>B-wn(Z?E=",OHQ~46h46Y695]zGI/z5) +NᒎO[Olt)xF _= ˩cvLC䶕d}J|瞼q|+۹ȎtXQϦ>:0bsb46QrUᎲSwp_M!2()[ O5]>8l}]^0^ۉ["{Iҟ4[(Ha -{' d%,-#-{/^z]ZZZ\?[9HP"mFE%(K?W^FIN=ZOhsR7JRCL]9D37&?&$1tۻGXp[GGmkz9NK*fkaB Z6l=j=Z_m%>tTp ޘTy4|fI4Sl19 fA,$/rR9D-&WMꋧ UFSj/$qn]evh,Yݧ%1n29{ e۫r<9ë1ӘơUn!ODx/aAC寿E+$ZV?t}dlJAj$F#b29[0iD܂CxNuۨ#?$2ӒoqO u]|$‡lpCn?nK^. )6Ւ4滯S )7Ux|ʂ6Qreיz"Am 3Ǚm+H[I|;(hqy?(I8ƂZZZ{+|JW3"PV"CZ'';{]zg(EiݓZ̍.% AxpC uygj(۫LֵNYTc(Hqj,> =hA~]#MZWӞ}'lp&km?YX~Ngx4*WoE4 f(rlc_RJ|)h Ta&(9msMg PciD#ԅ~1.87~d3wO iͲFeSPs`rX @[O{UNdlJףU-x(;QrC.GP- CvQd9f5 9([%[֣Cv]oE)|!Ecg+%DskUjs%>L.LJ344s],$/ߗoE&I{ƨ'#a©9D#i4Hh$_vJ,d%~7 FKņژdKh֞fΛDF#|?EDw)N]򉔉a q"{M1tiexbs@4OeZOlNCqyltY*G N;F{6dr4wYӄ0*rH Qo\qE!c/ϥV E4QmnQr-wU/@EHzagQZ fD?I{^#j)BMbXr. $;bx5lDÔv:uF9S;ވ|;k k<,e$ tJ{MSVŏ}f#9+)9Qˑ)Cf%%'"@y-R^y-Bk)R-,.>:M(onxvI<+AP@~.Q$M k򄕀Kq5Dy F7CgmR/)3P\u8 vuSXRT%rFtv>x? Uwdmv宺ρo?ؓW19FT|`f#-PVXynyWwa{h/@8cu dʇ;'7jlm{f%dl*(lZF(ݾ{&͸ֵDg.(Emm=ߏKZʸ=~§?+ >ݏ,Vr槬JmZɵLK6z%48H-I_ብEC3 Kmz9Ss(͓V- $Z[ZM/ߝy3 CȘv:ya5ލ%䈐pT%Hڟ1>t;O-~^B̓'vK*Mf]sǀABR3S 0 S S aAq2/;Bn?A(`rN9S=Vv+sV{U6_ʩFPTŅOmك=hPG{*W`G!J*gW߲ʽy*I?~)GDwTeuڷI JSi Q"ywy?/(}_2Y9u A'JD7Ac6IܟկJdZV≖>W.ęY$򢬌DLuJR V&l}x?Q|@͔I-<K+g}Dcũ7A+VQ2jIq-U++ 汾*J%ߖwqaxKl8Ҧ/Q-z|o?j=Z_m>NplݙL'MTμS!x{w# fޮc[oF z[+ix=B4 U@tQo72M#fHI\BA {]N xzekK4ph &CzDĝLojlBrBMh%iu2d}<ҿ{ynlfH[5/LI)` BIrf<>P@(y >2W=~i.vٍ){Lx׮M-lM ZI 6#)CefSGf|{Ôݻͣ6NjgZ*/e@h[u0SvQDv'{Lm'Veu$["Dv}*Ȯ?$[F釜%Co`g&QY g^]O|&lcBt25JgY#cUc?ePe mkA2K½n,MVd#nl Wڊ!HN[PaʖMlz6ztUksgs_<8k84=/I2[Vw$ْ/eqc{sfZIj̦LN kzZCqo^_)L=(=^- ?Sc7ub/slh})~짉K&e3%.y:v8ÉKn~w0q￴ooXsB"jVϣDejB,@:x11HemBGEx '8i萻R@|+ <:\ k3xXu' auBX& @+a!poOȿ$;쮌^)Y׀+Y gS?Sװ&ك7lpJA6X6% N%~'Js25$ِܖmK:8WΓMppb/w7ƛ#J*ނeR\(5>GI2~Iu -)۱y@8ҟ< .]ʵ?+|B!^vXksxnfo%eX#$!"BV8Oڬ1hmFvhlvjIndh&&MT4@X.|~kfTT \yl˙N',85ٴ,iyo hY~|nDIi>4briLf4hGӈYsF]8^'. 
34fa#z(> Fڦ4pY`"= "v Pǣ7 @Ǽ)~Sb1+QxhhJw+Bɛ=6flȱ1a٭q=35\Q>Qe:b EX9X=zs\Txrhk&7ݺ%W3Cbz5p4-z׷'~qĪFWqt9V=tƄٍi̓ D.,x$D8!0l*C=;qwlsUPp73> \AG:zi~sCɷ} \wZ rYͧp}f\ {7 x,/ NW26-Yᶏ7oyLOffx v9-N3kEj6!## ŕ]`_\2n>O^e8սQs5FVo*\`B ~,|l+~whd V;4rovv~vѴ C(8O>(=^ tp?OuC%x0g %Kңl#ሀWE{A]|.34%N;y"se68O=#oSbK,6¼DrҐ$lhT @Ukm J' 9m̗&k6@bqYFq*/ի"4 kmK\S]f {[&9;S P& d>E5Ws,K d$-LNnf%mX% /Wv&jᗰd //h0}"5_F:H~md#T gm>iW o`!'d *;vK/>'>JB9m$j$yi$9L9VC ?9HzUJd&[`e)_N/#T3 oXXSNuľe2u*(XZK~g7|HZ5׫ŅW=]:4KfgLdr nD uB3i5f2 oX\PjMUlJCvKN/UzdaSoe/+FɀY(9g2P UP~V?G.}(hVGOPt;bQeEh aA>1uX8/E3@+a!8礙^r(ÓʬAMZ;W c4,&s@+a!1 v~,sJ(ub@kT&k gޑ}JsdB@EL40hd&A#X<`\XlqSO9i V-d/gsn8Yd `uȷ,b rd|m CՄ@5~&aU K@*G]H@nj2G$4tJ]BrcGʎV,,~d$ߕn[X卐+ ͂_$i:E""Lfhix+Q'"uJΰI<1gM-8~r:q=0B=ƚ7{=U$|Ȁ H2aXSeLE#B}zq~2΋~alWvR/Aŷ_ݝ6i+&ʂ-_4Y~d\$҇a\>6NUBXuzg/o7.Ѿln~I'q<݉Pdr5n<fS2 U2h$,$/PnEǩk"8-u\PtYPl~8׋Z1P4 EXBU&H\z̡ ,@:8 C :Txu2H]uqUfΘwvzva):Lf狿cΥ%1<`E2i]rUXZ(CsYaTXWy?[0DNN 6)I`_Er<dqWT2LzM$\ k`rɄBܨBErF^,Xw% MxM/H"@ZJo 5W@a {Jo9`4cѢ"#n0Y) )=P h# d/js^2Fi^j^B 34 kMiL?}pv {Qr9AȱkUW.5*cV \%lِut!\?4ܮ1"b* 7,ԮgdyZJ]g5N/NΙLι5-@:9 JuLιZ ُ\kF%}gs^kXd;|kF S$wО.,w;e "R.R D ,xv=6m&=g*sDB#,d?6/x,sXhmx  kL4Qԡ2G$4ߘ\pn E8INxQ#Yp x-Y9]s؛LGvIhBձ ԅ3s Y&aYס2%4JX~b9hP(iwJ+)ÜTjV@1z2c8FH``q!%aDWҔ3x2xdEp"A3d #É9-lO*-:J 0TftJ=!ccRK&SA={>rVp$}&Fj`qaLz@hbP@B/O5p2FdW7K)#7[*[P`17~q¯M)_VM?vQC]: rF D2a`<< D+@{.3#Ca`q-!cʙ3 cO3l OL$1>dL9rƔ35cJS1/A7wvK.lw%wO&'N$ 7 N<+2&KnAkxJ Ȟ 2w;ٵO-*[*ާkXd|k_MQGx}*o_`!'̎8 WNtOc kcU8x)6L戄wXbRsqP9jsHTl LRTraK3X㟖,MûY~dl.mE fyvRM.r͚.א7ڻ\θGvN`u= 'r!= ;Tа$^2l)T-kҞ{&sVBÜSFKoFݔ^bN:Q*NbZַ(s^a; ZMT ٳV.3K Z= L5-@:9M2XPhZXHΕ7J+4KLaE5[hor8yodӽeXo[F&s(VՏ5&19Q=BFeJhxX쭇e,\[Ca<ͧBGv*OrN[%k4EkXa@ I0 P\hZXXzJ߭ y}7:軙̾AM/{ůz\k{$ڭV?s2bמrA8sA ,luslR :kXd 0g!~qB~ jcaڇB@6@[e:ІBtUݒ鶟`r6 ,.$ 9Lrk:So hݭP͎Mݫ"d5;YW9r o,A(=/ T uxБb HLG'2EB` 7t詄5灂yZAc)H1L>Ah>%D3Us٤]=V+*{DEfJ[ڽ7+@ =L&d9`{9gxt;ktFboR`oR K_?#SN8UZUKE9r0{g2w(X\XJ8t(:JcBi@ %SB`tcVsRP_{#LfiBdjHE Z^00.4XgfȗP@gt4S f(@F#b}4( YhS0ShSӊpKtX2ek ]ỲQP+;/ +-9ٌ`M;Gv?pUK9OIab_IYRNcB#;wˏv9h &d2ryVAݛGPuB6L ɋw39u^& _v;0?vƣBcIA)&탺Pk*r@4D5ϸP > Tf4@X.~F~Rffa,eV47Νw՝3;wU8gY~=ۙ*/?FG-C#L ̘k`j8T'E)8NL$̂I6{\ $z;N/Wm+CѴD7NaWzA'`؝  f R@P 7*x_4$* }H 5ތBʰ[d5n\뒛gf2h$,$/ֈ()"yydGyazDixDjӈȪT:E!i- I OxѤ>. 
bJmQ,M M ' BN^b!mDB(8L"vRyj Bj>"Mdנyj B]'OC;>^˿C21ǗF.yX1Q[k0+I?g.etH~eAIhɴ92c6y3V$i@eܟ}_ootՆIv"mWe^ua%)xAJ(aCpw:Z0Ufz!}uFI qu=)YIҒi7BJ-"$o9~e+1:NJXI3IJRU7~Z܅ԂS$'tf~BIzѧ yJxV%ɒWrxP94U7+[y',ZaCB M M`TK˼\ٯZ miZ03\t]A2eL\H ~:tg]ר{j>n TB{Ӹֱ8xNBr>z/Ӑ$(,ׂxS{BJ0 ;DZ)bZAa[ %\6\ &D)22EkW=U2 BkQu7#;!/ `µD)~\ו,N=DN'?us`h 僊& e‰ I wNJNNǍ(Nk'Z x}!O*z}2L؅9I=2L~d)k]:qRWrTm|,L20+ :e3\ mˑ2.nT L)(TB)(DqLP&PAṮP`zn,%UD)HZ_nۜڢ=Qʐ~fK~*vQ03}4( ð{bk?646 1G݈!I͵ A?L_B6_aCm 6.tmvrZhԄ ͎k׺Ѯnӄj6עTM-5n]\n}*56UC 1t-m1qXXLg޶~<ޏ3dxanУ/\[kzmL9m߁<{w04Y9&ry*j7DGC@D^k:OEU#hQBw%9vf>?2Cl_i%T]vf̐76ȘZ|D>hmM.n4ί;)>,kMFEuE.!Xx\RMnO^Z+kq EűWvt:e#\MI6V!z`+▩&kL<[,N"k"&%c.73Z!3&å> s[UGoqu{z9Rx9QlV3; dWI5볰|cFt^ *U}޴]ʢȴ"DwXDa/5ɓoi-!DwX| =Ys $ [ֻ {/O`.N(SluA ._JǽBIMVDM " B ^"2uH @Aw[Qo(l+Kҹ͝wڥ[E^xz1m圂Lഗ|Yxe7% _n'_ngfDݝݗqWO;;90.-T |ݲ):@>u>nbi9< {~ӌ>NgӞnnH1@QtV9AAA0% ^ë@vuҳi2Z{)RTbc3)~|IƀEg EԄ?U$6r`ES Hm/)g"Ķ4OHOEA"XyFDtNj<9ٚ^T|*zu S&hP` 8fޙ-`^L;Zop"c@A"lOTg_[goA-3 }[' D'p'p2hSumpH_j>UN(t.e0R~BEݔ2ZDk$'L&p3ڸqpͣnXO@qE:_F2_cɺJdt\,`JµrGZkxܬƉ@jͬrnTkdC}(tѳBsAvq~A6iLc81m<8WN%xAɢ*4DLUm76!=O[SΚ8u1􄯦AL K+DjohM?c(>9ՃȢ14W9 v_N ͓1 ;>,% sS_xUe7|dÖ x;5+WMΨbJys^# 0y*.Np Q!\`/jMw\fO@;kPƥa e[pMpB!\ BψlwwS6 QF,"HYH ~n]MA/V)FA\Bc{ZdTF وLjcNeWP=2j>Z'h^[(IARϋLBW Wof2oB E.K9`xBb(Ȅ:ˋY[%[$8I" G3BJĺAe/Px>3RKCv1IWKjvBY鲜=_Nf7NhHgj}š) UQ yTČ#_"z^Ӥ`{e8{q ^ꉽ3&~8r}QJJ O=/EQS93*g$D!Y<8p]Ir蠌g;0vcAޱQ9䦩§X YeTͯ,z w4/Z [m~O]\ {w3 }ȁy줯ܖ_Gl먛1*߸n7"?Wq0 W]y%& U=l ݃Q @<<lAa/[>`7N)  a}oL{F+ZBEzaȨ<ᶚq$<|yN}9~\Wce7ihҳ2cQ1:Iԅ0ˑQu8zq8e;5 }hĻOO5yZ+67}RdeT_7Ms۔Q Jt^h#0I5q0)0bkkOO ւ~j.[T膽"?>)hHTلaa4s:4ꎾb;ۏ3)R#'xr!'g`^P/#m/}뉖'~!cZ}1ٞwy$ %Q ,"HpfH=RM|]NX.!ʱ=;ֺ驃KO<;B6 #嵄^;'^CJK')!ʱ?F-5S5m0Spu WC co7jSѤ)NuMTLDDNTPQAETNT<)tz=`z)TVFrrc11J,,>͠te'R\ʄDP OAJT̞o(\bbZk'!BT$ͥL^*)% g}PY(ց}p}# T_|{aϡdz&k4MԮ6OCARn5B*qMX` ^N\e$Jjj~|* bT5 ?0>&[eo}N1h=.p,\xdI{iD>w{T*>{=8d?p=Wy"jB9.p˺7r"f}p ̥"D9vB6 P,&kA粓U)DA5R^L*N AƝ\ ^P.!xTd*D1K|RRMrr6_]ޟC~ع?UTh 7Ln_>%mSl)IXuoalؠD~۰ˠT&]Ơ^-~:Ƴ$U5:o\nneϏVnqh>u;4 gHg%\fȍ<[SC@D:둡1`m_ƄysJX2cV _1AJXƀT3wJF^B1.:UAx\*ׄ *jOlIыRvQQ'k"ז|Bӧ_ dR=$]DRlR2gךn)eE޻8uiveozǚh\ofE~x<0ՅCܝᇙky6Y 2) cP2 }u.C6X7;M&>R(N>ZyYmnX7;?ZrPT*71<㩆jzhuKkF4]\ϸ#݈n+B+sMdAh_ úQW7K ICp@Vɝ> Ke-LވR܇W\0*7r'IT .!#ll 71 9jl~P{MB e [3fF38*;aB(ɩ/;oi{Nxu;]S,SΛT=)s=*%'AeXPŞы*}ހ={CKm*{aYme ob!qvV(KArlϧ13-'2j^`0hE8f[Ԥ D9_+] (К௏J̳^lls̮ԻF"I*+_6hEg\ԁ*(Nפi^Lm{Mx}aaDF [¥6U 6ӑ)TXB0a9П[>VMDa"ǛgNb.wevi/rZnyp#Y0DIׇIBhc~vFgW(=7ڭ${WR  X9[_;\KD~QN/9wsM/퐋\S}gkvϱ29a`I]?:*=-oy>!k$_[OXOn[Bn,Mv6|d~#r,pv{sTS]k۶#ٳ?Gg}8,P88߽mnGmq2׏r7R'>)O|Ӟn3q<}N gTPFE"F&0:f R} :.6,*ͩ)>婟٧*GuzUVzL o1#X0zR*o }`T>KqV4ꎾbOF 2.K^X<٫)7d]Q>)`&r;ӳxZ+O &~4(¹~ cuLyZ+O蘙˭<g%/TXU M S3Y:! )cٻ%V+W[*Dž2p4 Q; $Q-S&+g5Ĭ䳸>l*rQēcQ{z:ՋwB1G㓲8-O %J=JeT0%"׵ǍN$m^elQip,c0=%~w뷃UZ<8e; ) bSqM9h d ¢ԗksi K7?O< ^&E' eIZ A3IjrTt*(Q)APNKTTŠR&P2t頱Q46SUL:e(ã)l );:"1)"NЉ޽B*7T 2}sʍ\mu2m\B1*=R-Wi뽦DnJ@x(śgS<-ǠƧ.wm ©gxSlњ<׋N+T0vfk÷'gpֈg YũtpͣDSrP%iӷzdJLiov&f`C]sbhRlzsGjZߔKTb/ϟC}(tMn34xvm)UE^5*TAٴb YA)!Y V UVhT_ 8{\LgY(mO¢5~~ž+EǪSR!Ab\穨]fاcvw_G(:6HfMxd#_ #Ye&Ng<3, 9KXV[醂IxG7,cbؤ:& 7x\EȶZëUb^6`GSMQ.Bt^9k'yU{۱6&l._O}-CZ C&^m> %ԲŃr' t)OSb+B~8ONR%` JI)UkO.D6I;'mp7˙-4+.. {T<)dyY[to 6=|)޽ {AT&PQ Ji.N/Yh ?rA232EFq *R>tm YT…PgM_X y!b)#\v*R>csC/n@U RgM: pNV9Nq\pZ4H^N ^uJeSPc(Qӗ6 ŗvU;N:ނ9N4tDjv%CA@ 1즣䔡\#lw%ْ 64% qͣdmIthw2KDv1~|ee=%00)-گ/];!7fHm/$S-jMvmAm6dCal W.WKM=>WN`"-Y.[{ ."~! 
S*jne鏇M+dž8mڢ({P0Dd?- __,2+ak>] M_9b_ݻ&m ]zܸz1s"ooR)0)>-}&'d0:jd@?0|bT^ &D65ògQ/,:}iED6DdMl*?43 yAzsZq g~4-~l>kʜtɏoV ,y>GO0|أNә8^%dTݱ䧌N#@ ;"Lrt$#&Qؾh25v^NCFCv zGA}2bǀpn!ܣLd(Qӗ}<7a"X7a"lZ"V<ɚm짫agTvc,- W3s "gk/Eࢧ IkIESobNG $94M V0n$ONa&^ԁ8ϓqs㋉NJ%` ; z VNY5af$긽Sl&&><Q&&2VPXl],vF7b{J7`TX$0}0Ii2=&؎&4IIjc^+)8-⥡fS&a7Elٸ/UtrhfК~f9౪B+́{6f{>-HxHE SjNN% ;rzWū/ _^ΠIb[kBZfLa3YUΣ9oyc-%k)uc}ʴB5&LbPd)Uj ߂{g^f#p<|Yzx:i &>ܣWnFxqZBr6kEGz5I۳Q;2`BX(|MF&'fzZ7y2Be'R(@eÄŋTay}Hل5 [~t1*5%RR٫U(T͔j)?;X:5X!ʱ?2bta*#&+S׌& EM)**R؟`ϺޏzQs%ET*{QBDq ߝ/XbU*WPSzjphG JKRo۬*]6 chP !ڸ5ylMqT:DsCQ"%}zKD#Q{8*L<ٮi)LZdH\I+? EM.LrMMGsPT"l Cҷ\SύoYNn??%pV3;AbO` 5 Y-!ӱB/'h@Eu;ؙfDX,Dyd`PQI1tv厅 CP4fb"#((msOvv 8 "??v[?[HR;ԙTT(zp-35D0O1y4A zGWmX)rN9TQ*j%W;1alꙦ}5cTwl`<ݎae8k3ݱW%|{0$l:Z+ E5ˬ< K(tRWw'MR ݊Pt"fY&X~D #Z6nk&91Ä4{B(tSQG׶ `SdX.;Y%)RN C8[?Tv*(Q vj7Ni`jw!NLjeUk6Q+zZVFr쏢g^m8-6n7n ņg^m-6N7N ņq X;-t~=w-;w y/K'S\VS&>m}q@y`Q)3L6; ]I6]A6> - (!D9xʶzyR7Ԥ yh0J(#6) !c? FFrϨta_k6'yAj Q ԇS,}` *EU:~`v^#~B֪䄬U޸ 5XFPsvF}&UkJJe/P6!pBs˯Zs-.3L" =Yd ~ oz6iYA-w0ى;v%C@pw&5(8c'2T}CRw%eq֖<{6ުHͣ ˻d׈lw-g\-_bÝ. {YU$إLϯm5['}L M9rSNtĹlwC (c|ɤEkyY63gOYl낙|vZ`?;Գ%25a^ WƦ?__Bl1 FFMF(!'Յvq@}9u8[C^Ͼ2:efY u@D9|Ob~QLi]MnlnTw[>'_[2GRMɯ~NMvǺQWC+/v(-q;+Qݡさ ˮ8T|x__$`/o]"xܩ~ iMH@/5 [Jv/fΤuS7}%an^M}ЇVAt9tf^JtuLD넟q"9ցmr&TnZ11d!ޙ9acKe=HsUgs*тPUyOQ S2iJPkT}.Y ib9&S4\eWpY1jY'u։eNX'1)M~U?f~3W2%NZ%LZ(Oi}sQp0k[yD7m>qb7۔~aJwڵ\gburX\5Gx4yEgiHV_@o|m"sZnh%'Oא|^Zﶮ٧9q V)9&?CW3=6rs!Do=1ےJ %w6*1ݒo+vxj+ٓ.ǚƲn=U= UyCR]ml7&(?$4xΤ<5[:gI{iXUF٩QQ-0eߑq C_Jxqc\Rϱv&6mhT割F򔹈Gx2J3dU3HO9ETɯ,&(ie2@T琟ݟr8.P'V8Ǧ#U@y"~D߇.CN4 2fT3&]`Z\G,:ir^bo.ݦshRT; 8!G_5~g<`}7\uK0wf^9kJuG_+?R (G˵*.k؏T[]XPt :JIsZ c%=we5놵R?=y=Tyb\YQɼaR6oQB%}$Aq9aFXm슌_i ,BP)k.e7 :4 A$,ٓցm%'s}SR*d _ Oqة1Geա[tȭIJ#J3"]N'AaLVUD"fp;3ؙig%bo[FP&"X[Uڢ/~1 8dmpSk(O.XM]EtBb~v/+9\"O P(H@/5po|[TPETOMg5Zi7ZR7FDU EL}8a{: !6NX.!ʱ;r!©u wC%Y |]FZis]Igk]BϩsM?۫kd> 67Dh8YX[/iM5z9>L_p ?|Tpw̱t4Gvq?3ZAژLoFn4V" z_Tno-LWn?Ju/;1a{cCRTukܾ0渖uiTc£5t@[\J5av~JWW"7sic45%zri2a߯uлV|Wwx0JuauF4 h2۝CZC˴>H |yR@K:tOxP6T -|p*{PN&G}GIwL9.ɂa\sti>il))?R3E#\v*l7AʍQ)C˸ R(獛C9qywq^DP1 NE<{#|L]\-g|4 hB < @(}YW^c5My}eJNNHNJxgTI9ؾ>J @`pFP8}bOLy`b'> 9cQ'ވ7}N:pd'D)W8Z7":AE/M40 /pтzL ~`Aa+ g6^bk \$ظ_]j!IiΚd~qesFHnrP>8F@p>4] P!Afpc)5x3{.T!ZcHp]Sk5]rvxƲw8zvo,׺ FXo[tM|;=Αq d=8J?a;ŏ sk{w1/ݱT~xȶyx8:Gfҍh˒6AYC0WWiVpK~c.gJUMݬcnfKYf] A3*T/?R&?-ѩ>#Sϳޭ# %y*je8$dLJ4hn(O~Ema\50I5V9Flu!LGm5~϶js-q6S9˛9 ga6yh˱)}F3;o?EEjzJCг i%C/o\zmj["zj?7?i2O`ꁹG&7H+z~zd}ȳRGmAZ k&[G#)5Ilu$Jt^߽d5 r3NE4(I `+F% 1 B2KWqP&89ٻrB"$qI! ݄*MH# }d@V0c :MX $&v彐C^;܊R;# JdHU};-#n `M[nݦlSZ۝3/&fLxDf7;S6&}bEy9&Q400W⏫٥Hõ"upS5:-)R|bA"`")=6:&՚4$UɝZ*J rp{)R&'D}{E'0 Eئ_c3:9:IEN&p;7 !e:!iHEN! OdD!lNXl$ګMJ"4)!f2)K6;J՚oTHB&2jo,.UidZᔛL`s%aა5!LkR5ڬ ab0RtKR8H֦䄤 A lF`*jWܼ͡kxׯ=ǨM~%méP==-FQ kY$wKd;¿Pb+08Δp9L@LjBsDgǤ!_ *%?\Ai`$NN!`9;^ xu~YdR꿅;iߦAnE]Ψ˶2/5_M]"e^tteYML#. '89tzcrҜ)HV!G)L*K;*á.ګĕ^xyzs^h&P BCdZE/+p^}؁^s>3E(R70T2@:b9ʒ\k KT>8))r{uZLzύV*zSwp޺N(H1hD[\u{, G 2y_/Ko]7FRuCm#xLQ{+b3ڃPf+F9Š-2@Ⴣ!E1'ު6Szjf*ċ%J,' A8&p΀#(:Dlꐥ# ޜ MJegB'3!ys&tW鯅xs"Y=I ׼9Jpg%,A߇k><:'BoޜMJ|>-k0BpQox"}7 Er:?>y|ģ$WқC!Pj¡PTE*'LKat+ rrPA7mЧ(t+oҩ.r]蒿BC7®:5&\i* ='oW7"xO G'}t&&DN|Aŧ:/gbϟ| r-߰|T/V 9LNZ1h _^G(nd@HmGGLRS LIxY{NJ?!NNH6v89T aYs&-*`h$sL}2nкibkz)SHe /5fV(T8FPѰ/N;F1x^LaGlG_Հ|+̋}시IZ˸*UV4Њ~}P8iFlfE xGjvraүü-;3gGBN㭗wiA<zk=LMA6!.1BT- &9=ǟr&>$\K;ZSŧ6ONa|M0<5aKDr'0bܶ!K=B-7QQ:1ֿd'sv_i3u MQeB]GSQ{5pj󔨝H Q@O]T>?0tR TT/wSٚloOכ4S9C$7+`tbzk 79ͥIl.uN5gsiՆNQI5q8%ay4}4uӝ_eǥ\*n7ɫvX+}H}ޡ=|{n&r;?2k[s(1XdJe`Tw)=ҧ".AݗxD!Z3}T膽"K7B7Ý7N%b. P?! 
XEQR0%aǺb=(JRzjM$ R66hI iE79$|lr*2tI6'vu:% hXשPNSAIYǺ*Y5?CFX'L5i5gFϝ$aQ<؇QC_*,lO',L=×өzзgɽwjFn=(SdLRq8QpE2-w_8QQn^O%\Z2おNH?K HNk4~T aP0Y%3h7N啣.N&1Cd*gyU&R6!=rRqpúսQQً*" nӉg^> ۃ)#6xjji^XQAԇ67aI*{aB(͍+ka>5Bxpi_nZ0P= UP x.9ҚtjE~ B Hܝ`x5 vW*0P 5yԑNZ]dFm@e= \\U՛P*g|o*p({M䡫RJ |Czj i(}(q暈=OJ5fy~i;]XEjgtۥX *ss鶱tG؃_~⛭D?F#%yXa&8V!ÍL/&ru$ gTT穨V>\p!A:OElVw"  q;݆E]cI-ဉT}Va^}l%_s;!SWQD_/ ~wgX![]"̣).vSUMB3pM`23 'TΈNzXǜsںd,6mcWKueD:&t&'|v|p2r9}41v7_r^U}&7gySV?biI5QCsPn ֍ꎾbO<MYee% yGݰW䇓)xb͹7Ka ,}|b>Mlnu8OJ(l5'ly<^LE'_73:; ^F7רP_G!<שPߌӨu{ :#r +j%uK:W .(P r䬉F`)2FPW(L Pً*¥ D9I;#}$tk>'Xr¯<\ ΂iZ0i\.Zs:L|ח&FZ.]F \ @_J}YhBDI[7V>h* R.sB&4QDUZۧRBYn5 R!*\3R+ T `JtL:V#@IET!P%2@bP5ڌf ™)oNE#ofX ~/KG&AiV MT,QJ&:vzvD**B1@0/a/a\_$KvfZ!#!f0Vsk`FU+3Rw2U&9ۏ|9`Sb )RwmZ0Bk)07:z zARF>,5FVl@1bV@1ǐ&N<\ xv_WAMwI+xwc@D,8AlXѷANr9ѱQW z҆烉6~t$OsshօKxYTF9}ft9+|316׺jbբTVu'ԝ$Ww鋺;Fcrljl%ZdsڌU6;]Xҹ eo7ass(]9]1VtWOǽ^m-F5J?|S+fI7?p-La75G )(k Jt^^7v.K:>^Mc-3=ɀ;]Kp@ EpGZ WMْ>0s΄{}",gxPu0"M;7w K%("-\KNNɍH&eIڗn+ŧIižDzgp#83'gfd^Wgfdr-b?Ƶ.Kyse`=nn_D/ꌧ^tK K良MwoL^ysQjjV[Z%/NNT\\ij[\ԓN6eoن9{}^_lrh ܡvjEw0!UZ.K 'ç?Zw{qCv[R39EXT IBk B@< Mօ ޝܺ y%pщ&¸hpAn[՚|hJ < @"v5>_ሌCe!kJRSb˃k8S=|F:zڴ*z[, d* ^(?UsC-d1$-~d5yq B|ϥX&ڙhN4*zј@gVԅhEuC'9p-Q 1﷟l>!:7,-Hwn/E״e7.-h"jNK&co`pL*lB p4R!vL 0(Q(Wɝ>M`AeQ䢓Koi/E&N.% <:FњY 3jVB&,^ԁ3_%Pr.z3& EJOJӜL;dHL`imAL_\`@("W*VW)I|1F7"u׉94;4 s8ra!sn&8t/EgB &H8 B%4hX]^/>wn.B(G }N8`AԇBNg'&r7<9ջYP ѣeV=3%Zݣ3%WWY]{s1ܛ3oΘBQ1BbnhU:5DhBA& t6ݨ짛P߂X;P}Zr}41s_(BAA2ʄ AeoB!\ B~,Qs`K eI(49!Xb g T Ed]Pgjb eA2 յ > յL J>.PpTA2᢫k,T$o!ey|BdE(| ҙִvA[yTcB2EM|(Bf>C2;}Y6I(/s/q9c FfY6j=҇AjG=璇"b >-4LbqRK'{c-NlxpeoL獏}D*YU&\..˗ :#pUTJ œh\1 Gqi'o'fu:VuEX)  h!5^P.^"4@x(E͒[ %EX*f\p)Q ^K w]QX&Q eÄ+'f>lp)Q(\ '6uaܨ1pJ+'j¥|Ae'RXb< b{PCeᲳJ)c(G3JQRPa bG2=[Қ W:c|I(ܚFBX,C_?5 'QU,줚d*X_7"#l5hc\D;ohcmn+dʯL1ٚXA9Ee@ X Hx-3qnsegJ!L Bb7^\ uw:* )jw]Aɀ6zKAr?,wz pw w;H\tw@q{){YkkXw7/תx4Zȵ)nr^__OE]v=.Ʌ<*b%`a }>nAa2ݮ 2@!|_ϴ \; UEc'z4}z.=}`z 0(QE^ˑa^_*z@*FRQ%bNů"aP:X8%TV*fb?&[܁[ Y_ķFSVQHm&uP(.wIÈF !H\s}O7D0`> - .!!j_EomW$7;WꥌȈ mbEمoʔyg8sKJjr  ]l5h813]US8oc5tr; Vv5üVþtDt֛U 3º6+At HI׏a&U>YԻ@B)hX?z^[ .^w6 5G(^.L|=}]I}0bi7;0=#Fu*#%qat1tܑ@{7~6P;M*P0b.˛SCQgdNB&-2'Iu:HtԿzW$ P W];=̻R2 OQE^({ץ(UãMB6v*RRPp] 6RH=S$P+ 5N&!Y-Jįo\ۺh) *M9i@kd۶.c_7lsh َKbw.R Z)e n)>9϶O'>  $d;.A4ǩ.w _;<qx39/b4AޓohlŮ2EخOx%Qg0 KKՀmox+ͽ r@7<@,O_+c$8o!߳ $6$< 6 َ zܞIv9ë@Q@QXD9-±&j`9}|d dzEbZ !b! dV o;U}v0fv-(3`p&lvlrI#bK٬d78Me}1 $,#2gb95 H:SYsZ `NL`N`N* 6uHKDwM`9 ȩJHr*ӳpD#}o(NǖPOHFqNUbЩl?!!Щ 94Щ,ѩ*?7#:UM<\@r/N18xaGCnswo:sh;cӢK:W5(ل1chrh9Cb8B&:YWt}8 F'J 4dI]~$aSߊMgFU`0Q6vu9aWGWr(لk.ƧC-l0}9@G28 PQ95r;gǏ[ (:w KE{$`6 IFZԥ@D De,< * J*<.Fz%zogAK8VcR])D dUw+Sf'A]4AsM"Cӻ92Q@M6BRyn}Cv@i7)v?D =e#f-=SZtFq8ݘZ+VNg$Uzr: ьi\ ?/& p TliAH (aJb@&pIG:>)>NyFs(8(/롍0$!9`h<<a] :h9`AQSC}&!pxTS6oMoSllRW ds@kdZ R3{ٵph َk|^Qf+>By3,󖍜I) Am(F,桍i)$:=p @Ȥ>e[p5ZBBb\]d߁{';ƝJT7v}*2a_0Sp,D1Ƙlm^e0;e(H .IK_:I'B'LMv9i NkdW]G@B&ZWĜdJ9#OD1Gn.Y^vvg-@ $wN=⋒}/4G (>'n/p˲7[6&ݲgX|`& Nns@Hv\r,Js@0 B,ƃ 3#ڙFZuo:/$Iflո.d\SgS$ƺfÍ3 n\ghn\נ7 gnOm$l 'IO79^Mi(EB6fEw$@?77Lozm;_ݢqozǙ78-Д3)N؍SZh"E{Ăr|pIAJr1x4 pzf[躙z*NJr#d+i#rΜY^+7@` @`&ȴښ@cwLsيC"r8伢E9C.z]R!U@8$!:s\5r=91Cl!5];&#θ 2)%5;}Ply%!G D9עc$)H0ߥ2brH6 d ;71L_uoH XIǻ߼ vޏ.Ore1SUIac6>xL͌сfxD$)knE͇r3߾* 2经$9ɿHJ E29=39f߃)0X֍d2ٻvAJLc|&c2f2T0OWl8 1Y 1'ym#=rX ?NJiںLVBɟI6vC,mZzVVL1J =)|~D4zlp >E"}lF]zLD.r&{Xkc4${`}$ 1Qp6.X1cд~9l~ HT cv]D㡮kF#A ?Z}_b/3 %R׋m>wI@Idd17ƌ?e$S:B@BMzʹM9> nF֝gZ1tKR@?os;Ov8kްmzdJcLٲn@Y]jE659>BKb_x/+ӈ(^VA E ɭv2Ie7wO2og$zAķLZ\ڧ[I?skwjr5o g+դVfԀZ/y\ |:+a[=}'(YR i }eSzYKrZjIr-@۴c;r|WЋO鎌x8 f7'KfIRopEV+ӭ`(ntY~1YDWY*} BDp7c!]4<3ـsO~;F=%:l#{c@I1(o-Mdd-(ɳF0@EH6xMǬ!cVuW J$ a\)w4@F3h xNq]g \+Q! 
7jͤ֙z(s1('H flvE $;@$8B~ | do yó~{2WvPPǯuAc7ԙ-Šf>@idZ3H֝}r*49JxJY\9秒t3,Ct4'4(!U!F[iWDo29xj*=hY0YOw$y]'ꎳpD -i֗V]ƨzd6OT쒻m}p;+aBvכG~UVRhR+%d>[= 6DHג[mcv9 qGҕVEֿEuiZΆCd1H2݊~IΨf1(6'J\7|d ߚwh&O0KdiLp d~L}Mx iQ"4OnH fζٕeTx̓$; B@6RDzyEI[TPH}Ӡ't' YJʩصPBbTC p@KHvv &A{KQr7# a+{? kiPֆds@KHv<^@!jly{Ce%ޝ'ncnޑ mtA^3-~wǞ!.{Taԇ֣J,f;]ge%c %&Y :tnkȮX*``- Kw >`UpaA%4qYqYOwd y}n=H k0dpzxg 7QbZp<&/$(IITyry=ߔZ_mr+lXe̅Msk V,~%&O5ADW-wj` NɺrJʶ7Qp<=(oH>I.ry!u+"(:[_Y&*^̈@&2pB5`T q&`ہbnbZ(5$$+~#2BbtV?&z1Tw@kd,M7e]Y3 V0-]BBpW[( JgXcwШʨZL_j<'K46C z{s݆7O;x%5fǸj^i29`mĶz$4ޢ alvRqˢ p2T*۴oJYr -Jz)kYM-j֎nA޳5} XIj`[$@(geI֕VUŹ}z=fd;Ғe"]n+\үž#Tݣ`8κaemyXZ\|63&hSi{gS?R/z;=6bYlmAT|g꠻xA+v X'@c4pN iڬÕ u !Y2#Sȸs R"ɗ(j?踙K&Lu3u;7ɀeM&h29Z c%=mԖ=ZBBj'^T3&x8eE/4ov*蚄lǏi}{=aybxx]NbJ4O;$mm%JNXސx|d@yHAB2,t kt t,`(tt$\GAoضgZMZ% `1b b6ئ.)ZZBŎ'CxWr=N;)׊&2^:_]  ف;.KY9 \<*~!X{Zpokogr ] JlQQg*&~ eng"Vؼ4J$=ݳ '<l\Z\͏O yTcb٥`@Pi5 /h$I3sj4J %3j71MB&8sC1ˊ F\V`Xmq;P XInYC ݑu底~mgFߨ^"^GOoHKP"w./;D]ne_%?;1KjUXuVIKEIVŜ~ǔ̪; ,E&mJSsev<%38nLޮaO>۞31}cVh9sfsuK,8湦q&bǓäesc0Q7]\.sUK =KL ObXgK)C(e-()"$0Mux_*۹j[#-$;pg d`dDs"kn8H `lxrOӽzu-qSVU.gه]z{r)foz^#d^W'~|U`cm?] X|^O$6d+au&#ҏ\^n$%3}n/185ap5G7xgOMnHvF4+5'We,=߸+JE+hw\7W5TM p1ʘK%5mBΒVjc`(8݆x {SВJIry!( BߜFǼ N$\ˍ ӭE/ҔK4Z'?%yt%Mk-"9?#ᚫfp:B@45˞|!FDImu:6[ 1"`g@Cteb>A9T#Tgx|Dt< %E,꺵ꁅ$=ӂa'!~8wBhmCjXA{a0-ôt ~݁EFe&$c{hZ(40@/HA[+wJ6&côt َ'0xWeN0Y "L%ޝ'A HߚyCkǷp9 |cϐ}IqY(F\ "!w5u J*Awfj/$[K u\ =;pgy՚ w4HߌI}#goh*QpIJǗ\2zy'[H͠7|Ro}VOJ &3|Kj=~r+'/׏\Z\]mMJ#fw>]&rAp+%Z~䷢L¯q[71JIoSSd> QW[WO!}2"O ӰCqU0ng|&MYT86p Y8|f? {UOSz*ēNN/ImIAn<@ۼY (Jp@EH ^( ޤx Т^ c}5 W@i:{ =`lCJ2t184 tĆ1t5Wp& euuSE>m*ЫQ+k=ΖŌeғ| :{s8y mf]dd 3,ED zWSIz -WFE Fm- vwL~va"bDZ/l6 :{8EϡЗ\:{ÄK'5q6E.Ā7v4`-a\!04,55fnMpC6{p 6,`=r4`O= Oq.kIznxD#t+"7[@[z/V֭ cIW[ns'+~u}{==/Dm^ LrI?& k!ߊEOƏb:S1֘&'J@DJTJ2D6 Y'r6aU1zI,5}wGR[-eCgo>oƬa%c蜘|*@ZPRM v6AѱߤYwvJvE̵Kds.AvZ͐fhf4OM 1hm~~>e./J>~u;Ob $9>A^sgbz\rVYkt.(_K<[YWKR Bp~JYw`:UxKDpZPiVR'V%v.ӧĥeUbn^9k1 mcb ~P~'_pisiߕJ(9-m4Uˎ SXQ+IZz RT`w볝mQxZDY.J]%YW[Wـ6z8 ΅qS .j0p=FIWWOi~SKM'A4>dQO/KKbJSc׌';r`RlŏϿ5˞|""&u&>Փ`ɠ< Z cyZHhc.a4Kv~m{g ȴwPRV˽O*~*^eheV-q1C'n0LiFONHJzLyr[qta"Tgy֗TUlbf4<77hg'A Ȝl"CoTǯPq*/~W,:HK_)糛@+C%z ˟ uu+* oSC\LxAZPErF ׭ك0p!lLv Rvc $y>fdKkV/%Pi6W]}Ub6mVziMn;1IL() cΥ y h َ/gӄ`җ7`|0L Awg<;EauN N3tV;2d@,C:!S"5EJs px*^?o#slvR˙)RV{9h}e9kf{ |1s۞z8wPz)}g,gʢa%.>[+95V~:Yr湃ΈkABIp"]xRΖ$K9[v$$yЁ%A'%:Ѧkh"8;c"fjCx*Z)7lVJ>i4'% :V< 獦u`jkS?zrcp(ԕP\q"45Dzy $d;R7hDBlKU)P?R,B5gb3ZAJ!Bh-lsP $>*LԚHt?Yǯ$sV=eSyɗ2 HWO˙0xcW؏MMeZb,#,e$? vbIz7u"oP+pŒ Џ9>AL%FF9}#goIޠ1U?Ș8#qł1{Ń7="'Y&j=vrȵcC.#WIޠ™-^RT6NEɘ/OI\DA,< (nn_e_2}cz=4V=vQTϋ[ٯ4y*&T4A9ȳxJT >2 g4E䡓(,F9U !Y1Þ ꚉru9Al+RΠIA9L @AM3hҔhdOᶮv%(+CUoyQ }pUS+!imb@I?'¼KD4:kM)M`:lU@~R.I6I 1NkdgrCI:O=oOk%F(yzb֝gmbJ7'VI<|kW~eǨokx.K&kp"QFQ}[pSu[rp(86:IqJHt]ȮXX!u@ꂫ/Oj8c~8;E|#gm O=zܺ1tضk0{Qo{w>].@`woQgo!'+!v{ZYI ܷ01D E3!ϧ&TИSM2MgV& AޔYGI z$s?50fcms(n7R >z._Il+1~[;~ !m1P~28w4)"#ozC+ 1لySf=Y4MmB s.< Abb \Ab5˟yJI@ |e_^(-TEѐb_Y7=u1e/.oJ{uLO7E[ݴыmM</AT3IiW7YIOV7J=mu%iunz l5R^# ,ہD(W(?KsOpb?mߘxr\Z,V5f>~ǗĦ1A8ufYĮQx[>)m~F ҊʓݒĜWKt/s/C>9 |P^ F4zpr%X+aUΕFEV70\3(6>vݯ's+c~}Л< .o6BVV`Qr &͛2XSOUJ$sL_Xl3zW77 2لW@\oE0 o]oz#x>iP\4G*r3$HƛV A\MB#\=۞]p<(} \@صqڐqm1dF c]OwGوW ] p8bu1Xch@n+ ړ[<]\a˫;+`d~=zbJF82O! 
u9$ uaZrx Gim.L S㌃_ǚާ;~9,z.L7,i'cG1^&ŏ KhҊ.\ }%\c2%cڰ^)~CWVC0L@g`g+Iz5)*$A xZ$ƇLCԅVU[ Zmߎd/Z:PPҏ[YW[Wٯ̔C\R23mK@%pÇ>Ptdb0P֑*;b&w{o϶'剦QD 2=9`al&R y v6-躌׌e!Nʌ}ia9ׂj6&6OH Ǣhцѣki.v{OEKlİGyyc\Etg3*}(_b?`c]Tm,.S#u9<ʃgoE3w7C \ZY?{w(:dokTc#AqpQzx~]; ,]ONK%3O I>27Wr}ͳ1;|_‹&Cψ\ |exl;wa.],ɺrJʶ{=ߨ^@nҒ+m%~^yO֕VU>19S 1;4zf4i>7y:fѧUŎSz{=LO&[o1уJ΁I46@,i46Õ4 يW` "8*g]a!77°P1,C&pf7!LцҌh َl{5/a685k[c7o`n6S $&M7G8$d_7&^Ga;%<(㹡W{wz?pd/`A^%s̞~W1=vc[W>Ar:^LkV"SGTr`BDy6^~YO$!­$ _~0%  a L6۬鎬g`p3ė,E;/XYrm&̦E 1c[Â9sq$++a5En鷤 8 Ȩ}1W$ hpCIRt_W(Eg@p,[~$?e7xK>T2ʾJ}SZ=VݫbC;-˺ẕ~*Ԙ(iaKMvbCS) ;X=^@l^ 6p3޴mO>4d| XYvv N'; V\I馬WzF3 8$d^7gB19L>N /U BBjcn1Ek3n1na naYndSm/Pdz(pO* Pi \ e oF?9֛V˪mq%qkↂ_Y1[EƎ~f_~NB[u^ 협w+=e:RZg8@V2ضzn. %=[gf%$.}y!Z0H_ /@mvN@ihAε?{O~BYхeϰ,b7Vn73;$V $,[v#ЬX嵜&z&po;oQgXײ;wmY6 FUt,4-a8 c  IW[WoG'o{V^K,7rH2݊~[I12M2V[14Rii&L_fZ[im732ꗙ5@.HW6MγvMmR ?G@kܔϸom)'H}㼙l8Aw+zX=f2e-̠YWZRZz<6hFKyCsÞ$? 0@kV CZ)l^ YK7oU)~VIΞ,A$7YÓdO<٪S2'=d|#=vǮ88Mig![0ބd3@GVdzo9-zia7s,YSXةX['R,HE0x1ي/EWGpBpGcp4NkdSh%C|d#4g&x!w!% Z 4M3W8Mi:H)K2MI6 >l4ĸ~t=$/*OH, qU#+_uلl$+C^2P.IvF}@%)!H?Lvz ll \K%\ؤd?Ƴ&Va o@3Ϧ$ ӄ^%+Ы$EF*h}Y4'WI>WrjBrل 0QzM„zձLP4k$xV4f,oӫp w? pJrϝJ S)ɝ ]롷O.e$& ybVTWǃc]yb`bZߡĚy- s@X2ɷ! vɕuHUg*eU~@˵XԼqDƬa%H9$( 4oP&?jPGSم&&4v IN$9>$, v6!Ue^3L%eB(n-UaWp!AvIl[°sD240c\B∲'GC6*zY Cu H>o!G^E|#glS_I`||3{Ьnl1P+.rT(.c\h!EmxBijNu^sw#SPe<9ZN{оCF-<+ɑ|=Ǐlū>ZkJd' "CO4#`֐Z`lx!:jŮZ;-WP$#E9$P̍QMc-wPz!1a.@̕fsp $$+eL@+e-ds.gw\ByF8'3,],T6 P"ef XlDLRFlo]a60]hmyX=8ɀ2 8.kɸZnٽdLC3RU{yX9Fup4 h-w [F <=niTh.{QrhN*760!@[> [ 4瀎{ޛf!K7ss=NiqސɈݎ䔋nC4 FivJp hke ͣdsEd+.H/MxlwH5 6m15û7 hIGxnz"n8ym99<|j={-%>EǼއB#~FDNIc$=9R4,0)8Tnjz?r0LM_NHbAĀl9A&#Euξ]^G UɞcL:wc[#qG+#M8iA|G.THDzϪ4,M=N6͹}۳4zby#A|^7 8ʨl?# j'q }2 J%4tSqxЭɾEz)mmS4aj0a_vAh!LǪj#lGu|Zk8VA% â]hm#dd#Aq-!!qq1,Nk?]+'Ni{ UwSwRw]4~A ӓieճ^vǯE@z BP&ѥW=d%=Xɛ.)8%$d;. 9 *m{gT;R8?} "K-lU2%?Q#e}=R6ZF4tm0⊄ |A:9B%|$Re&M1J89rs %' W&xEڴ1 iK0MpӞ!dBdJ&#CA@"6ffߎsW ;z< ӜwNkaژJ904+.YlINZl*\ _M`Eɾh Ѥ4ıS}1:fb1ށd _L@"v'3[;F-Fi`6AhOAh utlťm]eiӷnvL f!hD73[&[>d&0@GV\ڗH1I*8 _KCgZ!2s|oו9yi!QV5! p9jCuIšt./Cic HmB#ʌK%̦SϘt^`6d]s9ЉI&fAYΠj{}S[5ƸM&Ql.^ɰ*$.N8Vz%<'48Q @(, r;/9~ pl.8!]G@N!9ƀRs$sF20lr(لK 䒸@K} PSZ6@}JK510Uz_qFVNSc5#N2x8`nۺI]4mo4mzApqL@6җ$3^䀢'<w% Z)T*E6^}cB3لkQtWejGyQFN"Hc``Aj4X=/LH,! prazͧ>Ww2ϴ3/>PT< [AX$ND#~6:(U| Z4Ô p(XO+">0V-$z]c\x4WIh,_ȮeX %ﭩwy i!Ɲc:*98h9Lsz6AIBJщXh4P`)Tkr H|M)8(0m8K_RHv\|R"lKmy&d;jT')+<Pc!!pk뫆XIKb4$q]88 Bu6X"Í3 َk~p%vz\Xg/{ש%p總{3a- &f$+p%Wb bՑ9J ADM (s.V\A '@h(s4@Q0QP P= U.Dɖ(4n 0 682sJTv+!SM9i N&ȴc_Q^d8\IzdӃg>G#y#x l .9#%-\R9%Ps̥AqFidLi0o%}zFY&im!tʜ&&:{K?:oQG! MO!am,v3-l:t,YSA;e"68cEMwl#ݑv6$6ݦ6{b UQ $A@$68H`l֘MEv9IS?0&c./ha-8rS')D-`QR> \r &+HL0~q,v9uc6l>x"K$Zg&+yX;ǴR glp "r$;ژAXQuHh Ȅe2}0Nm9osWP{Y~^7XypHpuQ~~ gjBv1DVs*Aăkަ?zHYAWY̛QK1șj)~*v~? Ao%usQRJK h( لG~&Ly&l)S)ͭX{(yЦ'V&*YNuivn3A!?DUӐ<% ewWdKO|~*,`wnSf Dn>ϔ6=?%.-c.wO˱n-zpqAfv ғ(c.\zO%縤(ėeuc$"b;MOVS |L5nn_ezO̫*>~66Xyp:ష@}%c?߇.$cߡ&H _;0;Fθ1 }X0ȷ~3_wrgȑm\Y}/1GZ>XeJd3?:뗄_OK- LGn|3"2 lW#ɲsrz撶zww-Y%u̘8 v݇^ I7/>cBy?|;YoGLmzOx3T3 ܖpg'2UZ\rw63냱!J>0__SNRhM3Dsn9ފo*SR!K60̥4 9 2.APuCEpNjCiZBB;y}F {&bY'F2:H~jo%C Y^s`2M?l茦]hm^ϲmS唀ox7;4ل{yO=#y~l1̾cI0gf`\g(I^ǵ9ȜןDIRCyHmK)GXh3o44瀒& [qަO\{$דp<&OtC Y^Rڭug|.@ӻq{,ybp(5ލ nwMG? lnIWSI|{M$tC Y^=x.2YAIz5m"GbvtoAlY2fWҗc[+md-^Β4~b赍ݘc;>W9SںؚyԱ\Z[2u(mxս̓l NOeՍ_i;{dM$O2oɘj4¶"_*6~e%o{߿q?16a|LxRH~LVՇE9e,qwס/K;>w߯rO}/Q>2~5_=IOwuYD-͍r~Gmz[`z?hnF׾}Qnr>S*-Mt?7XpZ'ïYզHJf4 -~Rcv`JgMyJeYSLv&g@ ݖ}]VJrË1T$u +=|Ig/ Mendstream endobj 157 0 obj << /Filter /FlateDecode /Length 11389 >> stream x}KeGr޾qw..;Ӏ72k! 
5!9ϭfs) 8]GDddOnO}8[ͷJ[9r?tS\@OPH:Ҽte{4J~KYrނ>}|j,(?"} #/S (?>}ա#Q#y_-'PJ{>)a\!u{0B AB z@JvT1/]V{L(c_rXB#B)ABQJA}HݯX\BrkzP+tH {Zڜd(xCG/)J_.fU \2eL7wsUIǽ 4JeP>T3?o;#hb3aQom%SPFX2Jo^'h{~@M戔:M`i.s(s)m28 9Z S;^ ֑I )P:Vv $t-|LPgr[#^)8ia5UP8J;}>-2b 3%/,  4E`@Qܜ|9c^1TBZGe%NenxaFdNB84d4_%pshUT{#h$8K0 :DЂ;s4[4 Qr8hJ6qs抗i̡PR9XqU@)  ;53q4RIt/0h*'L&Y:UB)Q0tZBhn^udzpge$ \c pl8b˽@Ci_:=<œQ2qGW('wH8yʉ[ؼȠ1t̘&۠F.X|;0F(о!wEz(|,EIOl;$7%Vz}\(~@C eCLBF@ eFy<%S7t2%1Molގu.7r2zK wT/fݿe*C}i$kn2:D~,`#zUD>R UTb,˚v"Ks.aCVyvZKpjGp-Qa3̝M;KےEFwyB8"!Ԅ(CScD:+Ϛv,;Ms.S*M3/&;@ V#$0@T>[mɵ!,;Ms.kM49P]JwR9RzC 7#5勤ܤi2s4R: Y9TZ(.;Bkv)7(c(SfR:LeV' zҕ /6DHAtlPEv (QHTJٷ>EhV (sp_}Cf2ꪇ8-@S1]a)NSi ;tr:brS> v5.bPr9)C%B]Nrӝ9-챜Lrz9ǂ6nY͂͂n(3*ztEE l` wیB גall`DP?nVJvD&5#ri=2-κ+hq_ةvW90 B J1~hWJUЦQwPo=aPfmGoUŝ23(7y Xn+bVV;.VX["[a-Yasȗ@U52{w&ՍJ],MVi.<M;Ǩc/\Ņ2zl=?=l.k=蕫*<`̢Y&G (-O:Xp |Op( - Mk xy2q)"Ξ^+2؛r= z0lŀWӡvH~Mrz3aʧVN2y8x QrlVl(?vXOpTG %3<&?% K2ΝӀ{.b@Z[,C;%ȮVcX=4m47k fc,lNIoܧ?S )!w{Dk~l9\MsC)0c`4Z(Bkv\~U5طZmvi4СRH:4i.#4v7{> NBkvvOZ(Ygʻ[6M˱{xhCPPaa` uSAw{WPC̘Qb)(&GZlw;ɗv`g_3:c;ei (]l#J&'b/}6划%!txw>H+ulm)Q xDx]ACuIvmuTS(Gb$?c쥉[B_KE"N7/Ivf6Y~Mrfvk f+Y=2؛*Zp(ڈ4m(k 0 TK 9Йg&zV3j-R(pGCZFCl[q֞t soeZ(Bkvl0MPHUտ@$m [ލ4UX4x$Ghj貴X0EVYYք)+)ڞYqZZREZޒpEYqՋaMVBԋϝN{ EfYw!JIQUol 9\&mwདྷ}GE7W*r(%UJ%>ޫ9; g+>N$Cm9k'Nh cԹQ7{$ū$7}7̧Vvv!Siy~[BfI@-Oo~ Zo}|⋠f DcjݾO?}ݿ=;huxͯoFu@=4}/? W~\sȻ͒|]C5]֐_~akɻ!Ggf:{k.muMuM:G:ёԩ }l_sZ<~q/ 5:~QsY{o@t?D}PaQF5w;;Cf–>,b~ gTifbf"Z;3 6&V ??LCguDgw(! Je26m;+[w |gm֯*XJ.46 o K|/$qa:nJ[ '7(Z|i@tr߾|^x 9>Puy<>P~\$ֲs#?16>. QS)NH0R;.^أ/l w0qF/}%*g-k7Jaӟ _F(lK/쨹دQLP08ȎbʎDb))|z eG嶭BQf7Sv0GMs 3[pl% eG.7 Ŕ}gE3Զc i1/5C EeCM &:^"ALQ|$j D*Nbʎ"8); $O#5 c(PtyCeG- QLPxnBQI Ŕ_c_E q"6:,dXKQB^G"xUQBP=6^Bm(l(0RD e+1@m`Jb)et+[ eC%u=3P}!z"}ŇI}&k4ʾb~ߗh" LW&:B!T#Qr#}9D(w<`m+~Gl"/l(T@K([ވ~Ee-ypX+f1d8x.-,nO#ؐgޕW r*ދ'@(=W@׉X7i( I(lڹR6m+R6ju_S xk>:+0/Xڶ1e>'-b jR["*Pj(bJ-^SP(HZ¹xkɂ)2$Zhv"F0^SRIȽ"-4^+El/@m58%SO@KRTnp h6N9x)rnvCNr-u-l4 cCzƁtK 4E/V[tK`oETXr dfaRPP;RCKsnH%3ˏt3ņD#E{@+)`:fIڍ`s4F Q2xv#!Ԏhh_)ex)0*v##$-e0 `l!i7R-NxH*MfZJF^?,*ee*R45T(etzbQJ}PQv !l{¥=vk1S$J0S(wX lM4ڭRFz@f@Lسlp)2eCyhf "!bH`p*r^)P=|`p*j 7T ~LIA3{\)ކr+ޢ2W CW C++ԕQJa(s0TppR8])rW C++ԕQJ(upR8J])2W CW G+ܕQJ(wpR8\)rW GSpJ(upRJ]) W ǘ+cԕAJ(upR8J\)dQW +ԕQJ(upR8J])%2ÂԕQJa(s0TppR8])rW G+ܕ"idW5N3!NrU l( p@DT%6Rʵ#mHm#Ha=) e +# H8RroB#H8Rri#%HF:`#:(er=;4c%y)eC1O>Jw kv^m mlmFIcp˚Z}- 崭(yIJZ|jSK5VJ,⃇*?Ƞ2m$bĢ׮q$ o/MWDҡMsB̈́J:4ZK尷㕨ܵR`zJKNǣP&R Ȉ$װ`k[Jo0g$M * bs  ;th!)A\Fh,^~qPCc T{;4L7YU-ŠeS.{Tқ3Q6_n|֍ZLybLyJT]3'lWK _m[#4)vGVf.l;kh{܀И.O)&DF'loG_~rs V=R*䔏f Fzn㔏OQH+ˊ$?N]upG()ö i"?_ #mx8ܸ#qÀ_-6_x 9E`4m6_A@hS]{DgxCS^{(?FHypz"-zP뉄ˈ:]O$^pz"C ./tS FԗK*ZmϚܶo8mzNܦm;NA?ΣG V{;ߌW$|=#}-Jo "T<8ژv`qB:Ε+lE;k6P|9!CQ~&vmC9ۆR?S c~oJO6!Cm(C>xщ os+j MPHcW|C|C|?vvvvv_FM{10c*~K+;c8Yh6xd$%╧>2Z笾㭏rַxn/O~~' /{Ùi˲4~{ßдwwn|4ߜA!G+/ARCO}[kql]>+%@ٺ3Z Nߥ+5&u^4? 
QR E4V蕏Ni:^Hhא~W>2+| s2w??y:gիî6.%#20^O>+2ij؀o|dtYV7>rA:7>`!|uG[^zG>9);_h >R˴GQfSǎ#,h7>t|=O|5Ud|y|#>QC?0;2F`/v>JɰBleO|(My|_ȇ+LDQAAW]endstream endobj 158 0 obj << /Filter /FlateDecode /Length 26864 >> stream xͽˮ-Ir8ϯ3*ß!'(AZ@ ԀJR̢U~{F\-0s_/G_v|m'um|_V_?/C޾3???TDq/GRJ>ec@N*H-V [|Zq6>Ώydiu:P_]Z?52gb>=Ly~lgbg%L!G?,cvfAs~M3'nmʋR h[bGs4Eղ[zP,Gmrc}}A}x?A+pYf ]xƞ]J毕n <~ʪs~l{.㜹N3Uqh_OO|W4͕Y+R4h}~z߰R˲\G( }vm\]EiћGS\$ޔ-\ھWP /Us꾼GUy RE~|-y(ICw_67Zo2yq }4x>G 4Sڭ@>(CeՏ_?΍?l$guݩ-pV,YX]'V ʻf~̋h4gT#7`6z{ '[< *''+dDI?4Ԑdi,\ҾI;τ zix0s(dkix\4/)Gv,$ayELN:\lo2H TUݫh?8Ҙ{FIiY4qȀ:'C %~ Mc9Lw!jۦY7ג:ž d7}RRi"Ok߹1Inny5!2ەɴʋemq -jkuܚ|ɓHzIȲ[1n4J=ԡ~s߽keweHNP)9u|nzc^=VE6\1o#nySNc]"cQ@=Rh*WFl4%7BKZs?-`rJ]-)c-u'3<ێl5[kåg~[GHU*Y  "@UXoӖsqQgb7!:yD$/UP KPWm N0Qe@m F7cQgUVg9ã=rU RBvK#:y\yq#,^gtLiRs;+O@%,@2":7,?,(CՏ1ݥ擳jQ :<ZBr#{ Wk\rrr#,^LK9caLO6@'Bajx3rSZm>jk-9{p*rv 쨒մ~CEZȱjNQETuJ+# NG}UQ@ʰpa{YVNR7d~M0n8׸lWK%D,< ؒpA847d];!N <\"LfâԈ(phnɓ#^@",=CsL*0D V)!h^DFn-i2}j0phnsjuF_@sLN3*Rt!"'=,dx'*LxL>D7z{Kh4L-NMcKxfwYVhNv@AQ& B49PdɁ&&LȋYI}L7 O]&>  h2 @AQ& Qɶދ&1 c^\s4yp3sDI·TRqP4̹CɃ,Nb!<aWѧ,| v^t{YSO/C?-CnQ^Ž0Oqs~M(NW;&;$GtY# ZFWV=rWZm*֐|VM2Ŋz}w]JȮR1krQarvuՋqdjX6"J $@-leDputbY.5E#V\  qA)[Y1I٣*&?91S4;ɸz |W9T ۡ[h\<*N#ү#:x /̌#TVncǣA t^q]UG+n>z$gXCzرt3HAUIєvGrU!A0`AUiT9(@}:q>yr :ZSG'n@E+!<ǰ0^ީw"9yTV'^mטk8=:x  1߇@h-eDD uOP9"Z?nǃh5`04Y0W`e-#9KkBd+rrYx㷅q/49+XCs}0 fmjMŷ3Y= K]5z!^(7W~t!$Cy;%#+ M옾\Pbcj|mx䠯knB6bA.T$C[ :v.({Y1CTuи?q|b}nԃUfXŨʻ['btwKyDpm%x yFZ@9/tgEyc[껫D WN7#~=@-A`Qyܙ ,*{B5gY+', i V?tV])G8$^^0W\24W 6"fOwzeĢF'fcCChTaY#񞻊mUuB/*d-faa|7P@-@-aauoE%;i{x[9ZHR9s-@2l:Ʋ*/Mmkt:Yb"NZ9g/,fp,847W~SucAAΓ`?Hty:R X6j,Dq:UhR ͐Oy(Z7n8@Bc:w;uҺ1WSMK*1ژQwyw0q,y':5427zJPݶONႝAot7 Z>=>GY`k dzCZ苚U_p!u:Jjӎͻ)D-o\rIГھStC L* d7Ɇ=Ae.D0yrL{9 Vd<}L`2Ld"bIhtȦ#-# ZFWg_€V"Tj PI3zqGf&5R3Errp kUmoWe_ H2xLhqkxAXgӕtBk tR  _w#꿣vmy8{ 0%CN~:¼p <,"P9V?^Ff;"y7##BMnget4 ^ PKXX )O$;۔?+brr[e$gB/;YG2 _bڢ2# 9bHjX /a` ^%VGzmLy!Р[yamOGnr74Ɏ*ٱ(C#|bV?^ƔAkpXs2<efuQ݂ZFYAz86P9¬@-ƒ1e oA1eY ,@2'a2099,S0847Qɣ1C硱0͋2F{yC)2MJvKӡyW9c,+OB76w͸杰}.i-^C_ƕèQNDq8&! (h2 q@AQW)rSR p_QQXXE\9\&ĕMF!(h}C\94ry_LR9!c;"Q?ʒp/87*SwyY+/!<,GX魰ব[Bᓂg) 9@aau6<?Gӊ[)Q |s -t* /ɲQFM@h4,@2":/acm#꿣vm^.rXTٷ&F*&';(ˢTzCs3Nq͋16] Mxg;U؁杘]06b̢: n/b\dW䌕QιĹ8sdS,$U B Sdub1&/!s2 oYHdJ ߼㔌uS,(|u)1W"bq\bF=Y@ 2 ?.HC ggz/)oDR^DzUP;^EJ;EQf$կ&{?J,dM\[W)7]K_Sm߄^{-j S:TyO? 
K_z yo/z]L +nPgۿ铓+x|'.Y+tkXBe #ˈZZb]ΞBTeIu\w^2` g#U9; ;gV?.#97TCC&M;EMO~ BW +UZ_MaaQ|jj O-2L}Q0W܂VFrH2LΚu ɀ{2rvǢĆ^@Wm0HNkdW@#a@ Dd8`v >\t?rOGnćڇZt&j.̈I>w*+"S]*K)+ Akd)/m>^$/Be0W,^<>}eh'h)pI#YHrPߙP9 ,3*UL=uyڞ YixZR *?ꆫ.\ +%[ijþY{,A2 H54u+W!c7K4f1'+ AhƕYf[qMm !iH"r <8Fmնrfۭ}ܬ>K7(,^C_£QND% I,TP&gŊ 䄲hqN'O),f\M~?GpGwTZ潵78ē]3Pw Q/N(3ʾBL} M88=W9aG%ʸvE-_cCV,9rY+3bȲ>vEe}{\ݔW$]Q\M'f'",>JTYa:7%,,r^EBr^",{ꙃ7UϏ⪻*%U'suI1U]~5D5U~KL.\\QQ * c9$\GSn "*gW+Y[i~}?c_F]aN^}R" 90iX|x f8*t#꿣vmX6i057 ]%Zty kA QTUG2` x4~IPFyYWQc>#kY^4_@Bb_t8hqRɺ@޾R?_/O8W2w?go>_JA6_zWÝG;൙xҀp(?owz<$j$ ^C~OԙG?>>!Ϸf~oR/OH]O~TxS;_k3WhTw+}A<{ӗfډxⵙxxIhv Ͻ K/=KwSǹگ$r CvI:1 \]RzH~rZ`XxNpDc-S`ȫ4%mhF:9[YLY}9 /=^@//GJG=^J{>wy7o>S4==G::cZ(8*M[1;DCz%qJ8<(rJr.҇3S9(Qm<:;5|6/`O 96=(TǹW>{z TdI"oʻ&;ɞ>$qTR3)vuHHf&ܞ뉖;ga/']<5lj̇//4ul?;7M4J䎄aC`?us|?{nu+_U֏O߳^t_g>|>;y!G>?<6^\ͭ]O=#ޭYoh^Kph56ǂR͂]Vx&iVԴhD!t4+vg7lLT8ihMrLvI'gjVqTs_YP5rMj3eGfEMypL&>PYQ|XPy@zjBfEq_Uڵ_N(Kg(0G_jVi1Uo[ <;y&P҆%T:@(Ѭ(?QYQN5iPc}nFМ܅Ng'4E#Q90PdM{(hn Qɶ΀=1rOfGfE(xy;L?YڌfD:CjM;fNͩ}Z(JBaDɨ-4u@y)vQ.5}t_tJ-k9>IvGxn*V٩IƦqI^4϶&/Wf7{ 켰7v=KoȔt8e_ġ\bLI2*܈ïU} j٧*oިV_x̫,[yUsW=e+6p8ݻWrɋIU'C;+s";- /Ĝ-@r54] a f2_ZZ5{,GjN-bO~rC[.@#PAXX]ft=74ԧ2vQVz]HœB:eLp25oԠ9K٫ͭ>w[bM4^mwRޟwٟx ?T׈eI}12b籉]q,fDBP.*ϭwt Elo䩰Da G$ͳ{ieC8-Cj5A("⏅ ¾U0s󙯵| dC,tEN:Y6gEBt"]χvC|0P!\N'6Ǟ,EMХ2^bh΢a\ $C[eiQ&|DzxYX!}D fAd7䚴åXa2KV NV2+WySPyhrc#hjBNHwE}ofAq\joś((VLvܚ55͊kT2abpg7jg[FΏAJ)@fE}0j[YQmh+(լ(G<ݑPQfjVgxԴg%'a@` wRCHu`+qζfۡy@8QyQӖiPÝfEՂ́RՂuڈ4P'FLCލsd,Pe ٸ[B!PȽʳq7899f+YP .i^a5/ڒW͊ܡYf<6 96Gm573.|zE1B hTʇyPB3Pa9*;2 ],Qʑ j 852b86RETΙwԳBOJnzmr%,~NmfK\Ë2{$"0z䔨rd-#U4xj ȑr-y#MT*7,~/]W"yMNQRЍӎvYzͶs԰ێ$*()j̑Vnp4/2s!HՃy0{J&3G 6b0T^db"Xt-9jفH&3W'5g[ ͢a0͋M%]D AB-47csCyf)FSsH&3Wyq,d1YW,ښ+יk5C4OBłȑЬ(gg2u;R͊:纛uapiVT3@5gI uI͊YQSg(V /cfE6{ Tl\o_fEѽYhPzYQ? LhLzmfEu;5PYQc{,Q5KRjG1ǒ3j=v0jo~oXwBu8~A8R/PgqOT?VSX0.2͊ҀWF5}<·i hkv.(%ueB. 벭osE-ib衿 eY%c3bmMl24+j4kkkS͊:A>·4yl)40$(%ueB.6`JoP{ٹ¹7y;wM)cF1RŒ%*L-ɹsK {.܊N,>^' ?ۻpNӷ3ۻ*z\%ڮsErϤ,Vd]ϯ),Vx{e*AWɠ~"\[f3.r3t➆ ]kEwm' G* 'Bz! t z B^5Wnsj85rOʥ# YH~^6D*'WZƺ%n9I aLN*GX6-@2Rj\h3rW4 ~'n$qUpBR@of=Kj̔~M9JՋ¡?h52sRI#YMEțvfj(-pFfrat 91/iHj'/7qu 4$S4dYSrre[ҐQƫ+ˬ]3rr^x#TU4#\""gUxeپ&$5Qʑr f1l⼑+|NMi)O6iV2E0>7#rYꯞmO>54NY4S,JnKV0]6%۪Y(ƙȢѐvIgѠXfjS)&[dXmS,g=9&q ],h,g 㷫-0zR`4OiUj2k3؛'5zY Ma9:h9r͋k2kzά&EWxvC^\_h^{m=e֊,IΙܤv>?9W)2k (dBs*7݈4/Rt䗞Ě ewkkбr M6R;:-3{Beh`t3Z:*1Z FKG9eT0Z F@2Ph(0Z*-F@eh` i:Qhh(0Z*-F@eB+1Z F@2PhP6I( F@2PhPhPh(c 3Z&1Z&3Z F˄:u0Z:--rF@2PhPhPh(0Z*-F@eB-|,1-e 8.43L1'x1MBZdf\2YQgg(Y*,oe g>VD|e2ZDYF|2Lo>%>'grI(cydq(!TO(F|"n6g{$/tdt9GN)E-85?ʚNL*yLDYiM^ x3W $x_a{c˿xe;?Q'}*C:!WĎFk6Sj:Wŵ;*5stU=WTli m=4eru\qȿ$Dk-@lCkFך'{&V PKXxy nf,١x"&'gaZFryux[<_L`^KeH.DV{XZm 2@H .k ,(;D2ZZӧ@CF'ge |s4PgB8+e,vำ6rgrI&g ΚՏ,M=Ѯָw3)+䥾Z).ӫƧ PG!V-"GU՛ j¼& Y]˜Ћm!=*brCO^/=jA49 W ޳&'%k٧l~ |LeW@"%]rMW^ vYG+n='gS,Y@\;3X9tN!=%p̫]\nerYjfn'w2ViLi*mi0'k#^AdrdƹY7ޣ+G!:{4S,SJVbדi ۱.oUS*yvgJי&Oҙr^ܶ]eaﶽM&v,96=9g6심l8kA>Y#6j 8nٮY)A9 })ѓɴӊ`sqCc 8u1}R ٦V.@|lSyI30gz±C\lIWX}?\ڡqsl!\)|9ӾqQf6 3[R< X؈ G2:Flƽd2LFf&@2Pd(gtTb ,LeQdd(c 3YL &@2Pd(0Y*,eL=eQdd(0Y*,M &˄Vb ,Le2EO(g ,Le222P` T0Y&ԁȹ|_d`L(cL(g ,L eL Ld9g,eB=@/w*3Y L &˄zh2eBEB9Ƴ#h9B.4@˹%p [< 2XL!x,ec(X*x,5ec(X*x,8W[l 5ό[B! 
&C,\(0Zz_N/HU9J-g3cUV}^UF78ZYe,hh^e0}vYvflQ gFCIe}u66Fͤ_AOC:!8y7qlro4N^\TC\.4s\#g GZ*t HKMU@0-8kaYx6G݉ r=1YG+n\3eY2TN4 ~פSLV#e9R[p\YlXP^([<7Mqҥ#H9gImێc.pEsx RQSI?V}+ W \]P^QDl0{~LJ H!V8w8y+<%'L"GyH},ڟ_F&(l9RД%DN9ׁ<ʼ#'Yx6Wv%]@]R􊘜|u kYU8{RpV!f 8y_ rdg6[ rR@  . zP9Z}nuqv2\C5Tn.g LrTV^ޤ9V[!XdI@rN~=D-sRwrW7U#f[F{FsMV =O~r.̫+h\x{|W'z \[ق @h),leDB4.*sHHՏ[sx5.d,}~ǞU9; ZFr HK]cbr|ueȮ)U}ɏ%H>}_YB)p HI>"#Ux}J1Q;jhW.fݤNs_<6 gNA~/`!=ϩsmv2TήkYF%Vc[&DŽ^>AË` 4бm108slM 2e2h;RԠsMLٴŶiV{Po^cJ1jxv,4韶zi"V(^Y-WO?p.&=}&O2}QF|e2DQF|񝀏2LB%>6yf&tM~,4nCNOƊ(h2Ey_t)E7#L",ga5˗͔W ?[ RasvQ1wyWculi, L*> {";#87q~dO$zt,qlo&*jSƚ!Ɓ*{J~&=ֻi󫚬NS(BWv қd )WoM,V 4 pBcGa2_apj S6|8ڻ!$V[PG !!' MW9y _{zқ/$[ԋv :˿C`NdW@ ǗT|jj OhhEHRcH#S ܳ# j=e7xD*g7@`a:ʴJ{H4ORViw{22bc D ^$,@"(4xx4QR*$,,^r}iE 8\Z_ܾ夜%2Q PE|s[(7Ŏ-p^OGnd^E~~nFB{*'OZuSDy>ƸC3egfaw=h/&'W#0\=69r4\sUf%:/Q_^&it>\uH{ډyA(i274GNgAwc -j~fqAZ q۝ϑٶi<_mwz5Sl`m_N*WPxwiA”f p>@WC}TVqӬy{=DE/7-aXNoo"m׆NWΡa0e-yRSzj Trqh6ޝLrL}V,u.ڹ[OG<c^e͹+l5Ǝ~ ­kK!%әD=&C7=WX<ЯNQmK7LgʃFʹ!>oQ>795//k˼\R"*y)^@/Q2P TR ^JG9//e2@KR(x)^@/eK Ry)^/Q2P TR ^ʄV x)/e222P TR ^ʄ2^ʄr^@2PKP/|G_KL(L( x) e 弔d9/g./eB=@/w*R ^ʄzh2/eBPCpƼ вo |\FY-K(d[B!2:0SL̔rfJG%fJG933e `^!)%3eL(0S*)=3eL(0S*)8W[znτp4?Q˾'|\FY-(,%OSϗ֜l Y"ö"EVe]ʐQ4\*tk:ʒߞTֱ6v_nV:jU62ΈChN6_L^Eq} U*WyqpQޥj׉{UW[qO?UUNxjy-y84 3wPUJDE'S#ODUUgarzm6rxD7λs#&*&O񻹁!/q2 b~wՅY9̩*푕HUB'Wa*#o˂_S*G*$,,^f~cwYE+E-arr[e$_%1W(#PKXXx#C„kHW _xm7"MdTX<14C [g*CHWՏܜ*kv];bp$gMN:\q ,K=)P99V?^yZ'9a%S r$a(7 + UeRn^ =aԱ. (&7WO@ܳ'YUT*p\",4Wyٓ*'WkfߖϠGζ=x^'siMv kϠYC ;BV]9!ԭ(0n83c67G nUMAL)wDc9!ַ08ӘyJ\dz?f3toSsrj4/9P}\4rNY %z3rg3Vj,ް63G,V\A&Zhn2sbInwJ: Ef+SBB∜P MfN*b)/r͋} _#̜D[3m, EfNKwMQ >$Bsӭ{6/rrHN9-&&%?tZhnrB0sO\!׼˽,Bgrmy9j056_@زU~/~˟jyI_Ͽ'R\{Ө/g0_ۯdwG_L-'}vD|a]ytwҵ^w_~#}oяO(Bk37NŌf '?*SlpfBPu$_|ym;^;r0x{ˋf;yz/8|eȼFO6&!"CN*}4,.arMBI($K T-S Mܔ `+mݛ*fWWBB˿ˈ2+x|z`e:AS@S0ΗMF}5uOmm~s"ZpCuͭ:!ۺVz̶UG=d[ܪa!TyKMQLWFpA%]PͱMz|QOϳ|>J/'%__ޗҀ v$w.:=bZX6>FS!DTo+3~׉(Fnr^([;k'WmWZr~\kI$us8?N;Ie)bG\ Q3Ҹ T+XpR_x2H ?M6~Oo7m<74  >a "4pWؖwta @ތ9?,|ʀw<RŸ~q;nY9|]ER~wF<UeK}}խQ@;EeM%8&!&{s;{`K-MkO%OۚfXMW%ʦK*$yiFiZr6;OKnjv}b)'ms3Cfp)(|ޚ4/zd$uvZ2bBk+?ײɟ.RDYi!b ni[OxT> stream xܽ[';reg%( :j$HV_=LfU-18S-y Fi{O|/u;)׽=\%'h*Ua2YTg97{2moyy-cx.mk}uֿ>Ú7r`5k rwR !3'EHWH28 9X )av1X$<> /t,LWJ{]5QISN1CV!R ¨zrƾ pȼb Cip: i*7n_#6߸Gy.q)W8 2¨rV.}6| $ʾA{;,嫦_ko;z* B|2ztePP)CdIPu;YmIgmi?z_;!}WH[ߥ']lH4!HLٻzkٕAi!R )zP.Tө84X7gSZBa2p"Hpz>L!NKJ:%w37V|sqpsڳkFl=CZK}ߞh=V&}kz{4=2\p.|)}FŢK0eе*Re=wGsnp顅_Yg=6ٳ;+\Dop0!K_C"\_WHNAuj >5b玸W͔;oxLu3:#5> &e롈'_}2#*e/.Kv<'<=~؝2aqP([Q `X@y e9 Fftνv6﨩/;qy< y~7y 0\VL2#a\z )KNI({cu>ɀW0śK՛0;b{ϭ(ة6ⱙ2`'is`ISM#u]NQ>vKa3FdX9LK)wq$AӾ_} |PwW,7׺o%BYZl-p [㵾 EB˧v iC0}Za`R<8`nS .M.m0"vOyp ,cXmY4˔n:|;4?} 6l=ӽR^?J@'%N8pAZWI(id+@; .@?4w`]$p6D8BYڊPlTm`&W5 )yg̍K1S֋ݢ4J6R/g62Zjeg`òr˴u :?^|Yㅛ}K&jS7lwpVkmWOpt9Aw<'X;{{eK2p{\L;kv{8к?&p\s% <0.}^cCe2 SVx :.y^൥NY~]yc㒁`2n2-:}3$$i0_=Vaҹ2'~v6⺱QTo/-aֿXd9-,9U@&Q` ó˿tZAs˂EA7}hf?;xNTI^ɼYSFa&8ڢK,pk ATYa'e2(m!R ¨Dݖvi QhK) NWMAT׵f4FP]w H)o)F/Ih97^,F1aYjFM٣KW:U9T8FHt?^WPMATuO0g 5Ug4ED>)q߯v ^fG5$ ^{ E\6Q  fh"g6qt\n9Wb#Aó ;(? 
[4"xbȃT"!rIoKZIBjhfGIg'$_i]=_i{$m~ }$VFЊ8vB(m7x7qJrile"ֿo١\ERY*K-3E--d=$vBvml=bz{b-Q 0o,ʔXt^!fkCPMڽ &ΰiXK[ųv DX䊑aɲ 2܀0z ɲ"[|ki]WN(SK-meY eps~$&ʾ#/l.;O H%vo,z+\NP˰p"+"/,"wnӀM۶+%tVXi^69Ts-fau$[PM=(oy卍ȞͶ4/Vy`^f7 {^Ll|KoZJ_6_p5_995<:F1]3-hev6xJy7UD6û+bk3~86x}U[՗rp9[[ݚ>l,)i){Oثw إܯPh^LR.+KujGo+\ߴa7#X%۬K6ͪ.qs*K\R\ .7-]]ѱPқ:zBZG׮B\9=w*kZ'WĹ:QZ'%rK\r.q^B 1 ՋBbP'[| ^y_LxKܴ5o+9M4M')҉f<LKF> >Mwtq>zIQB)cﻘU\s]KXrmb8Եf֥|se|닕֜= ߦ1j4Y~b:Wr-օPߒ+ خ\W-9c^6bae}WyM2.׷*ɯuR/>3*"1.S}/TߒjL5W:BeQݖӉiFW!]{:yOGJ">!''`T_z4 9&w i*r(BjuB4`)CQ{`.V4 O[RSpmW NWJ{]C4IS^׾Z2Xv*H)/345l64lcR++Na l,C iph+cJm}>(nض: /I0{t 344 {_RX++v8+n|@+*w_dϠ20jv^H=y/N65LSI@z(Ӵ¹dX4MAedI2OFi5UӆVNUe`E@TS5&NE;7R' 22gƵaF9po5)[UZ ɷA@Z ^ת`Hˏc *uc~ F(NhCb 䴈įLfIkkVT}+3%3VU<)C|U/_Ҷ3sj[( ]JT3+<_h!,>neOu7.PIQ ͔, %D1= X6q}6:Vv*I׻SrU8R;Ai*Qj421Aip޵XWku7/ ]!;C1Jœ\Z若!¬zQ _aqyaBaq%c5q4ՐƱZӬS22 |{-'bCȩwf!knM|WJBB+( %LOKc鹼l~a[K(a{5984D4Me8] aWKﳊ{TnLfS )OU J{=$MS^Ϙ)ZЇiZ(S9T9F1B\W/::a +0 F0b%p 9FeS0w!؁TIR #MtBj#IR^]aQwQ5BB).W|qW r \inĝ HLB,2( 9D )a?.ŕ+{gQAn^ErI5QISNT FrI J;]CQo2孂# DDTT`UAT2Ԕ -볅5J YZH+j h,=Ҿ QA^][cP\mA@*) աoY9(;cZpFbMK931(BT@.pG#0UU9=m  ažF!ƈ5ă$ alu+d9=fʀ 7 yyEuw8SpX0l#xtO$8?7pXL:lضc כrQ*V6x||j ':)ޔBo{!N8g{ xpG<#U4=QJrO]`ɦG& yЪz1`*lvV(xAU}mf _W@:T H;xD{;S̒Y}~H.'N<&tcڷb ;+*[71zΡG}=aU^hи*?suK{"'=+( 젆7 <,q?ssh I| bSShz᠉0B]C);wZ#?!X#_zI3 a*B+xpM#jg`8' 8V.N?t:B׃s@F' yCb0q89j zAcK(Uל4 lӧ)ˣKݯҫKy>BoLmA9b1 \<8 u0J<  Wtw9  6Mz ,O8b-|X` o +}0G<`at8]d0.8 R뀓6|k0L4V6`gM* Hqjai@|[E8 U߸Cnk找DXr:lKu:C0<2JGx(F|û QҒCQFw "qE'0JXaB"? W_3A/!fp]VB8ijY ^wxAAy*WC2C +;Fjam/nѫq軰})\t W,k>Ӣ5尥;ɧ+*&`|Xg8YAD7ө&8}|xwZyQ|(Aܦ|qqjJј\0|kE4|Yx_;5 $#kRפL.^ri&r񚤲]|}_ RZ8.}دKZפj&kR.פ\.^v פ^3\&N}bK|_:aN~x 7^xn7ſLo!zFbrk7LK3\g 7w-# {j_F w+ž/J)RkBt o{I2^i*= eVS}k/% *o> P=;ٛ^ˣ!tYPa XZ=I2ѺI!"%!zסC*;:N xm&ˉI)L n:=-\)񴉠"JJ1Qv (/:Tķ5紽Q~.0JN"ZqF=^v~@eWaئE)poO]}prp* h%RuqNWJ{]CtQ7.a`KP61`0*r%I<޲K 0zl->xLJ q0*-=bGKy ~]4*%aH3-%ѠSU9XE4t +`wtj"{dyܞ!C(0lo= l(@a3c?i荬&ov Pi0cp/|Ga&}c?oZ0 oQ='SoQJ [HKK=կڭvLvB/zS7r6W Bl2x+ww'rl"=v|%[a[6$vFZ0ІPf){E 85_ K uPO5b!37{Vq}6]bj[Z3i8M=P0Yvgo`5dJx`i7œk:J|o(-v#7W2o/75-Wha?•q G(oxŒIb0;B#~0aM< EC}3UL!z;~8]Fs1=Ijފd= `.&ВTz/6jrQ땴a]鵤/*,oZIn2p 7p VncYdGaX|}~Է׎,Oျ^}_~OǗ/Sm2h"5)AY R2?I[` 4ۗ$;߽?_?soOeVcAX5{`.pLZ\֫?}I* j !F- ߝh~[Xeoп(_>{PнQ|F185!5k֒~9 cP .?'`g438 tf)8( |C{J>484EkHhL֧mT(ki_%x,4n-B$m XT:6}u! ;iU.StI1qLu>*E A}W hp*\B5?'aޢw횔)[9D>xOuRD8Uvx|[=8iwp:q="JZeTSmuC-Jzr" ҮCdQT)*͔SueE_14̞D༣CWc V߰nsj\N5?'L.$57y4xAi( sV):߽QON/%<\Out?Vs wvhËV ӮCQԽi+uϔw,/ιW/boVAp4wO? MAq_}J.O߼6lT=G{KʽS)9w[8\\Ґ9ݽh蚁to {O-_HR7V@qKN +.(NȫqKO!rL, g7DG>,!=Tk? 
{+nA*&wi}1`:uF]=*ϪGg.t85y1 F0K}T7<=r{Q⠾WTB#;Ot1x'W5HIRaY ֶ-:+ S;wJP'(juZPiq5H~רs}]9Ot []\v$`PTŢg%_oE5ki{,sɵ͟MU(YDv惜rfd;71MT5qIZMug3$uW3$?@Y 2eIԉ >Aﯡ>TSy:z?f)cE JL }Ɩ<\@R2tx,Za/[_`3 {<8πnCIːyܒ$xgA=ZniPPF^Xj} <%jn; 6m95F]Kuipd91=z:nxרEE<6a(U$Me8cktdټnNPٙ ¨h_`ޑzu i>`t[8d$}D֩TO2"dGU|̓)2J``&oR+*!H0Eчٓ8S[UBEB=>&xbHN+V>Qt4'*s&QTcgYVt/aJi߬¡Hg5=2˫Rp i׏fD9uQWtpp"X Q UPm>bC1-'\\WrWGQo Yru\ML_ lɼѴ3֯^y%+; P9*WaE+6yÒ#lj')6i=J杖h+V A&8;p2܏7Yx}Rnz'tcKmWX^Ny.7 3}$(gPF&Jw6)WP`EM!%t$ 2 G/DX 5sHXu:|I>TQ'bC:dbWX'3}w쨝80v=R,)Đj>tb:awG2#\}b, Xv[ Y@$MeS}`ۧ']$ܣ /~jz,߾Gs^0:f*P Ƃ AipZJzifiiCˠT8DJAX9zеk,׮.L]Vzfڕ9]A+ghWf"zZ2(UF=GRE ,(j˂Dq4p1IcA pFVlqOL"l娔6Z @ip i*kwQTpג%d2&6q =Bipv%6qG71lʠW81¨d wP|qWѠQ^?b|O^ϖG؊#e7P!ff)*A]=L7]miMy3:2Zʄ~yڰϓؘ0/g_b?5S ps`G.MUĤL(&$xF%0Ç(֜E4Eև"aܲ(q_9|x?Z)bS qR 1ZvO'w$vNj@IL$y5J Q %QF)fbY "oGF1rH^eHm,,vjiFr䄽;Rdt<+eAGG然-u:|I=T[Mq(ਸ਼As:&9uFσG<&SaOHg|wh_ r^"[ j*C_L1[f1Ý!%b]{ !#z_m{DeWǖv[џgz>g|tœQS)Tm|AEzW (UdsM]"H0ufA޻H5RF=狁=:7J D-ڞ]KC@s v&кEs) "p0njyzGS YqNRZAi0_Ԭ\iy<{pL:"g_6Nך45efdQ[kSSsCDwNȶԦ)s 0>hP|@h?Iܢ~8 Щo Ep6b4ފ 5tPT(Ŕe sSĂQxsi5㎎"-`x3&n%ZnK5f|41}0dLDsL _? 9c<73ΊjeG⳽Zܓ̭ dF;L4Dg$36}x'2I.d ;ڤa3`*V)ސ`78Cڶ| }&S 佴 JpŷceAeM>"#Q&(!y{#4ʙfLˋ)/M}=P!KܳzPkt`)tƞ@SJu2Sy(t^^2JWQQ:h/f3}w~?؂Y;ˆǣ[LUF @9D@FTÊU*AONMzkzV8tVz4%SLFv}+29uՕ&=gb)\aD زM 0!•LӇk\e׸UeXBv{/|#&FypASWOcߜ2JNV& ~M0b$C:1gF6o>e|B?sK 0~Aӱ;ŀ7݆ʠLH/tY=dd}8:]^<Ǩ"d*b9b}( *"r0Wo͕AC&=`u;=׌^+`+e@&̡0ם^ b0MV¡¤ڈ)6.иa )e У)&N>)La%I%B7qsVu'D1G|@8Xwǀoth8BYv*0_->gLtk5!{cF+.#w@o(_NL_ݰMlhrxa%8Y8촲'AkΚMoP P[Wq2=|x>[HiD{uR/lWՋ@ [)l3#uRI}Nn:s{KHA45z9oePmCdI?3=n#*: n 8aiex]*gNWL2HkCGam/ܴ$oEN;}CMcN]xg6Pک"$#Zܘ:|ߟJW` H&x4!82R)w6Se٭+ҐCR-A04zTMOQROiK^S n49Mi|TgNSTTeUF=n ^$M",\Q00{r2#2}ӚSOJLzuwQJX;2pxl! :#t2.NTq=Kr89,TwM23PUm65`e{bZBV#2c?Y?׾2ڨૼ_..֯^to~>1 $?ۿ`0[e3ہo`&"NhmQV<e%E "͠όD`@B\4(SJe_lr.zOwbAtb [N*CŬM81V]âg|4g0.y.Eŷ:QE'űQvƻ-K2f[@FauFsu1~>IBRdEJ2H3 {FzY+on;ַj nGISnG> ېr)í aeX5cfuhپ3 ɡ]wUmhS4&˵7MwbmJ֦0zЁ*.11\b Σ@s5 Naʪ Lvz1 턏w1MA7cn ÕA["t>Wpgn\Ԡhm^̗نfWfw91 T1[+Y W JHWΕQ${_YiBq0qq>:.Ƅ?Km,3 şJUO}y95Rely,"F!tdLI"܏0"?"Dii+jh5aIv2a.(xRB _/(R@ş/f ͦRq)< ޥ; ЮC6WP3 |3~ޞ^Ko'cdwJE]@\'֟yTi浽sȷNt{%{ƽ9;{%)f ʮW\9/p;Ff}U/ʭJ6]>/mo>9'¤{G<6JC!-*REN;uQISN;Yv J#GSSq1mÃmsڅV m2,6>G=oB I[m)aX~)$:8RU]BvR*^$Mex]8tՕ\ #zR!õmڶX2jڶ;ִ;צ{`״ִ̡MGѡ8ch*%Z~8X Щ Pt+ʲX_ң7dSby G=P=屵Mw%_+?i4Dl&<4c q 87ϓ.HeTB7'#e$m'EF20>v^/FK2>ss":( v8q2evt $8n\D &qU5/]bzhPP@TYb`Cf_v 4(TX *nm|g;wO\f89uLA9«@Eʲ+XV*/M$,kz~jsi5|{wiuskH8rb50UG:a*bL3ү\y cO6[i;y!A<6<e#p <>a qǵd'Rgc6XLlѤc'3ō!J0{c+2m`UIS UH O[+ҺU0Q*ci\=a|yrK9Z4k# @uj([zpZ}2쓖a7$ܐs \0&SU9X4T >Tps MUe`%͉!{pIa'k&/xlh=xa.pD[{*/FАG .ep !E tbZ]lRӑB| ZV>$-D1T21 J"JAC-O k4ѧ6hQmP |bp Z7( JfASq nN W֋Ti|@)8pɷ;4,; ѽFal !_òtn\u8x עudn$'{99 6?-,L{mcx}LeSptOr(hS?Y;{)y<IUа Ll6Y#ԜNLgPkpjSڞ,1wk e3|=/QpFWNurc6 B^ЍqJi(O9PMJhj^1L0̼}U@:#1)87&ӲNIL:gщ3 _uwp(6FR=w'Dqz"ӳrkӳ';E15eQ*ܸK+$qɢ\9LbCnAr3: ;,?'`g8&1 οpy$>F~Nx)NV%v + E_wB9s^EP`P櫼M}3CC X؂5Mچ 0ʽ>hxAes-~i(1Lau1P>sꌊR0DQ.e"6˔lnn<| si9fB,oJޕG+G@:%`髍pS$hI‟~.絟GŠoXvwTyԋ*&uia /9$01͘82{G#Ӹ`` P[.(DHcJu\bzh혜KY Yz(~Q()T1Weg!CJUDVAr @g*a k(Ú3yr]ctiji%hu/f3}Y}刈2tѹn4:uFo ~A`$ |-9me@ p;2C>A8 /gԺY=^q'i&jJ~Ѻ[7՛J8_IJmp%m hLI9٭ JCB8DJFΏEdVv:xJu8F^8銁:N8ZnzayiQ5Sf^aPu* g+BߕʠWX8TAB *b Cpظļg (.N]'4x~ ^CO`ZAV2:f;]`V2^7q/ø<|u(& djDzӏ("-~Z7S^koˏ3<eyd8k␉A@5˰n|־T#=uzfFlƎ3JƟKUwRY)m@g>&x!\ EQaLma#]FYD 06 X'3-?\ z@$=Rg9с`g1 jf:cȷ?X2XM/N\g6a)]?*dU  a)AC*c CdIaz&n`@E(\D\ _`&7^1 @4E6]!;Ia5T҆j8D.S|Oo_R։>c'b}RAO#H#o7 kL&@ |3wP }Zf {F3u! 
]L;>n[< ( H *vRJ=.rQ?3=Z߃vaۅRyAc8PUlxJ!1I&j6ơo}z"cfJjF| iF~/i26)#XƠk֦XkTL/#z#syQg{E:Gz64vkXC#¨t GOS3(% 8"QEFR U(Ǔ=(ЦrυxbiAaV94*#z/s{Etd|":3}Kخq+X/и̡@X(wN]UũBy+3T!`¨%B:![)x!2َ![g>\WHotǕ?YlJIǙOcVML% \D]<|Y@ZvfοE i*h#Zh\1Hjԓ."|%AK8(ĥP}ݝYZdr:{(]8,8rLJГInc[bF<f *M*DB VozXB.KD]>ALgy <>:Q+0&[?S3҉=#'s6rk2vv8w׷g}~Ohg°k -&֯<nS]tJpZFΏ`z.'jS3Ѳ;ڶ~wz G.c}7vx5)2¨;"<~klo&!AT;+\RwZ9rhjFxiupA9Zgs L̡-ڞXps;_&9F=]6 2̻-4]LxNOS/r$5ͽpg-< s9Ih=G-:,GXe݅kكo'eCcmY,chFquPX6 K,Ý2%a.rQZL@D< 3wQ(N Aip?ې2q |3JL )aǐ]c0B3Bg]fٝ-sC ;2:U) cB<0~BRF=nFcxii!Bw4A @TT v͕AiB((l#ju!՜(%odp 鲡%*!*c0D4f"(timldCqURSieh>FEj}s-eR7 o 4] |Zb8BYYʲ)RMіk;XVm롋/e\Xf. (}2<_yb@QO~64fA,XQ\P2;mfQgc"gl^T|U(^NN?>ncD3zߔ𥜠3e@ͷg =K|&ht"F'0Joɛm+lf|Zzwv2\weuMS ^a\g6.4౺R׈bJHܮKҶ0 i,6bMOFm LnXBxqS=_F 'FKuNG}tdՑT§]dƷsoLɝ_ߩ JrAм np+לfL:EwBsKp#F з\1(mz*'ׇ[mRQza@ Ӫ!̖rATaJ/c`F+8(2(s!R2¤>n-#+;-(O,ӕ:i)}nAi+sJT5UCH ]+^WPac}><6nc㢳ޡq5nɋ|9-1t]K~7+Q u1>s꤂Y Qʲ܊,SV9>ClgY.r8oh.(n\|l˜W!J'xvy41}uC&)\D\ _?Zy?~x}'x;4Fwi_ٹY[y X {bP5X4ߛBÜHr^)!҇H2ul*MIY"sP*Z[%jjT҄%drFyvy?Zb+'U3aseM\TdWci3e,ml#s%5=#K$(ŠbBcTC2u>V`9}b ։>cF*[<\;_x`V[YOX'3}w(#}L*^J7M*8ݾA8dMrʣКM2z2( !R2¤NjP]}Iee<ܒR.MN;]C5aMS^W u/hWR*sz1eft UN;CMSNចc3B L+Na)ap N\|:٦`3ݵp4[|"d2t|2e| \B]>01~@nB>)~4]$ݾiuc^}NDaPu*`iC8X])p'BXsڇU&=^&'I݇Jyh߆s."I 4pft&7bL v99-r"ϵ`?w%-d1?ǛqGWA)n8*|\`DQLGAM7 ;ϙK5(Ef#+OiT%Qa<@A nE'cXF# .vaZ۬ҧ&'t4¡qJ @5ol3h7E` 6͠fՌF-lZ0|8ؖ}Ӣg״Mš9\`Tд_S/״9ia1zlba|7Qw8yŸ́7CM j]bʛ&InBMZ hȴF߅cC[{#aQ,āp 6x:`ބQ6d9(R`Ӟh(\BA.pRb|Ĕ#rLLL5pꘘ"Dk#pegJ/N(8kM`PrK:IN ɸ:&㘎> A'1'_P<d$}ۘdV=| q/CnX^j6Mֆ i*æWxm;,hL\{T߶璋.nv*dG]')aB\\ -?Ŷ/]M 7V8lUdb$瑛ƖR,7mEL8ß eX[#O)ֺ}29WH(ֶDĴHC&f&f$A̝ۘMBa: e, ,>N`։>cG؟Mtq>YM>u:8 7 Jn75O8xx) k̭[Gc/C0υQʮVx_'stRyRvtcm߭X H)oABe OEG0=XbI1IS@ XXuePH)>x#권Y{8ӬR%-A3U BE N;UCU!&08M)5Ռ-ތ%*&Z2Kd~827Mqs`F`џc݄.?2Zt"Loav"Je(_ajMDF [QxtOLhܴ(l|fmbx(YEhf6sc;c)~0}Z ͨk݋>V4rYN rɹɉQ /=J.`Gr\'0e(`OIX'3=#lr-:AORŞՐc:JCuF7 *K^0t[<$Gw+t" 2Ov+=aTez~0=W=ON|Eg rPi HD3Ű2(u0¨M[x'~_eG?gZ9>~!l0C491r!$42(@X )aԃ>n(Xy 8I '9 ۹ Np@㺾s3n2-$<LJlzcN湶hQ"pA~g^sŅ糁Ão:mGᡠ+B\}lĔ+<[=hmP\8Pܕ$ղ|21+)v.^@1Ť|nq)"g׊B(e^TE:3gd Dd苟PSOTF5 ٢XZw^m uFoc|74ְVmt~ +jI\}5lit9+epڭCQx@GTvvUJ_cu;ƍ! A4t]TWJ{]CQAˉVQJkӾp򊥒=HH9O8i.N*2-ri躟UJFt9ocIye<t;ɔq;ilͤ*2hEM,Y1Jl,ߩ&*w}.BEnغ-:]tMq2/x -}N.9ww,mEww˱gQUfP@ˀOߊ{%*쪽hd1Vc|IAQ4'x3;~i]ljJ$sJ\ ?[Ҭ"b'7=5`ŋ_-zoo;yKR%))Q.799"jmx%)%*c%Zן2sU`8Gʦ#ƋjG ʝFލPrW4Эe(4z||`J8=P5)]gd#V݄{I3)HG:ʖEbZWi@|nXtX7l:L[$ú aúqz6֩(ߌֲ{~7a9n+@fm ;;>M R4*)$ ۮnY+[5ɵ"h07ة'O eWݛba Zz7t8;%R%+"pq e>J H'%!0ECLd`lr"y$?o_]^C!CjɌ}Y%u,\ژ:Ζ0O=r)U5-7񇊟>O"OU-w>I\[݂p(iQ÷Q\tMp,Y״a'v״vب;pOls׬*uy;Ag\@TKBc}KuEc9VHtQ 4+]'/J 8 Kx!a,O4e 7kAT nb$#D>L[otշdJζzLYC01qocϯ+'0Da}۟nrmO8Vqs;[5 kpڢFJ \`o \`I+gB4JFx7v|gn1ktW%DmT vTEC1z|9$KJ˶kQ)3O}>q/gjOJF֋ib|`o]59(y ] Y07*[x22o#h\lFjXiD[d=EZעsUנ (S5fE#Gf$I]\˫v{X8zr* `I(P,Xúh]gl9QvT5R'iR%,hpyS.ZWT|Zk*ΔZ缇\6S n]&QURՠl<1X) >uKg'V#fZևNpwS tysoH眶pD|*ZЛnMVon/O h#H_~NfZ$Z^ğa n?1N11Ue6NodIsΖ?nWR'I2G!f+-'^ӮJ^*`;]@0$D;$lv֬uEO#psg*)|xxk2GݨO5u) 7!*lݏ71N2F>q\{hQZ(Y@iOQ4"#(ōXM +em\G5JA<2Cw0)n4ORN G,g*iBA292 M WVL P+aA ޗ͵7&;QTrZ~8>iU5 (w,sϑ ^Ag$f0dCE32X:! ,Mu))7,K~ {a|n@Oq$/Jq~qW_VˢE2En\J >{$Z$ZE/?w9+ˑȼ#evtTJ N2kY^E];]RފHdLIf3IUbvK\E$|cO Me0{x.K.h0{a!%xGvH%`pݨ =IͅIIaw$&5#D'q_0X|Av84bS8З@65&kYjCnivQ4DOJBBL1Y8?7XIʂnHsDVʩ|yʵId V|ּAkvZ$Z(XY8: mV$%!FAĮWY^Ӭez5;5-oi<֪.HR/a٢2IoIoMxo%8yqs $)Ca4W}ƞ$]0fN v냝$ v[ !X+09ėTr>᳍5%mC=֥2lQ9] LCJ|GnɮzkHlFc*3 .i*M7|+uӞhI$vmt-1Ӣ#ie0XheUplQV"Wl\~si7#?Z͙zwϥfEKl3.ogp1>)Q.i /`: iqCkm 7M7_Цey)#}2#ScHy#)[r Ԭ$Ir/V"u| . 
_@N_r.yjPW+-W>yyr5խ`XW+8g:8<:V!0%ڪTxn]G)a*?P{ZoFsѻ{`"M4vq(P)% {!Nt l?IAkD$$ݥyT1%<c'cW]yL1 RႢew9϶LiY񒺔AvphB 4 :Wth6^=14 RXO50\O!{6#ԩo]^`ha68Lat9VpF(!cg,G_CN@]bbn~sq=?E.Y%ڠ_$#;\ eNrD#yQEP,!(6zJl6}=5l\ծubL& ob<(q&alg"0+ d)E=zqvaF.3(x=n N _RZ\xS/)բ'r<{zνQ㾎h'X.{'V@kV@ջ .n)T1a懪X+-^ӮZ^j`;Gv$铯IId`]3cdmꍖLǭbCB6/FnUL8_NB'S^E1 ME$Me@<, BaQʠ4+DC1oLvޣ5fCp)9^ĐlW&ܫP85gYpARs C`)g1>h8zmxB%?v}7FpX|**֭1ײZ(Uwk7) e$PZN2^7 0_R0ɀƾX|Ϲ=mJ>킇j*hQ>Ҫԥhƕi$%M0 yZ"5Ω]zjۑ8P;}W;tW;AP;p <4fV]VJ%K+ PnVuѺ|E/jRk;hne[:8zبo 8yƕtC>],ܑߋֺ~3~~Yhc g% w!%I!9RpX"(I(b_މ]BsYϜzf.YC3:ok*߰Rg R"{=8NC,ͻaVti'@.Ai,"!m Dr5}0B`y sO蹟?k\^P*r0,$@"L;WtM1Ӣ*r`_=k~Y=l5U8U- o/p{Y05s9>3cO^̮uW3R+OCj yBYuE| `Z#!L (x򻄆npntfbhqڍTC#0B4x#J8:l F%X.8BN;!ATz#6+}2(X)W  4s2nf)'X::D0<0wJɖh贒"i}h=- @#nY5 ZH$XCA.Pu]a]5e0B3IuH'"l jm`12a2ÒO#6C+zʵ L8?[gaXM =0`Pn~{`aW?1Ё'kdɼ+q8eN?$fP0J?`_ E#IC<+EZףW3t9$R}Mvtى|E H|9Uj ].`]3v\$x5/gb2Yαjg)ZWK [!&h ۧ N$MexolܛEFא2(⽩X)GP~"!E26Ɂ3q-%'-RbK5$ hL]asz׵"($$ 2$O4F]7Шv 6t(0邿l ևBhXm2 #+}DbV\vUCc1F<$ө$Ĵ(  _I &mwX"; PEqÈ9aN}k?j6ϙk}mE|G:\S.x%y%7Jg.Y -.!,. |\i%9!JJ}~I,J$٫pY]F8 rQG+Z=/f梖A)FsdK<6w`V$ZpwYyl].<'6xOc% OPSx&sk"Kv i;:7 DC dߓu*<ٗTzj4ɉ"NsC Q%Zq\U6pb:pe Jj@DCr|pv'|7@784[ ÒS7JW7_RZ7\j9 |v@2-[{/aퟠE2E8u>0)D⳹ax|}㩂fN Ud^c`ox%@HΙ'Zv=Ԣ3tY:z vkE&YS=Ǧ! NLvu'E`z_ N) E>cKp״VZؕ!UlbX+t$Lv֩]}~Z'Ɓbګ&vY|y=%mc>2l̇ik18/X#edWQ~U-@|ATFG dӞh0<B/jLR+]?9u~JiQ4ԗ<#hs8rpUу{ 779M\,Kr !x<xa!4""EDpED|ED` @1ЃIpqXi! D1- N2.&H>p~B =c"<*-[sENHzLzVӐ-P^xF9|`׎wp9&+C6q,ɟ<8zCM YwEeU,!-Y<{}_̮u=z5CWՍ`y;[. &6{dInl#؉*%ョc, Ջ}ƞb"]fRv uvvyΒItX+b@e7-8LG[ k}vC>5`l 746( 9|!V2#DxCn.ɮ}Gp9Abse e\Zi2(r#b"RgQ{ F9YC3:2{ͽ|3ν|C^5Խ<#D1&q{d]_塚z;$ם+k(WFp^KӁ+k(WF<>Yv͡(% ηo.o9&H>K99%&£Ҳ5=ZT\#aOs;8Shc>DS>>5f@j26H io{þ5%8_ɜ#-vFC*%MYr/WpA1bvѫns#I##:;9~Pf'Gltvj,FsڙECdXZCР%z+>uѺSPk Jνvv ft53FN+-$t\0_{$_h?yA4=΃+<΃878/b D1.q{dW8g)TOikn0R0;(A`IS1:wcgT7SG"#?_\Ιw }9:Psv#ЁatVG B+\#G(\enAmY\,ڧxcʪ$ngw`7p'o  ^Oxͧ C"XB;Oss8` .4ѡRMu=u4k<6 H֞:Z,A=eTRV9h-aTI*,'Œ3~֜KKRe plYI8jiY)3aI0u,dIЪ;zwZ, Z. 
KV /Q$^K:+cܝbE3XeZR3ǎ=i!Hʀ")|22DU+DCB1Qw&;@1f=E+l[k\ECrO~ȱOjpaQK'ʿ+/)( 4ÿhN,/$Gʭ~x##GЈұd Gi)n9g݅b5v.Ce WwOSVLuw}u"^>[\zpn%6Qbcenc"&¹΍v'稍|' \"N#xN?k̥#NsÒV dj@n^U'^5TDžx$,_c wKJ_\Oǃ$?t4 Y=ug[^y IiLZ0ʭYvyS_V"\ -pE<ޣ\.w`>Jkk}j@7KC|_DݚU`OW75Ԇ秩OMj(~{#yVk}X*tN|\y4-y-GZI=܆OMKSӼ[5kٔ_E?|6V ț3X|§Bg;73&g1cW;lt$!ɔJIg=> 4ٹCw]_w/t2P`n߫Mn*/V|q%*u7 LԬtpKZu~Ƞkjgb%CJWYY&*}_z!C}9x~3 kᄒAyЬsβjU9ͮ_8xH2h耍tM-eZ22hE!ew$me|@1%+B#P ]cN@9J~5cHN22b ?w]|c[33*V&*=Ap[3h 7P!/6 6LΎvvr S1ߙf48LvGrw5ӎj"#D˻%Utm53=d>t$orpN}@T 5 JC6BD(2{,)iZUv\ϲ)n)`ǀ[f=K i*'ñ,0ʠc) ln+-ˠɪepGKL)JԠ]gy9V_;CZ^d+"7c2E*0:qC@O(,(MoiO5L}30YUn(N"_:eeQuSjkeFOϾcHW(<* -8zxcX}+Vn徉Y}lnSKc΁ȐD^S) E8Ra}?״j|_E3m2k3}kSjv .mTgets|Ώ#Fv1ag( x\!Xu=!KbA!6 5Y=#v+tN$VJۇ4Y>c/6q11ل|cq14_ =.6{HNܫAdhW΄BŚ3F+Nk)ұ5k]gFdeX7[]DT$M%5\B.82(mk&J@ߋj n4pgen2aAÜ+sW d !!c;~i495;|7 9mTG?h|w<ɢqtcqd %Kq MJ; 8pho<=,KlHbsߵfRfJgB˟:;9RDA^/FMnҠ4N σV9m9i?`ڷIV?PXo"s>\IٶBB^ ft▣۞ap`) !'ZxZ!Yab DܮLv^>Vfx\Smݨv60TEC c ncvEPژ)΢ltBQV 4;$Y*J*bN}";qe &*LLU+/3Ҟh(Ft֚ *6BiT4_vm&E}G%(hM<f`&dlrFJiHEC%#Dכj98m;XQCõJaJ~~;MMK8A8h j8"mT 0AθKn[ZEc ,룀XGL$˗ε#\+kV[}ո(:t֝O6mb\ `ү:i!ݺkI[0Og-W765dɂ~&z(M*֧XX?=?ľ6hɼVRS5{xTp@QC6*[9'srAHJII8h#IۤH [Bsz!-\F em$ly/Um$I02yܳ/{T6ia2C9x @2 MT>g(E2Ee$<\vfAwg}c kQ`kH55fUOٌ깛o> O$·]+Ԭty;%^K7D0 -[ɒh%G0/$oCt3j;RԭphJ3j3R7>eHyr+v٥+Ԭt"F |W\w=ZX3cTY >C?mDZ]}nʇ[H;zrʆj7SQȭIj aݺ jY7cT%?p/UKs+ebhtex8XaPun)fG_B4*<<2 rtʿwVg)>߫B:0ôg*ʌ"=Jk ȩeZb`S*Ze2r* !R7Zƴ'+J"vD/Xt,2&K|E mfᢔقB']6cúymPIkҞ.ҵ`9k-b!Hʀ"y }_)ҐC` &]]۽3iZp\񆚕^AiU4+#(Wm\qqmZ6v\Iøރ)UYkd-|QȲe%eIđpő@ˑ% #K{7\o6zE { :-F/f  cUa:ӫӘ+=~t-Z$?Q~h /N;(ȏ䮁PA87o?FlW.ucb^؏i F<>xgX)= lO/3{/KvZ^խ`:nwg`~g.oAP)/;:|qc=*pbEX<EV<oӬ9RB>d%trD jI/ z9UsYbvZ|:~̼FI`& f go&K&3{63wr5@-9dG=)ohKWClgͬ.)k's^{B97_;9\>t4ci]kMX c"uX}f/jM^$W~ydJU Αs:Y>cCh_(7y5㠝WgB;I=#wu+=ݼwƚ z%I|Tv}}3Ip3 昿G5k]g':|/$;^%ՕK5`gPZܷ* QJanC4JFxr[ޑjw;:+te(=W& i*sP* YiǬ` &.]u CZ1C!Ě<pu1ī! V=Y0r0wjo?U8XT gxS-S-[p Z5[5ݬ9VU2Tp~S3 #Mc249Dr!lvw JC6BD({)i%ZUƅ*u@ 8ͳg)A$M%xB nWKQ`%p:"EPG.A=jYNM  \ha/"YV&J@vX⭦a?%4 $v(A5u3iHiՊ#) Š{ K JmLkn6YC&hl׵ Xmv0%&+Hr=APN߆e{ ͪv ~hU+.0gm˜ [ 8c(nƗ+QQ`l~GH˷- v8m]謾fϙ pykiہGYX<eT\/<O:rWEXu:Qݽ~1mOU"Wl exvmgoQx$JQuZ$1D49aSz)\47i8ӢDJѬU6M;EqTIqz6dBق@ o&KBhE$qiԙY媤Y֥3s SgfNNL̔ԇBP'] [b7'rAM'TxSϜ. WPFw?Wvns].ZWv(^ծ5 vFy=6$D;GM3uGuE. \q[ $x`s~uѺ؋H>ȣw½,95=ؿ.ZWa':Ӎ]3KBюb׼kdX+]# vbVlr\hG\DCW@T_8+2/B v  +?P;b),6nZ>cɰjJa+|9TuŴÈѭv f+ ϟo7K$-aL(BB$-Q5ݢ*@Fr x϶5Yh:9:S.ӞhZslH:'_-ګ)x)v8=r'5VݯK %D0?\[Oq{%93Vے<$aȲL@ zxC?`5b'k0MS'ʒahgu JCl(Y]wevskm^GStL11cL9cJƔPSg%pH=V9X\TKlƲA/X6gK0`,w~..K <>ˤ%e7#U)}ae}ON9n(3'U`l0ꔣIqd%ÕHWox'=t]r{Gt1ezG~i<ʅb|`oi .cquz4.5 KDg=5\9|bA'Gi΂b,atqys=_U|뛕Y녗P\,Ou||}Iւ"ZE}F>]y 4.ɼ# S%sEGM"K QKí"9l½]j>iPSaN;ztvf;ptvv-iv١\plﳡ@I |CK -璜I/2ł]}?udձ;;/e`Sgg%}y(XgHѬ_RB|'EnZ۸C!\UuE"ku'>IwrDI•=ZPuE!\=B(T{Jz`F }^^ti3ft%Po ].35HD ៨HB'CWW.ZW$p3k9mej[[ X@T[(p-Z cZG T#DAlߎrTCcLKQN V+(홲 MS) Eq[6 Y,?*:EݽԂ*=UְHFNkg}ciϔYZh*= a+ڋP{Ij/x[jo(YV{1k/+8jɧƫXoGF9EHB*m wqG+t)tʸP ۯu8l @T  [A݊$dHb6CQ-S֤MUp{;tT͠g* C$ݹ6ITP+"(陒l$}JV*[X&ɅnR1\0b)4aT˶eح2aTIb6> =J:6:oꐽUL V@ߪѷZvoYy][E{0XGG]s0iuG\+9Kw͕26N]sšk;] ˭am(qf#]Uj"@7Zbo溹T-۱jȁiFWQDr{h8D%(&([b _Xpưdy=: H,XşpՊtWה\C.o\({?/uq亱NLs ̢u w(}2d^nP-}JG+`ȼv-gJ輫aJ( O`!U kƶxBE84UCnmgrY-#Č!.S-y-Zt0F$tӁ}pf7^Фe+~%´ \d6-y-MlILE,{y3$6.gW rjRpaq %wGSPG&+ 4K`V興 v!MJW )̤sg(K'_iRBOOA&+Κ-EbS/Et i*/E3 TJ&֓?Btc7ݏ'Nn'_+(;PvrO (y#h\%=G80q:1Url0mRZAIOTHLq6 PSLIb6>~arUrN5 3ܚ$xIO( bk0QIzwMy]<)rޭ3äsN 9ҞhFyxC!ʠƍ&w!Məl%Ђ8FӁ,k(YB0.; +rXjː7{p?::0[|A1" 鼋U*.]U`Bx/Eic4/dґ1~";$;To߃;CDQ|35g|`yr8,k/$ՌNkKVu?I? 
!rG翷H=Z},r#]_sr bS*Sg9~ G_g >K߁ U 1uLo y-ʷ$YS͙z-IS"ʲWqv/'~>tV%._ҪuS~-3WM>Bˆf3]oDFNLvy}#/~5|1iCvP -mFgD/~+1ɂUoƙEcϹ7:褐ۥnvDTR>PS<޴:Y=#EzA#/]"i`$J&+>JF֬uEW #84|=t5k]gHf;֏v5*]g qtGMJGLrzkֺO u;ݯg rւxni:l$ RnQ#>n|S[ .GоI]N=ez6W fPQU 7.I%x0Z̎)U"(홲ͯabq rz<޻J+!븼ݧ O@DEʠ'~>5_4jizeDAC '*,ZsxTBVZoPKXO*;7[ Ye_~qa aqɤiw49Dm Z]l(?PozO=i[DEJ:d,;Y4 :;6EC8{tA!%QˏN{٠Wq4;&k(Mp4Op4gyiO&/> Q ]}%|UEPU+UfbU)vUUV뻎=C=$w˝:wG5'(Ճn:LJps#𤟅& /w"4:d\K|mH%?&Х{bh%áG$ X*Np@;ߙ4]L^<:fv>=Bq y%˗/Ov[K:/ss-!M/{fjsAHo]L]\j/?J[lL:/eKgk%S`C;>{ T%yz%WyCZI'n_Nl$WG\|cn8xVN{\Qf wF]lTXs$#ە֬uEn7jq~ #t$ \a]4ukֺO7ecBLFѴBICIB!PJ˫[%9m `REf=Az v2 ~NJ{@a B?ܒKQ`%pQu3]+`'i2&óAbskFVʍNٌtr4e5uWUYCg uw/om5}쾲=UY& ֯/2c[<>FlV\L k(ou}N;`Fu3fB!BG!Mr=u{r [f>-i5Lh\l.I%> p yq+8'AJ8m]7s]E}7sOϯf-)?8~D 'š eO HL 1Q"@SPD)kwLcS LriLqUnUe#痮22eH~?;tud_e}dW,Օ[ump0I:Zi!ڿ4EY(1;rL7:!d@tq-td)J+=__m缄ZQbwtù6C9h`#۶8p hmK{P-P ֎E +-4WtLJxka.ݪ`MbSr ,J_/Ygμ>Œ*:RaEߓ.ɜ#07mIazEC7 l<ƓSIPtkl(6tۙ~hu8O2DHi;9t%冬>;y*Z3s6Y++j0),,Λb9򭳕$sVR`eE}T+Y{A8p-^_v^Ry\1w#?xՌlF+}.KNjϒP@M,鐻wX}zQC/}cÆ >pPB(:.E3ԤtŞW >K(F֭|X>4JE.3jSSVre$61r3Ӗv5#l늫STN@ʹ[7Y>c?mJ$]H. Fθ.Дt;uSn RlIjJ:2LpJ)Ր D>8aDr5 Z)PlJ;DT#; 5,;t(퉊(V=OsٔNt ! n(1PnӞh(3F$=/:iv]e"1M|Ҙ]0ⵇʻqT ]&ޘY:I}-5\3%^ڋcTG<[tb:e TLaw,yi̧TA `'hz И;`q~>I.`OE,i|&Xi]-wbsV.Ƥg"ixs+cHiGQde7*(pnvå6! @UtJyfklx ) Fd 2 ;%7D6<)y(bOQ3rщ9-|G!vL[Oue2DHwew4Wnt n/e媑OϚѦeI^deW$I021EBK, `JzE̶f&i jffV9)>5 [O͝1˜fc&IfHg*ڹaS!T;(ץvr Ny;U b7>K~=vmzjֺOвVtB/if$XI끮f+ jֺOˍEX$~=6Y.e@MJWy(]ϖ Iƚ1iX}~\ ʒѝ̃[Y >A?n-roX!H&b@]w+*5 R)¶՚ tLnoؔv5! @߆z>WC #0<(vZL0;viӐBg9smǤȿq9ZMTBUx vEAƅ/)7ޔ#9r2KP^ ),ytW9rX) RE9'*6 KTBQTJ~NRFX& !Xn~ڣ+(/'inGS9IՌ1 Ll@Gv2 H(!d22o V2#,1kw&;;ACBWr\h\YC2r2ׁΚĮLz0*q4_.sv9d a!;_RzCO4i'TI>֢8x*p*2WTbާ L}=V@)=U`8fX-3 wJap[on14,Mmb/70P2fo绉M8q*u0$0^0mY;krY tу#z{\5n޾=eZ}4Jyn{r}ی$gʰ#MJ\WiZ$ZUxx ܂i6J߳D`06=C &w|8)yh:"Y7$0( ૂp47ç'sYsEOX8i)Vr*#i$\q(Z7W3tH{ONvlsC%UnI;>۹O:Zoh<恶lc6s\g&oO=,y~@̕T*#ƶr56_7c.Z[G ZIBoPÙ/=7]Ћxzn}{Ԁȃ+5+֬uEW qijrĽ3!F֬uEW_w:68ljW:g:B8nn9C5TISn9qXU*F2(3DCd߫!hɮFtFa*J) ҞhA4NOGeiϕ5JFx[ymm[+cgjPړ }m{z2^2(ɲ'|Zx[mh o\-t?bpXsաP[k1!L<M;;5)l#6-j(aFP;- 6KɆ] 4hSWU~,4 M8apvHJ"F1̒AiF(X4OV79-7h7DH>%K` I*~$ B[6&J@vRuUF]hWͶ 6A$ @HQ &z41Q3yJ=*:`R3 a(!^H/П{V&J@a#i:ޏLr>5} m -KdOpԵLtZ&}d6Q ˽HǠ G75?ΥP2$5ӷmXP7g n[ {-ⴃε*UQJqy^{@,Xk"9~ZWKi]Hr_K <{[s%Ғo9,7d_ҨU}1+q~c0*7d[Hh≸oQ -֢$VR`-$Ÿ́`% cqoe:D̼saV8`es^ѪTeg[LEKw>o$Zzٙz6i UO܊^d D^XiZ琔o@]4mNUw=+y킸I0Cu}kg}^l;9l&P$%pry7P+t4l8m$fɅB=b]3P6pW"Rx߁pf+׸c)Krqt:Nf*kH*n- vZ:hR;VL(Q ?|7V7͍.J_(=`-Y-uA[untdX(sț_*EY+-p0ncx_eI%=Kй0b)K -s0I6 rid~+a Fn]L \Ža=M i*Ӭ^ ЊO1h^xt׏ :3`* ߉ڱVKƚfk V ᴵ J+ 9 yԆWQ z;F)EiǙct{F4Mu" ΁+@JV!!b x]]/-6 hL h͙AiT4)#(W*^7Y2dɬ =H"t*fdDtdtdYC2˛)^HUP,{뽑`1v#I/SCIz;ͨ.^΍<~,\E{V^g[MvQC~@Yj `34߂|In4tyVr`V ԬtW!vfn̽e+ԬtW!69H#_9 3 }X.P|~Kogjn3ӝH[~[DeQa/iK WK 20[tXb ]`G*ɮxֳo~7Xb":kͧ 5 $^2(X)HS2Tۑţjl㫫F5,#(Y,#?NLܑuH.CDR,kR-{{Fc:T`pUolZrOH FӢJ)j a`: i*G խ*jAiGX5JA<1 1>_o8AN;zqM?safSu49D!lxEʠ4`#DMƂ|KV0ZIsZ{VqgER@!@h$U!LQ`%pReQ’ōegƒ5('v_vˎdƓ;VIQ\ªi1|4O_R@+1Sړ %ʩGW+.,0(R;)o~Rs\3J6Kлfpk@9ܚZM$)o~ R@!6:J]*m?̥g9H[#۶(5Zp@,m TpOђ#Ƌ%jhk%˷M]q]<į,a_sH)7#W4-"-<Ps;s :?kvp\Qu [˞'eDžH(A^T<;VuoqL8,>Z!8KD'iSQIVHCdm8{~L(j&ڠuo-9'^^*;{`("DX)\8!"!-cEV#kZjf}3ϣc(eB{m<$ƪVmOWldZ/f|1sj :F>WݻeNМzٽ6:ݻ>dڙV3γ<*gxܮ3pENyK"񖞃+4O9⬠g]<:Kz>xm@;kֺE~^k؍w $\ު~5k]gUo ˚7-dɻQ,sHG3ɥ;Y>c?~ Rp +5v]M-v.$0@ՇF[邺 VN6ߊji\~m^6)ZۤKo}&-u?:TZ "8bFQp7"d JK4+{ML{`[j%=|ߺ~Z>#_t,{|疹O-snǓGF4(/l$>6v:T*|h2yL ϶pv"\3 @vb-f3|Xma1tӰsZi1~kp۞xIG_ZV;;+nD=T6I8?W\rgYy_Kt|s;>E)^qOTYe*/{,||Mpx /J_加=J ݃;*Sw/:aWv*6,.n}* t#Y([X}k^cIXuf+]<&SYwMg]]Evs5x0t9EC@+9NrK)88+8{Zތb4B\,m{>j ȿew  b[A8ȉ[&O#z~[ ~՛Ӂ Ӵ gwN9>sUo-a4{_U{VՈVhp%h OGi_UYA*XeL,T&/M2HdKsPđKV_ FѤ:_ noCch9G\GIӧ%|H5`S`RJ}x:RvnZ!`b DĮkot^8VZS 
TZFkJҞh0Q$D{Ldϑ.My;hJ/OI%Hd!yx#y`2cuMxeH~kQR _f=M T=s( Wc$[Ejhhbgzg7yg5PIiS4X[-q5JiOU4ǒ9=N)yK~,@Z-mW۬ ̪Ej ~I2錄0\k Y{JP[d$|a!i(m*q@aiO>8t_prOg8;ݼQ,`EOX,,_6 fR| v]kypo+WcȊҫ7\s43gVB;N)(Kh5sgF81ej>c_m7agvjtсfUC8p`*]s%Z=БeH%"#F< [fg» ԄKRb§zDJӧ2TD PRU&s0q9e-£Y8Ƣ5~ׅVv|cQ;%vQv[n"m-pMtŴ[fmcqvn-לT]]EǤ \6  @E7˾W[$RX+"#F;{c;=S_P nհ R~j #o=HEB ^Qр:fK+fSʑ1_Q#qF#CD5G{I?Xy4lfӟֹ~\Wz-gʕ,)TXZL/2Rw[\H\X 7x'L3W~ Zd(9Q(9Q閃{ 򒥑_q1C/oS 7 i L/g~ʅ4˅{RAj}]*Bzվ6U8HA.-*e/nLew9EɣDF1Y'=#i5zf7z&Sz&34zV8Vh3ɓ6(houqV3ЪUu_#MÛwh;o>7ﻚ7Zhsk|P"0 @r^3}Ʀ$7ɠtsH/f}]et4P3{F^ry>$8.ePvˠl'X#}ΑGƜ)EJ?ƫ vHY`Y@]Y͕-f#{*˺! " 6<Z{iwfr3E"QFؼ -`` 'nT눔Y/QPЗjyk܊n& hyF}L 7±}m)/6\Іp#%ԇ`hZn?hZn/ۖK :`b\Bܨ X4l ז2[5-8#< + ]ɸC:=.X;GSfO*qz5bS|? ^<X3\0Ӫ29-< K ~S+3XueƵo]SHP iO7$G ޒ4%Qjr ek ['2,f*Nw$TҠ6UN*`ٚbTY5sjIJ5H6U؆FcMm}}\ \ ˶24Tf%؆ ᳒i1#2l.3\0aia m6{dCx~o}8a)Y 0L8ʌ NBE6Qi_a#68Jqlґ"W۪O ˷̥a- .s+ADBb,Z-nxQz֣\şY^w kxdz/o*0H U9nzM<7xp$Y&8u:Y3"U`Ų` QcQ\?wa^SvW1y,z17>~$ D]6nh!!x }*/0LQUdwZ{my58j&$ }hOg>˓G!ղ `hՠu\G5dڨf}yQdU8Jb,L*%=pqhqyceͼQx5JTkgk5}%μ{{XPQgch͙/ьu:ظpz Dq(Bp ޝѓ(͛Lz*ց>c/v<{4 )³AX#cT@%+-Jpu:{fxNՌy3^1oSf+P`3u.&ǸF:<.&fe3^eib``:]c8Y3Y6qCLd6?AJٚ׫\6kUdjcĥf1b#q 6kð5R}U~3! SjcjcѠyE.g7i}8XhֱlLdMT~}G3>.+ G˝&S"$aZnRҐ Х"!D `-ascێCVZ0RHTJek+s% bltiIJ96B8hz,6i4aMi4Xxe4v/C%ю;Z܊>V܊GAL2V2b_V؊GIm=`pm-xZ\hpivg[wkjz_߁dT'M7 8wCy~w@q Rq(!i O-P3P]Dc,a{ e9ĀvҷRe݊aMLegq>j+]l?Z۝(Ȏ W푲P1l0C=QӋn*.\D3\T 7x<̧fmk Ӡ@=-/a^,g@˹KدDʗH3@E:3)Mi)R 4K@)Era=CSxCdG$)twKZ{m5DSޟreϣZF-]-%;aq&+:V[ -O)#^~9ðV}0W 9[oJ"mSiڷ~i,z&t^\ޥ p4nra=kYѕ)V5Z]uM)'ʀ!=mpHzۏ7sJFb[o*OE7?3Σ`'iG,n~Lq~53g՝Ga)5p# zu<e 7??q8P3|(S`͔& 9Qz ]l]`v(.f}T<ɡ2lW4C>Bxmݥ brY \f88B.cX+cW{$y*q:!vT"{t" f1br/!.cXLpB56C58zeqa܊7T,[côV؊]j[qIc+.in.2hAN"*<(Cecp9@ea ngBeglm.ZyhW_o~K!mopjo|¾'諸G nlCkyNGE[j'翥yHfdכedA8>6IrOFA ZgZ¾8<@OTIs֛D^ܬӺ w.pʄ@ޡ[VM;9fi;o/hd}>Q]~אOϟweXv:/H݊)=*v(ˇhҹ聳/B :_̆))02mR8X&Xp8&}8= õAӛF?jF?Xe"ey&&n$[xr{xF)5oT{bR) S ,d}~(`-a8Gۖ[pwib n<>"f4[3[Cl- lj+FV[+EQ[Cl%юW]i>˙tw[n5bW(PBCW6+9伩tQi&bCqROn\nNw80 ~"z_N*iXzݱ=Kf"Q`،'rÀ^u$˃J]fmw֭v{؏M.Z-ilep  M i j7TMLjG?jY~z}dӒw)hmti53gaƴZ_ݚ;XZJa1q+c O 4g7le88 8\sJhckC5 юߟmC>*R=ecp.. kkp< ʘI\qTyi%ABJ^o}U6Mk$T~j_mR(Q`($+؍ ,C RHE?0׹S3ٙZg,DY.C$.T]`XOa++kj%1YT0aVJ]rqc_'0/J)sF&܉R l.]h"ƥcR&<_V-;3s1!a"ľ).J`(Z*S୕X6V2k).P(jxH#Wke{'<("pe@ @b B*=w9i%#v;8gc٘4=]K[K-zP*d e(Vcjk-Ek(.ok&DL<ۻ({]HaJy~wNKc|qv ,npv 5~9lԯqڿiݑ.kZdxͥfg3Ӌ6/i#\H\X 7xt">\.237Ǝ raL,8DXmղ>- i!WH;NYҙLQR EޖZVtczZO`sd&5 &o0jVJ2jVIEl s>iDoxO24p2c34-ܤV'&<4G<0XdL#f |H \Y_ wz3(Bg#pB @=5yo5J2`œfz|;KDx`YP#&X2 0&H!茡l e֚+֑.DGvlIqBeH{q|(j0%i C8 ŞSёh'4xDNOu+$3k?ïf~;GL3 A,`,=(\Cl-%5&;7  S5e2̳o~o-*}0;0AzV. UU[,M0e4CG{i+$Z[Stv\[̢qaI*6p;.?V3V|h|FS\HB3Z>cłUިVmO0RF=23ԷLz*uDP&J(9Y> TVWO9{)Qz>]ui}#euC$Ɏ'Ѷ}.}l=J-핌.Vek+s%@eal m!ש!zdrـioL}4YkR8 H,\D ʁ>]fg&;iegIsB?b\Y~U.{0m GMn iC"4 -A 6dnmL<, t %wQ6wڣ$Rk%mfaj_HX*G>R)FľJX\SؿqlMc֝.Z1%@f@f``>="RJ&F7}a!8\|'X!S,RYW' (DA) .uIyVvU$OB,T6v2Ajxa-ͽK{52X ~ tiǁvҤއsxi ӜL֧0* OYh5+xҢ5zY^PHO~M6gm)o/Lfޗo.d58, vz|]~z+k>"V*lS/ T'CGOS6zȕSKL]8x)wE)q&!E)5 wQ,TLU^oҲ<}Tp2ճ}L\ꙮ#ҾS2qn@GeuDyk6:i7U~)-zvW|%z UCs0 Sݓ{s}>pP.|(S.!~ePY|PzrBHD *K EXK 1>XZEk)RTk8@;SFWYkP"Z'9lJ8 W-VEm´?VM1'Cmg? D^CE[!d@g…4˅`}sk|OjEքi'LD{lAcKIkSL3"d6#jkqw;:-)j_ ˇ3S%.ede4ߢt l::Q,:.tXծp@0AΚ)yl2Ga CGoC4ICy#UX]0|Cvב'ѵ$d=420>\*= YŒ*cل'۹ˬp|gvFThԊ+fBR3 5,ށVJs͔ xʳCL3.K3f\{@0>`сːlc)wP ' '=A62,!JhE?(pݾWK-Z+a b,;hFW}O*VJ,[+CBɄ;,bZ_ +(x*N2h~iqB/ge#_m/ЗobZZYN+Q Ms-E[dvA|h/QO~vRd^o.葰/,ScUDm-dzȅoGO(M"s'/ײݜ֐6îzB(24' W0j=Ŵ-}`6*x+Ǒ-p;Z>cMި6)5)x4>5sgInp޺{c s8|T6p(R"W^c[l5ǖcQF+ [ҕoQ]PRiwWj`Zb"HeXSVЉUX0ԃӍi4gH@0GC}W"ʑ,S n,hbT0aL=xOV]+3MZcQ6&$z0ؤ `Ɯ6 ;+1:CJfԃ_ hrLmp ;Ѽ}Ly~w_ txg307)dY~-@:M EE`a4 ީa' y%@khPK4}PdP%C][T,P`a4b98֓DV zGs'VZ!hSĉX7LHc? 
Jt2C#.f`  YT ,3Zu6'5@@VDN7\#LrǶ`R]-Y' (˧/vgd.6sUkCGL/zhg(+#'+`q'?|pdcQ<Ҍxu1Pjj8ՁS1pس@8޺T/u7Ժܔ6 aWSͳ%wZwO*捗YGpΥ0|i-2>Qo!L)yy>c2muD=Hn0)py i>c_.`Q @M'w`rt粸vл֧" 7g J<չSVFL3Cٹ佃Kn03ccV^4d@o5ֵ{j]o cZ_iЅjv,syN$K{1dioLWg94K;"LvImuюnLeskۨA5㤥ePe0ADˈkZF̫"6c ) `QYŚ36gMN*zh=e`ʙ j#uD/8S"~[==k]<=2wqڗaٺzġDx> gzľGc~bƥ=H_Vd6R)mP f~ңΚI/- ac7<ŦgV&:mz$ 97l J֞|#2;=+0` C*Mq $ֈތ9$Y&m~X& rIg is|ݐN׷}d4NJ"idPe%RF-d Jْ^8o4rVQ)Ѧ~ɚ3ց>cV:kMv4 RF=[a553gv\eœlw,[9L8rOV*e1iT0;i=6ߓ=xس26%JLPek$sU *yINff.ԆM s%&ӏ6cQQ_RmV쁖8*f/9ޜ6 e0+ }u >b}_?XQK@,j)n뫥8RB1@аnr$>A| )N;Mp8Ak8LY>z|)1u7ji7/{3Ӌ(p!ra/'X\q5Wy.L(9Q c4A@&C@Uas~]D\X O 2Ajp'dzqBƭX$mnE7窭_1jYw1*9(5"(9"<+Lx"Xt!+N2>'G=ӡޜ3N<- w$Yu>C_PhT[CkE8a)_G8v]A:uޯqxY;0]e]v1.ߡ=% ۢ;3j$E/)VPm."53&Ln[tvʅek&sdžFސK=7śpj~LXv?j>@X'&ɪ\we4 F!cB--Ts 81S.!C1lƄ$b%̠f9Sd(4?,AjL_sxavI8{28D%࣋a²:uۡe،vx,1{`Rvګ}9"}ci>D|O:Z}\E/XlcnL`2DW mGkE/s6ބEPL 9(7RZ*eAղܘ&0buJafC)~cp$t-eAvY{gX3}ƞ ܖ^nLcdl: j6 FXQM93>xsx:R!V_1xe`/\:-F7 "LxZ̕2Pd_T:SDpk`7nL` SA1uz8h<&|GL":}o{ѥtX <<77?0g.ޑ˙W e'"3 ̟8dObu0{j3`wfH˜w f2q9ӑ(RDdȕ-h["GMjfyw|lhYi֨e8;kIh_FܵT+(9̤5(kf#= 5oNCl/m7-Çj룛=#![wqJ]{ )S74]!թS޷qL]9:JFթSVF#l3l9Nߟo OU<-i3l 4D T5tk _,CUdш|{qrdJ0l`PlC׵{P,?A4DY(QDTH<͚P , RXGljI5uJe, @HZPXj)A%:2h: #s`l2\q^0~{<&yttP\eeS N=OK{3Ӌ6*W.Y. <w;uWLRbI z\"^Aw`)frAz5Y3CN\R #-2jپ{`)HWp:a:pg)kpk$piO8^Ts5ȼnqu:o=&t#wPO)r` ٛ:suC{*3' !׷شwdzl#C{(1n/0@2c}cpc^M͹6i lLd%e@kG79#{$ H1v{YPM8cpq NSހ*y;*gW[ D74f[{y8VB[ҏ:s[{y8VBxC؍(zЋLy~w_ txs XT\Ly%48ʢ5a F[?u~bcƘ +f~ÓGo.*ui0/+6GdI֛\L6.\D3\T0yr-`#hm Fbךxl2V{_OF=E46LڵՆ,[ grZ'J!i⡔nJ njU \&B}2չzaxPP{pe*0OKO%gJ)gOTߜX DYH( ǯxaT Z{ZZKk^4+)`b80kfc..O[PuVvKDfB "RLC !h`ؤe rѫBȕCl)h[Pg oA LqO\GW}T=#ڥ/&( 2nr)©#rKS. :|}/K @52Cr0d+ɗ4,5?974/>o?si׾{9(?ODQ }8" ;d$~0Ohx4O??}-K,<04U?jq˟ݤÃ|$P&(4Gݩ!=YhvgS;~9ӫ|?~~]~M5~~?~ASVf8B/? C>oCtO0nV86~{~*{y*%~j̷ \6=^8? Q~YAd`x`-a ]ԚP5 Jo;}=is{8up( C-MgH]WE5v"UY +>TE2CSo|SQRф7lH`Ӿ^6gMe󬅃&p%݆6M7"lp0Z3'ǭ\t`.a,yv^LP`<}TrჩB' VNlp0񖧯'.nW?4gmks?|?G/|ZG|@gfàxs >wq>7ȗW цRo^QMc25g}n 2A{Al )>۷4]p~}{֜ϿZjxgOm|woӌマm56?}~~Ҏek"ߙPRa6%m`ib_󰃝gk{ֽ1Y- y2柬cv*$ 50u fVT ?c. _}ꝧM3Kp. .&]Z硉gnn$ Fm.hJl[sHІcgF#jC%7\ endstream endobj 160 0 obj << /Filter /FlateDecode /Length 52035 >> stream xKm;6Z|ʖ%@<薁[/Aߌu25FVq"x_k>z*Rj+1ʕc_oKz_5kyw?˿IH{^|FꏒRjcsp׿U4yOE{c1ǣeo/Lm ,T|߽eIE!\Gȥ-ȕuZVcVҖcĸPZ1c d\ ҆liJ"Ac_rpqѯ- Ҏy߁OtǍs}}㖲Õc`J;EIgh9WbQ&2~;}GTS|[Q [k:F/?Xw*tktѩ={o"=Yk/{Z]V~c]_wƜoH0ZƠҷ:cM=la4 '_߿*iEi 368Jph<=km!45I8qDXf]I.soi._J{J|a-3Ӽ\c Lx&4Ihڕw z<q8`-)5x2- 'ha^}Awh'!k#;o Gഌ{x6Va i q,M‰әx\Ko`k#U$&"4U"1&݊dEB#XYW׿i=ZE~;Ѫ('? 8V Fį {;ݷro@`6Ep59FH@j1ɏϑk7Dlt JmaxaR  +fZ[Zd9^5H[K=8R k NZ 9;%LߧC_YR%puv븟xfK'4U1,=-B#XYuJ:ث1u%N=ӭuW?[XO_XV/C%Q͞_yocoE͊F^{=<ȶLoMx¹&|n ~ѻ} ֚oUZ[+ji,(;ﲋ /(+,T1:=MqJ=V{`]~)\ZnY 8{ofUJFY {tLiޅ{7.-ё{KL჏};v-`?;+4Zn4AC_]$Lٔ+i5_S4)7 V|(hrCP,&_O')'])r.R9:#VXG>z]VurA :iG+N++OܐK-sZ6IU*þc--򩇵d )6z$'haN+ې+MQAcM(?!dkc 3|7L^Jۮ\.вG$;EUߑb+F]#e׾GޚAo}ր;8l*w$鳍ЯIqVI)8 0~J_WLr“_;V/agXug>ZڤiF[4+CJ[t? 
{-!Ij}cis>GpC@z ^7^䘚</ ӐFsT7OCVxCV} ^ ?FH0);:=ŗ^-,6[5oN-tZ{ͧi|u.m')[o.[8h;pF|;rC8OzymF͎IH|mx)F`ϲc.=a> -cVΊq]&)x&K %C$zKwͷ|[h^:6 Y 1t+~]8>b ,C,g;> -ز 0 0RFQ|ox}q#p~?+>RD̄F1o$QxkDY=K \tIY},r( 0Ip5YŔIMR%(^DŸ̄FT2[ySmGQb̷V[l'އ(~;Vuc-a1NNH !4Y$8lk)."WHԔΡ\2OW} ~ fiN/[S6ok6oyFW9U Nh{UL6Ipx'm/Vg|} #c7>oN]ub Mvn pZcw7vi &B>GpC񤥿|.s C/^b^9ܲW7>LNs›po 0 0zT&mHhc4IC&?ޱ`R&,dFapL8$ X3ɿ)0hYlKa0FՠtvzXNkg|O܌%@h3-w;&.c(Op,M m{6[=pN%8-c[vO0v#0n37/w9}C?])v󉀭|񵟑޲WC6:væf?bn^O/Jc8BzӋd<p^ho6KpxٝGyx(LhGk+Upe Ūesi"t9I)&Am~YO$ɳhM>c QeMNYp#+QPS-94r#coxāx3x[{J;#F:c+HOE'&z/l"/_$|_Y x~@߀_%8^ݢ+(甼e?m.5j]Π&iS[cuFH9(QP9):hR}=pݧJ?MƟrFGPZpHY0ZGPiPm1`3&L"Z-Sa4Z&Zqߎ!xBC 1H ~1 /`G x8H8=dFYo'mvrI(- 5PeHf6ApBx:o%5lB0ot] #10o?F$8^ȉE *]r#.ىQ8ʺjq(.)p{fݤ5 |5zmԄFa`%r3}>$:gwwIW}3W^DUp̐qU8Mnjqۅ;!+#g{#Hfnʬo{* S*X}w&T 9z#3X# P %|21"^8#(• ݃@ k84%-c@&{#%V!Mh@3I8q<WzJO '@18,$-c`acWkH|R3ƨz3fa%Ne49xr9X'&"-k^Au¯C+H4`4px>/K~'s?qZ':v񪡟kHhiКu W:l]gPK8-CK3y6Fl]9g -5o lxep" 7xWxE kiдa)MSMS6ͻ羏@wY 'BsaϹrmojlߔ:/kZ8Ͷ6q &(qmk+a,N4|swhpp)Ph JOC9ܩ!ܫq4.v~!]"'"DQ<8#D|NB8I((oLJxl_)5(ah(q*H̼^i)xKWPJx:mw5fq–&d~Ʃs:\xa3S lK"ٺ83SG9Q Nh5~hJD#O)j'm}I.),x(4*GY}FqC䨿UL$lD=An\V1pYj8sKUq+-"oc-Ȥ-D hϵՖ޿V31M \hzh=X\Zfr#\Wi n>+[=8%omk|UdWO|N^iuk(C?-A/gjȾy<ͭ=Y"|8UZI~1Yn!Y!3hE||9mPpA2:'$,[l"D>a$8>-x`mX fN3Z0sZHpqʟ^Y ]ހU%(>ǕNjRk~gĂppʏV!,28,z?Z \kq ph:y`t pRqdt͜XB3[oOw ,G5g+r[c#"t޲/5' Q }oC? I8q|Z_`'vvI= `#;us5 .`9 'iZI\x=FƳs 0RF֖/mI tl>}U ħq,LzX hDM)#ҞěYw H/Hixf/Su75~ѱc!bKmtNNFtl)MR%(>d7 HÁ5 G4i@4 U:MR%(^+,[VZ͞Abit>,, זC8)&OV5#ul0?)ϭhOlQ\҇pn9'u?̌ձiIRDk9(KjR$@;<N5ɀ Al/URߧXqΡQ.i#;QB;]B˘SI8q<{Уr~ҷ_8HYk|.yW&|p*b85մ l;b=%$h8-CDXKIFĐ>GpCdD~dM02[kBxgPh&iV~x F U[Ԍ&F jٚyKgh.qVGpC?ޒXI6"=lۧ`nh׿&Dnx+Wpf/`dd P7roG8b6|"W^v_H./` O % rA>ᥤh)r^r9Đ,aOlrlpOsŝ8}`}jJ|pp*iG#m#N/47Jx}jѬ3\*vY7bp|[9|g[%vb8p.s7]h68_R18o/~U&+W _x.wÑǫz8m|*:Qhp*&yK0q,sMΪi7jePSp}twf[ʄ㛼PA9|*Ez|] .7ޯ\ ?%i~e5X{ףj_/4^#Kjj?өvj39\S2˹!J6dkg([kP6@#OxKg()E򼅲Oz䟻̵B׹j46" !gChl Q{_Ui7z炶pKt,Bi0]9(ֆ%auh{t5aST 7;t֙nb@bFp}^6tuE^ $4{ݍvq{YH?%xl$iF) lΡi.% e(C0k:2n*4O9 my^Ѳgg<[L=-nK@qMcr8rpBx}3;C>}Qs%c+܋عs?d{v9a(kQعع~x:doϔaW|PuJs/4ˋeܹxGJ}??Œ8dܒrKV9/4ovF;ӎogZpCV%(7{?S2,-XVo7+~b {a7ㆽݰWȖU y&hd2g м'#Ax4d0bd22d2(fŸp(7<zo߂Y };u"&Λ}ys .7y# f&+&7`7ECyI3xyCy$Ǡ |7ƞ(a3 N2`rhMR%(^ ڣ vDG|BZ3D) QkΡ1i.ht%Ї|BԚ6Kpx>h=8&>b:=@1{1{*c##xHI1&zxzWV94Th4{&2a^R ߄i> _pW%87uWj\W\WJ jؚj\WJk>rV!kYsxKZGat,BO8-CY dEC6csT7/twNyswfÕQOFpoNUf coN==OXQpE7Gܛ{sUpor7G8Ub P9xsMͩjV7J{_VS=9yȅPoL9}s)#O;5qD)ʾqU +sF1Xy+<`-9qk hs>xμϙ?ϙ9CxfIɸQG4Y},-;4c q]kx"Npv^4`U͈qz%R> =pCOԚ/\;>,=;Aj9j1?|`deBfXn S NO-y[]pKiLgO$plsIxv+N;T'v>)#v0Uj;?g܎uݚatQ-׸ p.maոM㻷l`QA\%/v\Lp·I[K W\)_&t>1K[u6(J%ekrH=erpQq\m9֍<Z:&}K[ӂ;@rBX4ѫ9ѯ׊)rN7g^#4ыs:?ewʤBY?j*\y_+a4gtbJaib\KjeKX-UN7d7ŵe+URQTU3 FgKY-;O`!4Y(R%p_'(H;4b"B#Xp*h#Glւ4V8HD N1%q ZkysHf0}_e-2W\X1N67z4z|Ѳu != Ywe@8S sg{n1~i/f { 1V}oE'Ѥ=ȆAI>:HPZǠ"et]o+Pz,mN^W`rvRl#6It`,tXCV9J8q||uk%`HIR3χ"LZ83FWE"&/\Cf6Kpx*Ӟh9Z:IĩU}шqxQhhp%X9/|5Ĺ-߳'vj)FL^ ~JZ( [%PzlN _Pu#Z*q^ !5FumSX+yc5iV 8QHZ2&DKۄi헭&:%f6qrS!ohYݹQʔ@&ZP(r@,-˵Ҷ8 +*Y ۶Ͳs-dPw'.q!\Zn—<Ք~}%` gtǹjֆS2n ǿKb &|0)4zG˻_0QU Ag^TXؤKZ֙&.-ニM^RQݹ ^}$uY?iw/Z5]> CXI/A}4 oZ#E[( %Z5U*qșO{.`<9]?Z(2)Cû;0*&|ȱR.)[C8l&Et͎h [_7;r8u#s}8 W>G8J8q< ?+C SniJajI0Z5AtM] cjxp,M‰wuK?_Bgj9K0Z@t^ɡm_ho$8 t??-6$S\Ak߸NJZf"=:EF pZҲ?o6}Na$i2g]\pC{*cbo$wk Yy+ozT }ـ۝pi ':orpL7Ѧ Mڸ2p 1$;&A^1ca4 'D">"\gY_4Sܮ_RF|K*᫄Ǔwy XSoدJۻgUFi6hM!e|C<uSMVY鵡 M=dp-ཨL>QmN4Z@ z)=Ey{b}ba4 'gEhM޲-۠ 0_I0,K*r OCh(GlD Wѭk/~7M5[sBst?EE dzs?~.2x/ȢlP(ΡntHhPU9aQQ5NAi~m$CKm-u.'IC`EweNTֶhSyP/c s,M‰-fٌEMG$e4;s F[hɝ 7Qc4-fa6Kpx N! 
^>^=^pJpVHp BLN!pJ8q< ٨f UCKr8xpx`5@-K <]$:a dR\#Ċ,x\yvr%N{~՛G7PyvGh7Py+U\EKU A+.9 n=UԪ\`*_upx.H>:[7wu^ KςKq>,.} ;pL*oɿnN8!~]FO@F=0 \w+pݬyZB6ēfw |xkf8 0Z@뱄:f`(^T.{ C]RN[u&iƝpc"iG^qAbud/uMzUß=sp88>E Nj{گCM O[5IwIo]e-HC6=aS6 'g2Yy ߂3qzf53H.7 F~A[`AV׃U‰i# 0t #T ( G!2L9J8qzn>' Sa(N=NKH7[I+@ef_apI8!<}\ n,saOC*Cur4+tt@q8XpxY~*^W.JШbZpZ|);`SC*D'hh00!8> Mr^`r/0oA̓.U7U‰ňREKG<;2"Tp*a羙KvJB,1U@! NN+zz D<v޽}!1P|* ISo.Ŭ>иnӭ|_cEKb3'$\H%fiNO{+OnɆ& Nqg0$YB#|p*:p_"'n(Tgkz.ta I0Z@ @ $MѺ ,rB+e@,`# 툷G*9$ P3Cg3M\_9JpC>|"_9J8ql&E{E&4AFJ0VF"q 1BdnVlWM@>V?k/pO%8^t1mL@WxC˸S!4zC5eL{V  d 6GpxԚd7KsW}p->$Ul #=8IH]pu1q6?2x 8 8{C2:U7ݕ-VO5Ϻ;ݻ ԖY-BB CRO< (E=bQB`lQimM.4@}~mJՇL< Z`K-0wbU ;@FP7U ) $q|WpU7gorXsaO?r8xpxMO-bOrovZ64 c0и`6Kpx:P .`$\<)QzBc$qxJ0ZHБP} a9l&`ope= pJpGCt*3}@UR '}@iKkp]N p'JH8 "B T^ORN/I [Kܙ+U\E⸠+;T~mkCYGҟǑ8<+J8qpx@+gdo$<ߚ$ iȑEr$f(Hȑ,NO;qt a#B6Rl~b>8%惣_׹n8^Vn*mSGt q7bqx9^|܁z*J8q<85MZ,Nr8*A!hgŔ{,k{=jW,$8#-*l9Lr0~aLaิՏY!y?[;(I-&/76Ή( bBG6|$%Nu/Y*K} 1آ+)PbB^qQG[>&?g9PNE?+'\рXQD.9dkhcHՌ6 猏1FHE O+ Jy:GJ'}GtU^2:£C2@G5cR%؈lFk7j֣!FWa%CB#LPh`[UBq.e OWd [fdknoldt=4uzj}N R rD`6opf _+ `Ur`i98"v `U‰ZO[r`& LFFvp6uH0:Yn+oن323{a4 'O mB΀,gYBlhV%8Ֆ lm[o`` J8q~LI-MsbVS=dc:8d:-C>5Hq yI䍍0 8Q|ZYQiTp:SJ6 qZ ؟ߕa18AZ>+e* ?1I>(BB!UZB,e)/iQ?rO LQa݅vun?)mcZIe尨~;iaUٮ זC8=z_[z݂V_Kjk9^+-T'U'd]vҍCh!Z?)*ߍ( /9=گKZL)ry;F?*ߎŀX (Z>*5US\ʶh'Uj]b@[WRˊE'U,]?D<YO 4ˎzsb@IU-貇&![~RUhk]@X5乷j@IU}m(V棣kIQFu0TڟlZՀᓪB,仜q3>)tVEѨ4[F$8 1Q@0%mhKY A %lq8P.ȗ`4eD}I }&hN/ ? fM;s\FXe PGzpQIлw>!<*uuCBy,G<B r#:4`7=+W '|!o^P,+9nd9 !8%!dw x2{x%[`W/EE~^MY !O I:Pxr`إZdF%s!!h#"}!!tD$8<>J7W>_'~_~'|(d 'T8^ ڪk?C11~U*Y#tDĄK?C跊HS iH17+Y$8 Ĉ Wپց-"bh %X"E >\_ho6opx1vpDDhfP㧪0z~E0g0eȡ8R%p<>e9>9'y9dB_%8,4T Q4/_$pCg<~̛REqxw`\HH c$ )>?~CdmI( _L !>$J+|  q&m/G&aQ~pk,R"\,SpXL]sDK.ɻChp8$8!]GsxHi8Jp>|9kAϚB;+W 'A(m0Gq i0iL`46I`mceBMdnaHrQr(>[*V%8V; 6dw I2,-@pxV[2 )6O񴦫/* .P 0ZM_M%^c(>g9œ'HAgwzҳ=\wQV9-{-46 8@] n-Rg UR"rJ޻9@誀NIT;ۏ>[zp1 93a4ՀYv +*{Y 0jPضuDΌ<akn7x=!;v|=_'+ iY0D~9$O4'GI9}4Oi3oH?%w8.)Β%YBonګe ~i_f~9l&O[c/yHr03af&7~u=,0hnOH'_:*^2.RpoI` ? uAsC%;NW_׫qNG$5kt|vCg NJ>K.ISvr,uC9N.v 8Z:B_ -iVi/Yj_957@)6O>lC/I]8yW!ǹc'=˚-"2]9Jp51'Mn%'b^9J8qj 0lBa(iݭIfOyVOyF6t>p[&oYΤʾ@^ջЕdOr^l7Zh6V =%'Bԕga q[9THxzß臜]:_-&яz;7sCp tx?dƥBCanZxX(>ƀVs`Jq?4lQ6WU‰i__R?9ZB ,'bjSً\Nqj8~UI8qlP!rb 0R[S^`(i&˳EԬW>?cq@ӬowKOӄ )V:uk@%@tXOwE>_?mK!]ROK,aRS`0htȰ-`}M4,sAUpxHRݑ;\hDn$Jr E :?3~A׭hpG#NG;">qG9cp*~8k6(Q 'g  ##YP@q8`t"N NΘq,M‰P@ Gt{^KHs u3NOIr I@5X8c)qu"4;<'$v SN 2r;k*T87q\-HVνѸn1p rb[h6#ulfNO{R31_r8|q] 9p߲~FC%MM-R8*Q r9Tu FSZp&hJ6Kpxxb4VT50HOOfB_f*Sb͌pS%c݀,Hoh65k9n馩 N1E~q!.}9ctM?mS:k^C_%8|uVSAu OHC>.:r3qsChwe,J3Ai? ~*ZҒW:rb o{-r1O8t6 8OP?~ˡTnojx~/ cߌ"L2ƾ!4ƾP 'CA4GJgX K1~݀$UoZ<8#K8ql5 Jz3CSy>܆1T[A. V & ʜ`2,: RT‰i#p3u4 .S9e ?Dw6J#|4h@5oP#]Go{JuH\ <Ӹ|!h36[/ `s4']BsrwSn.KruKl2ww[^) 뼏 u_ۯ1MZ&:hN[X?kuwZpJɟGUJm1J@87OR)*$Q5K#Q|Eiwe|R>$$vsiYfv_ihጆ+д0Gy.le;J|<"` 1zlaY[V@; $η.K[[*#w$='W7O8`>! @rc &-5@5UJje 0)A]\`@i0fiNO,id nuY9cU+`&WrO%8^uQ5\Ap+Q |Z Yܼ` `rIL9J8qlP O>(WD4dۑja 7 @>dA$^qzAtJi ptxC4*}B t 'ob٨^g ºB24 F % DT¾'D*`T 'ͧO^h~DWD?wq_'JQU_qDmXv e`JIfe))ڃ A4 8QބK$߷X2B4>TzVQW- zP}RG%*mQCVEhiTIJ p?" o!4On?[ɡ7ݪw3[[9,T'/_6,'Qz(_8(I~ic)Š0k^5P\̟rEu?׿ǿ:΀LJ=V}L2vm^ $?O LL3E0 ħP0yC0m[>[U'^?7?e |D_c7wA.@T \EVieL"WgI+e׸:+Zӭ1=y3RiH:i _<]T]ۜ(z_H~'~ q4.m6¹.i$skߑژHW(G߸ 0)bƤ 4AJ,x([ltN~_\|z *}Ҷ=nV6/Ը hlu/U dvM~lʻy  a\ۦ w꿠O4gw-owjLכ{hZ˱T2W\\vs-'"Z7G9K{4+qRPl='@'Ɍ.QK&H)0gNͽNrz`e۫|:BG|4+o]p3Tw6 U^}}ng |ы_^wA 9:g :',DStW8SqTd*y袥rFPx=zYNR ޭ%<ΧӼLͱ!22LBhC%8-C eB¾;b q$U o"Ns7k=vF5) (A&oZNǐcPA'0ZFr-u ŬCi!p? 
{M8׈7Pj2|J%38D0ZF|L> Q:Bq F oe)$NcM0I8q||uco~-nt;k'3^a8 -kzvヅ|-lHd;T|~cZ9Z䅂&sUhyfV(Ԏk oa߁~Xjlydk)w&(~io^4eL9߾_|ڨp2R_bZ˻ ң+!d-ͪ;>ÜLR/9䒹ՄwhD!mqIh uv}x-y$ M[UMʾ9<[_!Z>+k6?\:0qOI-}SSE)㨩JC` AaS0FuPb N9l*Dzѫ-}oy0. 03&`T2p|0Ј8l*㙮̹ͮaA)_kq>*NiPʝCun`t֌ђ + Zs,M‰w[[#i$֔__R/0$= Ki6ŎI폮XvWuy{hRɥ y'`tի-d?@|a&Y96E~!}ٱ'b}6Gjr~S;"s F&w$c)pI8q< -k؛-QgwL-W6Wdr8\`y "{HccfiNϾ̑@#B6q˔%tVI}RԁcK0ZƠ{.M֑} 9t.w!'sBmRTJĦTL{)yKѿqiBژqM ׀tް94C1@{+ ~p/Jpzjh=-:Ip+]_.g J%}#4#]|a5IpC% Gr/L }8(V;gù{8uǑs[-#㈢(K c#g)ZW|E4n\~ˇ윋9Ե~W8 0@؅S1Z `#gzнosn-"KCKckp,M$lY넽?Ulڨ[KhtTNNI_c5\Ly;LUoV눖dFϤ\&%QP%-cC(j19l&G5aқKIt9̣f1F6F8`&ᦿ{Ќ>!40JX8c\->^r 9up6,@>a`\DF ]_lx -حﺾ1>40JX8nωg @gr!X2Dp,l}P@d_KZh+D\Hh`nx:d K]#ᚅXh$5NtM$GG@xࣸB-{5&[r,p-F m@B4]_9y2䜴n40.'?9!>eUBkP5$p',@FelVW}LQh`Pqw W cd;CKAat:[pI:<!1r e&51t H)--0 va `2G\ݰulh9|:/@ Tw#40j$tUzk$T@w+Bao94K 'd85+]8vZ7€}0 7FB-S0lWiL`40JX854?dC302 ؍a@=\}0Fq0Ɍt S7ܛΖCyI ;y&)d&FB;vD&EӀ02̈́C4/(:3oa[~ D~`K2m%Ћ.gK94082G:b.ipNgrpyB{eAXtga2{( yjPp]\崛l=s y:-\f?9݀8 C#OYͫ$~4"40>X2eM6WF]\q>WMS5y w|բe afa2⠁QBqxfJEy)ʴLCݹb4`4d%,TOYcGZk.xhwX;+[6.!Ci]c~tp7(S*URf@l2!oj~yׅ>/axnxts-3aG3c<elA44T4";7Bۘ]u}pz Aw B?3"_Jp?c*ӯpPMf6k= ,AN>8'C73P u%wuMa_*9@̇qDT@S>S3iK)kh*@J-UA,&2J2qBéna@d8aTH\@ِ\@  p`!;sL4/ )As ݀ROvtAk -3 `sA=cAt *LArmd܉2QBÉnaA/?F2!Bp1D>d kBڠ<4[<\97cͣ 50nX8ňn* Ng:c[BSjr̼dA#BtnQcRϋѷooboeϟQn+ń\Ije\ʨkrP!< IqDbrSf$ m̻ZjX {]iˑ}L(aM}6}2c_Р@+^ٰ.'̡/h`PPx{VC 'nxQBq)NJp()BKN!rSp S84wB:NHPF8nxe5ܗc5|2\.c=4[3fbA|< {&\w , v `i?9u!KjBy_p/9m$qF&^IDcs~e( %,TOmK#明Fހ̇r̐Vwx4#[2u`E^qfHsȈf^pO[=!̆C.Lʆe6W=fч [x{YLDA a )arq$䡛s{XDdXܵ *쫤u-^c8:qY}z'Xl)Y K sZ$Z6;}+Y ~v/i3i&2Y\%kZk_,OQ2K{/y Ūo\Rj(VŪ^-\ԦMӐ-%U{EZ)eR%RZֲ֮-W) U2e%kZв7_-kZrhIbN;|q(YKZZ-"[9q*RB*RTUR W^4P:Nz|'{[e;JH'[3zKպ|3>e^,o gX#5,h\>2۹kb$Xh%ЇH *Q7K-&䨛՞Ȋ-i1TXl}0Xq672F dzzTsw"ŝB s뎬"U;%)Im Ŵ>娚V5"KZԕ.3qxadzTJٳ-^2 -oT;O}L]i_NXjj|(Q9*=+FyCqN c-FդBYm[K5>q{p^1Ij x(tڞNsة/[iVQ[K57N @|Zj~Y"*R}Y[Dl|-/~P4OQGwHms2RA9Fܐ8ceCI=y.s'PFjvezT}LZ*[>G5]2Rsz[0ʖQu5UQiQQe'&<$՗ ءT|PjZ=1+ʖ'O(֮IũA g-4<B>2kL8 Fj-#sux~o]#ᛅHw_=[Swo^pnAV&[4'#xn!kX]w &ǗWg]WNƬk[Pg~z3c0 输 >J7!rXLp41=9dE%U ̑1hD$ Dl$ٲzAl%*lLJ&Gq"XJb ,!KG$9C,6! 
FXBp3r'x\X؛/=]@/K^Et;BxQXgd@#0;@&Qê7E@¸FBqCOiY n B  ?#nsJц9G5#uZ^ɋ4""-THA[J9Z *UÆrЍ d k' s7C#nxbN1漙04yjK290XH L ᨥ}I}ޅə!50HX((nh17 #3C`譊>w nؤ50JX8hi\.lh / HRʽ$o GerCc{܋ǽ5cwVߵFr|>4[݊|}k\ Bt `D.bYg7O8&+pae|0HEDr;GWwqyh9/WCc94 [RVy %,Tw5)e޹ZGҩI)  9`!K)/-,ZD^T$G -l:x|x~oUx5sn}y4 $ggVMR1 S<$4< [ Yjn>LxHh`Pq I~_+7yS#o%<}y/yBC\feXre8'˜v]&)4 Lk-D%qE}]&)40JX8z,0U7x;/B3NNaXF F}<e5Z I;m?Xp IjP9D4#1]Ivǭ V^'AxgH [,UxPfPfS&&k`Pq< N<qc麃5ۮn!d"ui~Xhȹ<:0PA܍ ,C$U7't tHf!;{ߑ]=Ix8*,SW煢V!{-190 *QKrYy]( C߰4Âppa!„'|%s) t C`?+Tf6$a] %uiJdUkBph^ taW7~};LiPD~t|s@n2#F 8_[9qݲ^t{o2Hf!dO±Z>I7I*ۄ@!Uiʼ.G7e5khٕY!3zh`nc'`^n֑i!s-`k},X>mYg9v+YN9  m'`}ݘ1Ktz\#&ucQ|1 t^7fF@ucVGLѲ5 촾/k.G.4 [đOͪz:SdȅF ]$\ 1R쉾)Ç<㣶h $*гW sh`hPIY &s99K9d&A7X]07nh] b=wMk`PPfn22q0,G+7;=~\=&z-TO}NKP-Z}R]ph<7,@n+[˼ͦ},]+G *ێ^RT] y9zFZ(EU n40HX((.bi<"=)r9hB =(WB+b( 7WF~^^(]{1%p_"xWkBx.B¼ov 7CJjdm5q&N]L$Z~whƉuM4Ǘdyؘ 8E{;>( vh80z ChbyMYx(q|xbSo\Mj&̓hq.-.4@⚔MZ\HĻx1B?BM<s!/B.Lc ADع!-H;ax9O#{3a2/@LvH~ ᮧùiW'b)cˌ-@>}?m>ࡁQB43Z㈇oBm[LxFX;dvP҉{t2;BCBp7ԳnH}-B8$*CLCÑa_WQ0?;\f(ax$cd.n*[ 7ˋE+O rrRØni\8Y/[-1:raЈqnK;b&5F4k:1 ]&&`h8/,@.xY4`e $,y#6emq\f"4@|NvMHКF mؽGH‰s\-5WW#ā<,{glNquO}5u1=/\]W-{a|ӂd1\,H!_stuBquKL:MC%?dB#~need;dBiG2ȇ))HtB>7\|HY?|$8h^p''jRh2wwr).c&YJ+40J)𼚧`KF8Ig$Ȝdp.zDCn8Gp..40jX(2~Y&lY/yoS }h,@FK|a-y>Lf(axGrGzR|nWL&(4y8 ABㆅUnh`PQuVKĿMS'OJJe&A?Xl}U֬e x= ~h`Pq<&]^#Z#{^ϖT  Bp./-O'1Lypj`nmߩ^;.-q:d\`w1G g&3|h`ߝY9u:q0䕕h\= ޼*UP}D)qgRҕ+!)n-ɞ"TI_9 alaHE:Ҳ&dKVRz.VίW⾤ ?(B.Pi`0L09j>̧uœ=z.񩕌5,_鏕\겅]֪T[ ?y-JZRHꆖT봜ǩe-EKL5[hyK:O6yP%עe-UT-- JPtt [Ve-Uk'2roZjԲ%OB-%fo2%OI-Rlu꣥5ΐ%V>oRΛ3"c/75)v+-EK ds-ڛgZ!)_a-nWxz755m,\9h)iȳR.-Y&r=05'NZZZ:=bpy)jGQo76ֺJ<*qѦ8JQo'ތ: XAR=p?.fkp*s+|yY4@4s}ƢUhJ^?{JN<ٗ#ҵ[Z'a߼YQ&~md.pq8 g8G2 g ϏJ%$9}OZ0S d.aqjvԸ{և}IRzX,Qq I,6ɖIS&Ga2PBZ,Ge>ʰPqaY/2ٝ#Uɇ s7KQB6Ӣ}KM+]dd9C:/j(t/W- 7@WʡBP``e?:,40JX8޿禑UY 8ZUs7SRV @Z6)Y:iHN Oݓ-s#sX>s(axBis0!4 ;v]L`Cá2„X lh`PqܫKj](O?ZŠż!ȵۢoٲv7ux˩ƽ(|PI+7l8YWvץ+Zv])a4n-+ʊHsQa!7ɩ7i-ᭅ*u`7=ߩeq.b[#i l^=(JF~F "K#_u絿s߄yh4n-,/.~K%)[9jց3jmwjSxqZ/ gy6}|uovHw.7Тohyera(Zxk2za5SbvCqX0$.hyK"c{we:WWt`Ɗ񋴃w=eȤ.=Ρնk]_rݲM mǂU˽B{zkr>t6,߬bXټWY[zN;hS*{Fë-9$.wcL.Ϗjѥ|im,@^^8[֦^HX6>L#0 h`Pq`2[-긋aZ0k,@>?4|aUXiri 9qv&S{ l8f%ը in(~ö?}Ѳ3&%DΘ4cA/Wi)3hh`pPcET݆>Zv3VXO]+CCnJnGև-[KĔ5bnǏ^XE>=_\>/:MCs Oϐ-GJC"f&x=臿G*dՙK[/_Zul]i˲\)5vҸv}Zp#r6s ]e9G7,T?HÄZSC?4[l}0緡7C]M7BoͶ".fK;oN/KF8fXѲ\ʣҸr u?xCfh22A  [8-$M&QBq¹Z*6O.b"K1Dȡ3pRLd@_dfT_ˮX)xI܃sjffdcԒ9 h`Pq}D+ehg5EKT#E~x|\ i%E]"P[g 5aad7s ̲̘8"-m{  uwg~H[Zd#CSed (0%Cd@Gg~q$ti0Dt!f2ÇFs / Н7[ʎ>Lf(ax)z4{>e2@NNXl}4? R6;FGWw>2΂|~NF*dF@3a}0дښia2F m0B ZYV lW**&sYX>8t#N30C7(aa0̌o%J7Vגwcd&3zhZXǷ>|dӀpua2 [0/#"z`^,6`A%ͥ%]]D-Ŕp9#(`euqxE,m \?ki\aᐭ.49YbM&ࡁaLwf]H~{adoL=|i[ q@M *a(dC39k, EA7d&3|h`Pq #-Z3X&h%,TwWvl{r`ݿ⇉;8r2_JKץ _%h`PAxaIVh8+BzwGB;,` =3%,T7@ZEv.r`*2vhfKyi| `a C?ڳ(̖n:N;.C&l85l [9 4mCXmApj(Bqo? C+$S̄µ \Xl}| 7w-G&(ax uXl@7ҷ5c |p',@^=SaoX"L>Qh`Pqu koW]uW l}m!G]U#໅Kh5Da;"nF>x;DRO2DABAq`Lĸp[XpyG֓Wg97ywxF-0w$#Нݞt``zNSbDutMfBk$#遰)1A ֐)& Iɢ9۷eÅHPqܥSfC pES$|>4[l}0m;e *c8M%5ZN=xo2^"ٲNZؔ}`h`Pqfn^&[,3W%$dC?Xy9lZo&-93cz59 h`Pq<]$5[,Cssh23a/ 29áQBqw-@X8t=;>-\ d-% h0C!3ܣL; ,`oWKYc(Q1]Dsp&,@> k%j؇n>L&'40JX8~d(|HFp.3|h^y >+ok 2* -rruPLAGX,[J@uѨ)1B?B9N ۾[2C`ifes~^LCG*@rFċv?T V7 l'n4"ʆ0N+KA@5ͧ_>,#tNBh8ư (,H ]%,TwoZqݤ LEtN7Ft SOuM-{p? ':]׀ w<"mEdvQBq٫ޯ$bWo`h&3|h,h}m^Õ7~ "H;GWe OI=Z$IM$Rd׈$n!$`!e$ɚn$w"IkDPqܦ^ Ðg7|) Q>4k~/>O)(WKЈnx[kvń?KxxFdp c7do¦ͲyJ Թv{l)WIjBQ5 [ &awh .T]G #-_Y]yqh7,@>h^<6K,^йL(a !LtWДeF `.y7r<\f a[* x95el\,*ģ4cJOo ǐ IuzPgBFg5kf6*Oo;:JW~\='rӿ[kǻ%Ϟ2SüYHwn=3 G2 ;$,_ (`SN4 إ1pf?9v]v$KZA}?M5? 
ϣp)e+7!BphE$w+kSADu֦W=h$pZ-Ťƅ!$uӾ)qra@4U(88=x@W'%Rn!O)X%dRpk`"ӎ>JեL8=KºqF SJaeRbH.AJ"X\|c++SuٙU]G=sɄ34[T]B.{#BT]O+ymށ԰EZl}d|Km|I Ų3đ%,TV{4/Z6q5aHw!yƹK6:0qB YQ^FF5]cojZy%hƇ޹Q|Ey wVp6cY(o(%ް|w)LG+lfx,9?Qs6yraR-8bH gxk]xiײɖ튪KK_Qh)\.Cװ?I5-釭6Ulאǧ5êlZ|)޺KwkRbZ^//ܺRR] ,(%-Ԥsbh%4OjRuh嬒)I!+I\fJRAIy:w+z_j^OJRup-.f/%-֧WCׇ7 IQ餈Dxr%GY]l|~RvȻ!_.&Ad|!!PvUUݘgޣVw |},1W4p99ZM}۷2Hpk {>:'֩i ,Twη{i[Ӏ^BA.]#vJ>Α!sTd;FGWL+-^Pw6tI$C R)G&\%^wXreчəs!4bn|]Qw6PLJ,[doqG&3|׈QO3Q-0pGd?1C-bV2.-QH;+iry0tY;J:Ȍ- h.['خ]!l~3/cdt̎ì3|3IJ SGt%aЏŖ1 ;, (B&LtyWM 6X]&IWoT0A&𡁠,]e̓N񏱉9 7`!!Ge^vaAj_ԇ x&T+`nc8*qY y,Մޢhs 2au#2KBÃ!q8dOᒱB?q*"X?bl~h<39s_hpLf@GL ##L`SJُ#uHʦPFR͊)!{򛌤@qX<,O6>N6&Eݿ'9h`PP//Ѳh4N؛v${ Rb5`6=`_GJ{ y\>a-˷Rti%D--<84  k{>EpIu axȳSyvq>2̀CavA`yK.D{'4  -QtVM|z! Y`Fw G ׹R׹Rbznr;Bxz< T7h D}ۋ$k%0o 2[Wg<1cۨxFl_᷃[J?1| 1o_-o(i!3  IÒ(7*, IN~FPBqgW}/n!p{ }@ kKMO|==+.?]>u kΒQ`zKn `]ΖSI;4}Z$~1/n3]՘'b!_dF@n)I3Q'M7E4bns߆;[1Vss eć-og n5\#nnM+kcC@# +j>%N Sw@z/Y7 fšްi[!wЏn}6ѲnV:q j,9VcFqRwr3/]#Pq<9'&ђ3+!4 `V׽.0KvNLs mG&KWu 4܉/?wK|$w@*mߍ  jɨht:S Nt!}iVb"msɣ] MnI-ٵ nA>Aũ.hB%NĄu'}2v+HL8&[&TQtp As dN)<~v4q $@!$r5ȇSk p_Bx܏oKysLHB̶ p  GɷLAʷu_/E]f&48X.n733.A^/IXl_,7sbhۅ><4[q< e}Өfo߮~ J7Gί/Ю7_]oBlr;P-|B&qBqoR2!;-y4IL@ K8aV<\&ࡁBE /8K,l;T8h A;^baBQd~G%\f*4*L!No JۤkM~L5^_g-g 152x".OZw\7X.-^\(_sŹ6-PVR{߿1zY&3zhz3E&Ep*N-_m)w qPk^ֈ4% `Q@% 9"&Ӭ840JX8Zd@};e@-94lAC`!M¢g\ <+|xy$]JAHQK.kA-A+r(,H,r \f4$*5fī%>'|n!S9DwC$Cpp+Q]#໅i)$mT  @PzbD/-} eߨ/7.9pӣS]j_OըF< Kˈauk0)?,S^G9l^y+:u1QA)$,wd)]*+h{[ӓg_YeTMcUZ b9jΟ@jT%dScQsjIݩڕ>b/6#4?_~>|{__~_qUTsRGt~~_~BYO<ۆ o׿8cYE^CJKIԦd#Y2~x[k:*eh?wht)73;\>[=(ϬW?3wM;(ES^ۗsLa9PH@ɶOՎ}">uvE_8by-Lk̾4g&uSݱ\4!/a]] i;6 GӘj̦7Esw9_jiHK:Gr^_ ›ܵ}}_VRBo_IjTSk)YX8i뛗}廋/~ͯ߻mw2ں|GvLwƷL?bj` wtue`ȱusn|@ߺ5uҴ_c_GmZJw_z]-iYm]Շo;o/?,wի:=<7ӗ]<7~sc_3Bkfc>O>:U=M^뗟%ݵ7]i+i?endstream endobj 161 0 obj << /Filter /FlateDecode /Length 34169 >> stream xܽM%9r%Ȍx\k'I И^t-(J52U*q^wE` EҞЌKfx[B~?/m{oR߽ۿP2km}Ql,cx/子JiXs;?e=f YޏZ1VuS~$?𷦵/S6r-HVG-$:k. 
K%Ik}ԨӨ%lQK%Ykm$-$VrD-dm}_J k{ldjڵ֨sU[$imKaTZ1#?+Hb>ekHֱ-{C䑶5=Q6HJ%oƐJXkW%O|󿃖JA{RIz ))$i"$^W~V{տ{FfIOlC&vZ.JΡO-,}B?&Zȸ~̡=u}NQ>:^iϬZc>9Z>ԢmLjk-dy[oܛAj4ZZx-:1/|^Lw}sv!T:ËaƅgOZU`_|bg55z^돺9i|ycZcXuLYj~|be.={e_wB2(y!mJiT|Zֺs6> /z /J5 jߏ5 *˜+ԅqK2v寳Fk5hѵlAՀd?hEiemhjz>Ui Egokfs!ZΚb(KeYߛ;w@ٝu X !졒3rg|btU?Pei z:uc]2XM&!;|NC={ӿ_7DWh\Fg>\NΪ9ُ(Οr|smB/@SE.Xm}s"*m H9ah@ d>\oNe_Ҫq~jJc_笸̏5|-bXP6$!lmhyրv;^߳+TZV?serЗKF,u5^91_Z69T1_ -î8BnqhL+˜KZ*.eΩ-haX*KEBX `&&['?'٧I{w6MR߳})6cl5 uGl{.]_C]akan7~㫜6ntڸO .\yuoo^o#;qχ*GSR$4uh$hz`'8x`'jy$p:>J?>BO E0H/f>ZKyգlhPoc=4An|诈O쓳(eJ~?#+keBVY{$s,Y#1vz5p0<0 j:#$MD?R8Ik%c c*CJǾЦf>jhӮk6ne:s#0}ڷ$E/~]UUOfOBD.^NJz~_ZCؕ.!6*~|'j'ʟ8*9y#D s6taILr]%S.?kGt[P?{STm49KNM.+M.e+mLKNǸ32+ .SI-r H.Rވe`}EF2\rʖI:gGߣR&{mArJDy94INIh'PGS檫6a:$%ll_;INZpjoa!w[I]Py}k/,拺{Lr6$rɯB$ԟi1hS┍hg9~Drn'p3ZNLQ%/W5RC9WˢP.q s ^ K$6?d뢭:Z[gmtt񅋯4ڐqHB-ot-:o^8UǎX:ocա{gcpRcaYMOxI*c@R-ec}x}oEFoȹHtEͱ+ӰW2 IZc 6-dYsKX*IZݓJҵe/BJ& WbAҡ@'(d}I.C %sмkg靡{EAI %\+r%r%UPP¥ V{%޽$=ʵIjtJ>eA3MזH_aǚFHsZLCt" Jx%+p(^ DAI礠DC=Ny:8^@:4W&]jK^^:ԶA??>wurm-Wy.>k021k_v9uv {<׾ORr.ݲJt/wk/6Yv!{uK?gq@z5lxs[bM;&'jmI[½F%׺y徻!ZΚb(KN7w~mH98kُ1(pթ5ӈ7WPΚb(K̙1'+!S8ғ-=AJmROxn@@Yڈ!֕9;HI欔Ѐs@~th*WMBF> /\N=ߕWVx'^~!nkh@NuC\|s%қq8in d?n/NpqaS{Al"Fč~%8;?]տDTN^"hEbEFL/.cBNLv ;҆篏poCNLv X %d2.x D0ėL04C@ٖL`,І/`iPf'bcdBe;dn1{ h#Q텂o,mv;;mRΚP6]sճad+7:;Mw, O5JAK8+Gi!$\ xpn}O );K7R%mWIojY+;-rp4E|W>qZ+І*q+я[r}igg/Y䢳RBtkEDr5YKc{7/aUfd uE=Ȯ#ȮafK/mϐD2\- ⣁wZD6$ &&&|JUw7,en>gݦN=w!Q,f2$騭HKfG'WlY,EWLrm]E ldO%[>5H%}a ClANn)zaHU-WC\r nC"-{!n_u6]{)M-*B=x~O=GF{!I|^*S+SO *#|kUƈK/]rp \$|{dcM?3>H2 va7[Hҹowa~28wp 6XH8yN $K3O?2!H2xoy'ŝ'OItݱasylȶӭEAu yz@l{2 D/4hߔi@0/ @4lZdZGpg%;+,4(B{͞CmH&K_{2HBg"]aG2/ڼ.FgPkgNi\"7=jI4:Fʜ(PGn|)rrqrPp%imX|I4`7٧UlZ~>iDa4%;u,4 ,mgxo:&|5 ًwIccwoHV[h؃S+KDxD -p1>W;,م˷=x_)$bu]ŭᨣM7!E1$"EQ\t-)S jIt=mzW+b5G@&!zڰY X WšɫɊǚThs`adY;,مϜϧ/y}xCS{M- >v[ݡWvMҘb^ B#c8[ZrwIs#MwbY,ք#8}1d[8J0^gZ:kl|5eij@BKJSoCTV@V!q3UÀŝ]f׮Y-gMC]1,EG^8965 ;ykKC7V[{pv_",4 P6am6=E~y }YbC3E6yfaMPFXHX  -,p X q(}Z?K/U(s)S%rc!J# Q-`ei!Xh)(\ )&j+Eu^R3,yǨ%4 ')$,քISP ~rMb{q̆kXo>\hp$ #v%և?cc=M) X]~C+`Zo3jذT+K q/z[qH _ޝ ^[R{_bn0hL=6  :u@sx6${\Eæ+kM_u{[{tZ'"H}r,pI㫥'qHpldp ~4#Ͽ/5y/*G˲Mo09nO ʃ#+v[VqIdcOy}lM]o-:H2qupdeKd!:".Ā#H\8u:O}.Yϫ_>KS|$TF9טn+sǠFF.*u5⯍Gc:19H2#\O0 C[ ض-BiK]$$/sEOΗExyӱ0e=(0I@@hW(gW.&F#$; {]c"H"*%r9|bTpvslybVtdٲ929rZ<->Pς &7ɻt5-JR 'uLt|,uwt6M [6ȶ+n/i3mdp4흅.^siH,;H.hbh<ddsIwFq,ݢџmeIoURyPz8Z;Ì=Ij,LwgBZA_0dxv5yO|nArz+l@&9=N88PHUo{BD_K5nLj@slKaɨ(H.o1k bfX5?ȦyS^2(SH.S%;Ƞذ=Uen(^\^zm/@h"5۾N-cc2ʪ_ZvH&hP6#br%ϕ %HI0l%7kB$Εa?*?v:h 䔴geBQcO@x0<-1 S&+d3|hHf*_lzZKD {O  ѥԅu5H~ zv#x(BM{~1Y 7/deX 7ĬeYM+de4-hZ!+k!kyVFӲ2ZgZVF{ !+kiVFWcYM)det-det-ZZC4$àzgJ&(i87AI%D J&(!YVF{HRHgZѴBVFӲQF2ge>t<+ $ Q 1ky$N$0% dt4MP06An\ 3+Y뜌{P]6'#r2"Kk~(8MUԒE#ڢüWêx[^!Uj.xͭ^cʉΘp1XxіƋ8٤ג8絞Zs鲦Z/aѼ'nr^Jl񥖮"ЛaCfŒM6DBDѢҧ3tA;Crpl]\Zn6~5n#,mӻAGFֆkJ d?nqpǠQ/sQzb(sU@7!iH $/nqx(%ɟIv }[ x-[j'H'CPֆ2+Lv!qt*ft 'WčUM+w^wVDDHbE/ft ]`0ן%Ċ5?)Q=l6!k4 do1N$ڨI¯@7b@\,1[l/ 4YhJ d?n{%f-=㦪\p% A"ֆg Ӏ@~'M#Ի!t M([IHHO wX w˯ %zD#}orQ]r4 C@Y~Қ~ hmH9jُ;߾xZV`9ŃY"gx0 ӪڛŃ zZ<g$NdK9C+B+tW Lrϻ|5)&\ ]rpMr4)Ynh$Ϲss%79U˟9r.:4r.s.>EZ H&_ 8\80p$cYd2uI[q}y3Hz$NgXEEܖ_fJ|G7 A8%a I4qgf :$$.|n w( rHnoo`됫I.!py&ʍ$`Q=gb.4쐟ӱ\^ytcY?, Cu ;\1Gb #FR|# .2͍O@pā7`c@G>KnlFfvOwŧod>}m.ad9М{I@ܰgkCUg !\\r`.s4-=YsInHsl@H@ܐ=!aWK6fW(^ܐ>Vf@Da҈䆔H@ȑ@ܐ)46 b9-䆲;z_3%7eAxEP绳U/"3$7 q q>בb8Sg6\>Ljp1@ +"46(Z՛?֘(v}zKw"s^j,-ʿt_$QS?̿D83>a(T ;57%Ә凿afsqoq[gqBM?_fHYN|Nɉ/F&>k?q=Ξ_ϧtc?Laˮ??./Ntc#n9 +}JfϒHZ& Zteת1[ϵ\յLZsA-jpЊ;}}O=_WTXcaCB]3̛zd&)e6of7wH%u_TywRtB9V~?|!U9TmگҦ?ߨ*+)tr[6=MӹewGiZEմBUӲnngQ1dQ5-ˢjZ!iYP B? 
mӶ4^y@k'_8R9zi= L˖/ 0`J#x9ŠS5(}~loG_ÿhODz.H5k*?ݢR۟~ݏ./_~XG4&ih,ިG4Q^ToLeo2XU37N\}hlʄ?zݓ9spDx՘ևyn[.2ЏjHL >_`sWͿzAm`\8pBui.nh猃wIDoF϶Q_g?28P6$9#TmHj hJ d?rmM{@1#mH7@?~axdS?sy)(:EˡMC69>fid 7X/vS{/>6-?*x{%/AИScR ׮=i(sT H MH9iH $/^t}]N[|07r?ZZHKN wTg>rs.BsX@L^ ->7 +U]q# n޶A wx܄a}@^3 UK*;@WD4%n4`#t;nWS ay Ɂ+TuCv!,m_i"kLGZoُW:]FǫK烳ޣ,1q#a6i[9^p_ˡMC;P6BaV-!+xetJ^9L$rbh67Q??_OwW^YW*L3sn}*pπǟx|l_ok/ؿѿ (c`~Ηf&HMDB?'\2đߝ2}+v~bCl)X Ӯ'| %2n;?Û薊wtAs9F)wߜQzЗ+ZT1g`1N-~kMsR gӏօ5WwSF۷UwMBx 3oi e\C&6G@y7پ6SkJ d?i)3_vٷ˷w5Enr8c hFՋV svJ?qgʠOViKi*ctDLލ*XKǺF} 9f*MsBm˜M=kpXq1qXЛb,ҏ/ [8ˌKǟfO/uN }c_<r%ɿSWoRLG#,t?uo]N̵ԥϾeԾIR߸RZkntZԩs`bo߿qEh~_ ˕eO9gG>a AsjGm=?˳@ŵ:-X\_~/2–7X;hp ZZ)Z.F]sO<7ځp@T̷"1iwWGAì694%9WQ:%f^)$idž;MK%YrFZ"IZ{HEʏ%5MK%Y?woII$IP[P%։"l~q igbe4-HVsE%D(?O ZuY8tk$bI_UzN% 2䑱쒥GH3(E'm<R+- $[JQFԂ$jqL Z!ONآI"C`|FȯGm2xTZ΅Ƽ`@f%Cs=-g3QNBXr-blW|ݐl 8ˢ/ gN5R\TueHܛrNtcq,΋Yدj=9e+CmJg䈅6\l{t |8ղk: p2uAמU{EϼR猪/ԡ;}#+o:%wEDĤNu7\s7P6O+-2ՀЀ@.|=d4?敱bʞb}!Zb(KV9#*kN!iJ d?6T^^mYv(Q޳w8uG r TGӀ@=%ãCWUVU%~J9: {\? =8;+,4I(B2gszw]I[%k uB )߶{{/1xE(KT;xCې2ՀЀ@~\]itioomm)GgWXM|fdoCYP+ !qsJ8Kv^id`0I`j ,mDgN؎ fmH9: X ֔Ӫm0ha,4 mg >T0rp4`7B1;7]kޥN7 "( Ф<'Jq֠AtրA5 X*4E҇ wىw,|Q==:@+mlf~FID G!iJ d?߱DZr0ٛ rt",mg6srp4JC~l!C5f?Y)Gga)mDggr"]N.?4`<]?aB sO4t~Zw1 Cli06krxhP`P;|2|%1,4*϶]xp(!ѡAwK@@Yڠt6L5`4`%?B(q+]LXkr4C@ҭߣ6ւ;,D:mzs$%q7|յ;SQEI=>v=)GӀ?=,D70ClgL\ʶ`076l6++ urAۣ vDN=YZ9qlIPT8JWG*Iz$-)PWI5*Eg #o"J6% "m)$"Ž/-íiH.P " . NkhEEx. r Ϻv_\$~t AW$ I:8|hShD*Ic0Ɋm9¶v*cx+Jr Io꛴ `C: ;.ODf]qԕI"Hn6 ޮf`7>V"t𒠇zfu=fuyi+3]5s9av1Z( 㲙$6 ~)c8ſ \$|~˼e ^L".[N_W$F_f|}豊wKti_{JV,BEF $yΦ/[ ΀LrA8VR駋uCʀM~CeH֬-F?ӕ8dn@ß׺ g`8ƣ@rm\G_$Gz;^)tV)lmU"<InDq:Lr@t=lO@e8*:[[?dPl7`V9I-צqDƒ@&9;0iM}@ۻM zK@&֗G] T4xgӍb{+]X Kt^dۀ~4'89B{Ա.qmQD[$km N‡ՊmǐaQ%P^x! iPQH)/بxhn覶{_& .ͯ*,k6?s ڢRH=*19 ߫ GGxEz!#`H"+qb.yJsG@9 uvJGvY;DžZ>>Ͽ꛵EKv5wҭk9w'~o~aZ3s7L>֝>ՠnDRQy;<#hYp ^x}R^ Uُ[iCt"8b H98kp(KYڧ_:XZΚZiُ[ GuggJ9: sEP6t vJ9: X -҆eƭoilZΚ\m}D4oGX}{RBV!!&y z ΈzNs !9҆S{^((N mH)x+e䛘F&iTpLn0\6tUg)Zf mH9: X - [,2'DdjؓUdw;OHx OV59E~|$\nG`UZvug2gUCegṆpeiYxy7y.: vvGZAs-kȈb+4 P6^3!iJ d?nykvoe^n0$rjKe[V&(t H@r1&ArC$兦p +ā !ʭt с H:FG߲%.!rv}M@F HnӢEiNԺ@&%:WrGJ';i|9MvY=p*|S9 ƚp avLJ7$u tEܢE60pY):]@t tВH?$7\Rt]F G%Ar%Dj..iS-J%r.]r&cnu%*oxs-# *.$5l@&&|qzs/6WĶ` W-jlGKivė)'oز]_Cctb)Uzcsqm //٧1 r4 5 25IzLҲ Lk^KdPByyuBd-:Z"IZY0UuE>]I$YkSs"HC:+q$k!EIw%aG>,Ikdi4j m4()4(k4(Gu-F'i}T@yZ*clt!I~c*πGzSDU'i#j)s4hK9%@ 0 E+.O|Rӷ2J /Ã^0J }`HK'fF`zxZh= E]z~)UFk?(-`/:o}Q&|ƕmWlBY9 lҲͿB-zKK 4cAXQvg]C\q wv~۴4n )+e2HY[p ,4E@YڈV]A> )Gg+ei?8KGJÇ|ZΚb(KYTl2pmH98kُ٤sIpЮpe,Y(;5lZ6W*'TsX kCNs -tڦ&LVVvH9jDP '4Uք%:<=J[pLXM sN5n#8K^ {pVYh唖]h}8;t=r驚{(%oۈYЀ}@ܳI4Z%lU7Ja*6%=Н@@Y7'jlZl v_I?4& ] .݂k]-LC6?xF'gct BRvL D`5Йi:BͲ݃[;,`₷Yk)>$}H2rKG[Kp%!@H#k 2Un}Yg(;!5n(KN+`xL;вs\V!q%"ggXVZΚb(KYEYB65 X -+'KBofғrt5\'ϵi|ZBV!q+ &SM^~?Հ+@@L ֆ7Ѐ@~|5(!%}*4yZsv5 Ŵkb%SB8>UArC/-ted}WAtvӡCۂN8.b>#' K.9.+zi[q$LtN|$ x a^ @O'?45ŏ =Ar/EgFj]#~"7 sx+)0Mr/-pCCj}PܰKAaHZ!H8ܰKː{V.uҹxM sWڜw&KnإavN:_-iہ=5o ,qxcbّA|ҵ\l{O [ڑjŮE]1L ڢ-`ScD\ݣ1y[45&g?eN ӏHs 60Lk]8d ?VdZUd_!X{S M2ޛVxZH@Z޴,iM2ޛVxZ< +y{Ӳ2J7|#:ỌnEy G̣nuIZK 2i{rޛ!罏2a}}l 罏 yO9{{VyC ߒ$QkP=jA{w)7{5G*t-Hn2޿1Xڟֵer?.z\W #U&'w|%ϕ~'5ETˍ|*IZTZ3l9F]jQڄ]ofmV)1V<$im/3QևР7Soi׹+P9{}8 F4[sXa'>: Ǟh3i.n'#{`^+i8q~.~}Qx %cXJ*kATC-ڇsʚulu-P祺 Z^:a(K V\6nɅstKZTnЀ@rMt ASZFh 45xgэYj钟&4-0 Iݱদu7UAmFZ!.tk^^H:k =H,Ө 4+#K$]#H:{o hJ d?NDkĈ)G_U,s69'~^Mi~jj賃/X3!RΪ?7A+#aʃmH9: X~\biGsŐӀ_'^5?!si>A{m{@@Yڠ*}H,xeVZiُ-\%~̅'}Y3 rp4C@Yҹ,NۺiJ d?!AI ʬ79+"i ,mDgVf mH9: X ֔B3/!MBWp<Qmh98kj!d?>C(O&qU&t~p6Ӧ&5U9)Re? 
|4Q$g)/Ϟ ;W鐕 Ưtj3I:&lbЫthdeIa+:uoH2x];[2}s$ĉN=8v`X7YJ8mS%ݙŏ?w X-BN $lls켺-AeA<|{5[8|H4 jGG>%窯nݠWM["m~A}Oܠ7UDMzW/􂿡V:z=or^>QI:])vJs}s8VkI$z8wUg".γ^7{ fH28Qn: jrU$0;QMCMϴ~Ј3釢} m 6Fc8&9= 8HLX4;\Ui@ږ[ %3+GԡLUBp~ {?ʢZfq;rCفJQ-:j1 8z2mźaylLū|\=pA`A3*r;2;Pȑ~r@gF%|k/U҈tnArz>wZ" `68{dEe>Arz@DSQ)~I.7奱z'&Sm4T$;%+ô^c]\`FMHs7!ٛ06 '8M/_"17gWS"FDYjkW 7T[5Ԕ0{inv{dtF˾5)[-{ji [At|Dp94Nc>No¤~D8tξ$M+D7-ڣNTKHЫ5jiQD&$k |JOMf鞐Tc?$kAmµ(voP%+2A$&KWiЂ$j)4hA` Z Ibz')O[z*I}b1}y `gȊ8>2|HSXPDxBg NA ҠIW~$j A6fVis~H~Fk}&)kHɭi}8zi?A2mkwu6<ͯVK?gj]C%WޏW]ZxA2)fHCzFl:s>1 ǔ3^,2рȽǔsC[-1D2%8,NLD{!|<@YZpg4)NLDڝ1\AMD*:ݤrp4 @YZnDNm7nɅ;t;mB`Ѐвl14Бвi j3'>"/A"C{pPvi( L.\ );O5`%4d%`f*U"j+`ei#8;e rp4JC~2Mw~7qW!a(謄u mH9: X -Ӵ]ilNE(3DYh Xy b.f !6*s++n^&)|fU+!P7I8 S^rN=CKBMʯZ :b(7pL.#Bךbp`#+)I"*hF␘-P(,MWD1:g%ڄ6@Ci8#R*9d,W)AgJ3+KNٛsg%|=;,.ܒNk'"3D9BOh XY~jR]CtVkK-WMV*ƒSϣ`Û7A H98 wE,mDg[ICkCYhJExt&&SL=똜;'qr!ѹt6lv2+a;=LJSΤ&|4^'WƉ,s 9ʛ.\±~.:U vvoZ9N,7ɵe9if"N }iJ d?nȋ]ѝ-Bq7gZ6]쬺RBv!q7S4ΥH)7k67p(o)%_7Y6BQDfeݎ*!v|i*XdWI X&i]M9VH2v/ƅd/~7B^Iڇ^&tM+E\RHڜoʫ%RY!-Ȁvyba_MC^LgbrhoBVY%V I:r!Ko`m:I׭"$ӁlM?W<=uHr[;x= 2DS,%0+!Gh"f8"Ƹ#kP݀ls!wL 1B*1BkRJ1nz[|!R$oUJhZOJ24S=߽C!}XC|DӰ/{0bZ]Ϛ|3\ZK\c]Ҽ9%7LJwP;-D]r4@Hm@{UFAr4 $"$0MyLSSlie7o0wW4-F Hnc܀$;U\J8./>E$di0@EfT<:@74ztLzѽ4 ɴ6^ZSsُ[Vi9X%g(cwlY0WFt{pT( )Gg+U:_CC%d_Rqs <˜CIy ;@YpRefk&ƅKMt͉mA3  g138цT?nǶ !|Z671 ٌ9 SU\Q+KYg6}^ ƒҩЭ^lOl[ ]sV2/[ttgU#%XFKf:-1)'LQΪ;hw$5Y;a- ŇBbtϹѠ†eYqs a9܄S&<׀@H^K O*_kY\KFUw!D9jُ[biw=/v[+Ѐ'@@%p[4l=%ЅNɄLF" `FŹ {`@;Q"bېrt&-2˴IH"{U֊s.8boS IeD8WTZ! \$|N /Ѱ۱=AY~MJ>[oB$q#q* zcA-MHDDu'bWM"Im!{X_;Z)QH+$39S2k*IQpU \2K ]M { d^дFhnlJB 1L ]g#xHCj)j}--*$VD 9p_fu`Lnu`LTKD B$2'1-߯(1'~bhB阱Gf!*UTjnlx:wk -=<iH*Ї&Bw7Po|#PR|XJJ ] bi%@E$7R~e-5H> f#29]rC-+V$ $ PZ$ R 4E 4LOfL~0thQS+) q !]:G$"At#$wSzQ&|b|wY9:P| Ŕ4x 7)ޓK}`DLތ$džӦM؜ SBx{J5~mN$v.$QMt$kCBM$>Иj}Ь-wʅOT5هo\Z6ɛkVkBgVpⳢ5> i4J% Kfq:_guAc֤pwP΅KvNbDhCTC0 X 9 J6%ӚѤf 5SMC]1%$)n4`%t6SB_H^oFr!Beu$uIm ͯ[ X 9][5_53?#gH9: {n`ί_ MbmH9: sNG[Mz\]lu~JѲ{Sx&s(YQW~gHnO-!YFLم_,t(W oҚ` 4܄EI@n$4 XYZn!7"ʝew 0QgNC6Gt-7o&h.]0R5XYZnnKh`'Xղ?N0;U n;/zk\;P`ei!xͽOކG(D]SOj_pukB\-QzlȋKsvϚmoeikrε(thCTC0 S4[Eg[TɧlH98kp(KmN}ކVBuk*:+W-8+,4y}a|ޝrt"d?_C1A؜-# c-Gga(=C~.v$x&5ݼ܂؆&lu ,Ҁrp4M#7w9ؙlm a7Y2HڵO{d0b,MɏUqȅ]R;_m%3u S,&bBG*Pt?g\< TZDˁEߵhJ8͗h&z(k^g)!F)`圈4i9p2k wSu% Hl&+zba\XA8U 5E4yDc3v<\7RRF;()9wxZOqa slcHģcE?`#K'ug>bt7pBe7*<;8*d `u$ke}k4 0>ߝJzUn5QsHh.t2[W2q=JYaF"j870 |\$xGlϭ ~^PY"HprCU"8\vخu]p  Gذ55 14@$rtMn5IcMCKR Fc3pc\U5K3V58A0ӺOtɿj,!4юCR?R 4^8re廵JӴ\ix`i ev#]LA(Ϗ&Jn]joR d+`i `i!f<# ഛ ZDy~4TJ7)N?T "RL0J]jLXR{+B"'iGiVQ" GҲ 8vZiY$4 1B,93Dwi%fQM. IdVB͚B \IwiWM"!ȬiyؼjP -"vK=(KxgزX7V lE5 14]!C.{KZJE8d܍G[,t1hj`y0`*! )TB*`!Sɜ֬oZJЬa Y&AuCҎ#塁{a*W=qN]CoD>dAs G1"S1i!V>ŷCjFfJl{cѰRI3MC5a] A[V3g+<~f!ݽ 9K>ʵfk3f+Fn<8mfV>ŷRX#Ҿ˅G|p3A`5=`#hpgzi<@M;G50qH2($OB5fH:kb!h:$:x1v<$5 -"vRM)x+r[٫ۚt0[UC,1MST @s0䅾#hjh!agF%777U&7U4TA0Su`r[ydq[yq[u`r[ElюL ҖTI$T<\#[])]\iPKN""Y&vs3[b1p`+e=%RŪ|fe︤F.C5PW90 ٘=G2Y٘.8zpñhÇL=2z3'sC;7dە, q&KHUOIe`dJ<É))-eg_ $W8!61Clt!bw8Ncm14t%d@G}ɀL׋)n+ӲGgdr ڔ)}.YzJXbZ0IӂN(:`ҦwYb,F)}0~iaaJP.g~ un@&9`>haZ4@mb:Y ӂ~0q5B$j0# z,mn|qjjeaJV3Cޥ0Pg ;St3 }LihY?6:;s̐:9;wD0= 24cHU1ψ_`Sd:R:̫߾ݜ?o_Cu;@'\h MTYOo?U>po7W߾! 
|1h%U 7PusK B I鐨QX׽*V/vW2IBULZStµ\ԳkĵNM fIXeSel.۰K&aŵ 7 \p uv5*}.1]1D="m"EjM5"RU4 H >0αnSSpnZv˴^e^M+I7-snZOip ^ßl5{ 8wDˮ V+dZvWȴe!3 4ϫ e}i>J`dD%XaZC0 Ѳ٦%AB1HM9V0 3pÍu{V ]y˿|:{@=;$I~{ʰȅw~ \FӇ;k,so÷B2B+y1AՖ(L\S}Dtxl*5﷟ |hȄۻ]mWۮ*]` ÷GMKCXs&y'L:Y-<1_9|B%x1<&\#/jh)a-.nvaYN1(UBvg pzVZ\*5T`g u"P!j,BgX|ynZHE^tyX|zSx)m9>fڠ׶5ii:STrђ7 yMPNl\6PC6Y?%h*I*W-R2ViH2INЇ!<8*40qKErÊ`cECѩhKRG#3h̗SxPкΙU?MRk= d8o>ΰ_['|d yl'+ZO?HO׿uO|anX*=eDj&z0/&ݡ+ Cx% '} _aq-is/к*Ev|@TVv<{kB79^.]}Z ^)!{ނAVV*h0͊u{A)BI ; kް\,y9!%y{;ϣD5:sN/ w40xqP-D2^f`ަJTQQf[@T;ЧO?a}+7,_osCh+=?u*4?LƝ#] 55d9ӷA# ~A 9yc=-fFG^8iXMsk%# ܇[r>O_HF}j3yju& sY*x6MQ DZ%ś }օg~,k_Vp<&'B B v3EOyB?S{/ .E Fz)_z]wpŮkOZbNzh*)k<q1N?báOo£?W~U(LR ~Z, b>&-NpӫX{?=I+i^rZš1G/sj}Vl|endstream endobj 162 0 obj << /Filter /FlateDecode /Length 20076 >> stream x}K%m}D\裌Ȍ|قqFK_d:gھ` u1t)~շiòOe60Ou}𻇯^*վ)ח~^_QR.~CYrRtAϋ]﵊1i[Umzoi[ވ( &O`RdBeۈ:Sz "ekq)nyҘKYS^J\HR/\2hzL[qv4OrvߧVL+[)o x|YՃ# $p;׋ϥ*C6 *$ Fs-@Bh٠ʆS4e8JpqkZZe)lRҹVcNV™bTIhӡ_ Ew= 6*:/`8`%$8nSӫTUf4Gu `Kщz d;,G~L].EfyY(9؜#pЦ׹w!Ar0B F~_Aur(N3|p8t0ct8MVBˆ:r.2N -4Bp8ƐLW1f3Pa4g8`$$ (n]N2h-3שl}3Iz2}yӲ)ɘ"kCA|pDHAܺxȱ?Ѝdhǀ]֗Fإ4;88i߮Cx'tLzS)::8d; }L\G?8"\~#_%:cIc061cjSķyjcWiɗLOVAg&) Z/zf8TcU6#zNou]OÀ\dz( ;ecP\I. 0)'mV1ڟ mنcv mHtfI/t 4퀃#t 'fѱGK[^ΟΙjrD4%H2_nLB yjrDH V?ߦYϙ)뮕#6mɣɃA)94 1(mӲ\d"sPJ #A);Q80y?yXC^2f p4>g[G!.ҊI|'s3~yhMxd&I\. vp>57ۤP4! Gͩcv6wGJ&%uՙib7ð'MCb_.8?URHф=8`"$ )BBpV8GXN8`iVUoWJ\Qr%m<5:hOuzS/H%ҖN2 a 8Ae|v+2';UѼG*]B)ϵ#e[2@,V Kڹ3%p y)2G]i.uly{cqR3t\ІLSI]n4rR+!a17U &NMٍ}04b}#G4.#&8`/$pL=KT+n,M2yx<[;-{C$Y{F$g+B1b\\\!"CpQ8S<G@5v`Gz;NL2p5)1DE87\!( C[d| eN, {dPDj)Cn}{9Qnxx)eO\2pm\b묻Ʌ uyסɱ_jgQv^ 59[t"#!0e$>0ARt0*$ q%9SB1)_4LGs 0~8%؅ԁ+i8ǽuqtVg ]5L[.t0c_W\ӘDUyʢ˸)/z$"=P~ob|ﰯ;0Z: &О\B]avP{Ta4 jK _3ӣeNxO l.!iAuBdik#F0qlު$ڤڤiF3|K!p]BҦ,e&2Y8f.`DP1~q6ϔ}Gb[JB3 Io~#S~A̮Ԩ:+!aq@G]J~eO[iU[ ]~iz~,%صԁii8w #[OIdLuȊI/lGDb} p2hמ u! hS_ esH(h(s_ ܺϤ𙑲_ ~ȍd`.` \쩠S[Wu1gsL&4Rv:]uf=4pMkJ j] Fr]wP˶i|#}Y|9l̘hg=xc.֥Yo_籔Mz_¼XɆ`t& XfoАTl 9[WpEtG^NӛHѦ0[`Hp<_Xk&^&=e"ϤhBU-oÒؕv:'K MGt> zdR ?9ʐ0u!v!bdr||>8Km:R륔+!aR.rKPt S2i 2$6[ 5arhz0`ǭ~KUB}D;Gs ARiY.aqb.* nsg]Nv ڐ3Ůc:p;ǭcޢ׃V3YOޙTiם_Ŵ&~>ft/<){`M9Jpqݏn3N&OɄiF3|p8t| Oi%p:}ԥb^Px7/0i.$.z)SwOGwO'|+] KoSJ'T饳\A'0P)H؛c-T6Q;MV[,=P3-}hJ@R/A.0K9{L>ڈIy/"EN+n]XK;[.]-G5" P (;Mz»UO1yUoocqY|tyGȺwzwL1c vA=O+vWޟjfκG:9w ',쏀aqCB)+%X8ԁai8`%$8zg0,S,Wx<Ո','0.!i , X@ȰFo|x"w)͊!`Q1,& w)EٛycoZDWG/r'PXac]Y wns _iuᅘ; 6&&B!0L1O%f1[%1fS: Ĝ2tXb8Jpx[aݍ6͉71n}o2>3 4,"hXpQ԰aEaÂ+† laMNq5Nq\5\NL4<NLg.'.x_ 9"G(rXpEaÂbWo . eâ (z© ;yIrp8s_8qw\;10cH=bD̩~yDGXI3wB9I #;=śv1 R}#M#%8t_6NhIujM}t]:FA'䀕0e|l)ҡ-Y޻ Ls !hf%z|>8FwzfXLH!#jW l` I#QXXaBGV +VBˆNY1U=FHM#-%ʍr uBdk!F3xpX0}ʾhBd", 0teYmD>T?LLBp׬,acq?%n4d B;L ]a4! ;=z`P5=7O2׭`uk;J%)a$T`] ŝ΃4@M``7a Xq1ɜI|ȏMw,ppX7{6}ǂ5ছ^b34 ^, ~mU8I!N Ԑ)j{)!4Ԁݘ6ks)h!x „0ӣߋbbwv/9MñГ]Z퐵Vb(ȃ"LӇJmΔ]KcTN3bp!$6YC_?\:&FwPELCe$0l@{L)B  L##!a@q &rADηc";}4䰭˔bLsȅ &FpHF lZ=apd!4f6L 0u 4bZaK8u$cD- $ C̓;0| I)5'&;F>ʂ1\M>G!^]V>2XW=ڦͱz:RU_>{MPcB`\ENi'fa@g8L0_daz '&4xeJ m pJHq g4@bKi?PXGIcn)>{=]Fg1&Bb8Pab0G 0b) d2(^bqrabq#PX.m!lt'pdIƂ@a9&JAh| F Kq '۠!0u."F`?${G}!V gQ8 U4JC5貣᭪d/VF?~*'N>vP6~ݽw~O/? CijCB#uf7_ nb>8z*KG7?|W,CDQm/}ŀevmˮzdkտwW o_k<5>}[X8+85{CX6L9t0{FQZI-WUO !,f|g:`:y`:9t>WLi y9e[M'{,-}WӠiN/$x_92ut:;s#>(w)]~s~@@f ϝ3̔iOra@[=ۅx0=4B6d>hF,5r/mw|7 矽}eoW=#l&[dHz<Ƴ:Z, MCۜHȢ[Zd霃UVEkE2@g=VE \|ktr~L'RZ_u2ɰ/NR>,}tX{=wx')8ͅu˃rYU}gѯ=향Օ+ '+Adb13;ZFsEXiYKceOyꮞ! h _D^$lzwMN3"D|?@Ҥ=],U/F)2dO$*Z]7TQȜ]Wco%dBUg:-irp C! h@Y9˔Pz-5ML:˹L lyx376 e.)a!n>%*S^?eNҼ=?s'2.N}&=>f-ifp ! 
robustbase/inst/doc/lmrob_simulation.R0000644000176200001440000012701414555212621017657 0ustar liggesusers### R code from vignette source 'lmrob_simulation.Rnw'

###################################################
### code chunk number 1: initial-setup
###################################################
## set options
options(width=60, warn=1) # see warnings where they happen (should eliminate)
## number of workers to start
if(FALSE) {## good for pkg developers
    options(cores= max(1, parallel::detectCores() - 2))
} else { ## CRAN allows maximum of 2:
    options(cores= min(2, parallel::detectCores()))
}
## Number of Repetitions:
N <- 1000
## get path (= ../inst/doc/ in source pkg)
robustDoc <- system.file('doc', package='robustbase')
robustDta <- robustDoc
## initialize (packages, data, ...):
source(file.path(robustDoc, 'simulation.init.R')) # 'xtable'
## set the amount of trimming used in calculation of average results
trim <- 0.1


###################################################
### code chunk number 2: graphics-setup
###################################################
## load required packages for graphics
stopifnot(require(ggplot2),
          require(GGally),# for ggpairs() which replaces ggplot2::plotmatrix()
          require(grid), require(reshape2))
source(file.path(robustDoc, 'graphics.functions.R'))
`%||%` <- function (x, orElse) if (!is.null(x)) x else orElse
## set ggplot theme
theme <- theme_bw(base_size = 10)
theme$legend.key.size <- unit(1, "lines")# was 0.9 in pre-v.3 ggplot2
theme$plot.margin <- unit(c(1/2, 1/8, 1/8, 1/8), "lines")# was (1/2, 0,0,0)
theme_set(theme)
## old and new ggplot2:
stopifnot(is.list(theme_G <- theme$panel.grid.major %||% theme$panel.grid))
## set default sizes for lines and points
update_geom_defaults("point", list(size = 4/3))
update_geom_defaults("line", list(size = 1/4))
update_geom_defaults("hline", list(size = 1/4))
update_geom_defaults("smooth", list(size = 1/4))
## alpha value for plots with many points
alpha.error <- 0.3
alpha.n <- 0.4
## set truncation limits used by f.truncate() & g.truncate.*:
trunc <- c(0.02, 0.14)
trunc.plot <- c(0.0185, 0.155)
f.truncate <- function(x, up = trunc.plot[2], low = trunc.plot[1]) {
    x[x > up] <- up
    x[x < low] <- low
    x
}
g.truncate.lines <- geom_hline(yintercept = trunc, color = theme$panel.border$colour)
g.truncate.line <- geom_hline(yintercept = trunc[2], color = theme$panel.border$colour)
g.truncate.areas <- annotate("rect", xmin=rep(-Inf,2), xmax=rep(Inf,2),
                             ymin=c(0,Inf), ymax=trunc, fill = theme_G$colour)
g.truncate.area <- annotate("rect", xmin=-Inf, xmax=Inf,
                            ymin=trunc[2], ymax=Inf, fill = theme_G$colour)
legend.mod <- list(`SMD.Wtau` = quote('SMD.W'~tau),
                   `SMDM.Wtau` = quote('SMDM.W'~tau),
                   `MM.Avar1` = quote('MM.'~Avar[1]),
                   `MMqT` = quote('MM'~~q[T]),
                   `MMqT.Wssc` = quote('MM'~~q[T]*'.Wssc'),
                   `MMqE` = quote('MM'~~q[E]),
                   `MMqE.Wssc` = quote('MM'~~q[E]*'.Wssc'),
                   `sigma_S` = quote(hat(sigma)[S]),
                   `sigma_D` = quote(hat(sigma)[D]),
                   `sigma_S*qE` = quote(q[E]*hat(sigma)[S]),
                   `sigma_S*qT` = quote(q[T]*hat(sigma)[S]),
                   `sigma_robust` = quote(hat(sigma)[robust]),
                   `sigma_OLS` = quote(hat(sigma)[OLS]),
                   `t1` = quote(t[1]),
                   `t3` = quote(t[3]),
                   `t5` = quote(t[5]),
                   `cskt(Inf,2)` = quote(cskt(infinity,2))
                   )


###################################################
### code chunk number 3: tab-psi-functions
###################################################
## get list of psi functions
lst <- lapply(estlist$procedures, function(x) {
if (is.null(x$args)) return(list(NULL, NULL, NULL)) if (!is.null(x$args$weight)) return(list(x$args$weight[2], round(f.psi2c.chi(x$args$weight[1]),3), round(f.eff2c.psi(x$args$efficiency, x$args$weight[2]),3))) return(list(x$args$psi, round(if (is.null(x$args$tuning.chi)) lmrob.control(psi=x$args$psi)$tuning.chi else x$args$tuning.chi,3), round(if (is.null(x$args$tuning.psi)) lmrob.control(psi=x$args$psi)$tuning.psi else x$args$tuning.psi,3))) }) lst <- unique(lst) ## because of rounding, down from 21 to 5 ! lst <- lst[sapply(lst, function(x) !is.null(x[[1]]))] # 5 --> 4 ## convert to table tbl <- do.call(rbind, lst) tbl[,2:3] <- apply(tbl[,2:3], 1:2, function(x) { gsub('\\$NA\\$', '\\\\texttt{NA}', paste('$', unlist(x), collapse=', ', '$', sep='')) }) tbl[,1] <- paste('\\texttt{', tbl[,1], '}', sep='') colnames(tbl) <- paste('\\texttt{', c('psi', 'tuning.chi', 'tuning.psi'), '}', sep='') require("xtable") # need also print() method: print(xtable(tbl), sanitize.text.function=identity, include.rownames = FALSE, floating=FALSE) ################################################### ### code chunk number 4: fig-psi-functions ################################################### getOption("SweaveHooks")[["fig"]]() d.x_psi <- function(x, psi) { cc <- lmrob.control(psi = psi)$tuning.psi data.frame(x=x, value=Mpsi(x, cc, psi), psi = psi) } x <- seq(0, 10, length.out = 1000) tmp <- rbind(d.x_psi(x, 'optimal'), d.x_psi(x, 'bisquare'), d.x_psi(x, 'lqq'), d.x_psi(x, 'hampel')) print( ggplot(tmp, aes(x, value, color = psi)) + geom_line(lwd=1.25) + ylab(quote(psi(x))) + scale_color_discrete(name = quote(psi ~ '-function'))) ################################################### ### code chunk number 5: fgen ################################################### f.gen <- function(n, p, rep, err) { ## get function name and parameters lerrfun <- f.errname(err$err) lerrpar <- err$args ## generate random predictors ret <- replicate(rep, matrix(do.call(lerrfun, c(n = n*p, lerrpar)), n, p), simplify=FALSE) attr(ret[[1]], 'gen') <- f.gen ret } ratios <- c(1/20, 1/10, 1/5, 1/3, 1/2)## p/n lsit <- expand.grid(n = c(25, 50, 100, 400), p = ratios) lsit <- within(lsit, p <- as.integer(n*p)) .errs.normal.1 <- list(err = 'normal', args = list(mean = 0, sd = 1)) for (i in 1:NROW(lsit)) assign(paste('rand',lsit[i,1],lsit[i,2],sep='_'), f.gen(lsit[i,1], lsit[i,2], rep = 1, err = .errs.normal.1)[[1]]) ################################################### ### code chunk number 6: fig-example-design ################################################### getOption("SweaveHooks")[["fig"]]() require(GGally) colnames(rand_25_5) <- paste0("X", 1:5) # workaround new (2014-12) change in GGally ## and the 2016-11-* change needs data frames: df.r_25_5 <- as.data.frame(rand_25_5) try( ## fails with old GGally and new packageVersion("ggplot2") >= "2.2.1.9000" print(ggpairs(df.r_25_5, axisLabels="show", title = "rand_25_5: n=25, p=5")) ) ################################################### ### code chunk number 7: lmrob_simulation.Rnw:371-372 ################################################### aggrResultsFile <- file.path(robustDta, "aggr_results.Rdata") ################################################### ### code chunk number 8: simulation-run ################################################### if (!file.exists(aggrResultsFile)) { ## load packages required only for simulation stopifnot(require(robust), require(skewt), require(foreach)) if (!is.null(getOption("cores"))) { if (getOption("cores") == 1) registerDoSEQ() ## no not use parallel processing 
else { stopifnot(require(doParallel)) if (.Platform$OS.type == "windows") { cl <- makeCluster(getOption("cores")) clusterExport(cl, c("N", "robustDoc")) clusterEvalQ(cl, slave <- TRUE) clusterEvalQ(cl, source(file.path(robustDoc, 'simulation.init.R'))) registerDoParallel(cl) } else registerDoParallel() } } else registerDoSEQ() ## no not use parallel processing for (design in c("dd", ls(pattern = 'rand_\\d+_\\d+'))) { print(design) ## set design estlist$design <- get(design) estlist$use.intercept <- !grepl('^rand', design) ## add design.predict: pc estlist$design.predict <- if (is.null(attr(estlist$design, 'gen'))) f.prediction.points(estlist$design) else f.prediction.points(estlist$design, max.pc = 2) filename <- file.path(robustDta, sprintf('r.test.final.%s.Rdata',design)) if (!file.exists(filename)) { ## run print(system.time(r.test <- f.sim(estlist, silent = TRUE))) ## save save(r.test, file=filename) ## delete output rm(r.test) ## run garbage collection gc() } } } ################################################### ### code chunk number 9: str-estlist ################################################### str(estlist, 1) ################################################### ### code chunk number 10: estl-errs ################################################### estlist$errs[[1]] ################################################### ### code chunk number 11: show-errs (eval = FALSE) ################################################### ## set.seed(estlist$seed) ## errs <- c(sapply(1:nrep, function(x) do.call(fun, c(n = nobs, args)))) ################################################### ### code chunk number 12: lmrob_simulation.Rnw:449-450 ################################################### str(estlist$output[1:3], 2) ################################################### ### code chunk number 13: simulation-aggr ################################################### if (!file.exists(aggrResultsFile)) { files <- list.files(robustDta, pattern = 'r.test.final\\.') res <- foreach(file = files) %dopar% { ## get design, load r.test, initialize other stuff design <- substr(basename(file), 14, nchar(basename(file)) - 6) cat(design, ' ') load(file.path(robustDta, file)) estlist <- attr(r.test, 'estlist') use.intercept <- if (!is.null(estlist$use.intercept)) estlist$use.intercept else TRUE sel <- dimnames(r.test)[[3]] ## [dimnames(r.test)[[3]] != "estname=lm"] n.betas <- paste('beta',1:(NCOL(estlist$design)+use.intercept),sep='_') ## get design lX <- if (use.intercept) as.matrix(cbind(1, get(design))) else as.matrix(get(design)) n <- NROW(lX) p <- NCOL(lX) ## prepare arrays for variable designs and leverages if (is.function(attr(estlist$design, 'gen'))) { lXs <- array(NA, c(n, NCOL(lX), dim(r.test)[c(1, 4)]), list(Obs = NULL, Pred = colnames(lX), Data = NULL, Errstr = dimnames(r.test)[[4]])) } ## generate errors lerrs <- array(NA, c(n, dim(r.test)[c(1,4)]) , list(Obs = NULL, Data = NULL, Errstr = dimnames(r.test)[[4]])) for (i in 1:dim(lerrs)[3]) { lerrstr <- f.list2str(estlist$errs[[i]]) lerr <- f.errs(estlist, estlist$errs[[i]], gen = attr(estlist$design, 'gen'), nobs = n, npar = NCOL(lX)) lerrs[,,lerrstr] <- lerr if (!is.null(attr(lerr, 'designs'))) { ## retrieve generated designs: this returns a list of designs lXs[,,,i] <- unlist(attr(lerr, 'designs')) if (use.intercept) stop('intercept not implemented for random desings') } rm(lerr) } if (is.function(attr(estlist$design, 'gen'))) { ## calculate leverages lXlevs <- apply(lXs, 3:4, .lmrob.hat) } ## calculate fitted values from betas if 
(!is.function(attr(estlist$design, 'gen'))) { ## fixed design case lfitted <- apply(r.test[,n.betas,sel,,drop=FALSE],c(3:4), function(bhat) { lX %*% t(bhat) } ) } else { ## variable design case lfitted <- array(NA, n*prod(dim(r.test)[c(1,4)])*length(sel)) lfitted <- .C('R_calc_fitted', as.double(lXs), ## designs as.double(r.test[,n.betas,sel,,drop=FALSE]), ## betas as.double(lfitted), ## result as.integer(n), ## n as.integer(p), ## p as.integer(dim(r.test)[1]), ## nrep as.integer(length(sel)), ## n procstr as.integer(dim(r.test)[4]), ## n errstr DUP=FALSE, NAOK=TRUE, PACKAGE="robustbase")[[3]] } tdim <- dim(lfitted) <- c(n, dim(r.test)[1], length(sel),dim(r.test)[4]) lfitted <- aperm(lfitted, c(1,2,4,3)) ## calculate residuals = y - fitted.values lfitted <- as.vector(lerrs) - as.vector(lfitted) dim(lfitted) <- tdim[c(1,2,4,3)] lfitted <- aperm(lfitted, c(1,2,4,3)) dimnames(lfitted) <- c(list(Obs = NULL), dimnames(r.test[,,sel,,drop=FALSE])[c(1,3,4)]) lresids <- lfitted rm(lfitted) ## calculate lm MSE and trim trimmed MSE of betas tf.MSE <- function(lbetas) { lnrm <- rowSums(lbetas^2) c(MSE=mean(lnrm,na.rm=TRUE),MSE.1=mean(lnrm,trim=trim,na.rm=TRUE)) } MSEs <- apply(r.test[,n.betas,,,drop=FALSE],3:4,tf.MSE) li <- 1 ## so we can reconstruct where we are lres <- apply(lresids,3:4,f.aggregate.results <- { function(lresid) { ## the counter li tells us, where we are ## we walk dimensions from left to right lcdn <- f.get.current.dimnames(li, dimnames(lresids), 3:4) lr <- r.test[,,lcdn[1],lcdn[2]] ## update counter li <<- li + 1 ## transpose and normalize residuals with sigma lresid <- t(lresid) / lr[,'sigma'] if (lcdn[1] != 'estname=lm') { ## convert procstr to proclst and get control list largs <- f.str2list(lcdn[1])[[1]]$args if (grepl('lm.robust', lcdn[1])) { lctrl <- list() lctrl$psi <- toupper(largs$weight2) lctrl$tuning.psi <- f.eff2c.psi(largs$efficiency, lctrl$psi) lctrl$method <- 'MM' } else { lctrl <- do.call('lmrob.control',largs) } ## calculate correction factors ## A lsp2 <- rowSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi)^2) ## B lspp <- rowSums(lpp <- Mpsi(lresid,lctrl$tuning.psi, lctrl$psi,1)) ## calculate Huber\'s small sample correction factor lK <- 1 + rowSums((lpp - lspp/n)^2)*NCOL(lX)/lspp^2 ## 1/n cancels } else { lK <- lspp <- lsp2 <- NA } ## only calculate tau variants if possible if (grepl('args.method=\\w*(D|T)\\w*\\b', lcdn[1])) { ## SMD or SMDM ## calculate robustness weights lwgts <- Mwgt(lresid, lctrl$tuning.psi, lctrl$psi) ## function to calculate robustified leverages tfun <- if (is.function(attr(estlist$design, 'gen'))) function(i) { if (all(is.na(wi <- lwgts[i,]))) wi else .lmrob.hat(lXs[,,i,lcdn[2]],wi) } else function(i) { if (all(is.na(wi <- lwgts[i,]))) wi else .lmrob.hat(lX, wi) } llev <- sapply(1:dim(r.test)[1], tfun) ## calculate unique leverages lt <- robustbase:::lmrob.tau(list(),h=llev,control=lctrl) ## normalize residuals with tau (transpose lresid) lresid <- t(lresid) / lt ## A lsp2t <- colSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi)^2) ## B lsppt <- colSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi,1)) } else { lsp2t <- lsppt <- NA } ## calculate raw scales based on the errors lproc <- f.str2list(lcdn[1])[[1]] q <- NA M <- NA if (lproc$estname == 'lmrob.mar' && lproc$args$type == 'qE') { ## for lmrob_mar, qE variant lctrl <- lmrob.control(psi = 'bisquare', tuning.chi=uniroot(function(c) robustbase:::lmrob.bp('bisquare', c) - (1-p/n)/2, c(1, 3))$root) se <- apply(lerrs[,,lcdn[2]],2,lmrob.mscale,control=lctrl,p=p) ltmp <- se/lr[,'sigma'] q <- 
median(ltmp, na.rm = TRUE) M <- mad(ltmp, na.rm = TRUE) } else if (!is.null(lproc$args$method) && lproc$args$method == 'SMD') { ## for D-scales se <- apply(lerrs[,,lcdn[2]],2,lmrob.dscale,control=lctrl, kappa=robustbase:::lmrob.kappa(control=lctrl)) ltmp <- se/lr[,'sigma'] q <- median(ltmp, na.rm = TRUE) M <- mad(ltmp, na.rm = TRUE) } ## calculate empirical correct test value (to yield 5% level) t.val_2 <- t.val_1 <- quantile(abs(lr[,'beta_1']/lr[,'se_1']), 0.95, na.rm = TRUE) if (p > 1) t.val_2 <- quantile(abs(lr[,'beta_2']/lr[,'se_2']), 0.95, na.rm = TRUE) ## return output: summary statistics: c(## gamma AdB2.1 = mean(lsp2/lspp^2,trim=trim,na.rm=TRUE)*n, K2AdB2.1 = mean(lK^2*lsp2/lspp^2,trim=trim,na.rm=TRUE)*n, AdB2t.1 = mean(lsp2t/lsppt^2,trim=trim,na.rm=TRUE)*n, sdAdB2.1 = sd.trim(lsp2/lspp^2*n,trim=trim,na.rm=TRUE), sdK2AdB2.1 = sd.trim(lK^2*lsp2/lspp^2*n,trim=trim,na.rm=TRUE), sdAdB2t.1 = sd.trim(lsp2t/lsppt^2*n,trim=trim,na.rm=TRUE), ## sigma medsigma = median(lr[,'sigma'],na.rm=TRUE), madsigma = mad(lr[,'sigma'],na.rm=TRUE), meansigma.1 = mean(lr[,'sigma'],trim=trim,na.rm=TRUE), sdsigma.1 = sd.trim(lr[,'sigma'],trim=trim,na.rm=TRUE), meanlogsigma = mean(log(lr[,'sigma']),na.rm=TRUE), meanlogsigma.1 = mean(log(lr[,'sigma']),trim=trim,na.rm=TRUE), sdlogsigma = sd(log(lr[,'sigma']),na.rm=TRUE), sdlogsigma.1 = sd.trim(log(lr[,'sigma']),trim=trim,na.rm=TRUE), q = q, M = M, ## beta efficiency.1 = MSEs['MSE.1','estname=lm',lcdn[2]] / MSEs['MSE.1',lcdn[1],lcdn[2]], ## t-value: level emplev_1 = mean(abs(lr[,'beta_1']/lr[,'se_1']) > qt(0.975, n - p), na.rm = TRUE), emplev_2 = if (p>1) { mean(abs(lr[,'beta_2']/lr[,'se_2']) > qt(0.975, n - p), na.rm = TRUE) } else NA, ## t-value: power power_1_0.2 = mean(abs(lr[,'beta_1']-0.2)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.2 = if (p>1) { mean(abs(lr[,'beta_2']-0.2)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.4 = mean(abs(lr[,'beta_1']-0.4)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.4 = if (p>1) { mean(abs(lr[,'beta_2']-0.4)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.6 = mean(abs(lr[,'beta_1']-0.6)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.6 = if (p>1) { mean(abs(lr[,'beta_2']-0.6)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.8 = mean(abs(lr[,'beta_1']-0.8)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.8 = if (p>1) { mean(abs(lr[,'beta_2']-0.8)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_1 = mean(abs(lr[,'beta_1']-1)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_1 = if (p>1) { mean(abs(lr[,'beta_2']-1)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, ## coverage probability: calculate empirically ## the evaluation points are constant, but the designs change ## therefore this makes only sense for fixed designs cpr_1 = mean(lr[,'upr_1'] < 0 | lr[,'lwr_1'] > 0, na.rm = TRUE), cpr_2 = mean(lr[,'upr_2'] < 0 | lr[,'lwr_2'] > 0, na.rm = TRUE), cpr_3 = mean(lr[,'upr_3'] < 0 | lr[,'lwr_3'] > 0, na.rm = TRUE), cpr_4 = mean(lr[,'upr_4'] < 0 | lr[,'lwr_4'] > 0, na.rm = TRUE), cpr_5 = if (any(colnames(lr) == 'upr_5')) { mean(lr[,'upr_5'] < 0 | lr[,'lwr_5'] > 0, na.rm = TRUE) } else NA, cpr_6 = if (any(colnames(lr) == 'upr_6')) { mean(lr[,'upr_6'] < 0 | lr[,'lwr_6'] > 0, na.rm = TRUE) } else NA, cpr_7 = if (any(colnames(lr) == 'upr_7')) { mean(lr[,'upr_7'] < 0 | lr[,'lwr_7'] > 0, na.rm = TRUE) } else NA ) }}) ## convert to data.frame lres <- f.a2df.2(lres, split = '___NO___') ## add additional info lres$n <- NROW(lX) lres$p <- NCOL(lX) lres$nmpdn <- with(lres, (n-p)/n) lres$Design <- design 
## clean up rm(r.test, lXs, lXlevs, lresids, lerrs) gc() ## return lres lres } save(res, trim, file = aggrResultsFile) ## stop cluster if (exists("cl")) stopCluster(cl) } ################################################### ### code chunk number 14: simulation-aggr2 ################################################### load(aggrResultsFile) ## this will fail if the file is not found (for a reason) ## set eval to TRUE for chunks simulation-run and simulation-aggr ## if you really want to run the simulations again. ## (better fail with an error than run for weeks) ## combine list elements to data.frame test.1 <- do.call('rbind', res) test.1 <- within(test.1, { Method[Method == "SM"] <- "MM" Method <- Method[, drop = TRUE] Estimator <- interaction(Method, D.type, drop = TRUE) Estimator <- f.rename.level(Estimator, 'MM.S', 'MM') Estimator <- f.rename.level(Estimator, 'SMD.D', 'SMD') Estimator <- f.rename.level(Estimator, 'SMDM.D', 'SMDM') Estimator <- f.rename.level(Estimator, 'MM.qT', 'MMqT') Estimator <- f.rename.level(Estimator, 'MM.qE', 'MMqE') Estimator <- f.rename.level(Estimator, 'MM.rob', 'MMrobust') Estimator <- f.rename.level(Estimator, 'lsq.lm', 'OLS') Est.Scale <- f.rename.level(Estimator, 'MM', 'sigma_S') Est.Scale <- f.rename.level(Est.Scale, 'MMrobust', 'sigma_robust') Est.Scale <- f.rename.level(Est.Scale, 'MMqE', 'sigma_S*qE') Est.Scale <- f.rename.level(Est.Scale, 'MMqT', 'sigma_S*qT') Est.Scale <- f.rename.level(Est.Scale, 'SMDM', 'sigma_D') Est.Scale <- f.rename.level(Est.Scale, 'SMD', 'sigma_D') Est.Scale <- f.rename.level(Est.Scale, 'OLS', 'sigma_OLS') Psi <- f.rename.level(Psi, 'hampel', 'Hampel') }) ## add interaction of Method and Cov test.1 <- within(test.1, { method.cov <- interaction(Estimator, Cov, drop=TRUE) levels(method.cov) <- sub('\\.+vcov\\.(a?)[wacrv1]*', '\\1', levels(method.cov)) method.cov <- f.rename.level(method.cov, "MMa", "MM.Avar1") method.cov <- f.rename.level(method.cov, "MMrobust.Default", "MMrobust.Wssc") method.cov <- f.rename.level(method.cov, "MM", "MM.Wssc") method.cov <- f.rename.level(method.cov, "SMD", "SMD.Wtau") method.cov <- f.rename.level(method.cov, "SMDM", "SMDM.Wtau") method.cov <- f.rename.level(method.cov, "MMqT", "MMqT.Wssc") method.cov <- f.rename.level(method.cov, "MMqE", "MMqE.Wssc") method.cov <- f.rename.level(method.cov, "OLS.Default", "OLS") ## ratio: the closest 'desired ratios' instead of exact p/n; ## needed in plots only for stat_*(): median over "close" p/n's: ratio <- ratios[apply(abs(as.matrix(1/ratios) %*% t(as.matrix(p / n)) - 1), 2, which.min)] }) ## calculate expected values of psi^2 and psi' test.1$Ep2 <- test.1$Epp <- NA for(Procstr in levels(test.1$Procstr)) { args <- f.str2list(Procstr)[[1]]$args if (is.null(args)) next lctrl <- do.call('lmrob.control',args) test.1$Ep2[test.1$Procstr == Procstr] <- robustbase:::lmrob.E(psi(r)^2, lctrl, use.integrate = TRUE) test.1$Epp[test.1$Procstr == Procstr] <- robustbase:::lmrob.E(psi(r,1), lctrl, use.integrate = TRUE) } ## drop some observations, separate fixed and random designs test.fixed <- droplevels(subset(test.1, n == 20)) ## n = 20 -- fixed design test.1 <- droplevels(subset(test.1, n != 20)) ## n !=20 -- random designs test.lm <- droplevels(subset(test.1, Function == 'lm')) # lm = OLS test.1 <- droplevels(subset(test.1, Function != 'lm')) # Rob := all "robust" test.lm$Psi <- NULL test.lm.2 <- droplevels(subset(test.lm, Error == 'N(0,1)')) # OLS for N(*) test.2 <- droplevels(subset(test.1, Error == 'N(0,1)' & Function != 'lm'))# Rob for N(*) ## subsets test.3 <- 
droplevels(subset(test.2, Method != 'SMDM'))# Rob, not SMDM for N(*) test.4 <- droplevels(subset(test.1, Method != 'SMDM'))# Rob, not SMDM for all ################################################### ### code chunk number 15: fig-meanscale ################################################### getOption("SweaveHooks")[["fig"]]() ## ## exp(mean(log(sigma))): this looks almost identical to mean(sigma) print(ggplot(test.3, aes(p/n, exp(meanlogsigma.1), color = Est.Scale)) + stat_summary(aes(x=ratio), # <- "rounded p/n": --> median over "neighborhood" fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Psi) + ylab(quote('geometric ' ~ mean(hat(sigma)))) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(test.3$Est.Scale))) ################################################### ### code chunk number 16: fig-sdscale-1 ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(test.3, aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + ylab(quote(sd(log(hat(sigma)))*sqrt(n))) + facet_wrap(~ Psi) + geom_point (data=test.lm.2, alpha=alpha.n, aes(color = Est.Scale)) + stat_summary(data=test.lm.2, aes(x=ratio, color = Est.Scale), fun.y=median, geom='line') + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels= lab(test.3 $Est.Scale, test.lm.2$Est.Scale))) ################################################### ### code chunk number 17: fig-sdscale-all ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(test.4, aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) + ylim(with(test.4, range(sdlogsigma.1*sqrt(n)))) + ylab(quote(sd(log(hat(sigma)))*sqrt(n))) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = Error), alpha = alpha.error) + facet_wrap(~ Psi) + ## "FIXME" (?): the next 'test.lm' one give warnings geom_point (data=test.lm, aes(color = Est.Scale), alpha=alpha.n) + ##-> Warning: Removed 108 rows containing missing values (geom_point). stat_summary(data=test.lm, aes(x = ratio, color = Est.Scale), fun.y=median, geom='line') + ##-> Warning: Removed 108 rows containing non-finite values (stat_summary). 
g.scale_shape(labels=lab(test.4$Error)) + scale_colour_discrete("Scale Est.", labels=lab(test.4 $Est.Scale, test.lm$Est.Scale))) ################################################### ### code chunk number 18: fig-qscale ################################################### getOption("SweaveHooks")[["fig"]]() t3est2 <- droplevels(subset(test.3, Estimator %in% c("SMD", "MMqE"))) print(ggplot(t3est2, aes(p/n, q, color = Est.Scale)) + ylab(quote(q)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Psi) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale))) ################################################### ### code chunk number 19: fig-Mscale ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(t3est2, aes(p/n, M/q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + g.scale_y_log10_0.05() + facet_wrap(~ Psi) + ylab(quote(M/q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale))) ################################################### ### code chunk number 20: fig-qscale-all ################################################### getOption("SweaveHooks")[["fig"]]() t1.bi <- droplevels(subset(test.1, Estimator %in% c("SMD", "MMqE") & Psi == 'bisquare')) print(ggplot(t1.bi, aes(p/n, q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Error) + ## labeller missing! ylab(quote(q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(tmp$Est.Scale)), legend.mod = legend.mod) ################################################### ### code chunk number 21: fig-Mscale-all ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(t1.bi, aes(p/n, M/q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + g.scale_y_log10_0.05() + facet_wrap(~ Error) + ylab(quote(M/q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(tmp$Est.Scale)), legend.mod = legend.mod) ################################################### ### code chunk number 22: fig-efficiency ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(test.2, aes(p/n, efficiency.1, color = Estimator)) + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 0.95) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + facet_wrap(~ Psi) + ylab(quote('efficiency of' ~~ hat(beta))) + g.scale_shape(quote(n)) + scale_colour_discrete(name = "Estimator", labels = lab(test.2$Estimator))) ################################################### ### code chunk number 23: fig-efficiency-all ################################################### getOption("SweaveHooks")[["fig"]]() t.1xt1 <- droplevels(subset(test.1, Error != 't1')) print(ggplot(t.1xt1, aes(p/n, efficiency.1, color = Estimator)) + ylab(quote('efficiency of '~hat(beta))) + geom_point(aes(shape = Error), alpha = alpha.error) + geom_hline(yintercept = 0.95) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + g.scale_shape(values=c(16,17,15,3,7,8,9,1,2,4)[-4], 
labels=lab(t.1xt1$Error)) + facet_wrap(~ Psi) + scale_colour_discrete(name = "Estimator", labels = lab(t.1xt1$Estimator))) ################################################### ### code chunk number 24: fig-AdB2-1 ################################################### getOption("SweaveHooks")[["fig"]]() t.2o. <- droplevels(subset(test.2, !is.na(AdB2t.1))) print(ggplot(t.2o., aes(p/n, AdB2.1/(1-p/n), color = Estimator)) + geom_point(aes(shape=factor(n)), alpha = alpha.n) + geom_point(aes(y=K2AdB2.1/(1-p/n)), alpha = alpha.n) + geom_point(aes(y=AdB2t.1), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + stat_summary(aes(x=ratio, y=K2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype=2) + stat_summary(aes(x=ratio, y=AdB2t.1), fun.y=median, geom='line', linetype=3) + geom_hline(yintercept = 1/0.95) + g.scale_y_log10_1() + scale_shape_discrete(quote(n)) + scale_colour_discrete(name = "Estimator", labels = lab(t.2o.$Estimator)) + ylab(quote(mean(hat(gamma)))) + facet_wrap(~ Psi)) ################################################### ### code chunk number 25: fig-sdAdB2-1 ################################################### getOption("SweaveHooks")[["fig"]]() t.2ok <- droplevels(subset(test.2, !is.na(sdAdB2t.1))) print(ggplot(t.2ok, aes(p/n, sdAdB2.1/(1-p/n), color = Estimator)) + geom_point(aes(shape=factor(n)), alpha = alpha.n) + geom_point(aes(y=sdK2AdB2.1/(1-p/n)), alpha = alpha.n) + geom_point(aes(y=sdAdB2t.1), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + stat_summary(aes(x=ratio, y=sdK2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype= 2) + stat_summary(aes(x=ratio, y=sdAdB2t.1), fun.y=median, geom='line', linetype= 3) + g.scale_y_log10_0.05() + scale_shape_discrete(quote(n)) + scale_colour_discrete(name = "Estimator", labels=lab(t.2ok$Estimator)) + ylab(quote(sd(hat(gamma)))) + facet_wrap(~ Psi)) ################################################### ### code chunk number 26: fig-emp-level ################################################### getOption("SweaveHooks")[["fig"]]() t.2en0 <- droplevels(subset(test.2, emplev_1 != 0)) print(ggplot(t.2en0, aes(p/n, f.truncate(emplev_1), color = method.cov)) + g.truncate.lines + g.truncate.areas + geom_point(aes(shape = factor(n)), alpha = alpha.n) + scale_shape_discrete(quote(n)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + scale_colour_discrete(name = "Estimator", labels=lab(t.2en0$method.cov)) + ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) + facet_wrap(~ Psi)) ################################################### ### code chunk number 27: fig-lqq-level ################################################### getOption("SweaveHooks")[["fig"]]() tmp <- droplevels(subset(test.1, Psi == 'lqq' & emplev_1 != 0)) print(ggplot(tmp, aes(p/n, f.truncate(emplev_1), color = method.cov)) + ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) + g.truncate.line + g.truncate.area + geom_point(aes(shape = factor(n)), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + g.scale_shape(quote(n)) + scale_colour_discrete(name = "Estimator", labels=lab(tmp$method.cov)) + facet_wrap(~ Error) , legend.mod = legend.mod ) ################################################### ### code chunk number 28: fig-power-1-0_2 ################################################### getOption("SweaveHooks")[["fig"]]() t2.25 <- droplevels(subset(test.2, n == 25))# <-- fixed n ==> 
no need for 'ratio' tL2.25 <- droplevels(subset(test.lm.2, n == 25)) scale_col_D2.25 <- scale_colour_discrete(name = "Estimator (Cov. Est.)", labels=lab(t2.25 $method.cov, tL2.25$method.cov)) print(ggplot(t2.25, aes(p/n, power_1_0.2, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.2) )) + geom_point(# aes(shape = Error), alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) ################################################### ### code chunk number 29: fig-power-1-0_4 ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(t2.25, aes(p/n, power_1_0.4, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.4) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) ################################################### ### code chunk number 30: fig-power-1-0_6 ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(t2.25, aes(p/n, power_1_0.6, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.6) )) + geom_point(# aes(shape = Error), alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + scale_col_D2.25 + facet_wrap(~ Psi) ) ################################################### ### code chunk number 31: fig-power-1-0_8 ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(t2.25, aes(p/n, power_1_0.8, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.8) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) ################################################### ### code chunk number 32: fig-power-1-1 ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(t2.25, aes(p/n, power_1_1, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 1) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) ################################################### ### code chunk number 33: fig-pred-points ################################################### getOption("SweaveHooks")[["fig"]]() pp <- f.prediction.points(dd)[1:7,] ## Worked in older ggplot2 -- now plotmatrix() is gone, to be replaced by GGally::ggpairs): ## tmp <- plotmatrix(pp)$data ## tmp$label <- as.character(1:7) ## print(plotmatrix(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5)) if(FALSE) { tmp <- ggpairs(pp)$data tmp$label <- as.character(1:7) # and now? 
} ## ggpairs() + geom_text() does *NOT* work {ggpairs has own class} ## print(ggpairs(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5)) try( ## fails with old GGally and new packageVersion("ggplot2") >= "2.2.1.9000" print( ggpairs(dd) )## now (2016-11) fine ) ################################################### ### code chunk number 34: fig-cpr ################################################### getOption("SweaveHooks")[["fig"]]() n.cprs <- names(test.fixed)[grep('cpr', names(test.fixed))] # test.fixed: n=20 => no 'x=ratio' test.5 <- melt(test.fixed[,c('method.cov', 'Error', 'Psi', n.cprs)]) test.5 <- within(test.5, { Point <- as.numeric(do.call('rbind', strsplit(levels(variable), '_'))[,2])[variable] }) print(ggplot(test.5, aes(Point, f.truncate(value), color = method.cov)) + geom_point(aes(shape = Error), alpha = alpha.error) + g.truncate.line + g.truncate.area + stat_summary(fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + g.scale_shape(labels=lab(test.5$Error)) + scale_colour_discrete(name = "Estimator (Cov. Est.)", labels=lab(test.5$method.cov)) + ylab("empirical level of confidence intervals") + facet_wrap(~ Psi) ) ################################################### ### code chunk number 35: maxbias-fn ################################################### ## Henning (1994) eq 33: g <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) rho <- function(x) Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0) integrate(function(x) rho(((1 + theta^2)/s^2*x)^2)*dchisq(x, 1, mu^2/(1 + theta^2)), -Inf, Inf)$value }) ## Martin et al 1989 Section 3.2: for mu = 0 g.2 <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) lctrl$tuning.psi <- lctrl$tuning.chi robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)}) g.2.MM <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)}) ## Henning (1994) eq 30, one parameter case g.3 <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) rho <- function(x) Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0) int.x <- Vectorize(function(y) { integrate(function(x) rho((y - x*theta - mu)/s)*dnorm(x)*dnorm(y),-Inf, Inf)$value }) integrate(int.x,-Inf, Inf)$value }) inv.g1 <- function(value, theta, mu, ...) { g <- if (mu == 0) g.2 else g.3 uniroot(function(s) g(s, theta, mu, ...) - value, c(0.1, 100))$root } inv.g1.MM <- function(value, theta, mu, ...) { g <- if (mu == 0) g.2.MM else g.3.MM ret <- tryCatch(uniroot(function(s) g(s, theta, mu, ...) - value, c(0.01, 100)), error = function(e)e) if (inherits(ret, 'error')) { warning('inv.g1.MM: ', value, ' ', theta, ' ', mu,' -> Error: ', ret$message) NA } else { ret$root } } s.min <- function(epsilon, ...) inv.g1(0.5/(1 - epsilon), 0, 0, ...) s.max <- function(epsilon, ...) inv.g1((0.5-epsilon)/(1-epsilon), 0, 0, ...) BS <- Vectorize(function(epsilon, ...) { sqrt(s.max(epsilon, ...)/s.min(epsilon, ...)^2 - 1) }) l <- Vectorize(function(epsilon, ...) { sigma_be <- s.max(epsilon, ...) sqrt((sigma_be/inv.g1.MM(g.2.MM(sigma_be,0,0,...) + epsilon/(1-epsilon),0,0,...))^2 - 1) }) u <- Vectorize(function(epsilon, ...) { gamma_be <- s.min(epsilon, ...) max(l(epsilon, ...), sqrt((gamma_be/inv.g1.MM(g.2.MM(gamma_be,0,0,...) 
+ epsilon/(1-epsilon),0,0,...))^2 - 1)) }) ################################################### ### code chunk number 36: max-asymptotic-bias ################################################### asymptMBFile <- file.path(robustDta, 'asymptotic.max.bias.Rdata') if (!file.exists(asymptMBFile)) { x <- seq(0, 0.35, length.out = 100) rmb <- rbind(data.frame(l=l(x, psi = 'hampel'), u=u(x, psi = 'hampel'), psi = 'Hampel'), data.frame(l=l(x, psi = 'lqq'), u=u(x, psi = 'lqq'), psi = 'lqq'), data.frame(l=l(x, psi = 'bisquare'), u=u(x, psi = 'bisquare'), psi = 'bisquare'), data.frame(l=l(x, psi = 'optimal'), u=u(x, psi = 'optimal'), psi = 'optimal')) rmb$x <- x save(rmb, file=asymptMBFile) } else load(asymptMBFile) ################################################### ### code chunk number 37: fig-max-asymptotic-bias ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(rmb, aes(x, l, color=psi)) + geom_line() + geom_line(aes(x, u, color=psi), linetype = 2) + xlab(quote("amount of contamination" ~~ epsilon)) + ylab("maximum asymptotic bias bounds") + coord_cartesian(ylim = c(0,10)) + scale_y_continuous(breaks = 1:10) + scale_colour_hue(quote(psi ~ '-function'))) robustbase/inst/doc/estimating.functions.R0000644000176200001440000005501012737461431020454 0ustar liggesusers## Called from ./lmrob_simulation.Rnw ## ~~~~~~~~~~~~~~~~~~~~~ ########################################################################### ## Prediction ########################################################################### f.predict <- function (object, newdata = NULL, scale = sigma(object), se.fit = FALSE, df = object$df.residual, interval = c('none', 'confidence', 'prediction'), level = 0.95, type = c('response'), terms = NULL, na.action = na.pass, pred.var = res.var/weights, weights = 1, cov = covariance.matrix(object), ...) { ## Purpose: replace predict.lmrob from robustbase package ## ---------------------------------------------------------------------- ## Arguments: See ?predict.lm ## type = 'presponse' ('term' is not supported) ## terms argument is ignored ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Sept 2009, 12:16 ## take as much from predict.lm as possible ## check arguments if (!missing(terms)) stop('predict.lmrob: terms argument is ignored') ## set data tt <- terms(object) if (missing(newdata) || is.null(newdata)) { mm <- X <- model.matrix(object) mmDone <- TRUE offset <- object$offset } else { Terms <- delete.response(tt) m <- model.frame(Terms, newdata, na.action = na.action, xlev = object$xlevels) if (!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m) X <- model.matrix(Terms, m, contrasts.arg = object$contrasts) offset <- rep(0, nrow(X)) if (!is.null(off.num <- attr(tt, "offset"))) for (i in off.num) offset <- offset + eval(attr(tt, "variables")[[i + 1]], newdata) if (!is.null(object$call$offset)) offset <- offset + eval(object$call$offset, newdata) mmDone <- FALSE } n <- length(object$residuals) p <- object$rank if (p < ncol(X) && !(missing(newdata) || is.null(newdata))) warning("prediction from a rank-deficient fit may be misleading") beta <- coef(object) ## ignoring piv here predictor <- drop(X %*% beta) if (!is.null(offset)) predictor <- predictor + offset interval <- match.arg(interval) type <- match.arg(type) if (se.fit || interval != "none") { res.var <- scale^2 if (type != "terms") { if (p > 0) { ## this is probably not optimal... 
## cov <- covariance.matrix(object) ## set as argument ip <- diag(X %*% tcrossprod(cov, X)) } else ip <- rep(0, n) } } if (interval != "none") { tfrac <- qt((1 - level)/2, df) hwid <- tfrac * switch(interval, confidence = sqrt(ip), prediction = sqrt(ip + pred.var)) if (type != "terms") { predictor <- cbind(predictor, predictor + hwid %o% c(1, -1)) colnames(predictor) <- c("fit", "lwr", "upr") } } if (se.fit || interval != "none") se <- sqrt(ip) if (missing(newdata) && !is.null(na.act <- object$na.action)) { predictor <- napredict(na.act, predictor) if (se.fit) se <- napredict(na.act, se) } if (se.fit) list(fit = predictor, se.fit = se, df = df, residual.scale = sqrt(res.var)) else predictor } ## predict(obj, pred, interval = 'prediction') ## f.predict(obj, pred, interval = 'prediction') predict.lmRob <- function(object, newdata = NULL, scale = NULL, ...) { ## Purpose: extend predict() functionality to lmRob objects ## ---------------------------------------------------------------------- ## Arguments: ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Sept 2009, 12:16 class(object) <- c(class(object), "lm") object$qr <- qr(sqrt(weights(object)) * model.matrix(object)) if (missing(scale)) scale <- object$scale predict.lm(object, newdata = newdata, scale = scale, ...) } ########################################################################### ## some helper functions ########################################################################### f.lmRob <- function(...) { ## Purpose: wrapper for lmRob ## ---------------------------------------------------------------------- ## Arguments: see ?lmRob ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 10:28 ## get arguments args <- list(...) ## update defaults: if (is.null(args$mxr)) args$mxr <- 2000 if (is.null(args$mxf)) args$mxf <- 500 if (is.null(args$mxs)) args$mxs <- 2000 ## get all arguments except the arguments of lmRob: uw <- c('formula', 'data', 'weights', 'subset', 'na.action', 'model', 'x', 'y', 'contrasts', 'nrep', 'genetic.control') ind <- if (is.null(names(args))) rep(FALSE, length(args)) else names(args) != '' & !names(args) %in% uw ## they go into control: control <- do.call("lmRob.control", args[ind]) ## now call lmRob do.call("lmRob", c(args[!ind], list(control = control))) } ## lmRob(y ~ x, d.data, control = lmRob.control(initial.alg = 'fast', efficiency = 0.95, weight = c('bisquare', 'bisquare'))) ## lmRob(y ~ x, d.data, initial.alg = 'fast', efficiency = 0.95, weight = c('bisquare', 'bisquare')) ## f.lmRob(y ~ x, d.data, initial.alg = 'fast', efficiency = 0.95, weight = c('bisquare', 'bisquare')) f.lmRob.S <- function(... 
, robust.control = lmRob.control()) { ## Purpose: call the S estimation procedure of lmRob ## ---------------------------------------------------------------------- ## Arguments: x: design matrix x ## y: vector of observations ## robust.control: control list of lmRob.control() ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 29 Oct 2009, 14:54 ## code: from lmRob.fit.compute, robust package version 0.3-9 robust.control$initial.alg = 'random' robust.control$estim = 'Initial' z <- lmRob(..., control = robust.control) class(z) <- 'lmrob.S' z } ## f.lmRob.S(rep(1,10), rnorm(10), lmRob.control(weight = c('bisquare', 'bisquare'))) f.eff2c.psi <- function(eff, weight='bisquare') { ## Purpose: convert lmRob efficiencies to c.psi ## ---------------------------------------------------------------------- ## Arguments: eff: lmRob efficiency ## weight: type of weight (weight argument in lmRob.control) ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 15:36 if(is.null(eff)) return(NULL) lw = casefold(weight) if (lw == 'bisquare') { if (eff == 0.95) 4.685061 else if (eff == 0.9) 3.882646 else if (eff == 0.85) 3.443689 else if (eff == 0.8) 3.136909 else NA } else if (lw == 'optimal') { if (eff == 0.95) 1.060158 else if (eff == 0.9) 0.9440982 else if (eff == 0.85) 0.8684 else if (eff == 0.8) 0.8097795 else NA } else NA } f.psi2c.chi <- function(weight) { ## Purpose: return lmRob defaults for c.chi ## ---------------------------------------------------------------------- ## Arguments: weight: type of weight ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 28 Jan 2010, 10:05 switch(weight, 'bisquare' = 1.5477, 'optimal' = 0.4047) } residuals.lmrob.S <- function(obj) obj$residuals robustness.weights <- function(x, ...) UseMethod("robustness.weights") ## Purpose: retrieve robustness weights from robust regression return ## object ## ---------------------------------------------------------------------- ## Arguments: obj: robust regression output object ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:42 robustness.weights.lmrob <- robustness.weights.default <- function(obj) naresid(obj$na.action, obj$w) robustness.weights.lm <- function(obj) { if (any(class(obj) %in% c('lmrob', 'f.lmrob'))) stop('Caution: returning incorrect weights') naresid(obj$na.action, rep(1, length(obj$resid))) } robustness.weights.rlm <- function(obj) naresid(obj$na.action, obj$w) robustness.weights.lmRob <- function(obj) { if (obj$robust.control$weight[2] != 'Optimal') { c.psi <- f.eff2c.psi(obj$robust.control$efficiency, obj$robust.control$weight[2]) rs <- obj$residuals / obj$scale obj$M.weights <- Mwgt(rs, c.psi, obj$robust.control$weight[2]) } naresid(obj$na.action, obj$M.weights) } ## t <- f.lmRob(y ~ x, d.data) ## t <- f.lmrob(y ~ x, d.data, method = 'SM') ## t <- f.lmRob(y ~ x, d.data, initial.alg = 'fast', efficiency = 0.95, weight = c('bisquare', 'bisquare')) ## t <- lmRob(y ~ x, d.data, control = lmRob.control(initial.alg = 'fast', efficiency = 0.95, weight = c('bisquare', 'bisquare'))) ## robustness.weights(t) robustness.weights.lmrob.S <- function(obj) { rstand <- resid(obj)/sigma(obj) Mwgt(rstand, obj$control$tuning.chi, obj$control$psi) } ## MM: Why on earth is this called covariance.matrix() ?? -- S and R standard is vcov() !! 
## -- For lm, they are indeed identical; for lmrob, too ## HOWEVER, the *.rlm() method of cov..matrix() *differs* from vcov.rlm() -- why? covariance.matrix <- function(x, ...) UseMethod("covariance.matrix") ## Purpose: retrieve covariance matrix from robust regression return ## object ## ---------------------------------------------------------------------- ## Arguments: obj: robust regression output object ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:42 covariance.matrix.lmrob <- covariance.matrix.default <- function(obj) obj$cov covariance.matrix.rlm <- function(obj, method = 'XtWX') summary(obj, method)$cov covariance.matrix.lm <- function(obj) { s <- summary(obj) s$cov * s$sigma^2 } sigma <- function(x, ...) UseMethod("sigma") ## Purpose: retrieve scale estimate from robust regression return ## object ## ---------------------------------------------------------------------- ## Arguments: obj: robust regression output object ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:42 sigma.lmrob <- sigma.default <- function(obj) obj$scale sigma.lm <- function(obj) summary(obj)$sigma sigma.rlm <- function(obj) obj$s converged <- function(x, ...) UseMethod("converged") ## Purpose: check convergence status of return object ## ---------------------------------------------------------------------- ## Arguments: obj: robust regression output object ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:42 converged.default <- function(obj) is.list(obj) && !is.null(obj$converged) && obj$converged converged.lm <- function(obj) if (is.null(obj$converged)) TRUE else obj$converged converged.lmRob <- function(obj) is.list(obj) && !is.null(obj$est) && obj$est == 'final' ########################################################################### ## alternative estimation methods ########################################################################### lmrob.u <- function(formula, data, subset, weights, na.action, ..., start) { ## Purpose: update lmrob object if possible ## ---------------------------------------------------------------------- ## Arguments: (lmrob arguments) ## start: object to update ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 28 Jul 2010, 08:30 args <- as.list(match.call())[-1] args$start <- NULL if (!missing(start)) { ## if start is a string, get start from parent if (is.character(start)) start <- get(start, envir=parent.frame()) if (class(start) == 'lmrob') { ## check whether we can update start easily oldargs <- as.list(start$call)[-1] if (isTRUE(all.equal(args, oldargs))) return(start) else { ret <- start ## check method argument (ignore cov argument) if (is.null(oldargs$method)) oldargs$method <- start$control$method if (oldargs$method == 'MM') oldargs$method <- 'SM' if (is.null(args$method) || args$method == 'MM') args$method <- 'SM' rest.ok <- isTRUE(all.equal(oldargs[!names(oldargs) %in% c('method', 'cov')], args[!names(args) %in% c('method', 'cov')])) if (is.null(start$x)) stop('x matrix not found. 
Use lmrob(..., x = TRUE).') if (args$method != oldargs$method && rest.ok) { ## method is different, but the rest is the same oldsteps <- strsplit(oldargs$method, "")[[1]] steps <- strsplit(args $method, "")[[1]] ## reduce start to largest common initial estimator while(length(oldsteps) > length(steps) || any(oldsteps != steps[seq_along(oldsteps)])) { elems <- c('na.action', 'offset', 'contrasts', 'xlevels', 'terms', 'model', 'x', 'y', 'degree.freedom', 'df.residual', 'call') ret <- c(ret$init, start[elems[elems %in% names(ret)]]) class(ret) <- 'lmrob' oldsteps <- oldsteps[-length(oldsteps)] } ret$call$method <- args$method steps <- steps[- seq_along(oldsteps)] if (length(steps) > 0) { ret$cov <- NULL for (step in steps) { ret <- switch(step, D = lmrob..D..fit(ret), M = lmrob..M..fit(obj = ret), N = { y <- model.response(ret$model) ## taus are standardized because otherwise ## the resulting efficiency is lower tau <- ret$tau / mean(ret$tau) tmp <- lmrob..M..fit(x = ret$x/tau, y = y/tau, obj = ret) tmp$residuals <- y - ret$x %*% ret$coef tmp$qr <- NULL tmp }, stop("only M or D steps supported")) if (!ret$converged) { warning(step, "-step did NOT converge.") break } } } else { if (is.null(ret$qr)) ret$qr <- qr(ret$x * sqrt(ret$weights)) ret$rank <- ret$qr$rank } } ## update covariance matrix if (rest.ok) { if (is.null(args$cov)) args$cov <- lmrob.control(method=ret$control$method)$cov ret$cov <- vcov(ret, args$cov) ret$control$cov <- args$cov ret$call$cov <- args$cov return(ret) } } } } ## if we're here, update failed or there was no start cl <- match.call() cl$start <- NULL cl[[1]] <- as.symbol("lmrob") eval(cl, envir = parent.frame()) } ## lmrob.u <- function(formula, data, subset, weights, na.action, ..., start) ## { ## cl <- match.call() ## cl$start <- NULL ## cl[[1]] <- as.symbol("lmrob") ## eval(cl, envir = parent.frame()) ## ## do.call('lmrob', args, envir = parent.frame()) ## } ## set.seed(0); d.data <- data.frame(y = rnorm(10), x = 1:10) ## lres <- lmrob(y ~ x, d.data, method = 'SM', psi = 'lgw', cov = '.vcov.avar1') ## obj1 <- lmrob(y ~ x, d.data, method = 'SM', psi = 'lgw', cov = '.vcov.w') ## test <- lmrob.u(y ~ x, d.data, method = 'SM', psi = 'lgw', cov = '.vcov.w', ## start = 'lres') ## all.equal(obj1, test) ## obj2 <- lmrob(y ~ x, d.data, method = 'SMD', psi = 'lgw', cov = '.vcov.w') ## test <- lmrob.u(y ~ x, d.data, method = 'SMD', psi = 'lgw', ## start = 'lres') ## all.equal(obj2, test[names(obj2)], check.attr = FALSE) ## obj3 <- lmrob(y ~ x, d.data, method = 'SMDM', psi = 'lgw', cov = '.vcov.w') ## test <- lmrob.u(y ~ x, d.data, method = 'SMDM', psi = 'lgw', ## start = 'lres') ## all.equal(obj3, test[names(obj3)], check.attr = FALSE) ## test <- lmrob.u(y ~ x, d.data, method = 'SMDM', psi = 'lgw', ## start = 'obj2') ## all.equal(obj3, test[names(obj3)], check.attr = FALSE) ## test <- lmrob.u(y ~ x, d.data, method = 'SM', psi = 'lgw', cov = '.vcov.w', ## start = obj3) ## all.equal(obj1, test[names(obj1)], check.attr = FALSE) ##' Compute the MM-estimate with corrections qE or qT as in ##' Maronna, R. A., Yohai, V. J., 2010. ##' Correcting MM estimates for "fat" data sets. ##' Computational Statistics & Data Analysis 54 (12), 3168–3173. ##' @title MM-estimate with Maronna-Yohai(2010) corrections ##' @param formula ##' @param data ##' @param subset ##' @param weights ##' @param na.action ##' @param ... 
##' @param type ##' @return ##' @author Manuel Koller lmrob.mar <- function(formula, data, subset, weights, na.action, ..., type = c("qE", "qT")) { ## get call and modify it so that ## lmrob returns the appropriate S-estimate cl <- match.call() method <- if (is.null(cl$method)) { if (!is.null(cl$control)) list(...)[["control"]]$method else 'MM' } else cl$method cl$type <- NULL cl$method <- 'S' cov <- if(!is.null(cl$cov)) cl$cov else '.vcov.w' cl$cov <- 'none' cl[[1]] <- as.symbol("lmrob") ## get S-estimate obj <- eval(cl, envir = parent.frame()) ## correct S-scale estimate according to formula n <- length(obj$resid) p <- obj$rank type <- match.arg(type) ## for type qE: adjust tuning.chi (h0) to account for different delta if (type == 'qE') { if (obj$control$psi != 'bisquare')## FIXME: "tukey" should work, too stop('lmrob.mar: type qE is only available for bisquare psi') h0 <- uniroot(function(c) robustbase:::lmrob.bp('bisquare', c) - (1-p/n)/2, c(1, 3))$root ## update scale obj$scale <- obj$scale * obj$control$tuning.chi / h0 obj$control$tuning.chi <- h0 } ## calculate q q <- switch(type, "qT" = { rs <- obj$resid / obj$scale ## \hat a = \mean \rho(r/sigma)^2 ## obj$control$tuning.chi == h_0 ahat <- mean(Mpsi(rs, obj$control$tuning.chi, obj$control$psi)^2) ## \hat b = \mean \rho''(r/sigma) bhat <- mean(Mpsi(rs, obj$control$tuning.chi, obj$control$psi, 1)) ## \hat c = \mean \rho'(r/sigma) * r/sigma chat <- mean(Mpsi(rs, obj$control$tuning.chi, obj$control$psi)*rs) ## qT: 1 + p*ahat/n/2/bhat/chat }, "qE" = 1 / (1 - (1.29 - 6.02/n)*p/n) , stop("unknown type ", type)) ## update scale obj$scale.uncorrected <- obj$scale obj$scale <- q * obj$scale ## add M step if requested if (method %in% c('MM', 'SM')) { obj$control$cov <- cov obj <- lmrob..M..fit(obj = obj) ## construct a proper lmrob object elems <- c('na.action', 'offset', 'contrasts', 'xlevels', 'terms', 'model', 'x', 'y') obj <- c(obj, obj$init.S[elems[elems %in% names(obj$init.S)]]) obj$degree.freedom <- obj$df.residual <- n - obj$rank } else if (method != 'S') stop("lmrob.mar: Only method = S, SM and MM supported.") ## update class class(obj) <- 'lmrob' ## return obj } ## summary(lmrob(y ~ x, d.data)) ## summary(lmrob.mar(y ~ x, d.data, type = 'qE')) ## summary(tmp <- lmrob.mar(y ~ x, d.data, type = 'qT')) ## this function calculates M-estimate of scale ## with constants as used for S-estimate with maximum breakdown point lmrob.mscale <- function(e, control, p = 0L) { ret <- .C("R_lmrob_S", x = as.double(e), ## this is ignored y = as.double(e), n = as.integer(length(e)), p = as.integer(p), ## divide the sum by n - p nResample = 0L, ## find scale only scale = as.double(mad(e)), coef = double(1), as.double(control$tuning.chi), as.integer(.psi2ipsi(control$psi)), as.double(control$bb), ## delta best_r = as.integer(control$best.r.s), groups = as.integer(control$groups), n.group = as.integer(control$n.group), k.fast.s = as.integer(control$k.fast.s), k.max = as.integer(control$k.max), maxit.scale = as.integer(control$maxit.scale), refine.tol = as.double(control$refine.tol), inv.tol = as.double(control$solve.tol), converged = logical(1), trace.lev = as.integer(0), mts = as.integer(control$mts), ss = robustbase:::.convSs(control$subsampling), fast.s.large.n = as.integer(length(e)+1), PACKAGE = 'robustbase') ret$scale } lmrob.dscale <- function(r, control, kappa = robustbase:::lmrob.kappa(control = control)) { tau <- rep.int(1, length(r)) w <- Mwgt(r, control$tuning.psi, control$psi) scale <- sqrt(sum(w * r^2) / kappa / sum(tau^2*w)) psi <- 
control$psi c.psi <- robustbase:::.psi.conv.cc(psi, control$tuning.psi) ret <- .C("R_find_D_scale", r = as.double(r), kappa = as.double(kappa), tau = as.double(tau), length = as.integer(length(r)), scale = as.double(scale), c = as.double(c.psi), ipsi = .psi2ipsi(psi), type = 3L, ## dt1 as only remaining option rel.tol = as.double(control$rel.tol), k.max = as.integer(control$k.max), converged = logical(1), PACKAGE = 'robustbase') ret$scale } ## sd.trim function by Gregor Gorjanc ## from http://ggorjan.blogspot.com/2008/11/trimmed-standard-deviation.html ## with added correction factor to be unbiased at the normal sd.trim <- function(x, trim=0, na.rm=FALSE, ...) { if(!is.numeric(x) && !is.complex(x) && !is.logical(x)) { warning("argument is not numeric or logical: returning NA") return(NA_real_) } if(na.rm) x <- x[!is.na(x)] if(!is.numeric(trim) || length(trim) != 1) stop("'trim' must be numeric of length one") n <- length(x) if(trim > 0 && n > 0) { if(is.complex(x)) stop("trimmed sd are not defined for complex data") if(trim >= 0.5) return(0) lo <- floor(n * trim) + 1 hi <- n + 1 - lo x <- sort.int(x, partial = unique(c(lo, hi)))[lo:hi] } corr <- if (0 < trim && trim < 0.5) { z <- qnorm(trim, lower.tail=FALSE)# = Phi^{-1}(1 - tr) sqrt(1 - 2/(1-2*trim) *z*dnorm(z)) } else 1 sd(x)/corr } robustbase/inst/doc/fastMcd-kmini.Rnw0000644000176200001440000003113213012615634017324 0ustar liggesusers\documentclass[11pt, a4paper]{article} \usepackage[a4paper, text={16cm,25cm}]{geometry} %\VignetteIndexEntry{covMcd() -- Generalizing the FastMCD} %\VignetteDepends{robustbase} \SweaveOpts{prefix.string=mcd, eps=FALSE, pdf=TRUE, strip.white=true} \SweaveOpts{width=6, height=4.1} \usepackage{amsmath} \usepackage{amsfonts}% \mathbb \usepackage{mathtools}% -> \floor, \ceil \usepackage[utf8]{inputenc} %% The following is partly R's share/texmf/Rd.sty \usepackage{color} \usepackage{hyperref} \definecolor{Blue}{rgb}{0,0,0.8} \definecolor{Red}{rgb}{0.7,0,0} \hypersetup{% hyperindex,% colorlinks={true},% pagebackref,% linktocpage,% plainpages={false},% linkcolor={Blue},% citecolor={Blue},% urlcolor={Red},% pdfstartview={Fit},% pdfview={XYZ null null null}% } \usepackage{natbib} \usepackage[noae]{Sweave} %---------------------------------------------------- \DeclarePairedDelimiter{\ceil}{\lceil}{\rceil} \DeclarePairedDelimiter{\floor}{\lfloor}{\rfloor} \DeclareMathOperator{\sign}{sign} \newcommand{\abs}[1]{\left| #1 \right|} \newtheorem{definition}{Definition} \newcommand{\byDef}{\mathrm{by\ default}} \newcommand{\R}{{\normalfont\textsf{R}}{}} \newcommand{\code}[1]{\texttt{#1}} \newcommand*{\pkg}[1]{\texttt{#1}} \newcommand*{\CRANpkg}[1]{\href{https://CRAN.R-project.org/package=#1}{\pkg{#1}}} %---------------------------------------------------- \begin{document} \setkeys{Gin}{width=0.9\textwidth} \setlength{\abovecaptionskip}{-5pt} \title{covMcd() -- Considerations about Generalizing the FastMCD} \author{Martin M\"achler} \maketitle %\tableofcontents %% %% Pison, G., Van Aelst, S., and Willems, G. (2002) %% Small Sample Corrections for LTS and MCD. %% Metrika % ~/save/papers/robust-diverse/Pison_VanAelst_Willems.pdf %% <>= # set margins for plots options(SweaveHooks=list(fig=function() par(mar=c(3,3,1.4,0.7), mgp=c(1.5, 0.5, 0))), width = 75) @ \section{Introduction} The context is robust multivariate ``location and scatter'' estimation, which corresponds to estimating the first two moments in cases they exist. 
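For orientation only, here is a minimal call (an illustrative sketch added here; the \code{hbk} data set ships with \pkg{robustbase}, and the result components \code{center} and \code{cov} hold the estimates):
<<intro-covMcd, eval=FALSE>>=
## robust location and scatter of the three explanatory variables of 'hbk',
## using the default alpha = 1/2:
cM <- covMcd(hbk[, 1:3])
cM$center  # robust estimate of mu
cM$cov     # robust estimate of Sigma
@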
We assume data and a model \begin{align} \label{eq:data-model} x_i & \in \mathbb{R}^p, \ \ i=1,2,\dots,n \\ x_i & \sim \mathcal{F}(\mu, \Sigma), \ \ \mathrm{i.i.d.};\ \ \mu \in \mathbb{R}^p, \ \ \Sigma \in \mathbb{R}^{p \times p}, \ \textrm{positive definite}, \end{align} where a conceptual null model is the $p$-dimensional normal distribution. One typical assumption is that $\mathcal{F}$ is a mixture with the majority component (``good data'') being $\mathcal{N}_p(\mu, \Sigma)$ and other components modeling ``the outliers''. In other words, we want estimates $\bigl(\hat{\mu}, \hat{\Sigma}\bigr)$ which should be close to the true ``good data'' $(\mu, \Sigma)$ --- and do not say more here. \section{MCD and ``the Fast'' MCD (= \textsc{fastmcd}) Algorithm} The \CRANpkg{robustbase} \R{} package has featured a function \code{covMcd()} since early on (Feb.~2006) and that has been an interface to the Fortran routine provided by the original authors and (partly) described in \citet{RouPvD99}. %% Rousseeuw, P. J. and van Driessen, K. (1999) %% A fast algorithm for the minimum covariance determinant estimator. %% Technometrics {41}, 212--223. %% >> ~/save/papers/robust-diverse/Rousseeuw_VanD-FastMCD_1999.pdf % ------------------------------------------------------------ We describe shortly how the algorithm works, partly building on the documentation provided in the source (R, S, and Fortran) codes: %% R CMD Rdconv --type=latex ../../man/covMcd.Rd > covMcd.tex The minimum covariance determinant estimator of location and scatter (MCD) implemented in \code{covMcd()} is similar to \R{} function \code{cov.mcd()} in \CRANpkg{MASS}. The (``theoretical'') MCD looks for the $h = h_\alpha (> 1/2)$ out of $n$ observations whose classical covariance matrix has the lowest possible determinant. In more detail, we will use $h = h_\alpha = h(\alpha,n,p) \approx \alpha \cdot (n+p+1)$, where as \citet{RouPvD99} mainly use (the default) $\alpha = \frac 1 2$, where $h = h(1/2, n, p) = \floor[\Big]{\frac{n+p+1}{2}}$. For general $\alpha \ge \frac 1 2$, the \R{} implementation (derived from their original S code) uses $h = h(\alpha,n,p) =$ \code{h.alpha.n(alpha,n,p)} (function in \pkg{robustbase}), which is \begin{eqnarray} \label{eq:def-h} h = h_\alpha = h(\alpha,n,p) := \floor{2 n_2 - n + 2 \alpha (n - n_2)}, \ \mathrm{where\ } n_2 := \floor[\Big]{\frac{n+p+1}{2}}% %= (n+p+1)/2 \ \ (\mathrm{\ where ``/'' denotes \emph{integer} division}) . \end{eqnarray} The fraction $\alpha \ge \frac 1 2$ can be chosen by the user, where $\alpha = \frac 1 2$ is the most robust, and indeed, $h_{1/2} = n_2 = \floor[\Big]{\frac{n+p+1}{2}}$. Even in general, as long as $n \gg p$, $\alpha$ is approximately the \emph{proportion} of the subsample size $h$ in the full sample (size $n$): \begin{equation} \label{eq:h.approx} h \approx \alpha \cdot n \iff \alpha \approx \frac{h}{n}, \end{equation} <>= require(robustbase) n <- c(5, 10, 20, 30, 50, 100, 200, 500) hmat <- function(alpha, p) cbind(n, h.alpha = h.alpha.n (alpha, n,p), h. 
= floor(alpha * (n + p + 1)), alpha.n = round(alpha * n)) hmat(alpha = 1/2, p = 3) hmat(alpha = 3/4, p = 4) @ The breakdown point (for $h > \frac{n}{2}$) then is \begin{eqnarray} \label{eq:breakdown} \epsilon_{*} = \frac{n-h+1}{n}, \end{eqnarray} which is less than but close to $\frac 1 2$ for $\alpha = \frac 1 2$, and in general, $h/n \approx \alpha$, the breakdown point is approximately, \begin{eqnarray} \label{eq:eps-approx} \epsilon_{*} = \frac{n-h+1}{n} \approx \frac{n-h}{n} = 1 - \frac{h}{n} \approx 1 - \alpha. \end{eqnarray} The raw MCD estimate of location, say $\hat{\mu}_0$, is then the average of these $h$ points, whereas the raw MCD estimate of scatter, $\hat{\Sigma}_0$, is their covariance matrix, multiplied by a consistency factor \code{.MCDcons(p, h/n)}) and (by default) a finite sample correction factor \code{.MCDcnp2(p, n, alpha)}, to make it consistent at the normal model and unbiased at small samples. %% Both rescaling factors (consistency and finite sample) are returned in the length-2 vector %% \code{raw.cnp2}. In practice, for reasonably sized $n$, $p$ and hence $h$, it is not feasible to search the full space of all $n \choose h$ $h$-subsets of $n$ observations. Rather, the implementation of \code{covMcd} uses the Fast MCD algorithm of \citet{RouPvD99} to approximate the minimum covariance determinant estimator, see Section~\ref{sec:fastMCD}. Based on these raw MCD estimates, $\bigl(\hat{\mu}_0, \hat{\Sigma}_0\bigr)$, % (unless argument \code{raw.only} is true), a reweighting step is performed, i.e., \code{V <- cov.wt(x,w)}, where \code{w} are weights determined by ``outlyingness'' with respect to the scaled raw MCD, using the ``Mahalanobis''-like, robust distances $d_i\bigl(\hat{\mu}_0, \hat{\Sigma}_0\bigr)$, see (\ref{eq:Maha}). Again, a consistency factor and %(if \code{use.correction} is true) a finite sample correction factor %(\code{.MCDcnp2.rew(p, n, alpha)}) are applied. The reweighted covariance is typically considerably more efficient than the raw one, see \citet{PisGvAW02}. The two rescaling factors for the reweighted estimates are returned in \code{cnp2}. Details for the computation of the finite sample correction factors can be found in \citet{PisGvAW02}. \section{Fast MCD Algorithm -- General notation}\label{sec:fastMCD} \paragraph{Note:} In the following, apart from the mathematical notation, we also use variable names, e.g., \code{kmini}, used in the Fortran and sometimes \R{} function code, in \R{} package \CRANpkg{robustbase}. Instead of directly searching for $h$-subsets (among ${n \choose h} \approx {n \choose n/2}$) the basic idea is to start with small subsets of size $p+1$, their center $\mu$ and covariance matrix $\Sigma$, and a corresponding $h$-subset of the $h$ observations with smallest (squared) (``Mahalanobis''-like) distances \begin{align} \label{eq:Maha} d_i = d_i(\mu,\Sigma) := (x_i - \mu)' \Sigma^{-1} (x_i - \mu), \ \ i=1,2,\dots,n, \end{align} and then use concentration steps (``C~steps'') to (locally) improve the chosen set by iteratively computing $\mu$, $\Sigma$, new distances $d_i$ and a new set of size $h$ with smallest distances $d_i(\mu,\Sigma)$. Each C~step is proven to decrease the determinant $\det(\Sigma)$ if $\mu$ and $\Sigma$ did change at all. Consequently, convergence to a local minimum is sure, as the number of $h$-subsets is finite. 
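As an illustration (a sketch only, \emph{not} the actual Fortran implementation; the function name \code{cstep} and the use of plain \code{colMeans()} and \code{cov()} are ours), one C~step can be written as
<<cstep-sketch, eval=FALSE>>=
## One C step: given the data matrix X and the current h-subset 'idx',
## recompute (mu, Sigma) on that subset, then all squared distances d_i,
## and return the indices of the h observations with smallest d_i.
cstep <- function(X, idx) {
    h     <- length(idx)
    mu    <- colMeans(X[idx, , drop = FALSE])
    Sigma <- cov(X[idx, , drop = FALSE])
    d2    <- mahalanobis(X, center = mu, cov = Sigma) # d_i(mu, Sigma)
    order(d2)[seq_len(h)]
}
@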
To make the algorithm \emph{fast} for non small sample size $n$ the data set is split into ``groups'' or ``sub-datasets'' as soon as \begin{eqnarray} \label{eq:nmini} n \ge 2 n_0, \ \mathrm{ where}\ \ n_0 := \mathtt{nmini} \ ( = 300, \byDef). \end{eqnarray} i.e., the default cutoff for ``non small'' is at $n = 600$. %% The \emph{number} of such subsets in the original algorithm is maximally 5, and we now use \begin{eqnarray} \label{eq:kmini} k_M = \code{kmini} \ (= 5, \byDef), \end{eqnarray} as upper limit. As above, we assume from now on that $n \ge 2 n_0$, and let \begin{eqnarray} \label{eq:k-def} k := \floor[\Big]{\frac{n}{n_0}} \ge 2 \end{eqnarray} and now distinguish the two cases, \begin{eqnarray} \label{eq:cases} \begin{cases} A. & k < k_M \iff n < k_M \cdot n_0 \\ B. & k \ge k_M \iff n \ge k_M \cdot n_0 \end{cases} \end{eqnarray} \begin{description} \item[In case A] $k$ (\code{= ngroup}) subsets aka ``groups'' or ``sub datasets'' are used, $k \in\{2,3,\dots,k_M-1\}$, of group sizes $n_j$, $j=1,\dots,k$ (see below). Note that case~A may be empty because of $2 \le k < k_M$, namely if $k_M=2$. Hence, in case~A, we have $k_M \ge 3$. \item[in case B] $k_M$ (\code{= ngroup}) groups each of size $n_0$ are built and in the first stage, only a \emph{subset} of $k_M \cdot n_0 \le n$ observations is used. \end{description} In both cases, the disjoint groups (``sub datasets'') are chosen at random from the $n$ observations. %% For the group sizes for case~A, $n_j$, $j=1,\dots,k$, we have \begin{align} n_1 = \; & \floor[\Big]{\frac n k} = \floor[\bigg]{\frac{n}{\floor[\big]{\frac{n}{n_0}}}} \ \ (\; \ge n_0 \label{eq:n1})\\ n_j = \; & n_1,\hspace*{2.8em} j = 2,\dots,j_* \\ n_j = \; & n_1 + 1, \ \ \ j = j_* +1,\dots,k, \label{n1-plus-1}\\ & \qquad \mathrm{where}\ \ j_* := k - r \ \in \{1,\dots,k\}, \label{jstar}\\ & \qquad \mathrm{and}\ \ r := n - k n_1 = \label{r-rest} n - k\floor[\big]{\frac n k} \in \{0,1,\dots,k-1\}, \end{align} where the range of $j_*$, $1,\dots,k$ in (\ref{jstar}) is a consequence of the range of the integer division remainder $r \in \{0,1,\dots,k-1\}$ in (\ref{r-rest}). Consequently, (\ref{n1-plus-1}) maybe empty, namely iff $r=0$ ($\iff n = k \cdot n_1$ is a multiple of $k$): $j_* = k$, and all $n_j \equiv n_1$. Considering the range of $n_j$ in case~A, the minimum $n_1 \ge n_0$ in (\ref{eq:n1}) is easy to verify. What is the maximal value of $n_j$ , i.e., an upper bound for $n_{\max} := n_1+1 \ge \max_j n_j$? \ %% %% This is all correct but not useful: %% From (\ref{eq:n1}), $ n/k - 1 < n_1 \le n/k $, and %% from (\ref{eq:k-def}), $n/n_0 - 1 < k \le n/n_0$. %% Putting these two together, we get %% \begin{eqnarray} %% \label{eq:n1-ineq} %% \frac{n^2}{n_0} - 1 \le n/k - 1 < n_1 \le n/k < \frac{n n_0}{n - n_0}, %% \end{eqnarray} %% (the first $\le$ from $\frac{1}{k} \ge \frac{n_0}{n}$; the last $<$ from %% $\frac{1}{k} < \frac 1{n/n_0 -1} = \frac{n_0}{n-n_0}$.) Also, %% from (\ref{eq:k-def}), $n \ge k n_0$ and $n-n_0 \ge (k-1)n_0$ and since we %% are in case~A, $n < n_0 k_M$, which combines to %% \begin{eqnarray} %% \label{eq:nn0} %% \frac{n n_0}{n - n_0} < \frac{(n_0 k_M) n_0}{(k-1)n_0} = \frac{n_0 k_M}{k-1}. %% \end{eqnarray} Consider $n_{1,\max}(k) = \max_{n, \mathrm{given\ } k} n_1 = \max_{n, \mathrm{given\ } k} \floor{\frac n k}$. Given $k$, the maximal $n$ still fulfilling $\floor[\big]{\frac{n}{n_0}} = k$ is $n = (k+1)n_0 - 1$ where $\floor[\big]{\frac{n}{n_0}} = k + \floor[\big]{1 - \frac{1}{n_0}} = k$. 
Hence, $n_{1,\max}(k) =\floor[\big]{\frac{(k+1)n_0 - 1}{k}} = n_0 + \floor[\big]{\frac{n_0 - 1}{k}}$, and as $k \ge 2$, the maximum is at $k=2$, $\max n_1 = \max_k n_{1,\max}(k) = n_0 + \floor[\big]{\frac{n_0 - 1}{2}} = \floor[\big]{\frac{3 n_0 - 1}{2}}$. Taken together, as $n_j = n_1+1$ is possible, we have \begin{align} \label{eq:nj-range} n_0 \le & n_1 \le \floor[\Big]{\frac{3 n_0 - 1}{2}} \nonumber\\ n_0 \le & n_j \le \floor[\Big]{\frac{3 n_0 + 1}{2}}, \ \ j \ge 2. \end{align} Note that indeed, $\floor[\big]{\frac{3 n_0 + 1}{2}}$ is the length of the auxiliary vector \code{subndex} in the Fortran code. \bibliographystyle{chicago} \bibliography{robustbase} \end{document} robustbase/inst/doc/asymptotic.max.bias.Rdata0000644000176200001440000000774112321016053021021 0ustar liggesusersrobustbase/inst/doc/psi_functions.Rnw0000644000176200001440000004152413012615634017527 0ustar liggesusers\documentclass[11pt, a4paper]{article} \usepackage[a4paper, text={16cm,25cm}]{geometry} %\VignetteIndexEntry{Definitions of Psi-Functions Available in Robustbase} %\VignetteDepends{robustbase} \SweaveOpts{prefix.string=psi, eps=FALSE, pdf=TRUE, strip.white=true} \SweaveOpts{width=6, height=4.1, echo=FALSE, fig=TRUE} %% --------------------- !! \usepackage{amsmath} \usepackage{amsfonts}% \mathbb \usepackage{natbib} \usepackage[utf8]{inputenc} \newcommand{\abs}[1]{\left| #1 \right|} \DeclareMathOperator{\sign}{sign} \newcommand{\R}{\mathbb{R}} \newcommand{\code}[1]{\texttt{#1}} \newcommand*{\pkg}[1]{\texttt{#1}} \newtheorem{definition}{Definition} %% The following is R's share/texmf/Rd.sty \usepackage{color} \usepackage{hyperref} \definecolor{Blue}{rgb}{0,0,0.8} \definecolor{Red}{rgb}{0.7,0,0} \hypersetup{% hyperindex,% colorlinks={true},% pagebackref,% linktocpage,% plainpages={false},% linkcolor={Blue},% citecolor={Blue},% urlcolor={Red},% pdfstartview={Fit},% pdfview={XYZ null null null}% } <>= # set margins for plots options(SweaveHooks=list(fig=function() par(mar=c(3,3,1.4,0.7), mgp=c(1.5, 0.5, 0)))) ## x axis for plots: x.
<- seq(-5, 10, length=1501) require(robustbase) <>= source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) @% = ../inst/xtraR/plot-psiFun.R --> p.psiFun() --> robustbase:::matPlotPsi() {for nice legends; lines ..} \begin{document} \setkeys{Gin}{width=0.9\textwidth} \setlength{\abovecaptionskip}{-5pt} \title{Definitions of $\psi$-Functions Available in Robustbase} \author{Manuel Koller and Martin M\"achler} \maketitle \tableofcontents \section*{Preamble} Unless otherwise stated, the following definitions of functions are given by \citet[p. 31]{MarRMY06}, however our definitions differ sometimes slightly from theirs, as we prefer a different way of \emph{standardizing} the functions. To avoid confusion, we first define $\psi$- and $\rho$-functions. \begin{definition}\label{def.psi} A \emph{$\psi$-function} is a piecewise continuous function $\psi: \R \to \R$ such that \begin{enumerate} \item $\psi$ is odd, i.e., \ $\psi(-x) = -\psi(x) \: \forall x$, \item $\psi(x) \ge 0$ for $x \ge 0$, and $\psi(x) > 0$ for $0 < x < x_r := \sup\{\tilde x : \psi(\tilde x) > 0\}$ \ \ ($x_r > 0$, possibly $x_r = \infty$). \item[3*] Its slope is $1$ at $0$, i.e., $\displaystyle \psi'(0) = 1$. \end{enumerate} Note that `3*' is not strictly required mathematically, but we use it for standardization in those cases where $\psi$ is continuous at 0. Then, it also follows (from 1.) that $\psi(0) = 0$, and we require $\psi(0)=0$ also for the case where $\psi$ is discontinuous in 0, as it is, e.g., for the M-estimator defining the median. \end{definition} \begin{definition} A \emph{$\rho$-function} can be represented by the following % definite integral of a $\psi$-function, \begin{equation}\label{def.rho} \rho(x) = \int_0^x \psi(u) du\;, \end{equation} which entails that $\rho(0) = 0$ and $\rho$ is an even function. \end{definition} A $\psi$-function is called \emph{redescending} if $\psi(x) = 0$ for all $x \ge x_r$ for $x_r < \infty$, and $x_r$ is often called \emph{rejection point}. Corresponding to a redescending $\psi$-function, we define the function $\tilde\rho$, a version of $\rho$ standardized such as to attain maximum value one. Formally, \begin{equation} \label{eq:tilde-rho} \tilde\rho(x) = \rho(x)/\rho(\infty). \end{equation} Note that $\rho(\infty) = \rho(x_r) \equiv \rho(x) \ \forall \abs{x} >= x_r$. $\tilde\rho$ is a $\rho$-function as defined in \citet{MarRMY06} and has been called $\chi$ function in other contexts. For example, in package \pkg{robustbase}, \code{Mchi(x, *)} computes $\tilde\rho(x)$, whereas \code{Mpsi(x, *, deriv=-1)} (``(-1)-st derivative'' is the primitive or antiderivative)) computes $\rho(x)$, both according to the above definitions. \textbf{Note:} An alternative slightly more general definition of \emph{redescending} would only require $\rho(\infty) := \lim_{x\to\infty}\rho(x)$ to be finite. E.g., \texttt{"Welsh"} does \emph{not} have a finite rejection point, but \emph{does} have bounded $\rho$, and hence well defined $\rho(\infty)$, and we \emph{can} use it in \texttt{lmrob()}.\footnote{E-mail Oct.~18, 2014 to Manuel and Werner, proposing to change the definition of ``redescending''.} %% \section{Weak Redescenders} %% \subsection{t_nu score functions} %% t_1 (=Cauchy) has been propagated as "Lorentzian merit function" %% regression for outlier detection \paragraph{Weakly redescending $\psi$ functions.}\ Note that the above definition does require a finite rejection point $x_r$. 
Consequently, e.g., the score function $s(x) = -f'(x)/f(x)$ for the Cauchy ($= t_1$) distribution, which is $s(x) = 2x/(1+x^2)$ and hence non-monotone and ``re descends'' to 0 for $x\to \pm\infty$, and $\psi_C(x) := s(x)/2$ also fulfills ${\psi_C}'(0) = 1$, but it has $x_r=\infty$ and hence $\psi_C()$ is \emph{not} a redescending $\psi$-function in our sense. As they appear e.g. in the MLE for $t_\nu$, we call $\psi$-functions fulfulling $\lim_{x\to\infty}\psi(x) = 0$ \emph{weakly redescending}. Note that they'd naturally fall into two sub categories, namely the one with a \emph{finite} $\rho$-limit, i.e. $\rho(\infty) := \lim_{x\to\infty}\rho(x)$, and those, as e.g., the $t_\nu$ score functions above, for which $\rho(x)$ is unbounded even though $\rho' = \psi$ tends to zero. %% --> ../../TODO section 'Psi/Rho/Chi/Wgt Functions' %% ~~~~~~~~~~ %% %% FIXME: where?? MM: can no longer find it in Hampel et al(1986) \citet{hamfrrs86}. %% FIXME: 0) Mention our psi_func class // and the C interface for "the other" functions %% ----- i.e., we currently have *both* and in addition there is all %% the (to be *deprecated* !) ../R/biweight-funs.R (& ../man/tukeyChi.Rd & ../man/tukeyPsi1.Rd) %% %% FIXME: 1) explain plot() {the plot method of psi_func} %% FIXME: 2) Show how to compute asymptotic efficiency and breakdown point: %% ------- %% a) end of ../../tests/psi-rho-etc.R has aeff.P() and bp.P() and chkP() %% which now uses the psi_func class to compute these *analytically* %% b) Of course, Manuel had used the numeric integration only, %% in ../../R/lmrob.MM.R, lmrob.efficiency(psi, cc, ...) and lmrob.bp(psi, cc, ...) %% ~~~~~~~~~~~~~~~~~~ %% c) *REALLY* nice general solution is via PhiI() in ../../R/psi-rho-funs.R %% for all piecewise polynomial psi()/rho() ~~~~~~~~~~~~~~~~~~~~~~ %%\clearpage \section{Monotone $\psi$-Functions} Montone $\psi$-functions lead to convex $\rho$-functions such that the corresponding M-estimators are defined uniquely. Historically, the ``Huber function'' has been the first $\psi$-function, proposed by Peter Huber in \citet{HubP64}. \clearpage \subsection{Huber} The family of Huber functions is defined as, \begin{align*} \rho_k(x) = {}& \left\{ \begin{array}{ll} \frac{1}{2} x^2 & \mbox{ if } \abs{x} \leq k \\ k(\abs{x} - \frac{k}{2})& \mbox{ if } \abs{x} > k \end{array} \right. \;,\\ \psi_k(x) = {} & \left\{ \begin{array}{ll} x & \mbox{ if } \abs{x} \leq k \\ k \ \sign(x)& \mbox{ if } \abs{x} > k %% -k & \mbox{ if } x < -k \\ %% k & \mbox{ if } x > k \end{array} \right. \;. \end{align*} The constant $k$ for $95\%$ efficiency of the regression estimator is $1.345$. \begin{figure}[h] \centering <>= plot(huberPsi, x., ylim=c(-1.4, 5), leg.loc="topright", main=FALSE) @ \caption{Huber family of functions using tuning parameter $k = 1.345$.} \end{figure} \bigskip \section{Redescenders} For the MM-estimators and their generalizations available via \texttt{lmrob()} (and for some methods of \texttt{nlrob()}), the $\psi$-functions are all redescending, i.e., with finite ``rejection point'' $x_r = \sup\{t; \psi(t) > 0\} < \infty$. From \texttt{lmrob}, the psi functions are available via \texttt{lmrob.control}, or more directly, \texttt{.Mpsi.tuning.defaults}, <>= names(.Mpsi.tuning.defaults) @ %$ and their $\psi$, $\rho$, $\psi'$, and weight function $w(x) := \psi(x)/x$, are all computed efficiently via C code, and are defined and visualized in the following subsections. 
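For instance (an illustrative chunk added here; the chunk label, \code{x0}, and \code{cc} are ours), $\psi$, $\tilde\rho$, and the weights $w(x) = \psi(x)/x$ can be evaluated directly:
<<M-fun-example, echo=TRUE, fig=FALSE>>=
cc <- .Mpsi.tuning.default("bisquare") # default 95%-efficiency constant
x0 <- c(0.5, 2, 5)
cbind(psi = Mpsi(x0, cc, "bisquare"),  # psi(x)
      chi = Mchi(x0, cc, "bisquare"),  # rho~(x), standardized to maximum 1
      wgt = Mwgt(x0, cc, "bisquare"))  # psi(x) / x
@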
\clearpage \subsection{Bisquare} Tukey's bisquare (aka ``biweight'') family of functions is defined as, \begin{equation*} \tilde\rho_k(x) = \left\{ \begin{array}{cl} 1 - \bigl(1 - (x/k)^2 \bigr)^3 & \mbox{ if } \abs{x} \leq k \\ 1 & \mbox{ if } \abs{x} > k \end{array} \right.\;, \end{equation*} with derivative ${\tilde\rho_k}'(x) = 6\psi_k(x) / k^2$ where, \begin{equation*} \psi_k(x) = x \left( 1 - \left(\frac{x}{k}\right)^2\right)^2 \cdot I_{\{\abs{x} \leq k\}}\;. \end{equation*} The constant $k$ for $95\%$ efficiency of the regression estimator is $4.685$ and the constant for a breakdown point of $0.5$ of the S-estimator is $1.548$. Note that the \emph{exact} default tuning constants for M- and MM- estimation in \pkg{robustbase} are available via \code{.Mpsi.tuning.default()} and \code{.Mchi.tuning.default()}, respectively, e.g., here, % \begin{small} <>= print(c(k.M = .Mpsi.tuning.default("bisquare"), k.S = .Mchi.tuning.default("bisquare")), digits = 10) @ % \end{small} and that the \code{p.psiFun(.)} utility is available via %\begin{small} <>= <> @ %\end{small} %\enlargethispage{3ex} \begin{figure}[h] \centering <>= p.psiFun(x., "biweight", par = 4.685) @ \caption{Bisquare family functions using tuning parameter $k = 4.685$.} \end{figure} \clearpage \subsection{Hampel} The Hampel family of functions \citep{hamfrrs86} is defined as, \begin{align*} \tilde\rho_{a, b, r}(x) ={}& \left\{ \begin{array}{ll} \frac{1}{2} x^2 / C & \abs{x} \leq a \\ \left( \frac{1}{2}a^2 + a(\abs{x}-a)\right) / C & a < \abs{x} \leq b \\ \frac{a}{2}\left( 2b - a + (\abs{x} - b) \left(1 + \frac{r - \abs{x}}{r-b}\right) \right) / C & b < \abs{x} \leq r \\ 1 & r < \abs{x} \end{array} \right. \;, \\ \psi_{a, b, r}(x) ={}& \left\{ \begin{array}{ll} x & \abs{x} \leq a \\ a \ \sign(x) & a < \abs{x} \leq b \\ a \ \sign(x) \frac{r - \abs{x}}{r - b}& b < \abs{x} \leq r \\ 0 & r < \abs{x} \end{array} \right.\;, \end{align*} where $ C := \rho(\infty) = \rho(r) = \frac{a}{2}\left( 2b - a + (r - b) \right) = \frac{a}{2}(b-a + r)$. As per our standardization, $\psi$ has slope $1$ in the center. The slope of the redescending part ($x\in[b,r]$) is $-a/(r-b)$. If it is set to $-\frac 1 2$, as recommended sometimes, one has \begin{equation*} r = 2a + b\;. \end{equation*} Here however, we restrict ourselves to $a = 1.5 k$, $b = 3.5 k$, and $r = 8k$, hence a redescending slope of $-\frac 1 3$, and vary $k$ to get the desired efficiency or breakdown point. The constant $k$ for $95\%$ efficiency of the regression estimator is $0.902$ (0.9016085, to be exact) and the one for a breakdown point of $0.5$ of the S-estimator is $0.212$ (i.e., 0.2119163). %% --> ../R/lmrob.MM.R, .Mpsi.tuning.defaults .Mchi.tuning.defaults \begin{figure}[h] \centering <>= ## see also hampelPsi p.psiFun(x., "Hampel", par = ## Default, but rounded: round(c(1.5, 3.5, 8) * 0.9016085, 1)) @ \caption{Hampel family of functions using tuning parameters $0.902 \cdot (1.5, 3.5, 8)$.} \end{figure} \clearpage \subsection{GGW}\label{ssec:ggw} The Generalized Gauss-Weight function, or \emph{ggw} for short, is a generalization of the Welsh $\psi$-function (subsection \ref{ssec:Welsh}). In \citet{ks2011} it is defined as, \begin{equation*} %% \label{eq:ggw} \psi_{a, b, c}(x) = \left\{ \begin{array}{ll} x & \abs{x} \leq c \\ \exp\left(-\frac{1}{2}\frac{(\abs{x} - c)^b}{a}\right)x & \abs{x} > c \end{array} \right. \;. 
\end{equation*} Our constants, fixing $b=1.5$, and minimial slope at $- \frac 1 2$, for $95\%$ efficiency of the regression estimator are $a = 1.387$, $b = 1.5$ and $c = 1.063$, and those for a breakdown point of $0.5$ of the S-estimator are $a = 0.204$, $b = 1.5$ and $c = 0.296$: <>= cT <- rbind(cc1 = .psi.ggw.findc(ms = -0.5, b = 1.5, eff = 0.95 ), cc2 = .psi.ggw.findc(ms = -0.5, b = 1.5, bp = 0.50)); cT @ Note that above, \code{cc*[1]}$= 0$, \code{cc*[5]}$ = \rho(\infty)$, and \code{cc*[2:4]}$ = (a, b, c)$. To get this from $(a,b,c)$, you could use <>= ipsi.ggw <- .psi2ipsi("GGW") # = 5 ccc <- c(0, cT[1, 2:4], 1) integrate(.Mpsi, 0, Inf, ccc=ccc, ipsi=ipsi.ggw)$value # = rho(Inf) @ \begin{figure}[h] \centering <>= p.psiFun(x., "GGW", par = c(-.5, 1, .95, NA)) @ \caption{GGW family of functions using tuning parameters $a=1.387$, $b=1.5$ and $c=1.063$.} \end{figure} \clearpage \subsection{LQQ} The ``linear quadratic quadratic'' $\psi$-function, or \emph{lqq} for short, was proposed by \citet{ks2011}. It is defined as, \begin{equation*} \psi_{b,c,s}(x) = \left\{ \begin{array}{ll} x & \abs{x} \leq c \\ \sign(x)\left(\abs{x} - \frac{s}{2b}\left(\abs{x} - c\right)^2 \right) & c < \abs{x} \leq b + c \\ \sign(x)\left(c+b-\frac{bs}{2} + \frac{s-1}{a} \left(\frac{1}{2}\tilde x^2 - a\tilde x\right) \right) & b + c < \abs{x} \leq a + b + c \\ 0 & \mbox{otherwise,} \end{array} \right. \end{equation*} where \begin{equation} \tilde x := \abs{x} - b - c \ \ \mathrm{and}\ \ a := (2c + 2b - bs)/(s-1).\label{lqq.a} \end{equation} The parameter $c$ determines the width of the central identity part. The sharpness of the bend is adjusted by $b$ while the maximal rate of descent is controlled by $s$ ($s = 1 - \min_x\psi'(x) > 1$). From (\ref{lqq.a}), the length $a$ of the final descent to $0$ is a function of $b$, $c$ and $s$. <>= cT <- rbind(cc1 = .psi.lqq.findc(ms= -0.5, b.c = 1.5, eff=0.95, bp=NA ), cc2 = .psi.lqq.findc(ms= -0.5, b.c = 1.5, eff=NA , bp=0.50)) colnames(cT) <- c("b", "c", "s"); cT @ If the minimal slope is set to $-\frac 1 2$, i.e., $s = 1.5$, and $b/c = 3/2 = 1.5$, the constants for $95\%$ efficiency of the regression estimator are $b=1.473$, $c=0.982$ and $s=1.5$, and those for a breakdown point of $0.5$ of the S-estimator are $b=0.402$, $c=0.268$ and $s=1.5$. \begin{figure}[h] \centering <>= p.psiFun(x., "LQQ", par = c(-.5,1.5,.95,NA)) @ \caption{LQQ family of functions using tuning parameters $b=1.473$, $c=0.982$ and $s=1.5$.} \end{figure} \clearpage \subsection{Optimal} The optimal $\psi$ function as given by \citet[Section~5.9.1]{MarRMY06}, \begin{equation*} \psi_c(x) = \sign(x)\left(-\frac{\varphi'(\abs{x}) + c} {\varphi(\abs{x})}\right)_+\;, \end{equation*} where $\varphi$ is the standard normal density, $c$ is a constant and $t_+ := \max(t, 0)$ denotes the positive part of $t$. Note that the \pkg{robustbase} implementation uses rational approximations originating from the \pkg{robust} package's implementation. That approximation also avoids an anomaly for small $x$ and has a very different meaning of $c$. The constant for $95\%$ efficiency of the regression estimator is $1.060$ and the constant for a breakdown point of $0.5$ of the S-estimator is $0.405$. 
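As for the bisquare family above, the \emph{exact} defaults can be retrieved directly (an illustrative chunk, analogous to the bisquare one):
<<constants-optimal, echo=TRUE, fig=FALSE>>=
print(c(k.M = .Mpsi.tuning.default("optimal"),
        k.S = .Mchi.tuning.default("optimal")), digits = 10)
@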
\begin{figure}[h] \centering <>= p.psiFun(x., "optimal", par = 1.06, leg.loc="bottomright") @ \caption{`Optimal' family of functions using tuning parameter $c = 1.06$.} \end{figure} \clearpage \subsection{Welsh}\label{ssec:Welsh} The Welsh $\psi$ function is defined as, %% FIXME: REFERENCE MISSING %\def\xk{\frac{x}{k}} \def\xk{x/k} %\def\xkdt{-\frac{1}{2}\left(\xk\right)^2} \def\xkdt{- \left(\xk\right)^2 / 2} \begin{align*} \tilde\rho_k(x) ={}& 1 - \exp\bigl(\xkdt\bigr) \\ \psi_k(x) ={}& k^2\tilde\rho'_k(x) = x\exp\bigl(\xkdt\bigr) \\ \psi'_k(x) ={}& \bigl(1 - \bigl(\xk\bigr)^2\bigr) \exp\bigl(\xkdt\bigr) \end{align*} The constant $k$ for $95\%$ efficiency of the regression estimator is $2.11$ and the constant for a breakdown point of $0.5$ of the S-estimator is $0.577$. Note that GGW (subsection \ref{ssec:ggw}) is a 3-parameter generalization of Welsh, matching for $ b = 2 $, $ c = 0 $, and $ a = k^2$ (see R code there): <>= ccc <- c(0, a = 2.11^2, b = 2, c = 0, 1) (ccc[5] <- integrate(.Mpsi, 0, Inf, ccc=ccc, ipsi = 5)$value) # = rho(Inf) stopifnot(all.equal(Mpsi(x., ccc, "GGW"), ## psi[ GGW ](x; a=k^2, b=2, c=0) == Mpsi(x., 2.11, "Welsh")))## psi[Welsh](x; k) @ \begin{figure}[h] \centering <>= p.psiFun(x., "Welsh", par = 2.11) @ \caption{Welsh family of functions using tuning parameter $k = 2.11$.} \end{figure} \bibliographystyle{chicago} \bibliography{robustbase} \end{document} robustbase/inst/doc/simulation.functions.R0000644000176200001440000014752012174500122020466 0ustar liggesusers## Called from ./lmrob_simulation.Rnw ## ~~~~~~~~~~~~~~~~~~~~~ ########################################################################### ## 1. simulation helper functions ########################################################################### f.estname <- function(est = 'lmrob') ## Purpose: translate between 'estname' and actual function name, ## defaults to 'lmrob' ## f.lmRob is just a wrapper for lmRob, since there are some ## problems with the weight and weights arguments ## ---------------------------------------------------------------------- ## Arguments: est: name of estimator ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:36 switch(est, lm.rbase = 'lmrob', lm.robust = 'f.lmRob', rlm = 'rlm', lm = 'lm', est) f.errname <- function(err, prefix = 'r') ## Purpose: translate between natural name of distribution and ## R (r,p,q,d)-name ## ---------------------------------------------------------------------- ## Arguments: err: name of distribution ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:36 paste(prefix, switch(err,normal="norm", t="t", cauchy="cauchy",cnormal="cnorm", err),sep = '') f.requires.envir <- function(estname) ## Purpose: returns indicator on whether estname requires envir argument ## ---------------------------------------------------------------------- ## Arguments: estname: name of estimating function ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 7 Oct 2009, 09:34 switch(estname, f.lmrob.local = TRUE, FALSE) f..paste..list <- function(lst) if (length(lst) == 0) return("") else paste(names(lst),lst,sep='=',collapse=', ') f..split..str <- function(str) { litems <- strsplit(str,', ') lst <- lapply(litems, function(str) strsplit(str,'=')) rlst <- list() for (llst in lst) { lv <- vector() for (litem in llst) lv[litem[1]] <- litem[2] rlst <- c(rlst, list(lv)) } rlst } f.list2str 
<- function(lst, idx) ## Purpose: convert a list into a string that identifies the ## function and parameter configuration ## ---------------------------------------------------------------------- ## Arguments: lst: list or list of lists ## idx: only take the elements in idx ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 7 Oct 2009, 10:03 f..paste..list(if(missing(idx)) unlist(lst) else unlist(lst)[idx]) f.as.numeric <- function(val) { ## Purpose: convert value to numeric if possible ## ---------------------------------------------------------------------- ## Arguments: vec: value to convert ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 26 Oct 2009, 12:10 r <- suppressWarnings(as.numeric(val)) if (is.na(r)) { ## is character, try to convert to TRUE and FALSE return(switch(casefold(val), "true" = TRUE, "false" = FALSE, val)) } else return(r) } f.as.numeric.vectorized <- function(val) sapply(val, f.as.numeric) f.as.integer <- function(val) { ## Purpose: convert value to numeric if possible ## ---------------------------------------------------------------------- ## Arguments: vec: value to convert ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 26 Oct 2009, 12:10 r <- suppressWarnings(as.integer(val)) if (is.na(r)) { ## is character, try to convert to TRUE and FALSE return(switch(casefold(val), "true" = TRUE, "false" = FALSE, val)) } else return(r) } f.str2list <- function(str, splitchar = '\\.') { ## Purpose: inverse of f.list2str ## ---------------------------------------------------------------------- ## Arguments: str: string or list of strings produced with f.list2str ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 14:20 ## split input string or strings into a list of vectors lst <- f..split..str(as.character(str)) rlst <- list() ## walk list for (lv in lst) { lrlst <- list() ## for each element of the vector for (ln in names(lv)) { ## split lnames <- strsplit(ln, splitchar)[[1]] ## set either directly if (length(lnames) == 1) lrlst[ln] <- f.as.numeric(lv[ln]) ## or, if it contains a dot, as a sublist else { if (is.null(lrlst[[lnames[1]]])) lrlst[[lnames[1]]] <- list() lrlst[[lnames[1]]][paste(lnames[-1],collapse='.')] <- f.as.numeric(lv[ln]) } } rlst <- c(rlst, list(lrlst)) } rlst } f.round.numeric <- function(num, digits = 0) { ## round only numeric values in list idx <- sapply(num, is.numeric) ret <- num ret[idx] <- lapply(num[idx],round,digits=digits) ret } f.errs2str <- function(errs) { ## Purpose: convert list of errors into pretty strings ## ---------------------------------------------------------------------- ## Arguments: errs: estlist element errs ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 14:51 rv <- vector() for (lerr in errs) { rv <- c(rv, switch(lerr$err, normal = paste("N(",lerr$args$mean,",", lerr$args$sd,")", sep=""), set =, t = paste("t",lerr$args$df,sep=""), paste(lerr$err,"(",paste(f.round.numeric(lerr$args,2), collapse=","),")",sep=""))) } rv } f.procedures2str <- function(procs) { ## Purpose: convert procedures element in estlist to pretty data.frame ## ---------------------------------------------------------------------- ## Arguments: proc: estlist element procedures ## ---------------------------------------------------------------------- ## 
Author: Manuel Koller, Date: 8 Oct 2009, 14:57 rdf <- rep(" ",7) for (lproc in procs) { method <- if(is.null(lproc$args$method)) switch(lproc$estname, lm = 'lsq', "SM") else lproc$args$method cov <- switch(lproc$estname, ## lm.robust, rlm, lmrob: set default arguments lm.robust = list(cov = 'Default', cov.corrfact = 'empirical', cov.xwx = TRUE, cov.resid = 'trick', cov.hubercorr = TRUE, cov.dfcorr = 1), rlm = list(cov = 'Default', cov.corrfact = 'empirical', cov.xwx = FALSE, cov.resid = 'final', cov.hubercorr = TRUE, cov.dfcorr = 1), ## lmrob = list(cov = 'f.avar1', ## method .vcov.MM equals f.avar1 ## cov.resid = 'final'), lmrob = do.call('lmrob.control', ## get default arguments from lmrob.control lproc$args)[c('cov', 'cov.corrfact', 'cov.xwx', 'cov.resid', 'cov.hubercorr', 'cov.dfcorr')], if (is.null(lproc$args)) list(cov = 'Default') else lproc$args) if (is.null(lproc$args$psi)) { psi <- switch(lproc$estname, rlm =, lmrob = 'bisquare', lm.robust = { if (is.null(lproc$args$weight)) { if (is.null(lproc$args$weight2)) 'optimal' else lproc$args$weight2 } else lproc$args$weight[2] }, "NA") } else { psi <- lproc$args$psi ## test if tuning.psi is the default one if (!is.null(lproc$args$tuning.psi) && isTRUE(all.equal(lproc$args$tuning.psi, .Mpsi.tuning.default(psi)))) psi <- paste(psi, lproc$args$tuning.psi) } D.type <- switch(lproc$estname, lmrob.u =, lmrob = if (is.null(lproc$args$method) || lproc$args$method %in% c('SM', 'MM')) 'S' else 'D', lmrob.mar = if (is.null(lproc$args$type)) 'qE' else lproc$args$type, rlm = 'rlm', lm.robust = 'rob', lm = 'lm', 'NA') rdf <- rbind(rdf,c(lproc$estname, method, f.args2str(lproc$args), cov$cov, f.cov2str(cov), psi, D.type)) } colnames(rdf) <- c("Function", "Method", "Tuning", "Cov", "Cov.Tuning", "Psi", "D.type") if (NROW(rdf) == 2) t(rdf[-1,]) else rdf[-1,] } f.chop <- function(str,l=1) ## Purpose: chop string by l characters ## ---------------------------------------------------------------------- ## Arguments: str: string to chop ## l: number of characters to chop ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 15:19 substr(str,1,nchar(str)-l) fMpsi2str <- function(psi) { ## Purpose: make pretty M.psi and D.chi, etc. ## ---------------------------------------------------------------------- ## Arguments: M.psi: M.psi argument ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 15:28 if (is.null(psi)) psi else if (psi == "tukeyPsi1" || psi == "tukeyChi") "bisquare" else if (grepl("Psi1$",psi)) f.chop(psi,4) else if (grepl("Chi$",psi)) f.chop(psi,3) else psi } f.c.psi2str <- function(c.psi) { ## Purpose: make pretty tuning.psi and D.tuning.chi, etc. 
## ---------------------------------------------------------------------- ## Arguments: c.psi: tuning.psi argument ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 15:34 if (is.null(c.psi)) return(NULL) round(as.numeric(c.psi),2) } f.args2str <- function(args) { ## Purpose: convert args element in procedures element of estlist ## to a pretty string ## ---------------------------------------------------------------------- ## Arguments: args: args element in procedures element of estlist ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 15:11 lst <- list() lst$psi <- if (!is.null(args$weight)) args$weight[2] else if (!is.null(args$weight2)) args$weight2 else args$psi lst$c.psi <- if (!is.null(args$efficiency)) round(f.eff2c.psi(args$efficiency, lst$psi),2) else f.c.psi2str(args$tuning.psi) if (!is.null(args$method) && grepl("D",args$method)) { lst$D <- if (!is.null(args$D.type)) args$D.type else NULL lst$tau <- args$tau } f..paste..list(lst) } f.cov2str <- function(args) { ## Purpose: convert cov part in args element in procedures element of ## estlist to a pretty string ## ---------------------------------------------------------------------- ## Arguments: args: args element in procedures element of estlist ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 15:39 lst <- list() if (!is.null(args$cov) && !args$cov %in% c('Default','f.avarwh')) lst$cov <- sub('^f\\.', '', args$cov) else { lst$hc <- args$cov.hubercorr lst$dfc <- args$cov.dfcorr lst$r <- args$cov.resid lst$rtau <- args$cov.corrfact lst$xwx <- args$cov.xwx } ## convert logical to numeric lst <- lapply(lst, function(x) if (is.logical(x)) as.numeric(x) else x) f..paste..list(lst) } f.procstr2id <- function(procstrs, fact = TRUE) { ## Purpose: create short identifiers of procstrs ## ---------------------------------------------------------------------- ## Arguments: procstrs: vector of procstrs ## fact: convert to factor or not ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 3 Nov 2009, 08:58 lst0 <- f.str2list(procstrs) r <- sapply(lst0, function(x) { paste(c(x$estname, if (is.null(x$args$method)) NULL else x$args$method, substr(c(x$args$psi,x$args$weight2, x$args$weight[2]), 1, 3)), collapse = '.') }) if (fact) ru <- unique(r) if (fact) factor(r, levels = ru, labels = ru) else r } f.splitstrs <- function(strs, split = '_', ...) { ## Purpose: split vector of strings by split and convert the list into ## a data.frame with columns type and id ## ---------------------------------------------------------------------- ## Arguments: strs: vector of strings ## split: character vector to use for splitting ## ...: arguments to strsplit, see ?strsplit ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 19 Oct 2009, 08:46 lstr <- strsplit(strs, split, ...) 
ldf <- t(as.data.frame(lstr)) rownames(ldf) <- NULL as.data.frame(ldf, stringsAsFactors = FALSE) } f.abind <- function(arr1,arr2, along = ndim) { ## Purpose: like abind, but less powerful ## ---------------------------------------------------------------------- ## Arguments: arr1, arr2: arrays to bind ## along: dimension along to bind to, ## defaults to last dimension ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 20 Oct 2009, 11:33 ## if along =! last dimension: permutate array ndim <- length(dim(arr1)) if (along != ndim) { arr1 <- aperm(arr1, perm = c((1:ndim)[-along],along)) arr2 <- aperm(arr2, perm = c((1:ndim)[-along],along)) } ldmn1 <- dimnames(arr1) ldmn2 <- dimnames(arr2) ld1 <- dim(arr1) ld2 <- dim(arr2) if (length(ld1) != length(ld2)) stop('f.abind: Dimensions must be identical') if (!identical(ldmn1[-ndim],ldmn2[-ndim])) stop('f.abind: Dimnames other than in the along dimension must match exactly') if (any(ldmn1[[ndim]] %in% ldmn2[[ndim]])) stop('f.abind: Dimnames in along dimension must be unique') ldmn3 <- ldmn1 ldmn3[[ndim]] <- c(ldmn1[[ndim]], ldmn2[[ndim]]) ld3 <- ld1 ld3[ndim] <- ld1[ndim] + ld2[ndim] ## build array arr3 <- array(c(arr1, arr2), dim = ld3, dimnames = ldmn3) ## permutate dimensions back if (along != ndim) { lperm <- 1:ndim lperm[along] <- ndim lperm[(along+1):ndim] <- along:(ndim-1) arr3 <- aperm(arr3, perm = lperm) } arr3 } f.abind.3 <- function(...) f.abind(..., along = 3) f.rename.level <- function(factor, from, to) { ## Purpose: rename level in a factor ## ---------------------------------------------------------------------- ## Arguments: factor: factor variable ## from: level to be changed ## to: value ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 18 Aug 2010, 14:45 levels(factor)[levels(factor) == from] <- to factor } ########################################################################### ## 2. 
main simulation functions ########################################################################### f.sim <- function(estlist, .combine = 'f.abind', .combine.2 = 'f.abind.3', ## hack for foreach silent = TRUE) { ## Purpose: perform simulation according to estlist entry ec ## ---------------------------------------------------------------------- ## Arguments: ec: estlist, list consisting of: ## - design: data frame of design ## - nrep: number of repetitions ## - errs: list of error distributions including arguments ## - err: name of error distribution ## - args: list of arguments (to be passed to do.call() ## - procedures: list of parameter configurations and ## procedures to call ## - estname: name of estimation procedure ## - args: arguments that define the call ## silent: silent argument to try ## ---------------------------------------------------------------------- ## Author: Werner Stahel / Manuel Koller, Date: 21 Aug 2008, 07:55 ## get designs ldd <- estlist$design use.intercept <- if(is.null(estlist$use.intercept)) TRUE else estlist$use.intercept nobs <- NROW(ldd) npar <- NCOL(ldd) + use.intercept nrep <- estlist$nrep nlerrs <- nobs*nrep ## initialize: lestlist <- estlist ## 'evaluate' estlist$procedure list lprocs <- c() for (i in 1:length(estlist$procedures)) { ## generate lprocstr (identification string) lprocs[i] <- estlist[['procedures']][[i]][['lprocstr']] <- f.list2str(estlist[['procedures']][[i]]) } ## find all error distributions lerrs <- unique(sapply(lestlist$errs, f.list2str)) ## walk estlist$output to create output column names vector ## store result into lnames, it is used in f.sim.process lnames <- c() for (i in 1:length(estlist$output)) { llnames <- estlist[['output']][[i]][['lnames']] <- eval(estlist[['output']][[i]][['names']]) lnames <- c(lnames, llnames) } ## get different psi functions lpsifuns <- unlist(unique(lt <- sapply(estlist$procedures, function(x) x$args$psi))) ## get entries without psi argument lrest <- sapply(lt, is.null) if (sum(lrest) > 0) lpsifuns <- c(lpsifuns, '__rest__') ## Walk error distributions res <- foreach(lerrlst = estlist$errs, .combine = .combine) %:% foreach(lpsifun = lpsifuns, .combine = .combine.2) %dopar% { ## filter for psi functions lidx <- if (lpsifun == '__rest__') lrest else unlist(sapply(estlist$procedures, function(x) !is.null(x$args$psi) && x$args$psi == lpsifun)) cat(f.errs2str(list(lerrlst)), lpsifun, " ") ## get function name and parameters lerrfun <- f.errname(lerrlst$err) lerrpar <- lerrlst$args lerrstr <- f.list2str(lerrlst) ## --- initialize array lres <- array(NA, dim=c(nrep, ## data dimension length(lnames), ## output type dimension sum(lidx), ## estimation functions and arguments dimension 1), ## error distributions dimension dimnames = list(Data = NULL, Type = lnames, Procstr = lprocs[lidx], Errstr = lerrstr)) ## set seed set.seed(estlist$seed) ## generate errors: seperately for each repetition lerrs <- c(sapply(1:nrep, function(x) do.call(lerrfun, c(n = nobs, lerrpar)))) ## if estlist$design has an attribute 'gen' ## then this function gen will generate designs ## and takes arguments: n, p, rep ## and returns the designs in a list if (is.function(attr(ldd, 'gen'))) { ldds <- attr(ldd, 'gen')(nobs, npar - use.intercept, nrep, lerrlst) } ## Walk repetitions for (lrep in 1:nrep) { if (lrep%%100 == 0) cat(" ", lrep) lerr <- lerrs[(1:nobs)+(lrep-1)*nobs] if (exists('ldds')) { ldd <- ldds[[lrep]] ## f.sim.reset.envirs() } ## Walk estimator configurations for (lproc in estlist$procedures[lidx]) { ## call estimating 
procedure lrr <- tryCatch(do.call(f.estname(lproc$estname), c(if(use.intercept) list(lerr ~ . , data = ldd) else list(lerr ~ . - 1, data = ldd), lproc$args)), error=function(e)e) ERR <- inherits(lrr, 'error') if (ERR && !silent) { print(lproc$lprocstr) print(lrr) } if (!silent && !converged(lrr)) { print(lproc$lprocstr) browser() ## <<< } ## check class: if procedure failed: if (ERR) next ## check convergence of estimator if (!converged(lrr)) next ## process output for (lov in estlist$output) { llnames <- lov$lnames ret <- tryCatch(lres[lrep,llnames,lproc$lprocstr,lerrstr] <- eval(lov$fun), error= function(e)e) if (!silent && inherits(ret, 'error')) { cat('Error', dQuote(ret$message), 'in repetition',lrep, '\n for:',llnames,'procstr:',lproc$lprocstr,'\n') browser() ## <<< print(lov$fun) print(try(eval(lov$fun))) } } } } ## print debug information if requested if (!silent) str(lres) lres } ## restore original order of lprocs res <- res[,,match(lprocs, dimnames(res)[[3]]),,drop=FALSE] ## set attributes attr(res, 'estlist') <- lestlist cat("\n") res } ########################################################################### ## build estlist ########################################################################### f.combine <- function(..., keep.list = FALSE) { ## Purpose: creates a list of all combinations of elements given as ## arguments, similar to expand.grid. ## Arguments can be named. ## If an argument is a list, then its elements are considered ## as fixed objects that should not be recombined. ## if keep.list = TRUE, these elements are combined ## as a list with argument. ## ---------------------------------------------------------------------- ## Arguments: collection of lists or vectors with argument names ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 7 Oct 2009, 11:13 ## convert arguments into a big list args <- list(...) 
## if more than two arguments, call recursively if (length(args) > 2) lst <- do.call("f.combine", c(args[-1], list(keep.list=keep.list))) else { ## if just two arguments, create list of second argument ## if this is a list, then there's nothing to do if (!keep.list && is.list(args[[2]])) lst <- args[[2]] ## else convert to a list of one-elements lists with proper name else { lst <- list() for (lelem in args[[2]]) { llst <- list(lelem) if (!is.null(names(args)[2])) names(llst)[1] <- names(args)[2] lst <- c(lst, list(llst)) } } } ## ok, now we can add the first element to all elements of lst lst2 <- list() if (keep.list && is.list(args[[1]])) args[[1]] <- lapply(args[[1]], list) for (lelem in args[[1]]) { for (relem in lst) { llst <- c(lelem, relem) if (nchar(names(llst)[1]) == 0 && nchar(names(args)[1])>0) names(llst)[1] <- names(args)[1] lst2 <- c(lst2, list(llst)) } } lst2 } ## some fragments to build estlist ## errors .errs.normal.1 <- list(err = 'normal', args = list(mean = 0, sd = 1)) .errs.normal.2 <- list(err = 'normal', args = list(mean = 0, sd = 2)) .errs.t.13 <- list(err = 't', args = list(df = 13)) .errs.t.11 <- list(err = 't', args = list(df = 11)) .errs.t.10 <- list(err = 't', args = list(df = 10)) .errs.t.9 <- list(err = 't', args = list(df = 9)) .errs.t.8 <- list(err = 't', args = list(df = 8)) .errs.t.7 <- list(err = 't', args = list(df = 7)) .errs.t.5 <- list(err = 't', args = list(df = 5)) .errs.t.3 <- list(err = 't', args = list(df = 3)) .errs.t.1 <- list(err = 't', args = list(df = 1)) ## skewed t distribution .errs.skt.Inf.2 <- list(err = 'cskt', args = list(df = Inf, gamma = 2)) .errs.skt.5.2 <- list(err = 'cskt', args = list(df = 5, gamma = 2)) ## log normal distribution .errs.lnrm <- list(err = 'lnorm', args = list(meanlog = 0, sdlog = 0.6936944)) ## laplace distribution .errs.laplace <- list(err = 'laplace', args = list(location = 0, scale = 1/sqrt(2))) ## contaminated normal .errs.cnorm..1.0.10 <- list(err = 'cnorm', args = list(epsilon = 0.1, meanc = 0, sdc = sqrt(10))) .errs.cnorm..1.4.1 <- list(err = 'cnorm', args = list(epsilon = 0.1, meanc = 4, sdc = 1)) .errs.test <- list(.errs.normal.1 ,.errs.t.5 ,.errs.t.3 ,.errs.t.1 ) ## arguments .args.final <- f.combine(psi = c('optimal', 'bisquare', 'lqq', 'hampel'), seed = 0, max.it = 500, k.max = 2000, c(list(list(method = 'MM', cov = '.vcov.avar1')), list(list(method = 'MM', cov = '.vcov.w', start = 'lrr')), f.combine(method = c('SMD', 'SMDM'), cov = '.vcov.w', start = 'lrr'))) ## use fixInNamespace("lmrob.fit", "robustbase") ## insert: ## N = { ## tmp <- lmrob..M..fit(x = x/init$tau, y = y/init$tau, obj = ## init) ## tmp$qr <- NULL ## tmp ## }, ## .args.final <- f.combine(psi = c('optimal', 'bisquare', 'ggw', 'lqq'), ## seed = 0, ## max.it = 500, ## k.max = 2000, ## c(list(list(method = "SMDM", cov = '.vcov.w')), ## list(list(method = "SMDN", cov = '.vcov.w', ## start = 'lrr')))) ## standard for lmRob .args.bisquare.lmRob.0 <- list(## initial.alg = 'random', efficiency = 0.95 ,weight = c('bisquare', 'bisquare'), trace = FALSE ) .args.optimal.lmRob.0 <- list(## initial.alg = 'random', efficiency = 0.95 ,weight = c('optimal', 'optimal'), trace = FALSE) .procedures.final <- c(list(list(estname = 'lm')), f.combine(estname = 'lmrob.u', args = .args.final, keep.list = TRUE), f.combine(estname = 'lmrob.mar', args = f.combine(psi = 'bisquare', seed = 0, max.it = 500, k.max = 2000, cov = '.vcov.w', type = c('qT', 'qE')), keep.list = TRUE), f.combine(estname = 'lm.robust', args = list(.args.bisquare.lmRob.0, 
.args.optimal.lmRob.0), keep.list = TRUE)) ## output .output.sigma <- list(sigma = list( names = quote("sigma"), fun = quote(sigma(lrr)))) .output.beta <- list(beta = list( names = quote(paste('beta',1:npar,sep='_')), fun = quote(coef(lrr)))) .output.se <- list(se = list( names = quote(paste('se',1:npar,sep='_')), fun = quote(sqrt(diag(covariance.matrix(lrr)))))) .output.sumw <- list(sumw = list( names = quote("sumw"), fun = quote(sum(robustness.weights(lrr))))) .output.nnz <- list(nnz = list( names = quote("nnz"), fun = quote(sum(robustness.weights(lrr) < 1e-3)))) ########################################################################### ## simulation results processing functions ########################################################################### ## use apply to aggregate data ## use matplot(t(result)) to plot aggregated data f.apply <- function(res, items = dimnames(res)[[2]], FUN, ..., swap = FALSE) { ## Purpose: similar to apply, return data not as matrix, but ## as data.frame ## ---------------------------------------------------------------------- ## Arguments: res: simulation results array ## items: items to use in apply ## FUN: function to apply ## ...: additional arguments to FUN ## swap: if TRUE: swap first two columns ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 13:39 ## aggregate data lz <- apply(res[,items,,,drop=FALSE], 2:4, FUN, ...) ## if return object has four dimensions (multidim output of FUN) ## rotate first three dimensions if (length(dim(lz)) == 4 && swap) aperm(lz, perm=c(2,1,3,4)) else lz } f.dimnames2df <- function(arr, dm = dimnames(arr), page = TRUE, err.on.same.page = TRUE, value.col = ndim - 2, procstr.col = ndim - 1, errstr.col = ndim, procstr.id = TRUE, split = '_') { ## Purpose: create data frame from dimnames: ## len_1 .. len_100, cpr_1 .. cpr_100 ## will yield a data frame with column id from 1 .. 
100 ## column type with cpr and len and columns procstr and errstr ## It is assumed, that the max number (100) is the same for all ## output value types ## ---------------------------------------------------------------------- ## Arguments: arr: 3 or more dim array (optional) ## dm: dimnames to be used ## page: add a column page to simplify plots ## err.on.same.page: whether all errs should be on the same ## page ## value.col: index of value column (set to NULL for none) ## the values in this column are split name_id ## and put into two columns in the data frame ## procstr.col: index of procedure column ## (both: or NULL for not to be converted) ## errstr.col: index of error string column ## procstr.id: create procstr id ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 19 Oct 2009, 08:41 if (!is.list(dm)) stop('f.dimnames2df: dm must be a list') ## remove 'NULL' dimensions dm <- dm[!sapply(dm,is.null)] ndim <- length(dm) if (ndim == 0) stop('f.dimnames2df: dimnames all null') ldims <- sapply(dm, length) ## split and convert types into data.frame if (!is.null(value.col)) { ldf <- f.splitstrs(dm[[value.col]], split = split) lid <- NCOL(ldf) == 2 if (lid) lids <- unique(as.numeric(ldf[,2])) ## convert ids into numeric ## we do not need to repeat over different types of values, only ids ldims[value.col] <- ldims[value.col] / length(unique(ldf[,1])) } ## merge into one large data.frame: for each distribution rdf <- list() for (ld in 1:ndim) { lname <- if (is.null(lname <- names(dm)[ld])) length(rdf)+1 else lname ltimes <- if (ld == ndim) 1 else prod(ldims[(ld+1):ndim]) leach <- if (ld == 1) 1 else prod(ldims[1:(ld-1)]) if (!is.null(value.col) && ld == value.col) { if (lid) rdf[[paste(lname,'Id')]] <- rep(lids,times=ltimes,each=leach) ## value ids ## no else: the values will be added in the a2df procedures } else if (!is.null(procstr.col) && ld == procstr.col) { ## convert procstrs to data.frame with pretty names lprdf <- data.frame(f.procedures2str(f.str2list(dm[[ld]])), Procstr = factor(dm[[ld]], levels = dm[[ld]], labels = dm[[ld]])) if (procstr.id) lprdf$PId <- f.procstr2id(dm[[ld]]) ## repeat lprdf <- if (ltimes == 1 && leach == 1) lprdf else apply(lprdf,2,rep,times=ltimes,each=leach) lprdf <- as.data.frame(lprdf, stringsAsFactors=FALSE) ## convert all into nice factors (with the original ordering) for (lk in colnames(lprdf)) { luniq <- unique(lprdf[[lk]]) lprdf[[lk]] <- factor(lprdf[[lk]], levels = luniq, labels = luniq) } rdf <- c(rdf, lprdf) } else if (!is.null(errstr.col) && ld == errstr.col) { ## convert errstrs to data.frame with pretty names ledf <- f.errs2str(f.str2list(dm[[ld]])) ## repeat and convert to factor with correct ordering rdf[[lname]] <- factor(rep(dm[[ld]],times=ltimes,each=leach), levels = dm[[ld]], labels = dm[[ld]]) rdf[['Error']] <- factor(rep(ledf,times=ltimes,each=leach), levels = ledf, labels = ledf) } else { ## no conversion necessary rdf[[lname]] <- rep(dm[[ld]],times=ltimes,each=leach) } } ## add page argument if (page && !is.null(procstr.col)) { ltpf <- if (!is.null(errstr.col) && !err.on.same.page) interaction(rdf[['Procstr']],rdf[['Error']]) else interaction(rdf[['Procstr']]) rdf[['Page']] <- as.numeric(factor(ltpf, unique(ltpf))) } rdf <- as.data.frame(rdf) if (!is.null(value.col)) attr(rdf, 'Types') <- unique(ldf[,1]) rdf } f.a2df.2 <- function(arr, dm = dimnames(arr), err.on.same.page = FALSE, ...) 
{ ## Purpose: convert arr to data.frame ## uses f.dimnames2df and adds a column to contain the values ## if ndim == 4 and dimnames NULL: assumes first dimension is ## data dimension which is ignored by f.dimnames2df ## add counter ## ---------------------------------------------------------------------- ## Arguments: arr: array to convert ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 23 Oct 2009, 12:29 ## ndim == 2 ?? ndim <- length(dim(arr)) ## if ndim == 4: check if dimnames of dim 1 are NULL if (ndim == 4 && is.null(dm[[1]])) dm[[1]] <- 1:dim(arr)[1] rdf <- f.dimnames2df(dm=dm, ...) ## just add values for all 'Types', possibly including Type.ID if (ndim > 2) for (lvt in attr(rdf, 'Types')) { llvt <- if (is.null(rdf$Type.Id)) lvt else paste(lvt,unique(rdf$Type.Id),sep='_') rdf[[lvt]] <- as.vector(switch(ndim, stop('wrong number of dimensions'), ## 1 arr, ## 2 arr[llvt,,], ## 3 arr[,llvt,,])) ## 4 } else rdf$values <- as.vector(arr) rdf } f.dimnames2pc.df <- function(arr, dm = dimnames(arr), npcs = NCOL(estlist$design.predict), ...) { ## Purpose: create data frame to be used in plotting of pc components ## calls f.dimnames2df and adds an additional column for ## identifying the principal components ## ---------------------------------------------------------------------- ## Arguments: arr, dm: see f.dimnames.df ## npcs: number of principal components ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 23 Oct 2009, 11:51 if (missing(npcs) && !is.null(attr(estlist$design.predict, 'npcs'))) npcs <- attr(estlist$design.predict, 'npcs') ## convert into data.frame rdf <- f.dimnames2df(dm = dm, ...) ## calculate number of points per principal component npts <- (length(unique(rdf$Type.Id)) - 1) / npcs ## add new column pc rdf$PC <- 1 if (npcs > 1) for (li in 2:npcs) { lids <- (1:npts + npts*(li-1) + 1) rdf$PC[rdf$Type.Id %in% lids] <- li ## fixme: center is not repeated } rdf$PC <- factor(rdf$PC, levels = 1:npcs, labels = paste('PC',1:npcs,sep=' ')) rdf } f.a2pc.df <- function(arr, ...) { ## Purpose: convert arr to data.frame ## uses f.dimnames2pc.df and adds a column to contain the values ## ---------------------------------------------------------------------- ## Arguments: arr: array to convert ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 23 Oct 2009, 12:29 ## convert dimnames rdf <- f.dimnames2pc.df(arr, err.on.same.page = FALSE,...) ## add values for (lvt in attr(rdf, 'Types')) rdf[[lvt]] <- as.vector(arr[paste(lvt,unique(rdf$Type.Id),sep='_'),,]) ## repeat values: only PC_1 has center value, add it for other PCs ## build index idx <- 1:NROW(rdf) rpc <- as.character(rdf$PC) for (lerr in levels(rdf$Error)) { for (lprc in levels(rdf$Procstr)) { for (lpc in levels(rdf$PC)) { if (lpc == 'PC 1') next ## get first entry of this PC lmin <- min(which(rdf$Error == lerr & rdf$Procstr == lprc & rdf$PC == lpc)) ## where is this in idx? 
lwm <- min(which(lmin == idx)) ## get first entry of PC_1 lmin1 <- min(which(rdf$Error == lerr & rdf$Procstr == lprc & rdf$PC == 'PC 1')) ## update idx idx <- c(idx[1:(lwm-1)], lmin1, idx[lwm:length(idx)]) ## update PC column of result rpc <- c(rpc[1:(lwm-1)], lpc, rpc[lwm:length(rpc)]) } } } ## repeat centers rdf <- rdf[idx,] ## update PC column rdf$PC <- factor(rpc) ## return rdf } f.calculate <- function(expr,arr,dimname = as.character(expr)) { ## Purpose: calculate formula and return as conformable array ## ---------------------------------------------------------------------- ## Arguments: expr: expression to calculate (string is also ok) ## arr: array (from f.sim) ## dimname: name of the calculated dimension ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 9 Oct 2009, 10:15 if (!is.expression(expr)) expr <- as.expression(expr) lnams <- dimnames(arr)[[2]] lst <- list() for (lnam in lnams) expr <- gsub(paste(lnam,'\\b',sep=''), paste("arr[,",lnam,",,,drop=FALSE]",sep='"'), expr) r <- eval(parse(text = expr)) dimnames(r)[[2]] <- dimname r ## maybe use abind to merge the two arrays? } f.calculate.many <- function(expr, arr, dimname = dims, dims) { ## Purpose: calculate formula and abind into array ## supply expr as string with # symbols to be replaced ## dimname can also contain # symbols ## ---------------------------------------------------------------------- ## Arguments: same as f.calculate and ## dims: vector of items to replace # with ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 14 Oct 2009, 10:11 for (i in 1:length(dims)) { lexpr <- gsub("#",dims[i],expr) ldimname <- if (length(dimname) > 1) dimname[i] else gsub("#",dims[i],dimname) if (i == 1) rarr <- f.calculate(lexpr,arr,ldimname) else rarr <- abind(rarr, f.calculate(lexpr,arr,ldimname), along=2) } rarr } f.errs <- function(estlist, err, rep, gen = NULL, nobs, npar) { ## Purpose: generate and return errors of specified repetition ## or, if missing, all errors as a matrix ## ---------------------------------------------------------------------- ## Arguments: estlist: estlist ## err: error distribution (estlist$errs[1] for example) ## rep: desired repetition (optional) ## gen: function to generate designs (optional) ## nobs: nr. rows, npap: nr. 
predictors (both optional) ## --------------------------------------------------------------------- ## Author: Manuel Koller, Date: 13 Oct 2009, 11:21 nobs <- NROW(estlist$design) nrep <- estlist$nrep nlerrs <- nobs*nrep npred <- NROW(estlist$design.predict) ## get function name and parameters lerrfun <- f.errname(err$err) lerrpar <- err$args lerrstr <- f.list2str(err) ## set seed set.seed(estlist$seed) ## generate errors: seperately for each repetition lerrs <- c(sapply(1:nrep, function(x) do.call(lerrfun, c(n = nobs, lerrpar)))) ## lerrs <- do.call(lerrfun, c(n = nlerrs, lerrpar)) ## to get to the same seed state as f.sim(.default) ## generate also the additional errors ## calculate additional number of errors for (i in 1:length(estlist$output)) { if (!is.null(estlist[['output']][[i]][['nlerrs']])) nlerrs <- nlerrs + eval(estlist[['output']][[i]][['nlerrs']]) } if (length(lerrs) < nlerrs) nowhere <- do.call(lerrfun, c(n = nlerrs - length(lerrs), lerrpar)) ## generate designs if (!is.null(gen) && is.function(gen)) { ldds <- gen(nobs, npar, nrep, err) } ## return errors ret <- if (!missing(rep)) lerrs[1:nobs+(rep-1)*nobs] else matrix(lerrs, nobs) if (exists('ldds')) attr(ret, 'designs') <- if (!missing(rep)) ldds[[i]] else ldds ret } f.selection <- function(procstrs = dimnames(r.test)[[3]], what = c('estname', 'args.method', 'args.psi', 'args.tuning.psi', 'args.type', 'args.weight2', 'args.efficiency'), restr = '') { ## Purpose: get selection of results: first one of the specified estimates ## ---------------------------------------------------------------------- ## Arguments: procstrs: what is the selection ## what: named vector to use in grep ## restr: do not select estimators with procstr ## that match this regexp parameters ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 2 Nov 2009, 09:06 ## match restrictions lrestr <- -(lall <- 1:length(procstrs)) ## no restrictions if (!missing(restr)) { lrestr <- grep(restr, procstrs) if (length(lrestr) == 0) lrestr <- -lall procstrs <- procstrs[-lrestr] } ## procstr2list, but do not split into sublists lproclst <- f.str2list(procstrs, splitchar='_____') ## helper function: select only items that occur what tfun <- function(x) x[what] lproclst <- lapply(lproclst, tfun) ## convert back to string lprocstr <- sapply(lproclst, f.list2str) ## get all unique combinations and the first positions lidx <- match(unique(lprocstr), lprocstr) r <- procstrs[lidx] attr(r, 'idx') <- lall[-lrestr][lidx] r } f.get.current.dimnames <- function(i,dn,margin) { ## Purpose: get current dimnames in the margins of array ## we're applying on ## ---------------------------------------------------------------------- ## Arguments: i: counter ## dn: dimnames ## margin: margin argument to apply ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 16 Apr 2010, 10:44 ## pos <- integer(0) lcdn <- character(0) for (lm in margin) { ## get length of current margin llen <- length(dn[[lm]]) ## i modulo llen gives the current position in this dimension lpos <- (if (i > 0) i-1 else 0) %% llen + 1 ## update pos ## pos <- c(pos, lpos) ## update lcdn lcdn <- c(lcdn, dn[[lm]][lpos]) ## update i: subtract lpos and divide by llen i <- (i - lpos) / llen + 1 } lcdn } f.n <- Vectorize(function(design) { ## Purpose: get n obs of design ## ---------------------------------------------------------------------- ## Arguments: design: design to get n of ## 
---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 19 Apr 2010, 11:19 NROW(get(design)) }) f.p <- Vectorize(function(design) { ## Purpose: get p par of design ## ---------------------------------------------------------------------- ## Arguments: design: design to get p of ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 19 Apr 2010, 11:19 NCOL(get(design)) + 1 }) f.which.min <- function(x, nr = 1) { ## Purpose: get the indices of the minimal nr of observations ## ---------------------------------------------------------------------- ## Arguments: x: vector of values ## nr: number of indices to return ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 4 May 2010, 12:18 match(sort(x)[1:nr], x) } f.which.max <- function(x, nr = 1) f.which.min(-x, nr) ## f.get.scale <- function(procstr, proclst = f.str2list(procstr)) ## { ## ## Purpose: get scale estimate used for procstrs ## ## ---------------------------------------------------------------------- ## ## Arguments: procstr: procstrs (dimnames(r.test)[[3]]) as output by ## ## f.list2str() ## ## proclst: list of procedures, as in estlist$procedures ## ## ---------------------------------------------------------------------- ## ## Author: Manuel Koller, Date: 9 Sep 2010, 13:52 ## ret <- list() ## for (lproc in proclst) { ## if (lproc$estname == 'lm') { ## ## least squares ## ret <- c(ret, list(list(fun='f.lsq'))) ## } else { ## ## default (S-scale): ## fun <- 'lmrob.mscale' ## lidx <- names(lproc$args)[na.omit(match(c('psi', 'tuning.chi', 'seed'), ## names(lproc$args)))] ## if (!is.null(lproc$args$method) && ## substr(lproc$args$method,1,3) == 'SMD') { ## ## D-scale ## fun <- 'lmrob.dscale' ## lidx <- names(lproc$args)[na.omit(match(c('psi', 'tuning.psi'), ## names(lproc$args)))] ## } else if (lproc$estname == 'lmrob.mar' ### continue here ## ret <- c(ret, list(list(fun=fun, args=lproc$args[lidx]))) ## } ## }) ########################################################################### ## functions related to prediction ########################################################################### f.prediction.points <- function(design, type = c('pc', 'grid'), length.out = 4*NCOL(design), f = 0.5, direction = +1, max.pc = 5) { ## Purpose: generate prediction points for design ## generate four points along the second principal component ## in the center, 2 intermediate distances and long distance ## (from the center) ## ---------------------------------------------------------------------- ## Arguments: design: design matrix ## type: type of prediction points: grid / principal components ## length.out: approximate number of prediction points ## f: extend range by f (like extendrange()) ## direction: +1 or -1: which direction to go from the center ## max.pc: maximum number of principal components to use ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 9 Oct 2009, 16:48 ## match type argument type = match.arg(type) ## get ranges lrange <- apply(design, 2, range) ## extend range by f lrange <- data.frame(apply(lrange, 2, extendrange, f = f)) switch(type, pc = { ## calculate robust covariance matrix rob <- covMcd(design) ## and use it to calculate the principal components rpc <- princomp(covmat = rob$cov) ## get corner with maximum distance from rob$center lidx <- apply(abs(lrange - rob$center),2,which.max) lcr <- 
diag(as.matrix(lrange[lidx,])) ## create grid points: rdf <- rob$center ## for each principal component for (id in 1:min(NCOL(rpc$loadings),max.pc)) { ## calculate factor to reach each boundary lfct <- (lcr - rob$center) / rpc$loadings[,id] ## calculate distances to boundaries and take the minimal one lmin <- which.min(sapply(lfct, function(x) sum((rpc$loadings[,id] * x)^2))) ## create sequence of multiplicands lmult <- seq(0,lfct[lmin],length.out=length.out/NCOL(rpc$loadings)) rdf <- rbind(rdf, rep(rob$center,each=length(lmult)-1) + direction*lmult[-1] %*% t(rpc$loadings[,id])) } }, grid = { ## generate sequences for every dimension lval <- as.data.frame(apply(lrange,2,f.seq, length.out = round(length.out^(1/NCOL(design))) )) ## return if 1 dimension, otherwise create all combinations rdf <- if (NCOL(design) > 1) t(as.data.frame(do.call('f.combine', lval))) else lval }) rdf <- as.data.frame(rdf) rownames(rdf) <- NULL colnames(rdf) <- colnames(design) if (type == 'pc') attr(rdf, 'npcs') <- id rdf } ## ## plot with ## require(rgl) ## plot3d(design) ## points3d(f.prediction.points(design), col = 2) ## d.data <- data.frame(y = rnorm(10), x = 1:10) ## pred <- f.prediction.points(d.data[,-1,drop=FALSE]) ## obj <- f.lmrob.local(y ~ x, d.data) ## f.predict(obj, pred, interval = 'prediction') ## as.vector(t(cbind(rnorm(4), f.predict(obj, pred, interval = 'prediction')))) ## estlist for prediction: ## start with .output.test ## we only need sigma .output.prediction <- c(.output.sigma,.output.beta,.output.se,.output.sumw,.output.nnz) .output.prediction$predict <- list(names = quote({ npred <- NROW(estlist$design.predict) paste(c('fit', 'lwr', 'upr', 'se.fit', 'cpr'), rep(1:npred,each = 5), sep = '_')}), fun = quote({ lpr <- f.predict(lrr, estlist$design.predict, interval = 'prediction', se.fit = TRUE) ##, df = 16) lpr <- cbind(lpr$fit, lpr$se.fit) lqf <- f.errname(lerrlst$err, 'p') lcpr <- do.call(lqf, c(list(lpr[,'upr']), lerrpar)) - do.call(lqf, c(list(lpr[,'lwr']), lerrpar)) as.vector(t(cbind(lpr,lcpr)))})) .estlist.prediction <- list(design = dd, nrep = 200, errs = .errs.test, seed = 0, procedures = .procedures.final, design.predict = f.prediction.points(dd), output = .output.prediction, use.intercept = TRUE) ## predict confidence intervals instead of prediction intervals .estlist.confint <- .estlist.prediction .estlist.confint$output$predict$fun <- parse(text=gsub('prediction', 'confidence', deparse(.output.prediction$predict$fun))) ########################################################################### ## Generate designs - function ########################################################################### f.gen <- function(n, p, rep, err) { ## get function name and parameters lerrfun <- f.errname(err$err) lerrpar <- err$args ## generate random predictors ret <- lapply(1:rep, function(...) data.frame(matrix(do.call(lerrfun, c(n = n*p, lerrpar)), n, p))) attr(ret[[1]], 'gen') <- f.gen ret } .output.sigmaE <- list(sigmaE = list( names = quote("sigmaE"), fun = quote({ ## estimate scale using current scale estimate. 
## this amounts to recalculating the estimate ## with just an intercept llargs <- lproc$args llestname <- lproc$estname ## save time and just calculate S-estimate and no covariance matrix if (grepl('^lmrob', llestname)) { llestname <- 'lmrob' llargs$cov <- 'none' llargs$envir <- NULL ## drop envir argument if (llargs$method %in% c('MM', 'SM')) llargs$method <- 'S' if (grepl('M$', llargs$method)) llargs$method <- f.chop(llargs$method) } else if (lproc$estname == 'lm.robust') { llargs$estim <- 'Initial' } llrr <- tryCatch(do.call(f.estname(lproc$estname), c(list(lerr ~ 1), llargs)), error = function(e)e) ## check class: if procedure failed: class == 'try-error' if (inherits(llrr, 'error')) NA ## check convergence of estimator else if (lproc$estname != 'lm.robust' && !converged(llrr)) NA else sigma(llrr) }))) robustbase/inst/doc/graphics.functions.R0000644000176200001440000005027412023604053020102 0ustar liggesusers## some additional functions to help plotting ## g.drop.labels <- function(breaks, labels) { ## ind <- breaks %in% labels ## breaks <- as.character(breaks) ## breaks[!ind] <- '' ## breaks ## } g.scale_y_log10_0.05 <- function(breaks = c(0.00001, 0.0001, 0.001, 0.01, 0.02, 0.03, 0.05, 0.07, 0.1, 0.14, 0.2, 0.4, 0.8), minor_breaks = seq(0,1,by = 0.01), ...) ## Purpose: add nice breaks and labels ## ---------------------------------------------------------------------- ## Arguments: just like scale_y_log10 ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 11 Nov 2009, 11:52 scale_y_log10(breaks = breaks, minor_breaks = minor_breaks, ...) ## the same for lattice: g.scale_y_log10_0.05_lattice <- list(at = log10(c(seq(0.1, 0.01, by = -0.01), 0.001, 0.0001, 0.00001)), labels = c("", 0.09, "", 0.07, "", 0.05, "", 0.03, "", 0.01, 0.001, 0.0001, 0.00001)) g.scale_y_log10_1 <- function(breaks = c(seq(0,1,by=0.1), seq(1.2, 3.5,by=0.2)), minor_breaks = seq(0,10,by = 0.1), ...) ## Purpose: add nice breaks and labels ## ---------------------------------------------------------------------- ## Arguments: just like scale_y_log10 ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 11 Nov 2009, 11:52 scale_y_log10(breaks = breaks, minor_breaks = minor_breaks, ...) g.scale_y_log10_1_l <- function(breaks = c(seq(0,.4,by=0.1), seq(0.6,1.4,by=0.2), seq(1.6, 3.4, by = 0.4)), minor_breaks = seq(0,10,by = 0.1), ...) ## Purpose: add nice breaks and labels ## ---------------------------------------------------------------------- ## Arguments: just like scale_y_log10 ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 11 Nov 2009, 11:52 scale_y_log10(breaks = breaks, minor_breaks = minor_breaks, ...) 
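## Illustrative (hypothetical) usage of the g.scale_* helpers above, assuming
## ggplot2 is attached as in the accompanying vignette code; 'dd' is made-up
## demo data, not part of the package:
##   library(ggplot2)
##   dd <- data.frame(x = 1:20, y = exp(seq(log(1e-4), log(0.8), length.out = 20)))
##   ggplot(dd, aes(x, y)) + geom_point() + g.scale_y_log10_0.05()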
g.scale_shape_defaults = c(16, 17, 15, 3, 7, 8) g.scale_shape_defaults2 = c(g.scale_shape_defaults,9,1,2,4) g.scale_linetype_defaults = c("solid", "22", "42", "44", "13", "1343", "73", "2262", "12223242", "F282", "F4448444", "224282F2", "F1") g.scale_shape <- function(..., values=g.scale_shape_defaults2) scale_shape_manual(..., values = values) g.get_colors <- function(n, h=c(0,360) + 15, l=65, c=100, start=0, direction = 1) { rotate <- function(x) (x + start) %% 360 * direction if ((diff(h) %% 360) < 1) { h[2] <- h[2] - 360 / n } grDevices::hcl(h = rotate(seq(h[1], h[2], length = n)), c = c, l = l) } g.get_colors_brewer <- function(n, name='Dark2') { idx <- 1:n if (name=='Dark2') { idx <- c(6,2:5,1,7,8)[idx] } RColorBrewer::brewer.pal(n, name)[idx] } g.scale_colour <- function(..., n=8, values=g.get_colors_brewer(n=n)) scale_colour_manual(..., values=values) ########################################################################### ## some useful helper functions ########################################################################### f.range.xy <- function(x,...) UseMethod("f.range.xy") ## Purpose: get plot range for x and y axis and return as a data.frame ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:42 f.range.xy.default <- function(x, ...) data.frame(x = range(x$x), y = range(x$y)) f.range.xy.data.frame <- function(x, names = c('x','y'), ...) sapply(x[,names],range) f.range.xy.matrix <- function(x, names = c('x','y'), ...) sapply(x[,names],range) f.range.xy.list <- function(x,...) data.frame(x = range(sapply(x, function(x) x$x)), y = range(sapply(x, function(x) x$y))) f.range.xy.histogram <- function(x,...) data.frame(x = range(sapply(x, function(x) x$breaks)), y = range(sapply(x, function(x) x$counts))) f.trim <- function(data, trim = 0.05) { ## Purpose: trim alpha observations ## ---------------------------------------------------------------------- ## Arguments: data and trim ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 19 Nov 2009, 14:33 n <- length(data) lo <- floor(n * trim) + 1 hi <- n + 1 - lo sort.int(data, partial = unique(c(lo, hi)))[lo:hi] } f.seq <- function(x, ...) ## Purpose: make seq callable with an vector x = c(from, to) ## ---------------------------------------------------------------------- ## Arguments: x: vector of length two (from, to) ## ...: other arguments to seq ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 20 Nov 2009, 08:55 seq(from = x[1], to = x[2],...) curves <- function(expr, from = NULL, to = NULL, n = 101, add = FALSE, type = "l", ylab = 'values', xlab = 'x', log = NULL, xlim = NULL, xcol = NULL, geom = geom_path, wrap = TRUE, ...) 
{ ## Purpose: curves: does the same as curve, but for multivariate output ## ---------------------------------------------------------------------- ## Arguments: same as curve ## xcol: column of data.frame to use for x instead of default ## geom: what geom function to use, defaults to geom_path ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 15 Jan 2010, 08:48 sexpr <- substitute(expr) if (is.name(sexpr)) { fcall <- paste(sexpr, "(x)") expr <- parse(text = fcall) if (is.null(ylab)) ylab <- fcall } else { if (!(is.call(sexpr) && match("x", all.vars(sexpr), nomatch = 0L))) stop("'expr' must be a function or an expression containing 'x'") expr <- sexpr if (is.null(ylab)) ylab <- deparse(sexpr) } if (is.null(xlim)) delayedAssign("lims", { pu <- par("usr")[1L:2L] if (par("xaxs") == "r") pu <- extendrange(pu, f = -1/27) if (par("xlog")) 10^pu else pu }) else lims <- xlim if (is.null(from)) from <- lims[1L] if (is.null(to)) to <- lims[2L] lg <- if (length(log)) log else paste(if (add && par("xlog")) "x", if (add && par("ylog")) "y", sep = "") if (length(lg) == 0) lg <- "" x <- if (lg != "" && "x" %in% strsplit(lg, NULL)[[1L]]) { if (any(c(from, to) <= 0)) stop("'from' and 'to' must be > 0 with log=\"x\"") exp(seq.int(log(from), log(to), length.out = n)) } else seq.int(from, to, length.out = n) y <- eval(expr, envir = list(x = x), enclos = parent.frame()) ## up this was an exact copy of curve if (length(dim(y)) == 1) { ydf <- data.frame(x = x, values = y) gl <- geom(data = ydf, aes(x = x, y = values), ...) ret <- if (add) gl else ggplot(ydf) + gl } else { ## check whether we have to transpose y if (NCOL(y) != n) { if (NROW(y) == n) y <- t(y) else stop(paste('output should have n =',n,' columns')) } ## add dimnames dm <- dimnames(y) if (is.null(dm)) dm <- list(1:NROW(y), 1:NCOL(y)) if (is.null(names(dm))) names(dm) <- c('rows', 'cols') if (is.null(dm[[1]])) dm[[1]] <- 1:NROW(y) if (is.null(dm[[2]])) dm[[2]] <- 1:NCOL(y) dimnames(y) <- dm ## restructure the output matrix to a data.frame ydf <- melt(y) ## un-factor the first two columns for (i in 1:2) { if (is.factor(ydf[[i]])) ydf[[i]] <- f.as.numeric.vectorized(levels(ydf[[i]]))[ydf[[i]]] } ## add x column ydf$x <- rep(x, each = NROW(y)) if (is.null(xcol)) { xcol <- 'x' } else { ## get desired x column lx <- ydf[idx <- ydf[,1] == xcol,3] ## remove it from the values ydf <- ydf[!idx,] ## add as additional column ydf[[xcol]] <- rep(lx, each = NROW(y) - 1) if (missing(xlab)) xlab <- xcol } if (wrap) { ## use facet wrap, or assume it was used before ## there seems to be a bug in ggplot that requires sorting for the rows variable ydf <- ydf[order(ydf[,1],ydf[,2]),] gl <- geom(data = ydf, aes_string(x = xcol, y = 'value'), ...) ret <- if (add) gl else ggplot(ydf) + gl + xlab(xlab) + facet_wrap(substitute(~ rows, list(rows = as.name(names(dm)[1])))) } else { ## factor 'rows' again ydf[, 1] <- factor(ydf[, 1], levels = unique(ydf[, 1])) ret <- if (add) geom(data = ydf, aes_string(x = xcol, y = 'value', color = names(dm)[1]), ...) else ggplot(ydf) + geom(aes_string(x = xcol, y = 'value', linetype = names(dm)[1]), ...) 
+ xlab(xlab) } } if (!add && !is.null(log)) { ret <- ret + switch(log, xy = coord_trans(x = 'log', y='log'), x = coord_trans(x = 'log'), y = coord_trans(y = 'log'), list() ) } ret } f.get.range <- function(p, axis) { ## Purpose: get range of axis from ggplot object ## ---------------------------------------------------------------------- ## Arguments: p: ggplot return object ## axis: 'x' or 'y' ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 27 Jan 2010, 09:37 lr <- NULL ## get range from base mapping, if available if (!is.null(p$mapping[[axis]])) lr <- range(p$data[[as.character(p$mapping[[axis]])]], na.rm = TRUE) ## walk layers for (llayer in p$layers) { lvar <- as.character(llayer$mapping[[axis]]) if (!is.null(lvar) && length(lvar) > 0) { ## check if the variable is available in custom data.frame if (!is.null(llayer$data) && NCOL(llayer$data) > 0 && lvar %in% colnames(llayer$data)) ## if so, update range lr <- range(c(llayer$data[[lvar]], lr), na.rm = TRUE) else ## try to update range with data from global data.frame if (!is.null(p$data) && NCOL(p$data) > 0 && lvar %in% colnames(p$data)) ## if so, update range lr <- range(c(p$data[[lvar]], lr), na.rm = TRUE) } } lr } ## makeFootnote: add footnote to plot (like stamp) ## from: http://www.r-bloggers.com/r-good-practice-%E2%80%93-adding-footnotes-to-graphics/ makeFootnote <- function(footnoteText= format(Sys.time(), "%d %b %Y"), size= .7, color="black") { ## require(grid) pushViewport(viewport()) grid.text(label= footnoteText , x = unit(1,"npc") - unit(2, "mm"), y= unit(2, "mm"), just=c("right", "bottom"), gp=gpar(cex= size, col=color)) popViewport() } ## ## Example ## ## plot(1:10) ## makeFootnote(footnote) ## using this and multicore results in segmentation fault ## print.ggplot <- function(..., footnote) ## { ## ## Purpose: print ggplot and add a footnote ## ## ---------------------------------------------------------------------- ## ## Arguments: see ?print.ggplot ## ## footnote: text to be added as footnote ## ## ---------------------------------------------------------------------- ## ## Author: Manuel Koller, Date: 25 Jan 2010, 16:32 ## ggplot2::print.ggplot(...) ## ## if (!missing(footnote)) grid.text(footnote, x = unit(1, 'npc') - unit(2, 'mm'), ## ## y = unit(2, 'mm'), ## ## just = c('right', 'bottom'), ## ## gp=gpar(cex=.7, col=grey(.5))) ## if (!missing(footnote)) makeFootnote(footnote) ## } ## ## modify print.ggplot: update legend automatically ## print.ggplot <- function (x, newpage = is.null(vp), vp = NULL, ...) ## { ## set_last_plot(x) ## lg <- ggplotGrob(x, ...)
## ## edit grob: change legends ## ## get all legend texts ## lls <- getGrob(lg, gPath='legend.text.text', grep = TRUE, global = TRUE) ## for(le in lls) { ## print(le$label) ## if (!is.expression(le$label) && le$label %in% names(legend.mod)) { ## lg <- editGrob(lg, gPath=le$name, label = legend.mod[[le$label]]) ## } ## } ## if (newpage) ## grid.newpage() ## if (is.null(vp)) { ## grid.draw(lg) ## } ## else { ## if (is.character(vp)) ## seekViewport(vp) ## else pushViewport(vp) ## grid.draw(lg) ## upViewport() ## } ## } require(grid) print.ggplot <- function(x, newpage = is.null(vp), vp = NULL, ..., footnote = NULL, footnote.col = 'black', footnote.size = .7, footnote.just = c("right", "bottom"), legend.mod = NULL) { ## Purpose: print ggplot and add footnote ## ---------------------------------------------------------------------- ## Arguments: x, newpage, vp, ...: see ?print.ggplot ## footnote: text to be added as footnote ## footnote.col: color of footnote ## .size: size of footnote text (cex) ## .just: justification of footnote ## legend.mod: named list on what legend entries to replace ## by value ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 26 Jan 2010, 09:01 if ((missing(footnote) && missing(legend.mod)) || packageVersion("ggplot2") > "0.9.1") return(ggplot2:::print.ggplot(x, newpage, vp, ...)) ## this is mostly a copy of ggplot2::print.ggplot ggplot2:::set_last_plot(x) if (newpage) grid.newpage() grob <- ggplotGrob(x, ...) if (!missing(legend.mod)) { ## edit grob: change legends and strip text lls <- getGrob(grob, gPath='(xlab-|ylab-|title-|label-|legend.text.text|strip.text.x.text|strip.text.y.text)', grep=TRUE, global=TRUE) ## walk all legend texts for(le in lls) { if (!is.null(le$label) && !is.expression(le$label) && length(le$label) > 0 && le$label %in% names(legend.mod)) { grob <- editGrob(grob, gPath=le$name, label = legend.mod[[le$label]]) } } ## also: remove alpha in legend key points lls <- getGrob(grob, gPath='key.points', grep=TRUE, global=TRUE) for (le in lls) { if (is.character(le$gp$col) && grepl('^\\#', le$gp$col)) { lgp <- le$gp lgp$col <- substr(lgp$col, 1, 7) grob <- editGrob(grob, gPath=le$name, gp=lgp) } } ## also: change spacing of legends grob$children$legends$framevp$layout$heights <- grob$children$legends$framevp$layout$heights * .91 } if (missing(footnote)) grid.draw(grob) else { if (is.null(vp)) { ## add footnote to grob grob$children$footnote <- grid.text(label=footnote, x = unit(1, "npc") - unit(2, "mm"), y = unit(2, "mm"), just = footnote.just, gp=gpar(cex = footnote.size, col = footnote.col), draw = FALSE) llen <- length(grob$childrenOrder) grob$childrenOrder[llen+1] <- 'footnote' grid.draw(grob) } else { if (is.character(vp)) seekViewport(vp) else pushViewport(vp) grid.draw(grob) upViewport() ## add footnote to plot (from makeFootnote) pushViewport(viewport()) grid.text(label=footnote, x = unit(1, "npc") - unit(2, "mm"), y = unit(2, "mm"), just = footnote.just, gp=gpar(cex = footnote.size, col = footnote.col)) popViewport() } } } ## guide_legends_box <- function (scales, layers, default_mapping, horizontal = FALSE, ## theme) ## { ## print('hello') ## legs <- guide_legends(scales, layers, default_mapping, theme = theme) ## n <- length(legs) ## if (n == 0) ## return(zeroGrob()) ## if (!horizontal) { ## width <- do.call("max", lapply(legs, widthDetails)) ## heights <- do.call("unit.c", lapply(legs, function(x) heightDetails(x) * ## 10)) ## fg <- frameGrob(grid.layout(nrow = n, 1, widths = width, 
## heights = heights, just = "centre"), name = "legends") ## for (i in 1:n) { ## fg <- placeGrob(fg, legs[[i]], row = i) ## } ## } ## else { ## height <- do.call("sum", lapply(legs, heightDetails)) ## widths <- do.call("unit.c", lapply(legs, function(x) widthDetails(x) * ## 1.1)) ## fg <- frameGrob(grid.layout(ncol = n, 1, widths = widths, ## heights = height, just = "centre"), name = "legends") ## for (i in 1:n) { ## fg <- placeGrob(fg, legs[[i]], col = i) ## } ## } ## fg ## } ### viewport test ## data <- data.frame(x = 1:10, y = 1:10) ## tg <- ggplot(data, aes(x, y)) + geom_line() + ## geom_text(data=data.frame(x=10, y=1), label='test') ## print(tg) ## tgrob2 <- ggplotGrob(tg) ## str(tgrob, max.level = 2) ## str(tgrob2, max.level = 2) ## tgrob$children$footnote <- grid.text(label= 'test haha2', x = unit(1,"npc") - unit(2, "mm"), ## y= unit(2, "mm"), just=c("right", "bottom"), ## gp=gpar(cex= .7, col=grey(.5)), draw=FALSE) ## tgrob$childrenOrder[7] <- 'footnote' ## grid.draw(tgrob) ## print(tg, footnote = 'footnote test text') ########################################################################## ## ggplot 0.8.7 bugfix ########################################################################## ## require(ggplot2) ## data <- data.frame(x = 1:10, y = exp(0:9)) ## ggplot(data, aes(x, y)) + geom_point() + ## geom_hline(yintercept = 9) + geom_vline(xintercept = 2) ## last_plot() + coord_trans(y = 'log', x = 'sqrt') ## GeomVline$draw <- function(., data, scales, coordinates, ...) { ## data$y <- if(coordinates$objname=="trans" && ## coordinates$ytr$objname%in%c("log", "sqrt")) 0 else -Inf ## data$yend <- Inf ## GeomSegment$draw(unique(data), scales, coordinates) ## } ## GeomHline$draw <- function(., data, scales, coordinates, ...) { ## data$x <- if(coordinates$objname=="trans" && ## coordinates$xtr$objname%in%c("log", "sqrt")) 0 else -Inf ## data$xend <- Inf ## GeomSegment$draw(unique(data), scales, coordinates) ## } ## Coord$munch_group <- function(., data, details, npieces=50) { ## n <- nrow(data) ## if(n==2 && (all(data$x==c(-Inf,Inf)) || all(data$y==c(-Inf,Inf)))) npieces=1 ## x <- approx(data$x, n = npieces * (n - 1) + 1)$y ## y <- approx(data$y, n = npieces * (n - 1) + 1)$y ## cbind( ## .$transform(data.frame(x=x, y=y), details), ## data[c(rep(1:(n-1), each=npieces), n), setdiff(names(data), c("x", "y"))] ## ) ## } cs <- function(x, y, ..., if.col) { ## Purpose: make aes dependent on global variable color ## ---------------------------------------------------------------------- ## Arguments: same arguments as for aes ## if.col: list of arguments that are only applied if ## color = TRUE ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 7 Sep 2010, 08:36 aes <- structure(as.list(match.call()[-1]), class = "uneval") if (globalenv()$color) { aes2 <- as.list(aes$if.col[-1]) for (item in names(aes2)) { aes[[item]] <- aes2[[item]] } } aes$if.col <- NULL rename_aes(aes) } ## replace levels by legend.mod lab <- function(..., lm=legend.mod) { factors <- list(...) 
lev <- unlist(lapply(factors, levels)) if (length(factors) > 1) lev <- sort(lev) ret <- as.list(lev) idx <- lev %in% names(lm) ret[idx] <- lm[lev[idx]] ret } ## my labeller mylabel <- function(name, value, lm) { str(name) str(value) if (value %in% names(lm)) lm[[value]] else value } robustbase/inst/doc/error.distributions.R0000644000176200001440000000271311535441110020320 0ustar liggesusers## error.distributions.R: additional error distributions for use in simulations ## require(skewt) ## loaded in vignette if required ## centered skewed t distribution Eskt <- function(nu, gam) { M <- if (is.infinite(nu)) sqrt(2/pi) else gamma((nu+1)/2)/sqrt(nu*pi)/gamma(nu/2)*2*nu/(nu-1) M*(gam^2-1/gam^2)/(gam + 1/gam) } dcskt <- function(x, df, gamma=2) { ncp <- Eskt(df, gamma) dskt(x + ncp, df, gamma) } pcskt <- function(q, df, gamma=2) { ncp <- Eskt(df, gamma) pskt(q + ncp, df, gamma) } qcskt <- function(p, df, gamma=2) { ncp <- Eskt(df, gamma) qskt(p, df, gamma) - ncp } rcskt <- function(n, df, gamma=2) { ncp <- Eskt(df, gamma) rskt(n, df, gamma) - ncp } #################################################################################### ## contaminated normal #################################################################################### rcnorm <- function (n,mean=0,sd=1,epsilon=0.1,meanc=mean,sdc=sqrt(10)*sd) { e <- rnorm(n,mean,sd) nc <- floor(epsilon*n) idx <- sample(1:n,nc) e[idx] <- rnorm(nc,meanc,sdc) e } ## ignore other arguments for the moment pcnorm <- function(q,mean=0,sd=1,lower.tail=TRUE,log.p=FALSE,...) pnorm(q,mean,sd,lower.tail,log.p) ## ignore other arguments for the moment qcnorm <- function(p,mean=0,sd=1,lower.tail=TRUE,log.p=FALSE,...) qnorm(p,mean,sd,lower.tail,log.p) ## ignore other arguments for the moment dcnorm <- function(x,mean=0,sd=1,log=FALSE,...) dnorm(x,mean,sd,log) robustbase/inst/xtraR/0000755000176200001440000000000014555212560014505 5ustar liggesusersrobustbase/inst/xtraR/plot-psiFun.R0000644000176200001440000001127713310763444017060 0ustar liggesusers#### Functions to plot and check psi-functions #### ----------------------------------------- ## used in ../../tests/lmrob-psifns.R, ## ../../tests/psi-rho-etc.R ## and ../../vignettes/psi_functions.Rnw vignette ## Original Author of functions: Martin Maechler, Date: 13 Aug 2010, 10:17 p.psiFun <- function(x, psi, par, main=FALSE, ...) { m.psi <- cbind(rho = Mpsi(x, par, psi,deriv=-1), psi = Mpsi(x, par, psi,deriv= 0), Dpsi = Mpsi(x, par, psi,deriv= 1), wgt = Mwgt(x, par, psi)) robustbase:::matplotPsi(x, m.psi, psi=psi, par=par, main=main, ...) ## -> cbind(x, m.psi) } p.psiFun2 <- function(x, psi, par, main="short", ...) p.psiFun(x, psi, par, main=main, leg.loc= "bottomright", ylim = c(-2.2, 6)) ## for psi_func class objects: simply use plot() method. mids <- function(x) (x[-1]+x[-length(x)])/2 ##' is 'psi' the name of redescending psi (i.e. with *finite* rejection point) isPsi.redesc <- function(psi) { psi != "Huber" ## <- must be adapted when we introduce more } ##' @title Check consistency of psi/chi/wgt/.. functions ##' @param m.psi matrix as from p.psiFun() ##' @param tol ##' @return concatenation of \code{\link{all.equal}} results ##' @author Martin Maechler chkPsiDeriv <- function(m.psi, tol = 1e-4) { stopifnot(length(tol) > 0, tol >= 0, is.numeric(psi <- m.psi[,"psi"]), is.numeric(dx <- diff(x <- m.psi[,"x"]))) if(length(tol) < 2) tol[2] <- 10*tol[1] xn0 <- abs(x) > 1e-5 ## need to jitter here, as for "huber" the mostly equal cases are not counted ## (=> use all.equal(*, countEQ=TRUE) in future). 
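## (Editorial illustration, not part of the original file; a rough usage sketch
## only -- "bisquare" and its tuning constant 4.685 are just example choices,
## and p.psiFun() is assumed to return cbind(x, m.psi) as its comment notes:)
##   m <- p.psiFun(seq(-5, 5, length = 1001), "bisquare", par = 4.685)
##   chkPsiDeriv(m)   ## -> c(TRUE, TRUE, TRUE) when rho', psi' and psi/x agree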
Save & restore r.seed if needed: if(hasRS <- exists(".Random.seed", envir=.GlobalEnv)) RS <- .Random.seed set.seed(8) dpsidx <- (diff(psi)/dx) * (1 + 1e-7*rnorm(dx)) # jitter if(hasRS) assign(".Random.seed", RS, envir=.GlobalEnv) c(all.equal(mids(psi), diff(m.psi[,"rho"])/dx, tolerance=tol[1]), # rho' == psi all.equal(mids(m.psi[,"Dpsi"]), dpsidx, tolerance= tol[2]), # psi' == psip all.equal(m.psi[xn0,"wgt"], (psi/x)[xn0], tolerance= tol[1]/10))# psi/x == wgt } ##' This version "starts from scratch" instead of from p.psiFun() result: ##' ##' @title Check consistency of psi/chi/wgt/.. functions ##' @param x range or vector of abscissa values ##' @param psi psi() function spec., passed to M.psi() etc ##' @param par tuning parameter, passed to M.psi() etc ##' @param tol tolerance for equality checking of numeric derivatives ##' @return concatenation of \code{\link{all.equal}} results ##' @author Martin Maechler chkPsi.. <- function(x, psi, par, tol = 1e-4, doD2, quiet=FALSE) { stopifnot(length(tol) > 0, tol >= 0, is.numeric(x), is.finite(x)) is.redesc <- isPsi.redesc(psi) if(length(x) == 2) ## it is a *range* -> produce vector x <- seq(x[1], x[2], length = 1025L) dx <- diff(x) x0 <- sort(x) x <- c(-Inf, Inf, NA, NaN, x0) ## if(is.redesc) rho <- Mpsi(x, par, psi, deriv=-1) psix <- Mpsi(x, par, psi, deriv= 0) Dpsi <- Mpsi(x, par, psi, deriv= 1) wgt <- Mwgt(x, par, psi) chi <- Mchi(x, par, psi) if(is.redesc) { chi1 <- Mchi(x, par, psi, deriv=1) chi2 <- Mchi(x, par, psi, deriv=2) } rho.Inf <- MrhoInf(par, psi) stopifnot(all.equal(rep(rho.Inf,2), rho[1:2])) if(is.redesc) stopifnot(all.equal(rep(rho.Inf,2), rho[1:2]), all.equal(chi, rho / rho.Inf), all.equal(chi1,psix / rho.Inf), all.equal(chi2,Dpsi / rho.Inf) ) else { ## check any here? From ../src/lmrob.c : ## chi = C-function rho(x) which is unscaled rho <- chi # for checks below } D2psi <- tryCatch(Mpsi(x, par, psi, deriv= 2), error=function(e)e) has2 <- !inherits(D2psi, "error") doD2 <- if(missing(doD2)) has2 else doD2 && has2 if(!quiet & !doD2) message("Not checking psi''() := Mpsi(*, deriv=2)") stopifnot(is.numeric(psix), ## check NA / NaN : identical5(x[3:4], chi[3:4], psix[3:4], Dpsi[3:4], wgt[3:4]), if(has2) identical(x[3:4], D2psi[3:4]) else TRUE) if(length(tol) < 2) tol[2] <- 16*tol[1] if(length(tol) < 3) tol[3] <- tol[1]/10 if(length(tol) < 4) tol[4] <- 8*tol[2] i <- 5:length(x) # leaving away the first 4 (+-Inf, NA..) xn0 <- is.finite(x) & abs(x) > 1e-5 c("rho' = psi" = all.equal(mids(psix[i]), diff(rho [i])/dx, tolerance=tol[1]), "psi' = psip"= all.equal(mids(Dpsi[i]), diff(psix[i])/dx, tolerance=tol[2]), "psi/x= wgt" = all.equal( wgt[xn0], (psix/x)[xn0], tolerance=tol[3]), "psi''=D2psi"= if(doD2) all.equal(mids(D2psi[i]), diff(Dpsi[i])/dx,tolerance=tol[4]) else NA) } robustbase/inst/xtraR/styleData.R0000644000176200001440000000303314222041416016550 0ustar liggesusers### Small Test Datasets (all kinds odd/even, constant/regular/outlier): D <- within(list(), { ## n = 0,1,2,3 : x0 <- numeric(0) x1 <- 3 x1I <- Inf x2 <- 1:2 x2I <- c(-Inf, 9) xII <- c(-Inf, Inf) x3 <- c(1:2,10) x3I <- c(-Inf, 9,11) x3.2I <- c(-Inf, 9, Inf) ## constant (0 mad) + 0--2 outliers xC <- rep(1, 12) xC. <- rep(1, 11) xC1 <- c(xC, 10) xC1. <- c(xC., 10) xC2 <- c(xC1, 100) xC2. <- c(xC1., 100) ## "uniform" + 0--2 outliers y <- 1:10 y. <- 1:11 y1 <- c(y, 100) y1. <- c(y., 100) y2 <- c(y1, 1000) y2. <- c(y1., 1000) yI <- c(y1, Inf) yI. 
<- c(y1., Inf) }) smallD <- D[order(lengths(D))] rm(D) ## Constructor of such "stylized" small data with large ('M') values / outliers: mk3Mx <- function(M, ngood = 10, left = floor(ngood/3)) { stopifnot(is.numeric(ngood), ngood >= 3, is.numeric(M), length(M) == 1L, M >= 1000, is.numeric(left), 0 <= left, left <= ngood) right <- ngood-left res <- list( c(rep(-M, left), seq_len(ngood - 1L), rep(M, right)) # < 50% "good" , c(rep(-M, left), seq_len(ngood ), rep(M, right)) # half "good" , c(rep(-M, left), seq_len(ngood + 1L), rep(M, right)) # > 50% "good" ) nM <- gsub("[-+]", "", formatC(M, digits=2, width=1)) names(res) <- paste0("M", nM,"_n", c("m1", "eq", "p1")) res } ## The one that works for a *vector* M: mkMx <- function(M, ngood = 10, left = floor(ngood/3)) unlist(lapply(M, mk3Mx), recursive=FALSE) robustbase/inst/xtraR/test-tools.R0000644000176200001440000000373414241260162016745 0ustar liggesusers## Just a small subset of those from 'Matrix' i.e, ## system.file("test-tools-1.R", package = "Matrix") identical3 <- function(x,y,z) identical(x,y) && identical (y,z) identical4 <- function(a,b,c,d) identical(a,b) && identical3(b,c,d) identical5 <- function(a,b,c,d,e) identical(a,b) && identical4(b,c,d,e) assert.EQ <- function(target, current, tol = if(showOnly) 0 else 1e-15, giveRE = FALSE, showOnly = FALSE, ...) { ## Purpose: check equality *and* show non-equality ## ---------------------------------------------------------------------- ## showOnly: if TRUE, return (and hence typically print) all.equal(...) T <- isTRUE(ae <- all.equal(target, current, tolerance = tol, ...)) if(showOnly) return(ae) else if(giveRE && T) { ## don't show if stop() later: ae0 <- if(tol == 0) ae else all.equal(target, current, tolerance = 0, ...) if(!isTRUE(ae0)) writeLines(ae0) } if(!T) stop("all.equal() |-> ", paste(ae, collapse=sprintf("%-19s","\n"))) else if(giveRE) invisible(ae0) } pkgRversion <- function(pkgname) sub("^R ([0-9.]+).*", "\\1", packageDescription(pkgname)[["Built"]]) showSys.time <- function(expr, ...) { ## prepend 'Time' for R CMD Rdiff st <- system.time(expr, ...) writeLines(paste("Time", capture.output(print(st)))) invisible(st) } showProc.time <- local({ ## function + 'pct' variable pct <- summary(proc.time())# length 3, shorter names function(final="\n", ind=TRUE) { ## CPU elapsed __since last called__ ot <- pct ; pct <<- summary(proc.time()) delta <- (pct - ot)[ind] ## 'Time' *not* to be translated: tools::Rdiff() skips its lines! cat('Time', paste0("(",paste(names(delta),collapse=" "),"):"), delta, final) } }) ## == sfsmisc::relErr : relErr <- function(target, current) { ## make this work for 'Matrix' ==> no mean() .. n <- length(current) if(length(target) < n) target <- rep(target, length.out = n) sum(abs(target - current)) / sum(abs(target)) } robustbase/inst/xtraR/test_MCD.R0000644000176200001440000003056313710236747016306 0ustar liggesusers#### Utility functions for testing covMcd() #### -------------------------------------- ../tests/tmcd.R ## "workhorse" -- by default *passed* to and called from doMCDdata(): domcd1 <- function(x, xname, nrep = 1, ## These are all got from doMCDdata() [yuck!] method = get("method", parent.frame()), # compromise time = get("time", parent.frame()), # compromise short = get("short", parent.frame()), # compromise full = get("full", parent.frame()), # compromise lname = 20) { if(short && full) stop("you should not set both 'full' and 'short' to TRUE") force(xname)# => evaluate when it is a data(<>, ..) 
call n <- dim(x)[1] p <- dim(x)[2] if(method == "MASS") { mcd <- MASS::cov.mcd(x) mcd$quan <- (n + p + 1) %/% 2 #default: floor((n+p+1)/2) } else if(method == "DetMCD") { mcd <- covMcd(x, nsamp="deterministic") # trace = FALSE } else { mcd <- covMcd(x) # trace = FALSE } if(full) { header <- get("header", parent.frame()) header(time) } xres <- sprintf("%*s %3d %3d %3d %12.6f", lname, xname, n, p, mcd$quan, mcd$crit) if(time) { xtime <- system.time(repMCD(x, nrep, method))[1]/nrep xres <- sprintf("%s %10.1f", xres, 1000 * xtime) } cat(xres, "\n") if(!short) { cat("Best subsample: \n") print(mcd$best) ibad <- which(unname(mcd$mcd.wt) == 0) nbad <- length(ibad) cat("Outliers: ",nbad, if(nbad > 0)":", "\n") if(nbad > 0) print(ibad) if(full) { cat("------------- *MCD() result: --------------------------\n") print(mcd) } cat("--------------------------------------------------------\n") } }## {domcd1} ##' Test the function covMcd() on the literature datasets: ##' ##' Call covMcd() for "all" datasets in robustbase / rrcov and print: ##' - execution time (if time is true) ##' - objective function ##' - best subsample found (if short is false) ##' - outliers identified (with cutoff 0.975) (if short is false) ##' - estimated center and covarinance matrix if full is true) ##' ##' @param nrep : [integer] number of repetitions to use for estimating the ##' (average) execution time ##' @param method : [character] select a method: one of (FASTMCD, MASS) ##' @param time : [logical] whether to evaluate the execution time ##' @param short : [logical] whether to do short output (i.e. only the ##' objective function value). If short == FALSE, ##' the best subsample and the identified outliers are ##' printed. See also the parameter full below ##' @param full : [logical] whether to print the estimated cente and covariance matrix ##' @param digits ##' @param domcd workhorse function, to be called e.g. as ##' @examples domcd(starsCYG, data(starsCYG), nrep) ##' @author Valentin Todorov; tweaks by Martin Maechler ##' @note Is called from ../../demo/determinMCD.R and ../../tests/tmcd.R doMCDdata <- function(nrep = 1, method = c("FASTMCD", "MASS", "DetMCD"), time = nrep >= 3, short = time, full = !short, digits=5, domcd = domcd1) { stopifnot(is.function(domcd), length(formals(domcd)) >= 3) options(digits = digits) method <- match.arg(method) # *is* then accessed from domcd(.) 
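## (Editorial note, not part of the original source:) 'method', 'time', 'short'
## and 'full' chosen here are *not* passed to domcd() explicitly; the default
## workhorse domcd1() picks them up via get("method", parent.frame()) etc., so
## every domcd(<data>, <name>, nrep) call below sees the values of this
## doMCDdata() call.  A minimal usage sketch (e.g., as from tests/tmcd.R):
##   doMCDdata(nrep = 3)          ## short per-dataset output plus timings
##   doMCDdata(method = "MASS")   ## same datasets, but via MASS::cov.mcd()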
stopifnot(require("robustbase")) # all data() which do not specify package data(Animals, package = "MASS") brain <- Animals[c(1:24, 26:25, 27:28),] data(list = c("fish", "pottery", "rice", "un86", "wages"), package = "rrcov") tmp <- sys.call() cat("\nCall: ", deparse(substitute(tmp)),"\n") header <- function(time) { ## the string length here require 'lname <- 20' {FIXME} ## 1 2 ## 1 3 5 7 901 3 5 7 90 2 4 cat("Data Set n p h(alf) LOG(obj)",if(time)" Time [ms]","\n", "=============================================",if(time)"===========","\n", sep="") } if(full) { ## header() is called in each domcd() } else ## here header(time) domcd(bushfire, data(bushfire), nrep) domcd(heart[, 1:2], data(heart), nrep) domcd(starsCYG, data(starsCYG), nrep) domcd(stack.x, data(stackloss), nrep) domcd(data.matrix(subset(phosphor, select= -plant)),data(phosphor), nrep) domcd(data.matrix(subset(coleman, select = -Y)), data(coleman), nrep) domcd(data.matrix(subset(salinity, select = -Y)), data(salinity), nrep) domcd(data.matrix(subset(wood, select = -y)), data(wood), nrep) domcd(data.matrix(subset(hbk, select = -Y)), data(hbk), nrep) domcd(brain, "Animals", nrep) domcd(milk, data(milk), nrep) domcd(lactic, data(lactic), nrep) domcd(pension, data(pension), nrep) domcd(pilot, data(pilot), nrep) ## This is for CovMcdBig .... ## domcd(radarImage, data(radarImage), nrep) ## domcd(NOxEmissions, data(NOxEmissions), nrep) domcd(data.matrix(subset(vaso, select = -Y)), data(vaso), nrep) domcd(data.matrix(subset(wagnerGrowth, select = -Period)), data(wagnerGrowth), nrep) ## Obs 14 has missing, column 7 is categorical domcd(fish[-14,-7], data(fish, package="rrcov"), nrep) domcd(pottery[,-7], data(pottery, package="rrcov"), nrep) domcd(rice, data(rice, package="rrcov"), nrep) domcd(un86, data(un86, package="rrcov"), nrep) ## there are missing values domcd(wages[-c(29, 31, 38),-9], data(wages, package="rrcov"), nrep) cat("========================================================\n") } ## {doMCDdata} if(FALSE){ data(mortality, package = "riv") mm <- as.data.frame(lapply(mortality, signif, 3)) for(j in c(1,2,6,7)) mm[,j] <- mm[,j] * 10 mm[,5] <- mm[,5] * 1000 mm[,8] <- mm[,8] / 100 mort3 <- mm dput(mort3) } ## which gives the equivalent of mort3 <- data.frame(MO70 = c(140, 101, 86, 102, 115, 121, 118, 76.6, 131, 112, 111, 112, 117, 118, 123, 122, 81.7, 108, 111, 109, 92.5, 83.9, 93.8, 135, 124, 126, 122, 120, 127, 115, 156, 95.1, 127, 129, 116, 82.3, 115, 106, 134, 94.9, 119, 111, 131, 85.6, 135, 126, 141, 152, 137, 151, 93.6, 84.2, 78, 50.2, 81.3, 112, 80.1, 125, 120, 143), MAGE = c(297, 277, 275, 268, 296, 327, 314, 258, 342, 278, 278, 313, 284, 272, 296, 277, 271, 296, 286, 250, 280, 270, 246, 301, 279, 287, 293, 271, 291, 295, 314, 267, 275, 307, 259, 251, 324, 285, 288, 254, 278, 287, 316, 287, 326, 309, 334, 369, 321, 311, 261, 272, 260, 244, 248, 277, 240, 295, 319, 346), CI68 = c(137, 137, 129, 129, 151, 157, 157, 157, 157, 202, 202, 202, 138, 160, 190, 191, 191, 191, 159, 159, 146, 146, 203, 203, 182, 166, 203, 203, 167, 167, 165, 153, 149, 149, 149, 157, 152, 183, 183, 183, 183, 183, 183, 111, 171, 148, 148, 148, 192, 160, 160, 172, 172, 172, 172, 101, 173, 173, 144, 181), MDOC = c(142, 80.4, 148, 167, 230, 187, 240, 149, 240, 195, 327, 377, 203, 160, 161, 68.7, 141, 120, 176, 105, 128, 112, 98.9, 160, 209, 200, 153, 126, 157, 157, 145, 160, 158, 102, 195, 188, 250, 143, 157, 186, 114, 129, 129, 143, 186, 207, 144, 112, 157, 121, 168, 155, 144, 144, 120, 194, 93.6, 231, 185, 89.7), DENS = c(37, 37, 27, 32, 17, 13, 23, 
19, 27, 29, 15, 15, 48, 34, 26, 47, 17, 10, 10, 18, 11, 13, 26, 19, 55, 17, 16, 7, 10, 17, 44, 13, 18, 26, 40, 22, 29, 7, 28, 10, 15, 1, 11, 10, 8, 13, 13, 6, 10, 26, 49, 28, 32, 18, 62, 15, 21, 18, 10, 12), NONW = c(4.22, 3.36, 0.67, 0.52, 2.51, 0.82, 4.07, 1.11, 2.86, 2.92, 2.74, 1.05, 7.23, 5.16, 3.44, 2.84, 1.84, 1.47, 0.62, 0.03, 0.96, 1.07, 1.74, 2.41, 0.45, 4.7, 4.45, 1.2, 0.64, 2.28, 4.13, 1.06, 4.02, 2.22, 5.6, 0.43, 2.34, 1.78, 2.81, 1.9, 3.09, 1.43, 2.58, 1.34, 0.78, 3.44, 2.07, 0.68, 1, 3.6, 3.92, 2.58, 2.66, 0.05, 0.86, 0.32, 3.02, 4.24, 1.26, 1.08), EDUC = c(454, 516, 601, 631, 565, 620, 661, 653, 661, 591, 568, 499, 685, 534, 539, 536, 560, 542, 680, 546, 648, 632, 601, 469, 458, 446, 521, 540, 661, 601, 480, 627, 506, 363, 551, 662, 518, 556, 484, 607, 562, 517, 521, 582, 629, 506, 534, 433, 459, 476, 492, 548, 517, 517, 468, 685, 483, 471, 678, 528), IN69 = c(86.9, 99.3, 113, 99.2, 104, 118, 113, 117, 125, 100, 104, 115, 122, 107, 135, 101, 123, 114, 114, 113, 108, 109, 100, 99.8, 102, 100, 110, 112, 111, 113, 92.7, 116, 86.3, 103, 86.4, 109, 116, 112, 104, 108, 103, 116, 99.3, 116, 114, 104, 105, 97, 102, 83.4, 101, 125, 117, 118, 90.3, 108, 92.4, 106, 126, 109)) ###'------*Generate* data for benchmarking ---------------------------------------- ##' Generates a location contaminated multivariate ##' normal sample of n observations in p dimensions ##' (1-eps) * N_p(0, I_p) + eps * N_(m,I_p) ##' where ##' m = (b,b,...,b) ##' Defaults: eps=0 and b=10 ##' @title Generate n x p location contaminated MV data ##' @param n number of observations ##' @param p number of variables ##' @param eps amount of contamination ##' @param b mean of "outliers" gendata <- function(n,p, eps=0, b=10) { if(missing(n) || missing(p)) stop("Please specify (n,p)") if(!is.numeric(eps) || length(eps) != 1 || eps < 0 || eps >= 0.5) stop("eps must be in [0,0.5)") X <- matrix(rnorm(n*p), n, p) nbad <- as.integer(eps * n) if(nbad > 0) { b <- rep(b, length = p) # recycle to p-vector ## = E[.] of bad obs. 
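## (Editorial illustration, not part of the original source:) a hypothetical
## call gendata(n = 100, p = 5, eps = 0.2, b = 10) shifts nbad = 20 randomly
## chosen rows of the N_5(0, I) sample by (10,...,10); their indices are the
## 'xind' component drawn just below and returned together with X.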
xind <- sample(n,nbad) X[xind,] <- X[xind, , drop=FALSE] + rep(b, each=nbad) } list(X=X, xind=if(nbad > 0) xind) } ##' Repeated calls to different MCD algorithms for timing purposes *only* repMCD <- function(x, nrep = 1, method = "FASTMCD") { stopifnot(length(nrep) == 1, nrep >= 1) switch(method, "FASTMCD" = replicate(nrep, covMcd(x)), "bestMCD" = replicate(nrep, covMcd(x, nsamp= "best")), "exactMCD" = replicate(nrep, covMcd(x, nsamp= "exact")), "DetMCD" = replicate(nrep, covMcd(x, nsamp="deterministic")), "MASS.best" = replicate(nrep, MASS::cov.mcd(x)),# uses nsamp = "best" ==> up to 5000 ## rrcov.control()$nsamp == 500 : "MASS.500" = replicate(nrep, MASS::cov.mcd(x, nsamp = 500)), ## otherwise: stop(gettextf("Method '%s' not yet implemented", method))) } repMCD.meths <- function() { switch.expr <- body(repMCD)[[3]] m <- names(switch.expr) m[m != ""] } if(FALSE) repMCD.meths() ## [1] "FASTMCD" "bestMCD" "DetMCD" "MASS.best" "MASS.500" ##' calls gendata(), repMCD() dogen <- function(nrep=1, eps=0.49, method = repMCD.meths(), ## "FASTMCD" is first p.set = c(2, 5, 10, 20, 30), n.set = c(100, 500, 1000, 10000, 50000), n.p.ratio = 5, seed = 1234) { domcd <- function(x, nrep=1){ ## system.time() *does* gc() xtime <- system.time(repMCD(x, nrep, method))[1]/nrep cat(sprintf("%6d %3d %12.2f\n", dim(x)[1], dim(x)[2], xtime)) xtime } set.seed(seed) method <- match.arg(method) mkL <- function(ch,m) paste(ch,m,sep="=") ans <- matrix(NA, length(n.set), length(p.set), dimnames = list(mkL("n",n.set), mkL("p",p.set))) cat(sprintf("Method: %-12s; nrep = %d\n", method, nrep), "------------------------------\n", " n p Time\n", "=======================\n", sep="") for(n in n.set) { n. <- mkL("n",n) for(p in p.set) { if(n.p.ratio * p <= n) { xx <- gendata(n, p, eps) ans[n., mkL("p",p)] <- domcd(xx$X, nrep) } } } cat("=======================\n") cat(sprintf("Total time: %11.2f\n", nrep * sum(ans, na.rm=TRUE))) structure(ans, nrep = nrep, method=method) }## {dogen} ###' ------------------ These can only be used with rrcov :: CovMcd() -------------- docheck <- function(n, p, eps, ...) { xx <- gendata(n,p,eps) mcd <- CovMcd(xx$X, ...) check(mcd, xx$xind) } ##' check if mcd is robust w.r.t xind, i.e. 
check how many of xind ##' did not get zero weight check <- function(mcd, xind){ mymatch <- xind %in% which(mcd@wt == 0) length(xind) - sum(mymatch) } robustbase/inst/xtraR/test_LTS.R0000644000176200001440000001033311757675047016347 0ustar liggesusers#### Utility functions for testing ltsReg() #### -------------------------------------- ../tests/tlts.R repLTS <- function(form, data, nrep = 1, method = c("FASTLTS","MASS")) { if(method == "MASS") ## MASS::lqs(x,y,control=list(psamp = NA, nsamp= "best", adjust= FALSE)) for(i in 1:nrep) MASS::lqs(form, data = data, method = "lts") else ## set mcd=FALSE - we want to time only the LTS algorithm for(i in 1:nrep) ltsReg(form, data = data, mcd = FALSE) } doLTSdata <- function(nrep = 1, time = nrep >= 3, short = time, full = !short, method = c("FASTLTS", "MASS")) { ##@bdescr ## Test the function ltsReg() on the literature datasets: ## ## Call ltsReg() for "all" regression datasets available in robustbase ## and print: ## - execution time (if time) ## - objective function ## - best subsample found (if not short) ## - outliers identified (with cutoff 0.975) (if not short) ## - estimated coeficients and scale (if full) ## ##@edescr ## ##@in nrep : [integer] number of repetitions to use for estimating the ## (average) execution time ##@in time : [boolean] whether to evaluate the execution time ##@in short : [boolean] whether to do short output (i.e. only the ## objective function value). If short == FALSE, ## the best subsample and the identified outliers are ## printed. See also the parameter full below ##@in full : [boolean] whether to print the estimated coeficients and scale ##@in method : [character] select a method: one of (FASTLTS, MASS) dolts <- function(form, dname, dataset, nrep = 1) { if(missing(dataset)) { data(list = dname) dataset <- get(dname) } else if(missing(dname)) dname <- deparse(substitute(dataset)) environment(form) <- environment() ## !?! x <- model.matrix(form, model.frame(form, data = dataset)) dx <- dim(x) - 0:1 # not counting intercept if(method == "MASS") { lts <- MASS::lqs(form, data = dataset, method = "lts") quan <- (dx[1] + (dx[2] + 1) + 1)/2 #default: (n+p+1)/2 } else { lts <- ltsReg(form, data = dataset, mcd = FALSE) quan <- lts$quan } xres <- sprintf("%*s %3d %3d %3d %12.6f", lname, dname, dx[1], dx[2], as.integer(quan), lts$crit) if(time) { xtime <- system.time(repLTS(form, data = dataset, nrep, method))[1] xres <- sprintf("%s %10.1f", xres, 1000 * xtime / nrep) } cat(xres, "\n") if(!short) { cat("Best subsample: \n") print(lts$best) ibad <- which(lts$lts.wt == 0) names(ibad) <- NULL nbad <- length(ibad) cat("Outliers: ",nbad,"\n") if(nbad > 0) print(ibad) if(full) { cat("-------------\n") print(lts) print(summary(lts)) } cat("--------------------------------------------------------\n") } } method <- match.arg(method) data(heart) data(starsCYG) data(phosphor) data(stackloss) data(coleman) data(salinity) data(aircraft) data(delivery) data(wood) data(hbk) cll <- sys.call() cat("\nCall: ", deparse(substitute(cll)),"\n") cat("========================================================\n") cat("Data Set n p Half obj Time [ms]\n") cat("========================================================\n") ## 1 3 5 7 9.1 3 5 7 9. 123 123 lname <- 20 ## --^ dolts(clength ~ . , "heart", nrep = nrep) dolts(log.light ~ log.Te , "starsCYG", nrep = nrep) dolts(plant ~ . , "phosphor", nrep = nrep) dolts(stack.loss ~ . , "stackloss", nrep = nrep) dolts(Y ~ . , "coleman", nrep = nrep) dolts(Y ~ . , "salinity") dolts(Y ~ . 
, "aircraft") dolts(delTime ~ . , "delivery") dolts(y ~ . , "wood", nrep = nrep) dolts(Y ~ . , "hbk", nrep = nrep) cat("========================================================\n") } robustbase/inst/xtraR/m-s_fns.R0000644000176200001440000001360714437610457016207 0ustar liggesusers#### Testing M-S estimator --- self-contained utility functions --- #### ## Exercised from ../../tests/m-s-estimator.R ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ## Test subsampling algorithm m_s_subsample <- function(x1, x2, y, control, orthogonalize=TRUE) { storage.mode(x1) <- "double" storage.mode(x2) <- "double" storage.mode(y) <- "double" z <- .C(robustbase:::R_lmrob_M_S, x1, x2, y, res=double(length(y)), n=length(y), p1=NCOL(x1), p2=NCOL(x2), nResample=as.integer(control$nResample), max_it_scale=as.integer(control$maxit.scale), scale=double(1), b1=double(NCOL(x1)), b2=double(NCOL(x2)), tuning_chi=as.double(control$tuning.chi), ipsi=.psi2ipsi(control$psi), bb=as.double(control$bb), K_m_s=as.integer(control$k.m_s), max_k=as.integer(control$k.max), rel_tol=as.double(control$rel.tol), inv_tol=as.double(control$solve.tol), scale_tol=as.double(control$scale.tol), zero_tol=as.double(control$zero.tol), converged=FALSE, trace_lev=as.integer(control$trace.lev), orthogonalize=as.logical(orthogonalize), subsample=TRUE, descent=FALSE, # and hence no 'convergence' here .. mts = 0L, ss = 1L) z[c("b1", "b2", "scale")] } ## Test descent algorithm m_s_descent <- function(x1, x2, y, control, b1, b2, scale) { storage.mode(x1) <- "double" storage.mode(x2) <- "double" storage.mode(y) <- "double" z <- .C(robustbase:::R_lmrob_M_S, X1=x1, X2=x2, y=y, res=double(length(y)), n=length(y), p1=NCOL(x1), p2=NCOL(x2), nResample=as.integer(control$nResample), max_it_scale=as.integer(control$maxit.scale), scale=as.double(scale), b1=as.double(b1), b2=as.double(b2), tuning_chi=as.double(control$tuning.chi), ipsi=.psi2ipsi(control$psi), bb=as.double(control$bb), K_m_s=as.integer(control$k.m_s), max_k=as.integer(control$k.max), rel_tol=as.double(control$rel.tol), inv_tol=as.double(control$solve.tol), scale_tol=as.double(control$scale.tol), zero_tol=as.double(control$zero.tol), converged=logical(1), trace_lev=as.integer(control$trace.lev), orthogonalize=FALSE, subsample=FALSE, descent=TRUE, ## ----- mts = 0L, ss = 1L) z[c("b1", "b2", "scale", "res", "converged")] } find_scale <- function(r, s0, n, p, control) { c.chi <- robustbase:::.psi.conv.cc(control$psi, control$tuning.chi) b <- .C(robustbase:::R_lmrob_S, x = double(1), y = as.double(r), n = as.integer(n), p = as.integer(p), nResample = 0L, # <- only.scale=TRUE , now in lmrob.S() scale = as.double(s0), coefficients = double(p), as.double(c.chi), .psi2ipsi(control$psi), as.double(control$bb), best_r = 0L, groups = 0L, n.group = 0L, k.fast.s = 0L, k.iter = 0L, maxit.scale = as.integer(control$maxit.scale), refine.tol = as.double(control$refine.tol), inv.tol = as.double(control$solve.tol), scale_tol=as.double(control$scale.tol), zero_tol=as.double(control$zero.tol), converged = logical(1), trace.lev = 0L, mts = 0L, ss = 1L, fast.s.large.n = as.integer(n+1) )[c("coefficients", "scale", "k.iter", "converged")] b$scale } ## m_s_descent()--R-only--version : m_s_descent_Ronly <- function(x1, x2, y, control, b1, b2, scale) { stopifnot(is.list(control), is.numeric(control$k.max)) n <- length(y) p1 <- ncol(x1) p2 <- ncol(x2) p <- p1+p2 t2 <- b2 t1 <- b1 rs <- drop(y - x1 %*% b1 - x2 %*% b2) sc <- scale ## do refinement steps ## do maximally control$k.max iterations ## stop if converged ## stop after k.fast.m_s 
step of no improvement if (control$trace.lev > 4) cat("scale:", scale, "\n") if (control$trace.lev > 4) cat("res:", rs, "\n") n.imp <- nnoimprovement <- nref <- 0L; conv <- FALSE while((nref <- nref + 1) <= control$k.max && !conv && nnoimprovement < control$k.m_s) { ## STEP 1: UPDATE B2 y.tilde <- y - x1 %*% t1 w <- Mwgt(rs / sc, control$tuning.chi, control$psi) if (control$trace.lev > 4) cat("w:", w, "\n") z2 <- lm.wfit(x2, y.tilde, w) t2 <- z2$coef if (control$trace.lev > 4) cat("t2:", t2, "\n") rs <- y - x2 %*% t2 ## STEP 2: OBTAIN M-ESTIMATE OF B1 z1 <- lmrob.lar(x1, rs, control) t1 <- z1$coef if (control$trace.lev > 4) cat("t1:", t1, "\n") rs <- z1$resid ## STEP 3: COMPUTE THE SCALE ESTIMATE sc <- find_scale(rs, sc, n, p, control) if (control$trace.lev > 4) cat("sc:", sc, "\n") ## STEP 4: CHECK FOR CONVERGENCE ##... FIXME ## STEP 5: UPDATE BEST FIT if (sc < scale) { scale <- sc b1 <- t1 b2 <- t2 nnoimprovement <- 0L n.imp <- n.imp + 1L } else nnoimprovement <- nnoimprovement + 1L } ## STEP 6: FINISH if (nref == control$k.max) warning("M-S estimate: maximum number of refinement steps reached.") ## if we'd really check for convergence above : ## if (nnoimprovement == control$k.m_s) ## warning("M-S estimate: maximum number of no-improvements reached.") list(b1=b1, b2=b2, scale=scale, res=rs, nref=nref, n.improve = n.imp)#, converged=conv, nnoimprovement=nnoimprovement) } robustbase/inst/xtraR/platform-sessionInfo.R0000644000176200001440000001103514531134457020753 0ustar liggesusers## <---> sync with ~/R/Pkgs/CLA/inst/xtraR/platform-sessionInfo.R ## ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ##' return 'x' unless it is NULL where you'd use 'orElse' `%||%` <- function(x, orElse) if(!is.null(x)) x else orElse ##' not %in% : `%nin%` <- function (x, table) is.na(match(x, table)) ##' Derive more sessionInfo() like information, notably about BLAS, LAPACK, arithmetic, etc moreSessionInfo <- function(print. = FALSE) { .M <- .Machine if(print.) str(.M[grep("^sizeof", names(.M))]) ## differentiate long-double.. b64 <- .M$sizeof.pointer == 8 onWindows <- .Platform$OS.type == "windows" ## Do we have 64bit but no-long-double ? arch <- Sys.info()[["machine"]] b64nLD <- (b64 && .M$sizeof.longdouble != 16) ##b64nLD <- (arch == "x86_64" && .M$sizeof.longdouble != 16) if(b64nLD) arch <- paste0(arch, "--no-long-double") if(print.) cat(sprintf("%d bit platform type '%s' ==> onWindows: %s\narch: %s\n", if(b64) 64 else 32, .Platform$OS.type, onWindows, arch)) sInfo <- sessionInfo() if(!exists("osVersion")) osVersion <- sInfo$running if(print.) cat("osVersion (0):", osVersion, "\n") if(is.null(osVersion)) osVersion <- "Fedora" # very last resort if(!length(BLAS.is.LAPACK <- sInfo$BLAS == sInfo$LAPACK)) BLAS.is.LAPACK <- NA # R versions <= 3.3.x ## A cheap check (that works on KH's debian-gcc setup, 2019-05): if(!length(BLAS.is.openBLAS <- grepl("openblas", sInfo$BLAS, ignore.case=TRUE))) BLAS.is.openBLAS <- NA if(!length(Lapack.is.openBLAS <- grepl("openblas", sInfo$LAPACK, ignore.case=TRUE))) Lapack.is.openBLAS <- NA if(print.) cat("osVersion:", sQuote(osVersion), "\n" ,'+ BLAS "is" Lapack:', BLAS.is.LAPACK , '| BLAS=OpenBLAS:', BLAS.is.openBLAS , '| Lapack=OpenBLAS:', Lapack.is.openBLAS , "\n") ## NB: sessionInfo() really gets these: if(getRversion() >= "3.4") local({ is.BLAS.LAPACK <- exists("La_library", mode="function") && ## R 3.4.0 and newer identical(La_library(), extSoftVersion()[["BLAS"]]) stopifnot(isTRUE(is.BLAS.LAPACK == BLAS.is.LAPACK)) }) ## also TRUE for Windows [since both are "" !!] 
## Find out if we are running Micrsoft R Open is.MS.Ropen <- { file.exists(Rpr <- file.path(R.home("etc"), "Rprofile.site")) && length(lnsRpr <- readLines(Rpr)) && ## length(grep("[Mm]icrosoft", lnsRpr)) > 3 # MRO 3.5.1 has '20' times "[Mm]icrosoft" length(grep("Microsoft R Open", lnsRpr, fixed=TRUE, value=TRUE)) > 0 ## MRO 3.5.1 has it twice } if(print. && is.MS.Ropen) cat("We are running 'Microsoft R Open'\n") ## I'd really would want to know which of (OpenBLAS | ATLAS | MKL | R's own BLAS+LAPACK) ## ## Next best, I'd really like ## ## strictR <- we_are_using_Rs_own_BLAS_and_Lapack() [ ==> BLAS != Lapack ] ## ## Actually the following aims to be equivalent to {and *is* for MM on Fedora, 2019-03} ## strictR <- !(using ATLAS || OpenBLAS || MKL ) if(TRUE) { strictR <- !BLAS.is.LAPACK && !is.MS.Ropen && !BLAS.is.openBLAS && !Lapack.is.openBLAS && TRUE } else { ## workaround: strictR <- print(Sys.info()[["user"]]) == "maechler"# actually ## but not when testing with /usr/bin/R [OpenBLAS on Fedora!] (as "maechler"): if(strictR && substr(osVersion, 1,6) == "Fedora" && R.home() == "/usr/lib64/R") strictR <- FALSE } if(print.) cat("strictR:", strictR, "\n") structure(class = "moreSessionInfo", list( arch = arch , b64 = b64 # 64-bit (:<==> sizeof.pointer == 8 ) , b64nLD = b64nLD # 64-bit, but --no-long-double (sizeof.longdouble != 16) , BLAS.is.LAPACK = BLAS.is.LAPACK , BLAS.is.openBLAS = BLAS.is.openBLAS , Lapack.is.openBLAS = Lapack.is.openBLAS , is.MS.Ropen = is.MS.Ropen # is R a version of Microsoft R Open (==> MKL-linked BLAS) , onWindows = onWindows , osVersion = osVersion , strictR = strictR # are BLAS & Lapack from R's source, and "otherwise known safe platform" )) } if(getRversion() < "3.4.0") withAutoprint <- function(x, ...) x if(isTRUE(getOption("chk.moreSessionInfo"))) withAutoprint({ ms1 <- moreSessionInfo() ms. <- moreSessionInfo(print. = TRUE) stopifnot(is.list(ms1), length(ms1) > 1, identical(ms1, ms.) ) }) robustbase/inst/xtraR/subsample-fns.R0000644000176200001440000001273212271657124017416 0ustar liggesusers### Mainly used for source package checking in ../../tests/subsample.R ### however, available (for reproducible research, confirmation) as ### part of the robustbase package. ## R version of LU decomposition in subsample() in lmrob.c ## Modified from Golub G. H., Van Loan, C. 
F., Matrix Computations, 3rd edition LU.gaxpy <- function(A, pivot=TRUE, tol = 1e-7, verbose = FALSE) { A <- as.matrix(A) ## m x n matrix, n >= m >= 1 stopifnot((n <- ncol(A)) >= (m <- nrow(A)), m >= 1) ## precondition: cf0 <- max(abs(A)) A <- A / cf0 v <- double(m) ## work matrix ## these matrices will contain the results L <- diag(m) U <- matrix(0, m, m) p <- integer(m-1) ## pivots idc <- 1L:n ## which columns of A are used idr <- 1L:m ## how rows of A are permuted for(j in 1L:m) { sing <- TRUE while(sing) { if (length(idc) < j) break if (j == 1L) { v[j:m] <- A[idr[j:m], idc[j]] } else { rows <- 1L:(j-1L) z <- forwardsolve(L[rows, rows, drop=FALSE], A[idr[rows], idc[j]]) U[rows, j] <- z v[j:m] <- A[idr[j:m], idc[j]] - L[j:m, rows, drop=FALSE] %*% z if(verbose) cat("Step", j, "z=", sapply(z, function(x) sprintf("%.15f", x)), "\n v=", v, "\n") } if (j < m) { mu <- j mu <- if (pivot) which.max(abs(v[j:m])) + j - 1L else j if(verbose) ## debug possumDiv example cat(sprintf("R-Step: %i: ", j), round(abs(v[j:m]), 6), "\n", mu, v[mu], "\n") if (abs(v[mu]) >= tol) { ## singular: can stop here already p[j] <- mu if (pivot) { tmp <- v[j]; v[j] <- v[mu]; v[mu] <- tmp tmp <- idr[j]; idr[j] <- idr[mu]; idr[mu] <- tmp } L[(j+1L):m, j] <- v[(j+1L):m]/v[j] if (pivot && j > 1) { ## swap rows L[j,] <-> L[mu,] tmp <- L[j, rows]; L[j, rows] <- L[mu, rows]; L[mu, rows] <- tmp } } } U[j, j] <- v[j] if (abs(v[j]) < tol) { if(verbose) cat("* singularity detected in step ", j, "; candidate ", idc[j],"\n") idc <- idc[-j] } else sing <- FALSE }## {while} }## {for} list(L = L, U = U * cf0, p = p, idc = idc[1L:m], singular = sing) } Rsubsample <- function(x, y, mts=0, tolInverse = 1e-7) { if(!is.matrix(x)) x <- as.matrix(x) stopifnot((n <- length(y)) == nrow(x)) p <- ncol(x) storage.mode(x) <- "double" .C(robustbase:::R_subsample, x=x, y=as.double(y), n=as.integer(n), m=as.integer(p), beta=double(p), ind_space=integer(n), idc = integer(n), ## elements 1:p: chosen subsample idr = integer(n), lu = matrix(double(1), p,p), v=double(p), pivot = integer(p-1), Dr=double(n), Dc=double(p), rowequ=integer(1), colequ=integer(1), status=integer(1), sample = FALSE, ## set this to TRUE for sampling mts = as.integer(mts), ss = as.integer(mts == 0), tolinv = as.double(tolInverse), solve = TRUE) } ##' Simple version, just checking (non)singularity conformance tstSubsampleSing <- function(X, y) { lX <- X[sample(nrow(X)), ] ## C version zc <- Rsubsample(lX, y) ## R version zR <- LU.gaxpy(t(lX)) if (as.logical(zc$status)) { ## singularity in C detected if (!zR$singular) stop("singularity in C but not in R") } else { ## no singularity detected if (zR$singular) stop("singularity in R but not in C") } zR$singular } ##' Sophisticated version tstSubsample <- function(x, y=rnorm(n), compareMatrix = TRUE, lu.tol = 1e-7, lu.verbose=FALSE, tolInverse = lu.tol, eq.tol = .Machine$double.eps^0.5) { x0 <- x <- as.matrix(x) n <- nrow(x) p <- ncol(x) if(p <= 1) stop("wrong 'x': need at least two columns for these tests") stopifnot(length(y) == n) z <- Rsubsample(x, y, tolInverse=tolInverse) ## ---------- ## convert idc, idr and p to 1-based indexing: idr <- z$idr + 1L idc <- z$idc[1:p] + 1L pivot <- z$pivot + 1L ## get L and U L <- U <- LU <- matrix(z$lu, p, p) L[upper.tri(L, diag=TRUE)] <- 0 diag(L) <- 1 U[lower.tri(U, diag=FALSE)] <- 0 ## test solved parameter if (z$status == 0) { stopifnot(all.equal(z$beta, unname(solve(x[idc, ], y[idc])), tol=eq.tol)) } if (z$rowequ) x <- diag(z$Dr) %*% x if (z$colequ) x <- x %*% diag(z$Dc) if (z$rowequ || 
z$colequ) cat(sprintf("kappa before equilibration = %g, after = %g\n", kappa(x0), kappa(x))) LU. <- LU.gaxpy(t(x), tol=lu.tol, verbose=lu.verbose) ## -------- if (!isTRUE(all.equal(LU.$p, pivot, tolerance=0))) { cat("LU.gaxpy() and Rsubsample() have different pivots:\n") print(LU.$p) print(pivot) cat(" ... are different at indices:\n ") print(which(LU.$p != pivot)) } else { stopifnot(all.equal(LU.$L, L, tol=eq.tol), all.equal(LU.$U, U, tol=eq.tol), LU.$p == pivot, ## only compare the indices selected before stopping LU.$idc[seq_along(LU.$p)] == idc[seq_along(pivot)]) } ## compare with Matrix result if (compareMatrix && z$status == 0) { xsub <- x[idc, ] stopifnot(require("Matrix")) tmp <- lu(t(xsub)) ## idx <- upper.tri(xsub, diag=TRUE) stopifnot(all.equal(tmp@x, as.vector(z$lu), tol=eq.tol)) } invisible(z) } robustbase/inst/xtraR/lmrob-trace_lev.R0000644000176200001440000000452212154325275017711 0ustar liggesusers## testing trace_lev settings require(robustbase) ## fit a model with categorical, continuous and mixed variables selDays <- c( ## days ranked according to number of outliers: "403", "407", "693", "405", "396", "453", "461", ## "476", "678", "730", "380", "406", "421", "441" ## ,"442", "454", "462", "472", "480", "488" ## some other days ## "712", "503", "666", "616", "591", "552", "624", "522", "509", "388", "606", "580", "573", "602", "686", "476", "708", "600", "567") contr <- list(julday=contr.sum) ## using this seed and the default configuration options, ## the fast_S algorithm stops with some "local exact fits", ## i.e., coefficients with std. error 0. set.seed(711) lseed <- .Random.seed r1 <- lmrob(LNOx ~ (LNOxEm + sqrtWS)*julday, NOxEmissions, julday %in% selDays, contrasts=contr, seed=lseed, max.it=10000, nResample=5, trace.lev=1) ## change best.r.s to 11 and it works properly ## (for this seed at least) res <- update(r1, k.max=10000, best.r.s = 3, nResample=1000, trace.lev=2) ##### ## fast_S (non-large strategy) ## test non-convergence warnings / trace output: res <- update(r1, max.it = 1) res <- update(r1, k.max = 1) ## test trace_levs: res <- update(r1, trace.lev = 0) res <- update(r1, trace.lev = 1) res <- update(r1, trace.lev = 2) res <- update(r1, trace.lev = 3) res <- update(r1, trace.lev = 4) res <- update(r1, trace.lev = 5) ##### ## M-S estimator r2 <- update(r1, init="M-S", split.type="fi", subsampling="simple", mts=10000) ## test non-convergence warnings / trace output: res <- update(r2, max.it = 1) res <- update(r2, k.m_s = 1) ## does not converge anyway ## test trace_levs: res <- update(r2, trace.lev = 0) res <- update(r2, trace.lev = 1) res <- update(r2, trace.lev = 2) res <- update(r2, trace.lev = 3) res <- update(r2, trace.lev = 4) ## this produces _a_lot_ of output: ## res <- update(r2, trace.lev = 5) ##### ## fast_S (large-n strategy) ## need to use continuous only design r3 <- update(r1, LNOx ~ LNOxEm + sqrtWS, subset=NULL, contrasts=NULL) ## test non-convergence warnings / trace output: res <- update(r3, max.it = 1) res <- update(r3, k.max = 1) ## test trace_levs: res <- update(r3, trace.lev = 0) res <- update(r3, trace.lev = 1) res <- update(r3, trace.lev = 2) res <- update(r3, trace.lev = 3) res <- update(r3, trace.lev = 4) ## (there is no level 5) robustbase/inst/xtraR/mcnaive.R0000644000176200001440000000443413325654361016262 0ustar liggesusersmcNaive <- function (x, method = c("h.use", "simple"), low = FALSE, high = FALSE) { ## Purpose: naive implementation of mc() ## ---------------------------------------------- ## (low, high) - as in mad() - 
for choosing the (lo/hi)-median with even n ## ## Author: Martin Maechler, Date: 21 Jul 2007 n <- length(x) if(n <= 2) return(0) x <- sort(x) stopifnot(is.finite(m <- median(x)))# <==> no NAs in x[] x <- x - m n1 <- length(xL <- x[x <= 0]) # both contain all (if any) median values n2 <- length(xR <- x[x >= 0]) n.n <- as.double(n1)*n2 if(n.n > 1e8)# 1e8 < .Machine$integer.max stop("\"simple\" method not sensible here: would need too much memory: n.n=", n.n) Mmedian <- { if ((low || high) && n.n %% 2 == 0) { if (low && high) stop("'low' and 'high' cannot be both TRUE") N2 <- n.n %/% 2 + as.integer(high) function(x) sort(x, partial = N2)[N2] } else median } method <- match.arg(method) switch(method, "simple" = { r <- outer(xR, xL, "+") / outer(xR, xL, "-") r[is.na(r)] <- 0 # simple -- ## ok only when the median-observations are "in the middle", ## e.g. *not* ok for c(-5, -1, 0, 0, 0, 1) Mmedian(r) }, "h.use" = { # use h() = h_kern() .. treating obs. coinciding with median k <- sum(x == 0) ## the number of obs coinciding with median() irep <- rep.int(n1, n2) if(k > 0) { ## have some obs. == median ( == 0) h <- function(xl,xr, i,j) { ## must parallelize (!) eq <- xl == xr r <- xl xr <- xr[!eq] xl <- xl[!eq] r [eq] <- sign(i[eq]+j[eq]-1-k) r[!eq] <- (xr + xl)/(xr - xl) r } i <- integer(n1) j <- integer(n2) i[(n1-k+1):n1] <- j[1:k] <- 1:k i <- rep(i, times = n2) j <- rep(j, irep) } else { ## k == 0: h <- function(xl,xr, i,j) (xr + xl)/(xr - xl) i <- j <- NULL } ## build outer(xL, xR, FUN= h) manually, such that ## we can pass (i,j) properly : Mmedian(h(xl = rep(xL, times = n2), xr = rep(xR, irep), i, j)) }) } robustbase/inst/xtraR/ex-funs.R0000644000176200001440000000510314004270422016202 0ustar liggesusers## These two fail when length(x) == 0 {but are short and nice otherwise} himed <- function(x) { n2 <- 1 + length(x) %/% 2; sort(x, partial = n2)[n2] } lomed <- function(x) { n2 <- (1+ length(x))%/% 2; sort(x, partial = n2)[n2] } ## From package 'limma' : ~/R/BioCore/madman/Rpacks/limma/R/weightedmedian.R weighted.median <- function (x, w, na.rm = FALSE, low = FALSE, high = FALSE) { ## Weighted median ## Gordon Smyth ## 30 June 2005 ## improved by MMaechler: 'low' and 'high' as with 'mad()'; 21 Nov 2005 if (missing(w)) w <- rep.int(1, length(x)) else { if(length(w) != length(x)) stop("'x' and 'w' must have the same length") if(any(is.na(w))) stop("NA weights not allowed") ## Note that sometimes the estimate would be well-defined even ## with some NA weights! if(any(w < 0)) stop("Negative weights not allowed") if(is.integer(w)) w <- as.numeric(w) } if(any(nax <- is.na(x))) { if(na.rm) { w <- w[i <- !nax] x <- x[i] } else return(NA) } if(all(w == 0)) { warning("All weights are zero") return(NA) } ## otherwise, have sum(w) > 0 if(is.unsorted(x)) { o <- order(x) x <- x[o] w <- w[o] } p <- cumsum(w)/sum(w) k <- sum(p < 0.5) + 1:1 if(p[k] > 0.5 || low) x[k] else if(high) x[k+1] else (x[k] + x[k+1])/2 } ##' pure-R naive version of Qn() ==> slow and large memory for large n ##' >>>>>> keep in sync with ../../man/Qn.Rd 's \examples{} <<<<<< Qn0R <- function(x, k = choose(n %/% 2 + 1, 2)) { n <- length(x <- sort(x)) if(n == 0) return(NA) else if(n == 1) return(0.) stopifnot(is.numeric(k), k == as.integer(k), 1 <= k, k <= n*(n-1)/2) m <- outer(x,x,"-")# abs not needed as x[] is sorted sort(m[lower.tri(m)], partial = k)[k] } ##' pure-R all-k version of Qn() "all-k" is O(n^2) cannot use large n QnAll.k <- function(x) { n <- length(x <- sort(x)) if(n == 0) return(NA) else if(n == 1) return(0.) 
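## (Editorial illustration, not part of the original source:) e.g.,
##   QnAll.k(c(1, 3, 7))   ## -> c(2, 4, 6), all sorted pairwise differences
## whereas Qn0R() above returns only the k-th of them; the outer() call below
## materialises an n x n matrix, hence the O(n^2) memory remark above.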
m <- outer(x,x, `-`) sort(m[lower.tri(m)]) } Sn0R <- function(x) { ## `R only' naive version of Sn() ==> slow and large memory for large n if((n <- length(x)) == 0) return(NA) else if(n == 1) return(0.) lomed(apply(abs(outer(x,x,"-")), 2, himed)) } ## Tol = 2e-7 : higher than usual is.all.equal <- function(x,y, tol = 2e-7, scale = 1) { ## scale = 1: ensures 'absolute error' in all cases ## scale = x: ensures `relative error' in all cases is.logical(r <- all.equal(x,y, tolerance = tol, scale = scale)) && r } robustbase/inst/include/0000755000176200001440000000000014555212560015030 5ustar liggesusersrobustbase/inst/include/robustbase.h0000644000176200001440000000541113177447140017356 0ustar liggesusers#ifndef __robustbase_h__ #define __robustbase_h__ #include // for SEXP #include #ifdef __cplusplus extern "C" { #endif SEXP C_psifun(SEXP x_, SEXP c_, SEXP ipsi_, SEXP deriv_) { static SEXP(*fun)(SEXP, SEXP, SEXP, SEXP) = NULL; if (fun == NULL) fun = (SEXP(*)(SEXP, SEXP, SEXP, SEXP)) R_GetCCallable("robustbase","R_psifun"); return fun(x_, c_, ipsi_, deriv_); } SEXP C_chifun(SEXP x_, SEXP c_, SEXP ipsi_, SEXP deriv_) { static SEXP(*fun)(SEXP, SEXP, SEXP, SEXP) = NULL; if (fun == NULL) fun = (SEXP(*)(SEXP, SEXP, SEXP, SEXP)) R_GetCCallable("robustbase","R_chifun"); return fun(x_, c_, ipsi_, deriv_); } SEXP C_wgtfun(SEXP x_, SEXP c_, SEXP ipsi_) { static SEXP(*fun)(SEXP, SEXP, SEXP) = NULL; if (fun == NULL) fun = (SEXP(*)(SEXP, SEXP, SEXP)) R_GetCCallable("robustbase","R_wgtfun"); return fun(x_, c_, ipsi_); } double C_rho(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase","rho"); return fun(x_, c_, ipsi_); } double C_psi(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase","psi"); return fun(x_, c_, ipsi_); } double C_psip(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase","psip"); return fun(x_, c_, ipsi_); } double C_psi2(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase","psi2"); return fun(x_, c_, ipsi_); } double C_wgt(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase","wgt"); return fun(x_, c_, ipsi_); } double C_rho_inf(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase", "rho_inf"); return fun(x_, c_, ipsi_); } double C_normcnst(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase", "normcnst"); return fun(x_, c_, ipsi_); } #ifdef __cplusplus } #endif #endif robustbase/inst/CITATION0000644000176200001440000000340614410263765014550 0ustar liggesuserscitHeader("To cite robustbase in publications use:") if(!exists("meta") || is.null(meta)) meta <- packageDescription("robustbase") year <- sub("-.*", "", meta$Date) note <- sprintf("R 
package version %s", meta$Version) fullTitle <- paste(meta$Package, meta$Title, sep = ": ") authors <- eval(parse(text = meta$`Authors@R`)) aut.gf <- lapply(unclass(authors), `[`, c("given", "family")) authNms <- sapply(aut.gf, paste, collapse = " ") collNms <- function(nms) paste(paste(nms[-length(nms)], collapse = ", "), "and", nms[length(nms)]) bibentry(bibtype = "Manual", title = fullTitle, author = authors, year = year, note = note, url = "http://robustbase.r-forge.r-project.org/", url = "http://CRAN.R-project.org/package=robustbase", textVersion = paste(collNms(authNms), sprintf("(%s).", year), fullTitle, paste(note, ".", sep = ""), "URL http://CRAN.R-project.org/package=robustbase"), header = paste("To cite package", sQuote(meta$Package), "in publications use:") ) bibentry(bibtype = "Article", title = "An Object-Oriented Framework for Robust Multivariate Analysis", author = c(as.person("Valentin Todorov"), as.person("Peter Filzmoser")), journal = "Journal of Statistical Software", year = "2009", volume = "32", number = "3", pages = "1--47", url = "https://www.jstatsoft.org/article/view/v032i03/", textVersion = paste("Valentin Todorov, Peter Filzmoser (2009).", "An Object-Oriented Framework for Robust Multivariate Analysis.", "Journal of Statistical Software, 32(3), 1-47.", "URL https://www.jstatsoft.org/article/view/v032i03/."), header = "To cite the multivariate class/methods framework use:" ) robustbase/inst/external/0000755000176200001440000000000014555212560015227 5ustar liggesusersrobustbase/inst/external/d1k27.rda0000644000176200001440000035650511762754374016600 0ustar liggesusersRDX2 X  d1k27@%~O"}@'R*o?R@(eO@&*8@&A(9-@(!6@$.2@&*A c @(}@&"S @) ^#@(3B¤@(¹M@'Ew@'hr!@&c^J8@'g@&8K@'/ @)| |[@$.2@'͛t@%Qm݃@'qpO4@'5inX@&g@*R`@&o@&OD)@(ŗN@&^lLYt@'2#@&gM3H@(JA @(8Go*Z@&>W@&ha@&_#cW@'-f@'M/^ @&e{z@'lnm@&NΚ,@(Mj@'PfB@'3cHk@&e+a@'|ϕ@)ܱF_@&6@'ZpU@&rT~@&}2f@((zc@'ƞFJ@( ~+U@(AH@)]Ww@%1y|ß@'th@&"}@'h73@&P@%HӮh^@(v'|E˼@'2<64@&Zqx@$b1Q@(āoh@'xuO7u@'W7@%?@&ha@(@(FFV@'^ @)yhH@&A(9-@' ǎk@'E1L@*_6@&"Y5@'G@&c^J8@'h73@(i_@'z$'@'E1L@'@@%wr(@'3]$@&r Ĝ@&*A c @)]x@#ef@(> @&^ N@&T ǎ@&8\g@&LB@'x*")@(v@&NΚ,@'H˒@'76g@'9K@&]ce@&Zqx@)&ݬ@'Jw@)o@'Gth@&90 )@'< oTK@'ѷX@&9 @$@'.y@'҈p@()QX(@'//t@&ЫNP@(ałU6@&D"@'7k;<@(\C@(L@#2i@&X@'X:S@'7@&`d@(tSM@'`N@&(@'c@'ٞ@(V@(*EM@(@4m9@& >l@'=@*~@&- @&!-w2@'FJ@&+@'_@@'n<`@'[\|ؘ@(F]c@'.y@'D Ri@&Zqx@&޳"@%$@% xF@(008@':@'䎊q@&~J@'T*0@(1;@&(@&ЫNP@&LB@& ǎk@'!bw@& 6P@&A(9-@(Iԕ*@(7@')A!la@( /@+.۵@&_ô@<6@&D@(O?R&W@&`d@)m ֶ@&fA@&A(9-@'FJ@&ۥS@'EwUq@(98R~@'X:S@'CfQm@'Цv @)F@(L/{J@( u"@'7k;<@) D @) @'xK@&x@'G@(X@'Ë@'Y/n@%fP@&ynX/E@&FIm@'P3@&*A c @'ܩiu@'ѷX@'eU.@%wN&@'5@&x@'+S@)AJM+,@(1 @)i>BZ@&9 @&`V. 
@%muO7v@'::э&@'N-b8<@'Q@(X@&*A c @'`V.@(\C@(v'|E˼@)b"@(e@'x*")@( @&LB@)1&y@& @'W7@'p*@(^ F@#ef@*ݕW@'W@'> @&X@&7@&Zqx@'28@(U;'@$.2@%HӮh^@(u"@'Bw\@(¹M@'/Y@&!-w2@'ޭՐ@'$s@'ߏG0@'@ =q@&NΚ,@&ha@&vݭ@'8Ck@)%@)͉/@&l@@'g @'ѷX@'@( /@'|ϕ@(NzVC@'28@'- @&P]@&^ N@&i}@&lsG@'EwUq@')A!la@(5H@%!Z@$$/@'2<64@' gw@&'!Wi@'c@(008@(K/V@'FJ@'[\|ؘ@&A(9-@%@'Bk΅4@&8K@&FIm@(R@({@)i>BZ@'D Ri@(s@&u^޾@&޳"@)6;%o@)Kq @'klC@(@*_@&6;%p@&^ N@&bM@&ha@'Ew@&WCcH@&Y@&o@&A(9-@&@gS@'28@&6;%p@'_@@'4@&P@&^lLYt@&ܜM@(AH@)fSP -@(#@'FJ@&?z:@%nY@'ϕ@' '76@&6;%p@&e+a@''(b@(]yg@(i;yS@&c^J8@'|ϕ@'.y@&>W@&Zqx@%Ԇ-@&OD)@'ϕ@#TEJ@';D=G@(ԊԿ@'B؃@(I^5?@*j?@&@'L@(v'|E˼@&^lLYt@&D@'W7@&(9-@#Ε*@&JL@%Ԇ-@'B؃@'@(TS@'҈p@%o@%+@'K@(lj@&z\|ؙ@)9XdE@'=@'7k;<@&E˼+@&5inX@'X:S@(}^_1@$xW@&lsG@)͉/@%0եb@'[ @(ӀP@'K@'āoh@' gw@)fWx@(I^5?@'g @(_ô@$@եb@&+@'x*")@$D<@'@ =q@&aQR@&?g@*|hs@'P@'om@(@%HӮh^@'27@&ЫNP@&ѢY@+],{ Z@&A c @(@&@&ha@'nCz@(ͳ|@'^ @&>B@&^ N@(եc@&>W@&@~@&@'FJ@%^"S@(ŗN@'.y@' ǎk@&FIm@%y k~@'N-b8<@'c @'EwUq@'-@&"S @%r@'L@'7@&i}@(O?R&W@'\(@')A!la@&@% $ @'^ @'N-b8<@&z\|ؙ@%$@&`d@&P]@&T ǎ@(@4m9@&]ce@'| (x@(Iԕ*@' @'r/Y@';D=G@(DZ1@'f3@&쪸@){J#9@&P]@'ߤ?@'>Y@(X@&sRC@'0h3@(-@&sRC@&D@'_@@%y k~@(6fffff@($')@(1;@'r ě@(}@(\C@&Zqx@(L/{J@)͉/@'4@'RwkQ@&@'X:S@'gwZ@'s-@$h<ϕ@&T ǎ@'Y/n@'Jw@(AH@&^lLYt@'>Y@(>p@%y k~@(L/{J@$.2@(tqq@(3B¤@'B؃@&P]@(SE @)e;K@'ɯ@'lnm@) 27@( /@'7k;<@%!Z@(1;@&ha@)#ᆘ5@'FJ@'c@)7ޓ@(NzVC@*/ M;@$.2@&wFz@'SE @#{`I@'FJ@'1@'7@'ce@%1y|ß@'ۋq @'W7@&rq6@%M@'ѷX@'>WS@&}w1@'wkP@(ioiD@'28@&@'GM3H,@'N-b8<@(UoiDg@&hb@%r@&c^J8@)3PH@&*A c @&lsG@&D@'PfW@'|ϕ@)'n@&Y@'q Eb@'n<`@&g@(eK@'aR~R@((zc@'@&ha@'7k;<@'S@&LB@&]ce@&u!S@&ha@'|ϕ@&P@&NΚ,@'#@'9K@&ZڹY@'ߏG0@'2<64@'FJ@'x*")@&ЫNP@'< oTK@'E1L@)H>BZc@(WE84@'Ë@&NΚ,@'>Y@'ZpU@&hW @'䎊q@'ߏG0@'< oTK@'p*@(ɯ@(Hh@)JE@''=["@& @&u@(%#M@&T ǎ@'҈p@%DSMj@&P]@#Ε*@'Bk΅4@'IA!la@%˒:)@*AJM+@(O?R&W@(rd2n@&A c @'ܩiu@%r@& `V. 
@&"&r@'ߏG0@&쪸@&*8@%HӮh^@).H@(+@&9 @'.y@)s")@(L/{J@(_*ZFs@'h73@'28@'@ =q@&- @&e{z@'c@(i_@&6;%p@'nCz@'5@&9 @&6@'z$'@(,<@&ɃQX(@&P]@&LB@(\C@&*A c @&X@'P3:~@'N-b8<@(UoiDg@'x*")@&ha@(i;yS@&!-w2@&j@(@4m9@( @& (@'B؃@''(b@&=`N@'RwkQ@&E˼+@)KCW@'hr!@''(b@(AH@&f3@( 6@%!Z@&gM3H@'Bk΅4@((zc@(AH@&E˼+@&P]@)F@'hH@(L/{J@&Zqx@'7@(O?R&W@)m8Y@( @(O?R&W@'5@&6;%p@'|ϕ@'h73@'tW}W@)*cP@#Rş@'āoh@#Rş@)Z'(b@'7@'wk@(1;@)fWx@&o]@'䎊q@&*A c @*EwU@(3B¤@(a@N@)*cP@*_Ë@'@ =q@(@'+S@' ǎk@'lK[@&ЫNP@(3H+@'c@&&`V.@(L/{J@'2<64@'- @&o@&Zqx@&)Dg8~@(v'|E˼@%w#d@)KCW@&>B@)*cP@'EwUq@(:!B@%I@(U=@'.y@(As@')A!la@&g@'jS|@%v5B@&j@)Ad2@'c@(a@O@*>LX@%!Z@&A(9-@(s@)>3@'-@'6/e@(K/V@&_I@(F]c@*UDR@&1߹8R@'hA_@'Ë@(v'|E˼@'[\|ؘ@'7k;<@$6}2f@'fWx@&*A c @&@%Б}kf@)l@(¹M@'҈p@&+@&*A c @&@'*o?R@&>"@'2<64@'N-b8<@'d@'d@%!Z@& k;=@'@ =q@(K/V@&P@'@ =q@&z\|ؙ@#L7@'&]cf@'d@'[\|ؘ@&u%F @'G@%0H@@(WE84@(ӀP@&6@'EwUq@' Fd@&N@'2#@%+@'ޭՐ@&|ßa@'%@%DSMj@&?g@&e{z@'2#@(L/{J@'[\|ؘ@):=B@%>< @&޳"@&Zqx@&9 @'-@&Y >@'76g@'C]@(o;dZ@'EwUq@&@N@'7k;<@'>-b9@("S@(c 4@%Pa@&ѢY@')A!la@'&]cf@#Ε*@'g @%1y|ß@%Jt}^_@*!$Jb#@&gM3H@(L/{J@'28@'Jw@'x*")@'Q@#;Jm3@'7k;<@)+jg@'L@'!5`@'E1L@'B؃@&h`@$h<ϕ@'IW@'2<64@&@(;@' qiC@%Y@'7k;<@'EwUq@&^ N@'ѷX@(^ F@'B؃@&YE@'x*")@'3cHk@&g@&H߹8@'2<64@%y k~@*ȊG@&x@&6@'7k;<@&$Jb$@'8Ck@(Ma@&ZڹY@'- @'_@@(L/{J@'_@@(#@'< oTK@%Ԇ-@("S@&sRC@'Q@' ǎk@'W7@*GK]d@(E@&P]@'z$'@&NΚ,@&6;%p@)͉/@' :@(&V@@)^)@'̸@'VWx- <(%VW2 JP1 2?/%Z)b 4 >'1eB 2$.")+T+$I+A(4/-T-(-+P=,**,O-)   K9=d>'+;#)9+A,#*'U@ %$(-O *&+#cA&$K5c51*!3&0+1$KxX '(=YY(G(7**`,DD)"-)& d # ><#K431;"<1E 5 2(#+?$?"e6) +:*3 0 Z(4,2$',<+(6+>5N%V"71 >+*1XJ#!H&2- 0c,2!,8+:<( 4!I @7 K30,&d?-316++$'6 ,#(Kb7(-" +*%*D_ @$ 4K-5A8.# 0,,% _($-Z<<,B'd0kd(+K!3Y0+9 +-,+6PK+*E0K4='K#-)-!-4'-)* U A*-7,79K2 -(b+!-.">P 9=+UY !K.?d# b!Y,!5Z(KT##"-d$P)dG)2'" "2+)3$)+ #,7 :$$> P .(<)20R3!L&dN J//#"&.3 I3J3d.2G-'d/<1J'K( '")H"8d(% #< =,'1+_/Ld,!'Y P0(= :#$?)!56L+2*":c2,>K d9S< 2*!=32L*5eK )+046122"P',-F2 .K"6/7221:@,$M$(2-G.-#-)(T.c2 +1Fdc2*) )5!A9.P%03d,1BL)-*$XF.+( 4&P"@53$#??@??@??@ @@@@@@?@?@@ ?@ ?@@?@ ??@?@?@@???@??@?@@?@@@@ ?@@@?@@??@@@?@ @@@?@?@???@@?@??@??@?@@??@@@??@@@@?@???@ ?@ @@@???@???@?@@@ ?@???@??@@@@ @@?@??@@???@??@ ?@??@@@@@ ?@@@??@ ???@ @@??@@@@?@?@ ?@?@@????@??@?@???????@@?@?@@@ ??@???@@@@?@?@@@?@@@@@@??@@@@?@@??@?@??@@??@??@@@?@@????@??@@@@@ ???@?@????@@@@@ @@@@@ @@??@?@???@?@@@?@?@@ @@ ????@@@??@@@@@@ @@ ?@@@?@ @@??@?@??@ @@@@@?@@???@@@??@@@ ?@@@??@?@?@??@???@@@@??@??@@@@@ ?@@@ ?@@???@??????@??@???@@ @ ???@?@??@@ @@@?@??@?@@@@@?@@???????@@ @ @@?@@?@@?@?@@??@?@@?@?@@?@??@?@?@@ ?@@?@@?@????@@@??@@????@?@???@@???@@?@@???@@?????@@@@@@@?@?@ ?@?@@?@?@??@@@?@@@@ @@?@@@@??@@@??@?@?@?@?@@??@ ?@?@?@ @?@ @@ ?@???@@@?@???@??@@??@?@??@@ ????@@@@??@@???@ ?@??@?@@@?@????@??@@?@@?@?@?@@@?@@?@ @@?@@@ ???@?@????@?@@@?@???@???@ @?@@@@@@?@??@@@@@@@@??@@??@?@@@@@?@@ ?@@@@@@?@@?@???@@??@@@@?@@ @@@?@@@@?@?@ ?@@@ @??@ @@@@ ?@@@@@@??@@@???@ ?@@@ @@?@?@?@@?@@@@@??@@@@@?@@?@@@@??@???@@ @??@@?@@@@???@????@@@???@@??@???@?@?@??@@@ ????@?@?@@??@@@???@@???@ ?@@@???@@@@?@?????@??@@?@@ @?@@??@?@ ????@@?@@@@@@@@?@@?@@?@@?@@@@ @@ 4 ``  "L 5 xY u  8lrL ^ D  _k: L| n<  a D Me %/?i @x)  Y $8Q#0 .2 RLT  8thL 4F" .5SL ~@N%O a rZWt  L /O lre . X ]j eL ` PFd lrRL  Z 6fb  HOp1=XX%j~ t`te  rl'tx{ K@ :  x 0Bl H Gr~8   hALf LLhoe Z Y bi@l z: : ` KJL  i 4  Fd.e J {T D4T(dLV ( ` @'y HC"xL,$L 6@R^jt  : `, T#c . ? Y T|] # dJ x f  /x(F CH 7 r OHy=hL9Y T 1 V M~C  t.\=@ = >r` F 0@ (lmoFy5 {    @ZYL LYWe@ ~ 9( c $ f,T4  T> `r  i dX "#4 c*: ` w! L ;g KD,r$ ] Z~ G  s:4-l\ L< lTxKDzRx +-  LIfAFD|au 8I s}9q  T| H 5 ^ 1?4 z 4Lt i$'r@FUjU xp nY! 8f~ p T? =p ? 
=p ?p =q?(\)?Q?QR?Q?Q?(\)?Gz?zG{?p =q?zG{?zG{?\(?QR?p =q?Gz?Gz?ffffff??У =p?\(?333333?ə?(\)?QR?Q?ə?\(?ə?ҏ\(??Q?Q?Gz?\(?(\)?\(?\(?333333? =p ?333333?У =p?ə?(\)@ =p ?У =p?\(?ҏ\(???\(\?ə?\(?GzH?Gz?(\)?zG{?333333?θQ? =p ?zG{?޸Q?Q? =p ?(\)?p =q?QR? =p ?\(?zG{?zG{?zG{?Gz?p =q?Q?ٙ?Gz? =p ?? =p ?zG{?p =q? =p?(\)?Gz?QR?\(?ə?Q?QR?Q?Gz?QR??Gz?Q?(\)?=p =?Q?\(? =p ?Q?ə?ҏ\(?p =q??\(?Q?333333?zG{?zG{?333333?(\)?Q?\(?333333?Gz?ffffff?ffffff??(\)?ə?ə?zG{?Q?zG? =p ?У =p??У =p?=p =?ٙ?p =q?(\)?׮zG?θQ?У =p? =p?Gz?Q?У =p?(\)?QR?p =q?333333?ٙ?\(?=p =?θQ?ə?zG{?Q?zG{?Gz?GzH??\(?ə?\(?У =p?QR??\(?(\)?(\)?(\)?Gz?Q?Q?Gz?ffffff?333333??Q?QR? =p ?QR??Q?\(?\(?θQ?ҏ\(?QR?\(?Q?zG{?Gz?333333?(\)?ə?Q? =p?zG{?p =q???Q?333333?QR?Q??p =q?333333?333333?Gz?Q?ٙ?zG{?ҏ\(? =p ? =p ?Q?Q?333333?θQ?(\)?zG{?Q? =p ?333333???QR?(\)?Q?zG{?(\)?Q?Gz?Gz??Q?(\)?GzH?Q??ə? =p?333333?zG{?(\)? =p ?Gz?θQ?zG?Q?Gz?333333?θQ?\(?\(?? =p ?=p =?\(?(\)@Q?zG{?Q?zG{?Gz?\(?θQ? =p?QR? =p ?Gz?333333?Gz? =p ?333333?zG{?Q?Q?Gz?(\)? =p?Gz?ə?ə?zG{?333333?Q?\(?(\)?333333?QR?(\)? =p?θQ?׮zG?(\)?GzH??Q? =p?(\)?zG{? =p ?333333?ə?(\)?Q?p =q? =p??ə?GzH? =p?QR?ᙙ?ҏ\(???Gz?p =q?QR?\(?p =q? =p ?p =q? =p ? =p?p =q?\(?GzH?(\)?=p =?QR?zG{?Gz?θQ?QR?Gz? =p@zG???У =p?(\)?Q?Gz?Gz?Q? =p?QR?θQ?zG{?\(?Gz?zG{?(\)?p =q?У =p?Q?zG{?(\)?\(?Q? =p ?Gz?θQ?ə?\(?p =q?Q?333333?(\)?GzH? =p?ə?p =q?(\)?޸Q?p =q?Gz?zG{?? =p ?333333?Q?p =q?GzH?Gz?zG{?Gz?ҏ\(?? =p ? =p ?\(?Gz?zG{? =p ?zG{?GzH?=p =?(\)?θQ?333333?Q??zG?GzH?Q?333333?333333?zG{?Q?ə? =p?(\)? =p?QR?333333?ᙙ?У =p? =p?Gz?Q?ۅQ?\(?\(?Gz?Gz? =p ?Gz?Q?\(? =p ?У =p?ҏ\(?=p =?\(?(\)?zG{?(\)?Q??ҏ\(??Q? =p ?p =q?ə?Q?QR? =p ?Q?? =p ?Gz?Q?QR??(\)? =p ?Q?zG{?\(?У =p?Q?(\)?Gz?޸Q?θQ? =p ? =p ?\(?ə?\(?Q?(\)??ə?Gz?Q?Gz? =p ?(\)??zG{?=p =?333333?Gz? =p?Q?333333?Q? =p ??zG{?\(?GzH? =p? =p ?zG{?(\)?Q?GzH?p =q?=p =?Q??ə?(\)?Gz?zG?Q?Q?Gz?Q?zG{?p =q?Gz?zG{? =p ?Q??ffffff??Q?(\)?У =p? =p@QR?\(?zG{?GzH?Q?θQ?(\)?GzH?Q?p =q??ə??\(? =p?(\)?Gz?zG{?Gz?θQ?QR? =p ?Q? =p ?p =q?333333?Gz?zG{?QR?Q?GzH?(\)?Q?Gz? =p ?p =q?ə?QR?zG{? =p ?ə?(\)?zG{?p =q@\(\?Gz?zG{?ə?Q?QR??zG{?(\)?Gz?׮zG?zG{??\(?333333? =p ?zG{?333333?zG{?zG{?QR?޸Q?Q?ə?(\)??QR?ٙ?333333? =p?ҏ\(? =p ? =p ??ҏ\(?Gz?\(?(\)?(\)??=p =?\(?(\)?У =p?\(?Q?333333?333333? =p ?Q? =p ?ҏ\(?ə? =p?QR?Gz?(\)? =p ?(\)?p =q?zG{?θQ?Gz?ə???Q?zG{?Q?p =q? =p ?QR?\(?ə? =p?p =q?Gz?zG{? =p ? =p ?=p =?(\)?333333?p =q??333333?333333?p =q??p =q?Gz?p =q?Q?p =q? =p ?Q?ҏ\(? =p ?ə?Q? =p?Gz?\(?Gz?ə?\(?Q?(\)?zG?\(?Gz?\(? =p?Q?Gz?θQ?\(?p =q?(\)?zG{?Q?ffffff?Gz?Gz?p =q?Gz?zG{?QR?p =q??p =q?Q?θQ?Q?\(?Q?\(?Gz?Gz?陙?Gz?θQ??(\)?Q?Q?Q?\(?p =q?zG{???ٙ? =p?\(?GzH?\(?zG{?333333??zG{? =p?(\)?GzH?QR?\(?Gz?θQ??Gz?\(?GzH?ҏ\(?zG{?333333?? =p?\(?(\)? =p ?QR?Gz?Gz?QR??Q?GzH?Q?zG{? =p ?Q?У =p? =p ?\(?Q?ə?Q?θQ?\(?Gz? =p?ۅQ?θQ?Gz?\(?\(?У =p? =p ?θQ?Q?\(??ٙ?Q?У =p?ffffff?У =p??zG{?(\)?zG{?Q?Q?Q?Q?ҏ\(?θQ?zG{?? =p?zG{??У =p? =p?У =p?333333?ٙ?zG?333333?QR??p =q?QR?У =p?У =p?zG{?Q? =p ??Gz?ҏ\(? =p ??zG{??ffffff?p =q?=p =?GzH?zG{? =p?333333?(\)??Q? =p?(\)? =p ?QR?\(?(\)? =p ?\(?Q?θQ?p =q?У =p? =p?zG{?p =q?\(?zG{??QR?Q?\(?޸Q?У =p?QR? =p ?(\)? =p ? =p ?zG{?Q?陙?Q?333333?\(?GzH?ə?У =p?\(? =p ?333333?ə?333333?(\)?(\)?Q?zG{?GzH?333333?Q?333333?\(? =p ?(\)?p =q? =p ? =p?GzH?(\)?(\)?Gz? =p?p =q?(\)?Gz?333333?\(? =p ?\(? =p?ffffff?p =q?θQ?zG{? =p ?Q? =p?(\)?ə?Gz?\(?=p =?Gz?θQ? =p ?zG{?(\)?zG{?ۅQ?p =q?ə?Gz?333333?Q?Q?zG{?zG{??GzH?У =p? =p?zG{?Q?Gz?Q?333333?޸Q?\(?QR?Q? =p???ə??Q??p =q?(\)?ə?? 
=p ?QR??Q?\(?ə?(\)?QR??QR?zG{?ٙ?zG{?Q?QR?p =q?Q?\(?? =p?Gz?QR?(\)?333333? =p ?Q?QR??ۅQ?Gz?QR?=p =?Gz?Q? =p ?zG{? =p?333333?p =q?Gz?Gz?Gz?Q  levels 0 1 class factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor  0 1 factor names y a tb tf A r2 r3 r4 r5 r6 r7 r8 r9 t01 t02 t03 t04 t05 t06 t07 t08 t09 t10 t11 t12 t13 t14 row.names  data.frame robustbase/inst/NEWS.Rd0000644000176200001440000005344314555212477014470 0ustar liggesusers% Check from R: % news(db = tools:::.build_news_db_from_package_NEWS_Rd("~/R/Pkgs/robustbase/inst/NEWS.Rd")) \name{NEWS} \title{News for \R Package \pkg{robustbase}} \encoding{UTF-8} % --- 1.0-0 once we fix adjOutlyingness() properly ! % ===== \section{CHANGES in robustbase VERSION 0.99-2 (2023-01-27, svn r9..)}{ \subsection{BUG FIXES}{ \itemize{ \item fix thinko bug introduced exact fit checking in 0.99-0; thanks to the report by Thomas Mang. } } } \section{CHANGES in robustbase VERSION 0.99-1 (2023-11-28, svn r988)}{ \subsection{Misc}{ \itemize{ \item document the \code{...} argument(s) of \code{nlrob.control()}. \item \file{DESCRIPTION}: more URLSs. \item \file{*.Rd} tweaks; avoiding other \verb{NOTE}s. \item \file{src/lmrob.c,mc.c,monitor.c}: format \%lld etc } } } \section{CHANGES in robustbase VERSION 0.99-0 (2023-06-12, svn r928)}{ \subsection{NEW FEATURES}{ \itemize{ \item In case it finds an \emph{exact fit}, i.e., \eqn{k > n/2} residuals are (practically) zero, \code{lmrob.S()} now returns that, \code{scale = 0}, and robustness weights \code{weights(., "robustness")} which are either 0 or 1 quickly and more consistently. This is \emph{not yet} satisfactorily working for the \dQuote{large n} case. **** \bold{TODO}: Optionally (but off by default?? for back compatibility??), \code{lmrob()} initialized by such an exact-fit estimator may use a (user-specified or automatic) scale \eqn{\hat{s} > 0}{s^ > 0} to continue providing efficient SM / MM estimates. \item \code{lmrob.control()} gets class \code{"lmrobCtrl"} which can be \code{print()}ed and \code{update()}d. \item New tuning const \code{zero.scale = 1e-10} instead of hardwired \code{EPS_ZERO} in \code{lmrob.S()}'s C code; additionally the exact fit / zero-residuals checking is finally y-scale equivariant, using \eqn{1/n ||y||_1 = } \code{mean(abs(y))}. } } \subsection{BUG FIXES}{ \itemize{ \item Fixed \code{covMcd()}: corrected the consistency correction factor for the reweighted estimate: was MCDCons(p, sum(w)/n), now is MCDCons(p, 0.975), see Croux and Haesbroeck (1999). Consequently, often \code{covMcd()} and estimates based on it, such as \code{BYlogreg()} or \code{ltsReg()} are slightly changed. % 3 of our ex. checks broke \item registered \code{residuals.lmrob.S} \emph{and} changed \code{formals} compatible with generic to \code{(object, ...)}. \item \code{lmrob.S()} in C level \code{refine_fast_s()} no longer modifies \code{beta_cand[]} which notably fixes a bug found in exact-fit examples previously resulting in \code{coef() == 0} \dQuote{results}, also for \code{lmrob()} which calls \code{lmrob.S()} for its default initial estimate. \item \code{print()} finally works again as intended (\emph{not} printing a largish list); mostly by fixing internal \code{lmrob.control.minimal()}. 
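An illustrative sketch (with made-up toy data) of the new \code{"lmrobCtrl"} \code{print()} / \code{update()} methods and of the exact-fit behaviour of \code{lmrob.S()} described above:
\preformatted{
  ctrl <- lmrob.control()             # now an object of class "lmrobCtrl"
  ctrl                                # print(<lmrobCtrl>) is compact again
  ctrl2 <- update(ctrl, max.it = 200, refine.tol = 1e-8)

  ## exact fit: more than n/2 residuals are (practically) zero
  set.seed(1)
  x <- cbind(1, 1:12)
  y <- c(2 * (1:8), 5 + rnorm(4))     # 8 of 12 points lie exactly on y = 2 x
  fS <- lmrob.S(x, y, control = ctrl)
  fS$scale                            # 0, i.e., the exact fit was detected
  table(weights(fS, "robustness"))    # robustness weights are all 0 or 1
}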
      \item \code{summary.lmrob(obj)} now also gets a \code{weights} component
      from \code{obj} if that is non-trivial; this may also correct
      \code{summary()} output in weighted cases.

      \item Minor fixes in \code{plot.lts()} related to par mfrow.

      \item \code{ltsReg()} help file changed: the note strongly advising NOT
      to use LTS regression has been removed.
    }
  }
  \subsection{Misc}{
    \itemize{
      \item argument \code{mf}, e.g., in \code{lmrob.fit()}, has been
      deprecated since 2017-12-09 and is defunct now.

      \item \code{lmrob()} tweaks, notably saying more for \code{trace.lev >= 3}.

      \item replaced (almost) all \code{Calloc()} by \code{R_alloc()} in C
      code; e.g., this prevents leakage from user interrupts.
    }
  }
}

\section{CHANGES in robustbase VERSION 0.95-1 (2023-03-28, svn r903)}{
  \subsection{BUG FIXES}{
    \itemize{
      \item Fixed \code{covMcd()}: \code{raw.only=TRUE} did not work in the
      case \code{nsamp="deterministic"}.
    }
  }
  \subsection{NEW FEATURES}{
    \itemize{
      \item New \code{lmc()} and \code{rmc()} for robust tail weight
      (\dQuote{robust kurtosis}). % ../man/lmc-rmc.Rd
    }
  }
  \subsection{Misc}{
    \itemize{
      \item Our \file{tests/*.R} no longer rely on the \CRANpkg{Matrix}
      \file{test-tools.R} collection but rather use our much smaller own one
      in \file{xtraR/test-tools.R}.

      \item fixed \code{\\} escape in \file{man/aircraft.Rd}.

      \item initialize \var{equed} in \file{src/lmrob.c} to avoid a
      false-positive compiler warning.

      \item document "internal" \code{.regularize.Mpsi()}.

      \item replace \code{()} by \code{(void)} arguments for \code{F77_SUB(println)}.
    }
  }
}

\section{CHANGES in robustbase VERSION 0.95-0 (2022-04-02, svn r894)}{
  \subsection{BUG FIXES}{
    \itemize{
      \item \code{huberM(x, k)} now also works in small sample cases with
      \code{+/-Inf}, e.g. for \code{x = c(-Inf, 1)}.
    }
  }
  \subsection{NEW FEATURES}{
    \itemize{
      \item New \code{huberize()} utility, notably by default used in \code{mc()}.

      \item \code{mc()} now by default uses \code{doScale=FALSE} in order to
      guarantee convergence also in extreme cases when used \emph{together}
      with the new \code{c.huberize = 1e11}.  Both are \emph{not} back
      compatible but clearly better in extreme cases, as assessed by Lukas
      Graz' BSc thesis.  Note that the behavior of the new default
      \code{doScale=FALSE} has been amended in the C code to be more
      compatible in its \code{eps} checks with (the previous default)
      \code{doScale=TRUE}.  Lastly, a \code{message()} is currently printed,
      at most once per \R session, whenever \code{mc()} is called without an
      explicit specification of \code{doScale=*}, alerting users to the
      changed default.  This message can be turned off by setting
      \code{options(mc_doScale_quiet = TRUE)}.

      \item New dataset \code{data(x30o50)} as an example of potentially
      problematic data, not just for \code{mc()}.
    }
  }
  \subsection{Misc}{
    \itemize{
      \item Rather than \dQuote{truncating} \eqn{+/-}\code{Inf} to a large
      finite value, use \code{NA.OK = TRUE} in \code{Qn()} and \code{Sn()}'s
      \code{.C()} call.

      \item Our C code no longer uses the \code{DOUBLE_*} constants from S,
      but rather the standard C (\file{float.h}) ones.
    }
  }
}

\section{CHANGES in robustbase VERSION 0.93-9 (2021-09-27, svn r888)}{
  \subsection{BUG FIXES}{
    \itemize{
      \item \code{Qn(x)} and \code{Sn(x)} gain an \code{na.rm = FALSE} option
      and now work when \code{x} has \code{NA}s and contains \eqn{\pm}{+/-}
      \code{Inf}; see the short usage sketch below.
    }
  }
  \subsection{Misc}{
    \itemize{
      \item No longer include \file{Rdefines.h} as it is somewhat deprecated.

      \item In \file{src/lmrob.c} add \code{USE_FC_LEN_T} and use \code{FCONE}
      rather than the older FCLEN approach.
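A small usage sketch of the \code{Qn()} / \code{Sn()} \code{na.rm} option and of the \code{huberM()} and \code{mc()} changes described in the 0.95-0 and 0.93-9 entries above; the data vectors are made up purely for illustration:
\preformatted{
  x <- c(1:9, NA, Inf)
  Qn(x, na.rm = TRUE)                # drops the NA, copes with +Inf
  Sn(x, na.rm = TRUE)
  huberM(c(-Inf, 1))                 # the small-sample +/-Inf case from above
  ## silence the once-per-session message about the changed mc() default:
  options(mc_doScale_quiet = TRUE)
  mc(c(1:10, 1000))                  # now uses doScale = FALSE by default
}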
} } } \section{CHANGES in robustbase VERSION 0.93-8 (2021-06-01, svn r879)}{ \subsection{NEW FEATURES}{ \itemize{ \item \code{scaleTau2()} gets new optional \code{iter = 1} and \code{tol.iter} arguments; mostly experimentally to see if or when iteration makes sense. \item \code{Qn(x, *)} gets new optional \code{k = .} to indicate the \dQuote{quantile} i.e., order statistic to be computed (with default as previously hard-coded).\cr Experimentally to try for cases where more than n/2 observations coincide (with the median), i.e., \code{x[i] == x0 == median(x[])}, and hence \code{Qn(x)} and \code{mad(x)} are zero. \item \code{adjOutlyingness()} gets new option \code{IQRtype = 7}. } } \subsection{Tweaks}{ \itemize{ \item For tests: *again* differences found in the non-sensical \code{adjOutlyingness()} example (with large p/n, hence many "random" values in the order of 1e15). Disable the test for now (and record the result in *.Rout). } } \subsection{BUG FIXES}{ \itemize{ \item The \code{test()} utility in \file{tests/lmrob-ex12.R} no longer calls \code{matrix(x, n,4)} where the length of x does not match \code{4n}. Similar change in \file{tests/mc-strict.R} } } } \section{CHANGES in robustbase VERSION 0.93-7 (2021-01-03, svn r865)}{ \subsection{NEW FEATURES}{ \itemize{ \item Use \command{\\CRANpkg\{.\}} in most places, providing web links to the respective CRAN package page. \item \code{adjOutlyingness()} now gains optional parameters to be passed to \code{mc()}. } } \subsection{BUG FIXES}{ \itemize{ \item update the internal man page, so new \code{checkRdContents()} is happy. \item fix several \samp{\\url{.}}'s that now are diagnosed as \sQuote{moved}. \item \code{adjOutlyingness()} finally works with \code{p.samp > p}. \item \code{scaleTau2()} now works with \code{Inf} and very large values, and obeys new \code{na.rm = FALSE} argument. \item add \code{check.environment=FALSE} to some of the \code{all.equal()} calls (for 'R-devel', i.e., future R 4.1.x). \item \code{wgt.himedian(numeric())} now returns \code{NA} instead of occasionally seg.faulting or inf.looping. Ditto for a case when called from \code{Qn()}. } } } \section{CHANGES in robustbase VERSION 0.93-6 (2020-03-20, svn r854)}{ \subsection{NEW FEATURES}{ \itemize{ \item \code{splitFrame()} now treats \code{character} columns also as categorical (the same as \code{factor}s). } } \subsection{Tweaks}{ \itemize{ \item Small updates, also in checks for newer compiler settings, e.g., \code{FCLEN} macro; also F77_*() etc, in order to fix 'LTO' issues. \item More careful or \emph{less} calling \code{intpr()}: correct "Rank" of array (for gfortran/gcc 10, when \code{-fallow-argument-mismatch} is not set). } } } \section{CHANGES in robustbase VERSION 0.93-5 (2019-05-09, svn r840)}{ \subsection{Tweaks}{ \itemize{ \item For tests: try harder in \code{moreSessionInfo()} to detect openBLAS (should work for KH's debian openBLAS setup). } } } \section{CHANGES in robustbase VERSION 0.93-4 (2019-03-18, svn r839)}{ \subsection{BUG FIXES}{ \itemize{ \item \env{R_CHECK_LENGTH_1*} fix in default value for \code{ask} in plot methods for \code{mcd} and \code{lts} \item \file{src/rowMedians.c} gets better PROTECT()ion thanks to Tomas Kalibera's "rchk"s. } } \subsection{Tweaks}{ \itemize{ \item losen the strict tests more, convergence issues on all non-standard BLAS platforms. Now modularized the partly platform dependent testing into \code{moreSessionInfo()} in \file{./xtraR/platform-sessionInfo.R}. Further, for now use \code{RNGversion("3.5.0")} e.g. 
in \file{../tests/mc-strict.R}. } } } \section{CHANGES in robustbase VERSION 0.93-3 (2018-09-06, svn r827)}{ \subsection{Tweaks}{ \itemize{ \item losen the strict tests, notably in \file{tests/nlregrob-tst.R} to work around \code{nls()} convergence issues on all non-standard BLAS platforms. } } } \section{CHANGES in robustbase VERSION 0.93-2 (2018-07-26, svn r825)}{ \subsection{BUG FIXES}{ \itemize{ \item Update \code{lmrob()} to the change in R (incl 3.5.1 patched). \item \code{mc()} gets new optional \code{doScale} argument, and \emph{increased} defaults for the tolerances \preformatted{ eps1 = 1e-14, eps2 = 1e-15} such that it should converge by default in more cases. \item A \code{na.action} is now kept in \code{summary(lmrob(*))}, and when \code{print()}ing it, a note about omitted observations, e.g., because of \code{NA}'s, is made as for \code{lm()}. \item Internal \code{lmrob.weights()}: more "resistant" in case scale=0, using na.rm=TRUE (report only, no reprex). \item \code{lmrob(*, trace.lev >= 2)} now shows some information about the number of \code{find_scale()} iterations used (as these are now stored C internally).% TODO: return the maximum used to R? } } } \section{CHANGES in robustbase VERSION 0.93-1-1 (2018-07-18, svn r818)}{ \subsection{BUG FIXES}{ \itemize{ \item \file{src/robustbase.h}: \code{is_redescender} now is \code{static inline}, needed for some compilers, e.g., on ubuntu 18.04. Fixing R-forge bug(s) 6588 (and 6590, 6593), \url{https://r-forge.r-project.org/tracker/index.php?func=detail&aid=6588&group_id=59&atid=302} } } } \section{CHANGES in robustbase VERSION 0.93-1 (2018-06-20, svn r815)}{ \subsection{NEW FEATURES}{ \itemize{ \item The \sQuote{Usage:}s in the data set help pages now say \code{data(<..>, package="robustbase")}. \item The \file{lmrob_simulation} vignette now should continue to work with upcoming package \CRANpkg{ggplot2}. } } \subsection{BUG FIXES}{ \itemize{ \item \code{Mpsi(x, c, psi="huber", deriv=-1)} now gives rho(x) instead of mostly \code{Inf}. } } } \section{CHANGES in robustbase VERSION 0.93-0 (2018-04-21, svn r805)}{ \subsection{NEW FEATURES}{ \itemize{ \item \code{.psi.const(*, "lqq")} now also gives a \code{"constants"} attribute. \item more examples and help on Mpsi() etc functions and tuning constants. \item The S estimator \code{lmrob.S()} and M-S estimator now both make use of the new \code{lmrob.control()} argument \code{scale.tol} which defaults to \code{1e-10}, its formerly hardwired value. \item \code{lmrob.S()} further gets a new option \code{only.scale = FALSE}, which when true allows to only compute the S scale estimate. In that case, but also generally, \code{trace.lev = 3} or larger also produces output showing the C level \code{find_scale()} iterations. \item (By Manuel Koller) There's now a small C API to call our \code{Mpsi()} etc from C code in other packages, as \code{C_psi()}, etc; using new \file{../inst/include/robustbase.h}. \item \code{nlrob()$call$algorithm} now always contains the algorithm used as a \code{\link{character}} string, compatibly with \code{\link{nls}()}. \item new data set \code{steamUse}. %% which also depicts the \dQuote{non-robust} behavior of \code{lmrob(setting="KS2014")} \item Vignette \file{lmrob_simulation.Rnw}: fixed the wrong \dQuote{emprical power} plots; with faster \CRANpkg{ggplot2}, remove all \code{eval=FALSE} for plots and longer store the \file{*.pdf}s. \code{nlrob()} gets \code{model} option to ask for the \code{model.frame} to be returned. 
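Illustrative calls (with made-up toy data) for some of the items above: the \code{deriv = -1} convention of \code{Mpsi()} (see the 0.93-1 bug fix) and the new \code{scale.tol} and \code{only.scale} options of \code{lmrob.control()} / \code{lmrob.S()}:
\preformatted{
  ## rho() values, obtained via deriv = -1 :
  Mpsi(seq(-3, 3, by = 0.5), cc = 1.345, psi = "huber", deriv = -1)

  ## compute only the S scale estimate, with the tolerance now exposed:
  set.seed(2)
  x <- cbind(1, rnorm(50))
  y <- rnorm(50)
  ctl <- lmrob.control(scale.tol = 1e-10)
  lmrob.S(x, y, control = ctl, only.scale = TRUE)
}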
} } \subsection{BUG FIXES}{ \itemize{ \item \code{lmrob(..., method = "S")} no longer necessarily produces a warning in \code{.vcov.w()}. \item \code{nlrob()} returns a correct \code{dataClasses} component. } } \subsection{Tests Tweaks}{ \itemize{ \item For use in non-R-internal BLAS/Lapack libraries, several \file{tests/*.R} examples have been tweaked. } } } \section{CHANGES in robustbase VERSION 0.92-8 (2017-10-30, svn r778)}{ \subsection{NEW FEATURES}{ \itemize{ \item \code{fullRank()} utility for \code{adjOutlyingness}: \item \code{adjOutlyingness()}, gets new options \code{p.samp} and \code{trace.lev}, and when it fails to find enough good directions, now checks the rank and mentions \code{fullRank()} in case the matrix is (QR-)rank deficient. \item The \code{"lmrob"} method for \code{vcov()} gets optional argument \code{complete = TRUE}, where only \emph{complete = FALSE} is back compatible. \item improved (error) messages in singular case in \code{.vcov.avar1()}. \item \code{.psi.const()} is exported as well, and help is improved about using and setting non-default psi tuning constants. } } \subsection{BUG FIXES}{ \itemize{ \item loosened some regression test tolerances (for alternatives to BLAS) in \file{tests/(mc-strict|poisson-ex)}. \item \code{scaleTau2(x, *)} gains an optional \code{sigma0} argument and now returns zero instead of \code{NaN} when \code{sigma0 == 0}. } } } \section{CHANGES in robustbase VERSION 0.92-7 (2016-11-23, svn r742)}{ \subsection{NEW FEATURES}{ \itemize{ \item \code{vignette(psi_functions)} \item \code{adjOutlyingness(..., maxit.mult = max(100, p))} new option, allowing more iterations for finding \dQuote{good} projection directions. } } \subsection{BUG FIXES}{ \itemize{ \item \code{summary(nlrob(*, method = "CM"))} now works. \item \code{lmrob..MM..fit()} now works again when \code{x} and \code{y} are specified, but neither \code{method} nor \code{obj} is. } } } \section{CHANGES in robustbase VERSION 0.92-6 (2016-05-28, svn r717)}{ \subsection{NEW FEATURES}{ \itemize{ \item Now provide an \code{"lmrob"} method for the standard \R generic function \code{hatvalues()}, and also export its lower level workhorse \code{.lmrob.hat()} (formerly hidden \code{lmrob.leverages()}), which now by default has \code{names(.)}. } } \subsection{BUG FIXES}{ \itemize{ \item \code{.lmrob.hat()} (formerly \code{lmrob.leverages()}) has been corrected for the rank-deficient case. \item \code{classPC(m)} now also works for a 1-column matrix. } } } \section{CHANGES in robustbase VERSION 0.92-5 (2015-07-21, svn r698)}{ \subsection{NEW FEATURES}{ \itemize{ \item \emph{Hidden} \code{print()} methods \code{print.summary.lmrob()} and \code{print.lmrob.S()} get a \code{showAlgo = TRUE} argument which the user can set to FALSE in order to suppress printing of the \dQuote{Algorithmic parameters}. \item import (remaining parts) from "base" packages. \item \code{summary()} now also prints a summary on the residuals. } } \subsection{BUG FIXES}{ \itemize{ \item \code{summary(lmrob(.))}'s variance-covariance matrix is now called \code{cov} instead of \code{cov.unscaled} (because it \emph{is} scaled). Code which has been using \code{vcov()} or \code{ $ cov}, or even \code{ $ cov} is not affected. } } } \section{CHANGES in robustbase VERSION 0.92-4 (2015-06-04, svn r696)}{ \subsection{NEW FEATURES}{ \itemize{ \item Started this \file{NEWS.Rd} file, to eventually replace the \file{ChangeLog} \item \code{plot.lmrob()} also identifies largest residuals as \code{plot.lm()}. 
Also gets new argument \code{panel}, and \code{add.smooth=TRUE} behavior. \item adapt to the fact that R 3.3.0 will have its own \code{sigma()} S3 generic. \item setup for having message translations (volunteers sought!). } } \subsection{BUG FIXES}{ \itemize{ \item more careful in \file{../src/mc.c} (valgrind, thanks to Brian) \item add missing documentation, better examples for \code{predict.lmrob} \item \code{warn.limit.*} checks in \code{lmrob*()} } } } \section{CHANGES in robustbase VERSION 0.92-3 (2015-01-14, svn r679)}{ \subsection{NEW FEATURES}{ \itemize{ \item The \sQuote{Co-Median} \code{covComed()} from Maria Anna, tweaked by Valentin and modified considerably by Martin. \item Also document (and export) \code{r6pack()} utility. \item New \code{smoothWgt()} function --- \dQuote{Biweight on a Stick} --- to be used as \code{wgtFUN} option for \code{covMcd()} or \code{covComed()}. \item New utility \code{colMedians()} and \code{rowMedians}, as we use columnwise medians in so many places. } } \subsection{BUG FIXES}{ \itemize{ \item Tweaks to \code{medcouple()}, after detecting flaws -- which may be inherent and have \emph{not} been removed. \item Improved its documentation and the \code{adjOutlyingness()} one, notably its \dQuote{central} case. } } } \section{CHANGES in robustbase VERSION 0.92-2 (2014-11-22, svn r660)}{ \subsection{BUG FIXES}{ \itemize{ \item \code{covMcd()} with new options (\code{kmini}, \code{nmini}) now ok (sometimes wrong in 0.92-1). } } } \section{CHANGES in robustbase VERSION 0.92-1 (2014-11-18)}{ \subsection{NEW FEATURES}{ \itemize{ \item The deterministic MCD, via \code{covMcd(..., nsamp="deterministic")}. } } \subsection{BUG FIXES}{ \itemize{ \item \code{adjOutlyingness()}: reverse the defaults of \code{clower} and \code{cupper} and fix an \dQuote{eternal} erronous \eqn{\pm}{+/-} swap; see new note in \file{man/adjOutlyingness.Rd}. } } } \section{CHANGES in robustbase VERSION 0.92-0 (2014-11-18)}{ \subsection{NEW FEATURES}{ \itemize{ \item \code{nlrob()} now works with indexed vector parameters. \item new \code{outlierStats()} (Manuel). } } \subsection{BUG FIXES}{ \itemize{ \item got rid of Fortran compiler warnings about \bold{ancient} style. \item \code{nlrob(*, weigths)}, fixing R-forge bug #5988. \item \code{covMcd()} fix for \dQuote{MAD = 0} case (new \code{exactfit} code 3). } } } \section{CHANGES in robustbase VERSION 0.91-1 (2014-05-01)}{ \subsection{BUG FIXES}{ \itemize{ \item . } } } \section{CHANGES in robustbase VERSION 0.91-0 (2014-04-24)}{ \subsection{NEW FEATURES}{ \itemize{ \item . } } \subsection{BUG FIXES}{ \itemize{ \item . } } } \section{CHANGES in robustbase VERSION 0.90-1 (2014-01-30)}{ \subsection{NEW FEATURES}{ \itemize{ \item . } } \subsection{BUG FIXES}{ \itemize{ \item . } } } \section{CHANGES in robustbase VERSION 0.9-8 (2013-06-14)}{ \subsection{NEW FEATURES}{ \itemize{ \item . } } \subsection{BUG FIXES}{ \itemize{ \item . } } } robustbase/inst/po/0000755000176200001440000000000014555212560014023 5ustar liggesusersrobustbase/inst/po/de/0000755000176200001440000000000014555212560014413 5ustar liggesusersrobustbase/inst/po/de/LC_MESSAGES/0000755000176200001440000000000014555212560016200 5ustar liggesusersrobustbase/inst/po/de/LC_MESSAGES/robustbase.mo0000644000176200001440000000470714441677743020732 0ustar liggesusers\G(/4X-17*-bM0$4 T u%#(O)y<=! 
3/#c;3-^%5(#$ $, #Q )u '    Skipping design matrix equilibration (DGEEQU): row %i is exactly zero.'deriv'=%d is invalidArgument '%s' must be numeric or integerArgument '%s' must be numeric or integer of length 1Argument 'cc' must be numericArgument 'naRm' must be either TRUE or FALSE.Argument 'x' must be a matrix.Argument 'x' must be numeric (integer or double).DGEEQU: column %i of the design matrix is exactly zero.Error from Rdqags(psi_ggw*, k, ...): ier = %iFast S large n strategy failed. Use control parameter 'fast.s.large.n = Inf'.m_s_subsample() stopped prematurely (scale < 0).normcnst(): ipsi=%d not implemented.psi(): ipsi=%d not implemented.psi2(): ipsi=%d not implemented.psip(): ipsi=%d not implemented.rho(): ipsi=%d not implemented.rho_ggw(): case (%i) not implemented.rho_inf(): ipsi=%d not implemented.Project-Id-Version: robustbase 0.93-9 Report-Msgid-Bugs-To: PO-Revision-Date: 2023-06-09 09:18+0200 Last-Translator: Martin Maechler Language-Team: not-existing Language: de MIME-Version: 1.0 Content-Type: text/plain; charset=utf-8 Content-Transfer-Encoding: 8bit Äquilibrierung der Designmatrix weggelassen (DGEEQU): Zeile %i ist exakt Null.'deriv'=%d ist ungültigArgument '%s' muss numerisch ('numeric' oder 'integer') seinArgument '%s' muss numerisch oder Ganzzahle der Länge 1 seinArgument 'cc' muss numerisch seinArgument 'naRm' muss entweder TRUE oder FALSE sein.Argument 'x' muss eine Matrix sein.Argument 'x' muss numerisch ('integer' oder 'double') sein.DGEEQU: Spalte %i der Design Matrix ist exact Null.Fehler von Rdqags(psi_ggw*, k, ...): ier = %iDie 'Fast S large n' Strategie hat versagt. Verwende Kontrollparameter 'fast.s.large.n = Inf'.m_s_subsample() hat vorzeitig angehalten (scale < 0).normcnst(): ipsi=%d nicht implementiert.psi(): ipsi=%d nicht implementiert.psi2(): ipsi=%d nicht implementiert.psip(): ipsi=%d nicht implementiert.rho(): ipsi=%d nicht implementiert.rho_ggw(): Fall (%i) nicht implementiert.rho_inf(): ipsi=%d nicht implementiert.robustbase/inst/po/de/LC_MESSAGES/R-robustbase.mo0000644000176200001440000000630714441677743021127 0ustar liggesusers%p"q9 %;XDm%<6 LY/u/3086i+<8#4\&">*i("G*' @R D ! = AE  ? 
5 F: , 5 R A7 2y      'X' must have at least two columns'coef' must not be negative'formula' missing or incorrect'start' must be an initial estimate of beta, of length %d0s in V(mu)All observations have missing values!All weights must be positiveConvergence AchievedCurrently, only family 'poisson' is supported for the "MT" estimatorImplosion: sigma1=%g became too smallMore dimensions than observations, currently not implementedMore dimensions than observations: not yet implementedNAs in V(mu)No convergence in %d steps.Not the same method used for fitting the modelsNot the same response used in the fitted modelsUse fullRank(x) insteadinvalid first argumentmaximal number of *inner* step halvings must be > 0maximum number of "kstep" iterations must be > 0models were not all fitted to the same size of datasetno intercept in the modelnon-trivial 'offset' is not yet implementednon-trivial prior 'weights' are not yet implemented for "BY"some terms will have NAs due to the limits of the methodvalue of the tuning constant c ('const') must be > 0y is not onedimensionalProject-Id-Version: robustbase 0.93-9 PO-Revision-Date: 2023-06-09 09:16+0200 Last-Translator: Martin Maechler Language-Team: not-existing Language: de MIME-Version: 1.0 Content-Type: text/plain; charset=utf-8 Content-Transfer-Encoding: 8bit 'X' muss mindestens zwei Spalten haben'coef' darf nicht negativ sein'formula' fehlt oder ist ungültig'start' muss eine Startschätzung sein von beta, der Länge %dNullen ('0') in V(mu)Alle Beobachtungen haben fehlende Werte!Alle Gewichte müssen positiv seinKonvergenz erreichtMomentan ist nur die 'poisson' Familie unterstützt beim "MT" SchätzerImplosion: sigma1=%g ist zu klein gewordenMehr Dimensionen als Beobachtungen; momentan nicht implementiertMehr Dimensionen als Beobachtungen; das ist noch nicht implementiertNAs in V(mu)Keine Konvergenz in %d Schritten.Nicht die gleiche Methode verwendet um die Modelle anzupassenNicht dieselbe Zielvariable verwendet in den angepassten ModellenVerwende stattdessen fullRank(x)ungültiges erstes ArgumentMaximale Anzahl von *inneren* Schritthalbierungen muss > 0 seinMaximale Anzahl von "kstep" Iterationen muss > 0 seinModelle wurden nicht alle auf die gleich grossen Datensätze angepasstKein Achsenabschnitt ('intercept') im Modellnicht-trivialer 'offset' ist noch nicht implementiertnicht-triviale apriori Gewichte ('weights') sind noch nicht implementiert for "BY"Einige Terme werden NAs enthalten wegen Beschränkung der MethodeDie Tuning Konstante c ('const') muss positiv seiny ist nicht eindimensionalrobustbase/inst/po/en@quot/0000755000176200001440000000000014555212560015436 5ustar liggesusersrobustbase/inst/po/en@quot/LC_MESSAGES/0000755000176200001440000000000014555212560017223 5ustar liggesusersrobustbase/inst/po/en@quot/LC_MESSAGES/robustbase.mo0000644000176200001440000001042214441677743021744 0ustar liggesusers!$/,:G$l(4-L1k7;0{O-MGGj-U(9~>0$(M m %#3 4M : G  , 8L ! # 1 " 5 7V  ;   - Q GjP-Y9C>}0$ 2 St%#3      ! M-S estimate: maximum number of refinement steps reached. 
Skipping design matrix equilibration (DGEEQU): row %i is exactly zero.'deriv'=%d is invalidArgument '%s' must be numeric or integerArgument '%s' must be numeric or integer of length 1Argument 'cc' must be numericArgument 'ipsi' must be integerArgument 'naRm' must be either TRUE or FALSE.Argument 'x' must be a matrix.Argument 'x' must be numeric (integer or double).DGEEQU: column %i of the design matrix is exactly zero.DGEEQU: illegal %i-th argumentDGELS could not determine optimal block size, using minimumDGELS: illegal %i-th argument.DGELS: weighted design matrix not of full rank (column %d). Use control parameter 'trace.lev = 4' to get diagnostic output.Error from Rdqags(psi_ggw*, k, ...): ier = %iFast S large n strategy failed. Use control parameter 'fast.s.large.n = Inf'.S refinements did not converge (to refine.tol=%g) in %d (= k.max) stepsToo many singular resamples. Aborting subsample(). See parameter 'subsampling; in help of lmrob.config().fast_s_with_memory() returned invalid code %dfind_scale() did not converge in '%s' (= %d) iterations with tol=%g, last rel.diff=%gfind_scale(*, initial_scale = %g <= 0) -> final scale = 0m_s_descent(): Problem in RLLARSBI (RILARS). KODE=%d. Exiting.m_s_subsample() stopped prematurely (scale < 0).normcnst(): ipsi=%d not implemented.psi(): ipsi=%d not implemented.psi2(): ipsi=%d not implemented.psip(): ipsi=%d not implemented.rho(): ipsi=%d not implemented.rho_ggw(): case (%i) not implemented.rho_inf(): ipsi=%d not implemented.subsample(): could not find non-singular subsample.Project-Id-Version: robustbase 0.99-0 Report-Msgid-Bugs-To: PO-Revision-Date: 2023-06-09 09:18+0200 Last-Translator: Automatically generated Language-Team: none Language: en MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Plural-Forms: nplurals=2; plural=(n != 1); M-S estimate: maximum number of refinement steps reached. Skipping design matrix equilibration (DGEEQU): row %i is exactly zero.‘deriv’=%d is invalidArgument ‘%s’ must be numeric or integerArgument ‘%s’ must be numeric or integer of length 1Argument ‘cc’ must be numericArgument ‘ipsi’ must be integerArgument ‘naRm’ must be either TRUE or FALSE.Argument ‘x’ must be a matrix.Argument ‘x’ must be numeric (integer or double).DGEEQU: column %i of the design matrix is exactly zero.DGEEQU: illegal %i-th argumentDGELS could not determine optimal block size, using minimumDGELS: illegal %i-th argument.DGELS: weighted design matrix not of full rank (column %d). Use control parameter ‘trace.lev = 4’ to get diagnostic output.Error from Rdqags(psi_ggw*, k, ...): ier = %iFast S large n strategy failed. Use control parameter ‘fast.s.large.n = Inf’.S refinements did not converge (to refine.tol=%g) in %d (= k.max) stepsToo many singular resamples. Aborting subsample(). See parameter 'subsampling; in help of lmrob.config().fast_s_with_memory() returned invalid code %dfind_scale() did not converge in ‘%s’ (= %d) iterations with tol=%g, last rel.diff=%gfind_scale(*, initial_scale = %g <= 0) -> final scale = 0m_s_descent(): Problem in RLLARSBI (RILARS). KODE=%d. 
Exiting.m_s_subsample() stopped prematurely (scale < 0).normcnst(): ipsi=%d not implemented.psi(): ipsi=%d not implemented.psi2(): ipsi=%d not implemented.psip(): ipsi=%d not implemented.rho(): ipsi=%d not implemented.rho_ggw(): case (%i) not implemented.rho_inf(): ipsi=%d not implemented.subsample(): could not find non-singular subsample.robustbase/inst/po/en@quot/LC_MESSAGES/R-robustbase.mo0000644000176200001440000011245214441677743022151 0ustar liggesusersl| xy{X;!#(+<T@6) 3 4F ;{ "  A A8!z!!!;!7"C?"G"("" #!)#AK#(###6#&$)8$/b$*$.$=$9*%Jd%(%"%%1&&L&(s&C&6&B'5Z'''&'/'(-(0(W3(((( (((((O(./)N^) )%)%)#*1)*+[*&*8*"* +(+.+C+.^+%++&+S+GK,O,),0 -->-l---*--D-78.$p.N. .9/7?/Nw/6/e/Bc0>030%1A?1?11019 2 E2f2:22U2 3*?3/j3?3333<46T44474)4/"5=R5 5575B5B,6o6]6$6/7/>76n7+7H718FL8%8878'9#(9$L97q9M949<,:;i:I:A:81;'j;A;";4;3,<.`<G<<<*=1=L=2^=F=Q=*><B>>>>>>7?,9?=f?,????.?,+@X@k@@|@9@ @AA)A,BA+oA A AA$A A B&BBB[BdBzB?B0B# C/C6C ICVCvCCKC0CD2DMDPD)hDMD[D G!G+G2 H9@H"zH+H H.H3I07I(hIIIAI'J6)J`J/rJJ*J>J!,K5NKKK;KKEK#DLhL+L<LL MM*M%>M0dMAMFM N9?NyN8NNN=N-$O#ROJvOO"O%O(P:P6WPPP*P( Q+3Q+_Q!QNQQ, R8RARZR8xR4R*R0SBS)HSrSS9S8S#TL'TDtT)T%T3 U3=U)qUU%UU-U5"V.XVVV4V/V0 W:W5?W*uW'W WWWXXYYX/Y;YYY,Y@YDd@dQdbdOvd.dRd He%Te%ze#e1e+e&"f8If"fffff.f%(gNg&kg[gGgO6h)h0h-hi&i7i*ViiHi7i$jNqkekJlFal7l%lAmCHmm4m=m n9n:YnnUn n.o3Ao?uoooo<o6/p4fp7p)p/pA-q oq|q7qBqB rNr]jr$r/r/s6Ms+sHs5sN/t%~tt7t/t'u$Cu7huMu4u<#v;`vIvEv@,w'mwIw"w8x3;x.oxGxxy*y@y_y:qyFyQyEz@]z!zzz{{7,{,d{E{4{ |||27|0j|||L|= } J}T}d}|},}+} } }~$~ ?~`~y~~~~#~C~05#f K4Bw)πQ[K ,߁/E[7v3:8K^uă ʃIՃB!b+29"+@ l.w30څ( 4NEf'6Ԇ /M*mF!߇57HC_I#+/H@xՉ% 0/A`N 9L8fA-#)JM")Œ,66Pm*(/3B!vN0'0I8g82ُ4 A)Gq=<͐' X2D)Б)3$3X)#)ڒ-5E.{8Ǔ/00a5f*'ǔ (`>l4 8G"_(ub^V+N?B5=" jazI*Fo>GO q4X/,:vT! $YA 06W]e@!HbL',S.K85C?<9`aQQ1 ;D2-d0<Sy\\jLhh^}(@;&J|gEk2P-Z DMU]UAYc#*%X+&7d1p~T.RtFCfksxZ3#BiI'O97) w$ ):leJ  WV[nM{ /N3H6g_ifEPK[m=c%Rr!! Using default."boot" method not yet implemented for "nlrob" objects. Use confint(*, method = "Wald").%s-step did NOT converge. Returning unconverged %s-estimate'' are ('%s' is not a valid family (see ?family)'%s' must be a function, numeric vector of length p, or NULL'%s' must be a number or a function of %s which returns a number'%s' must be character string or function, but is "%s"', 'parm' must be specified as an integer'. 
Using defaults.'Anova Table' for a single model not yet implemented'Anova Table' for a single model object not yet implemented'X' must have at least two columns'coef' must not be negative'control$n.group' is not much larger than 'p', probably too small'control$n.group' must be larger than 'p' for 'large_n' algorithm'control' is missing'cov' must be a p x p matrix'formula' missing or incorrect'formula' should be a formula of the type 'y ~ f(x, alpha)''full.h' is true, but 'hsets.init' has less than n rows'groups * n.group' must be smaller than 'n' for 'large_n' algorithm'hsets.init' must be a h' x L matrix (h' >= h) of observation indices'hsets.init' must be in {1,2,...,n}; n ='id.n' must be in {1,..,'id.n' must be in {1,..,%d}'nsamp = "best"' allows maximally'nsamp' options 'best' and 'exact' not allowed for n greater than'offset' is not yet implemented for "BY"'offset' not fully implemented'psi' should be one of %s'qr.out = TRUE' for univariate location is disregarded'reflect part' in'scale' must be NULL or a positive number'shortMain' is deprecated and will get defunct.'start' cannot yet be passed to glmrobBY()'start' must be a named list or numeric vector'start' must be a numeric vector, NULL, or a character string'start' must be an initial estimate of beta, of length %d'start' or 'lower' or 'upper' must be fully named (list or numeric vector)'weights' as formula should be one-sided'weights' must be a numeric vector'weights' must be non-negative'weights' must be nonnegative and not contain NAs'weights' must have same length as 'x''weights.on.x = "%s"' is not implemented'weights.on.x' must be a string, function, list or numeric n-vector'wgtFUN' must be a function or a string specifying one'wgtFUN' must be a function or a string specifying such a function'wgtFUN' must be a function or one of the strings %s.'which' must be in 1:5'x' must be a numeric matrix(probably because of rounding errors).(tau / hybrid / tauold): tau not found in 'obj') but should be ()$).** direction sampling iterations were not sufficient. Maybe try increasing 'maxit.mult'*and*,, cc).-estimate in.. Using default.. Please report!. Will use default..vcov.avar1 can only be used when initial estimator is S; using .vcov.w instead.vcov.avar1() supports only SM or MM estimates.vcov.avar1: negative diag() fixed up; consider 'cov=".vcov.w."' instead0s in V(mu):.vcov.w: Caution. Some psi'() are NA:.vcov.w: cov.corrfact must be one of:.vcov.w: cov.dfcorr must be one of:.vcov.w: cov.hubercorr must be logical (or NULL):.vcov.w: cov.xwx must be logical (or NULL):.vcov.w: scale missing, using D scale:.vcov.w: unsupported psi for "hybrid" correction factor:.vcov.w: unsupported psi function; try enlarging eps1, eps2 !?< p ===> scaleTau2(.) = 0Algorithm did not convergeAll models are refitted except the largest oneAll observations have missing values!All weights must be positiveAll weights.on.x must be none negativeAs "sigma" is in 'pnames', do not use it as variable or parameter name in 'formula'Assuming constant prediction variance even though model fit is weightedAssuming prediction variance inversely proportional to weights used for fittingC function R_lmrob_S() exited prematurelyCannot find valid starting values: You need helpCoef. for Hampel psi function not of length 3Coef. 
for psi functionCoefficients forComputing 5000 subsets of sizeComputing profile confidence intervals ...Convergence AchievedCurrently, only family 'poisson' is supported for the "MT" estimatorData set and provided center have different dimensions!Detected possible local breakdown ofDiagnostic plot is not available for univar iate location and scale estimationDimension {= ncol(x)} must be 2!Do not use '%s' as a variable name or as a parameter nameError: neither breakdown point nor efficiency specifiedFailed to compute robust Mahalanobis distances, reverting to robust leverages.For method 'cubif', use glmRob() from package 'robust'For method = "%s", argument %s is not made use ofFor method = "%s", arguments %s are not made use ofFor method = "%s", currently 'psi' must be specified via 'control'For test = 'Deviance', the estimator chain has to end with 'M'For tolerance ellipses the dimension 'p' must be 2!Implosion: sigma1=%g became too smallIndex plot of standardized residuals is not avalable if scale = 0Initial estimator '%s' not supported; using S-estimator insteadInitial setInitial setsInitialization 'init = "%s"' not supported (yet)Invalid scalefn='%s': must be function or a valid stringInvalid number of trials nsamp =Invalid number of trials nsamp=Invalid number of trials nsamp=%s. Using default nsamp=%d.KS2014Length of initial coefficients vector does not match rank of singular design matrix xM-S estimator did *not* convergeMatrix 'x' is not of full rank: rankM(x) =Maximum 5000 subsets allowed for option 'best'.Maybe use lmrob(*, control=lmrob.control(....) with all these.MethodModels are not nested!Models are not strictly nestedMore dimensions than observations, currently not implementedMore dimensions than observations: not yet implementedMore than h of the observations lie on a hyperplane.More than half of the observations lie on a hyperplane.NA coefficient (at %s) from "best" subsetNA coefs in full and reduced model do not matchNA's present in data; consider using 'na.action = na.exclude'NAs in V(mu)NAs in d(mu)/d(eta)Need more than twice as many observations as variables.No categorical variables found in model. Reverting to S-estimator.No continuous variables found in model. Reverting to L1-estimator.No convergence in %d steps.No weights defined for this object. 
Use type="robustness" argument to get robustness weights.Non-finite coefficients at iterationNot the same method used for fitting the modelsNot the same response used in the fitted modelsNot the same tuning constant c used in the robust fitsNumber of observations in x and y not equalNumber of offsets is %d, should rather equal %d (number of observations)Only 'test.acc = "coef"' is currently implementedOptions 'best' and 'exact' not allowed for n greater than 2*nmini-1 =Please fit the nested models by lmrobPlease report!Predictions on current data refer to _future_ responsesProvide 'upper' or 'lower' with names()Psi function '%s' not supported yetQn(x) == 0 and tmad(x, trim=%g) == 0Regression Diagnostic plot is not avalable if scale = 0Regression Diagnostic plot is not avalable: option mcd=F was set in ltsReg().Robust GLM fitting not yet implemented for family %sS-estimated scale == 0: Probably exact fit; check your dataSample size n < h(alpha; n,p) := size of "good" subsampleStandardized residuals vs Fitted values plot is not avalable if scale = 0Std.error computation not yet available for the case of 'weights'The 'method' argument is different from 'control$method'The MCD covariance matrix was singular.The argument 'acc' has been renamed to 'tol'; do adapt your code.The covariance matrix is singular!The default of 'doScale' is FALSE now for stability;The first object does not contain the largest modelThe sample size must be greater than 1 for svdThere is at least one constant column. Remove it and set intercept=TRUEThis family is not implementedThis may take aTolerance range must be between 0% to 100%Unexpected 'exactfit' codeUnknown setting 'Use 'main = "short"' instead of 'shortMain = TRUE'Use c(minimal slope, b, efficiency, breakdown point) [2 special cases]Use c(minimal slope, b, efficiency, breakdown point) [6 hard-coded special cases]Use fullRank(x) insteadUse lmrob argument 'setting="KS2014"' to avoid this problem.Use only with 'lmrob' objectsUse only with 'lts' objectsUse only with 'mcd' objectsUsing the former, method =Weighting methodX'WX is almost singular. Consider using cov = ".vcov.w"X'WX is singular. Rather use cov = ".vcov.w"You cannot specify both 'main' and the deprecated 'shortMain'You should use different 'eps' and/or 'eps1'])values^list\(alpha not inside [1/2, 1]anova.glmrob() only works for 'glmrob' objectsanova.lmrob() only works for 'lmrob' objectsare disregarded inare disregarded.argument 'm.cov' must have numeric components 'center' and 'cov'argument 'psi' must be a string (denoting a psi function)argumentsarguments .. inarguments of function 'betaExacto glm(.) error:calculations stopped prematurely in rllarsbicalling predict.lm() ...coefficientcoefficientscontrol is missingcould not compute scale of residualscouldn't find optimizer functioncov.resid must be one ofderiv must be in {-1,0,1,2}deriv must be in {0,1,2}dropped:failed to converge infamily '%s' not yet implementedfinite sample corrections are not corrected for non-default 'k'fitted probabilities numerically 0 or 1 occurredfitted rates numerically 0 occurredfixingfor fitting methodfor method='function incorrectly specified.glm(.) {inner subsample} error:id.nif a list, weights.on.x must contain a covariance function such as covMcd()ignoring cov.resid == 'final' since est != finalillegal 'family' argumentillegal 'singularity$kind'inincompatible dimensionsinit must be "S", "M-S", function or listinitial estim. 
'init' not converged -- will be return()ed basically unchangedinitial estimate residuals length differs from final ones. Typically must refit w/ lmrob()instead ofinternal logic error in psi() function name:invalid 'cov.dfcorr':invalid 'init' argumentinvalid 'lmrob' object: no terms componentinvalid 'method':invalid 'posdef.meth':invalid 'psi'=%s; possibly use .regularize.Mpsi(%s)invalid 'seed'. Must be a valid .Random.seed !invalid 'seed'. Must be compatible with .Random.seed !invalid 'start' stringinvalid 'test'invalid 'type'invalid 'variant':invalid first argumentinvalid tuning parameter names:is not implementediteriter2iterationslmrob object does not have a proper 'qr' component. Rank must be zerolmrob object does not have a proper 'qr' component. Rank zero?lmrob..D..fit: control is missinglmrob..D..fit: parameter psi is not definedlmrob..D..fit: parameter tuning.psi is not numericlmrob..D..fit: prior estimator did not converge, stoppinglmrob..D..fit: residuals undefinedlmrob..D..fit: robustness weights undefinedlong time!lower must be either of length %d, or length 1maximal number of *inner* step halvings must be > 0maximum number of "kstep" iterations must be > 0maximum number of iterations must be > 0mc(): not 'converged'method for psi functionmethod='%s' is only applicable for binomial family, but family=""missing or negative weights not allowedmodels were not all fitted to the same size of datasetmust be in {1,..,n < 2 * p, i.e., possibly too small sample sizen <= p -- you can't be serious!n == p+1 is too small sample size for MCDneed 'model' or 'x' component for robust Mahalanobis distancesneed non-negative number of nodesneed non-robust working residuals for this model typenegative eigen([negative scale 's'neither breakdown point 'bp' nor efficiency 'eff' specifiedno intercept in the modelno valid subsample found in LTS - set 'nsamp' or rather use lmrob.S()non-function specified as optimizernon-implemented test method:non-trivial 'offset' is not yet implementednon-trivial prior 'weights' are not yet implemented for "BY"not correctly supported yetnot implementednot of length 1not yet implementednot yet implemented for large vectorsnsamp > i_max := maximal integer -- not allowed;number of offsets is %d, should equal %d (number of observations)number of rows in 'x' and length of 'object$rweights' must be the samenumber of subsamples must be > 0only M and D are steps supported after "init" computationoptim(.) non-convergence:optimizer function must use (at least) formal parametersoptimizer="optimx"optimxoption 'cov.hubercorr' is ignored for cov.corrfact = "asympt"or c(0, a,b,c, max_rho) as from .psi.const(or c(b, c, s) as from .psi.const(original detmcd() wrongly declared c-step convergence (obj=%g, i=%d, j=%d)out ofpackage must be loaded in order toparameter 'tuning.psi' is not numericparameter names must appear in 'formula'parameter psi is not definedprediction from a rank-deficient fit may be misleadingprofile() method not yet implemented for "nlrob" objects. 
Use method = "Wald".provide better scale; must be all positiverecomputing robust Mahalanobis distancessaving the robust distances 'MD' as part ofscale 's' is zero -- returning initial 'mu'scale MAD is zero for this sampleset options(mc_doScale_quiet=TRUE) to suppress this (once per session) messageset to i_max =setting 'y = FALSE' has no longer any effectsigma0 =singular fit encounteredsome eigenvalues are negativesome terms will have NAs due to the limits of the methodspecifying 'weights' is not yet supported for methodspecifying both 'acc' and 'tol' is invalidstart 'theta' has still NA's .. badly singular xstepssubsample size h < n/2 may be too smallsubsets; computing thesetestthe following arguments to 'anova.glmrob' are invalid andthe following arguments to 'anova.lmrob' are invalid andtuning constant 'cc' is not numerictuning constants for "lqq" psi: both 'eff' and 'bp' specified, ignoring 'bp'tuning constants for ggw psi: both eff and bp specified, ignoring bptuning parameter (chi/psi) is not numerictype 'pearson' is not yet implementedunable to find constants for "ggw" psi function: %sunable to find constants for "lqq" psi function: %sunable to find constants for psi functionunknown 'wgtFUN' specification:unknown setting for 'subsampling': %sunknown split typeunsupported psi function -- should not happenupdate(*, setting = ) is not allowedupper must be either of length %d, or length 1usevalue of acc must be > 0value of the tuning constant c ('const') must be > 0value of the tuning constant c (cw) must be > 0value of the tuning constant c (tcc) must be > 0veryweights should not be both close to 0 and close to 1!weights.on.x needs %d none-negative valuesx is not a numeric dataframe or matrix.x is singulary is not a numericy is not onedimensional}Project-Id-Version: robustbase 0.99-0 PO-Revision-Date: 2023-06-09 09:09 Last-Translator: Automatically generated Language-Team: none Language: en MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Plural-Forms: nplurals=2; plural=(n != 1); !! Using default."boot" method not yet implemented for "nlrob" objects. Use confint(*, method = "Wald").%s-step did NOT converge. Returning unconverged %s-estimate'' are (‘%s’ is not a valid family (see ?family)‘%s’ must be a function, numeric vector of length p, or NULL‘%s’ must be a number or a function of %s which returns a number‘%s’ must be character string or function, but is "%s"‘, ’parm' must be specified as an integer'. 
Using defaults.‘Anova Table’ for a single model not yet implemented‘Anova Table’ for a single model object not yet implemented‘X’ must have at least two columns‘coef’ must not be negative‘control$n.group’ is not much larger than ‘p’, probably too small‘control$n.group’ must be larger than ‘p’ for ‘large_n’ algorithm‘control’ is missing‘cov’ must be a p x p matrix‘formula’ missing or incorrect‘formula’ should be a formula of the type ‘y ~ f(x, alpha)’‘full.h’ is true, but ‘hsets.init’ has less than n rows‘groups * n.group’ must be smaller than ‘n’ for ‘large_n’ algorithm‘hsets.init’ must be a h' x L matrix (h' >= h) of observation indices‘hsets.init’ must be in {1,2,...,n}; n =‘id.n’ must be in {1,..,‘id.n’ must be in {1,..,%d}‘nsamp = "best"’ allows maximally‘nsamp’ options ‘best’ and ‘exact’ not allowed for n greater than‘offset’ is not yet implemented for "BY"‘offset’ not fully implemented‘psi’ should be one of %s‘qr.out = TRUE’ for univariate location is disregarded‘reflect part’ in‘scale’ must be NULL or a positive number‘shortMain’ is deprecated and will get defunct.‘start’ cannot yet be passed to glmrobBY()‘start’ must be a named list or numeric vector‘start’ must be a numeric vector, NULL, or a character string‘start’ must be an initial estimate of beta, of length %d‘start’ or ‘lower’ or ‘upper’ must be fully named (list or numeric vector)‘weights’ as formula should be one-sided‘weights’ must be a numeric vector‘weights’ must be non-negative‘weights’ must be nonnegative and not contain NAs‘weights’ must have same length as ‘x’‘weights.on.x = "%s"’ is not implemented‘weights.on.x’ must be a string, function, list or numeric n-vector‘wgtFUN’ must be a function or a string specifying one‘wgtFUN’ must be a function or a string specifying such a function‘wgtFUN’ must be a function or one of the strings %s.‘which’ must be in 1:5‘x’ must be a numeric matrix(probably because of rounding errors).(tau / hybrid / tauold): tau not found in ‘obj’) but should be ()$).** direction sampling iterations were not sufficient. Maybe try increasing ‘maxit.mult’*and*,, cc).-estimate in.. Using default.. Please report!. Will use default..vcov.avar1 can only be used when initial estimator is S; using .vcov.w instead.vcov.avar1() supports only SM or MM estimates.vcov.avar1: negative diag() fixed up; consider ‘cov=".vcov.w."’ instead0s in V(mu):.vcov.w: Caution. Some psi'() are NA:.vcov.w: cov.corrfact must be one of:.vcov.w: cov.dfcorr must be one of:.vcov.w: cov.hubercorr must be logical (or NULL):.vcov.w: cov.xwx must be logical (or NULL):.vcov.w: scale missing, using D scale:.vcov.w: unsupported psi for "hybrid" correction factor:.vcov.w: unsupported psi function; try enlarging eps1, eps2 !?< p ===> scaleTau2(.) = 0Algorithm did not convergeAll models are refitted except the largest oneAll observations have missing values!All weights must be positiveAll weights.on.x must be none negativeAs "sigma" is in ‘pnames’, do not use it as variable or parameter name in ‘formula’Assuming constant prediction variance even though model fit is weightedAssuming prediction variance inversely proportional to weights used for fittingC function R_lmrob_S() exited prematurelyCannot find valid starting values: You need helpCoef. for Hampel psi function not of length 3Coef. 
for psi functionCoefficients forComputing 5000 subsets of sizeComputing profile confidence intervals ...Convergence AchievedCurrently, only family ‘poisson’ is supported for the "MT" estimatorData set and provided center have different dimensions!Detected possible local breakdown ofDiagnostic plot is not available for univar iate location and scale estimationDimension {= ncol(x)} must be 2!Do not use ‘%s’ as a variable name or as a parameter nameError: neither breakdown point nor efficiency specifiedFailed to compute robust Mahalanobis distances, reverting to robust leverages.For method ‘cubif’, use glmRob() from package ‘robust’For method = "%s", argument %s is not made use ofFor method = "%s", arguments %s are not made use ofFor method = "%s", currently ‘psi’ must be specified via ‘control’For test = ‘Deviance’, the estimator chain has to end with ‘M’For tolerance ellipses the dimension ‘p’ must be 2!Implosion: sigma1=%g became too smallIndex plot of standardized residuals is not avalable if scale = 0Initial estimator ‘%s’ not supported; using S-estimator insteadInitial setInitial setsInitialization ‘init = "%s"’ not supported (yet)Invalid scalefn=‘%s’: must be function or a valid stringInvalid number of trials nsamp =Invalid number of trials nsamp=Invalid number of trials nsamp=%s. Using default nsamp=%d.KS2014Length of initial coefficients vector does not match rank of singular design matrix xM-S estimator did *not* convergeMatrix ‘x’ is not of full rank: rankM(x) =Maximum 5000 subsets allowed for option ‘best’.Maybe use lmrob(*, control=lmrob.control(....) with all these.MethodModels are not nested!Models are not strictly nestedMore dimensions than observations, currently not implementedMore dimensions than observations: not yet implementedMore than h of the observations lie on a hyperplane.More than half of the observations lie on a hyperplane.NA coefficient (at %s) from "best" subsetNA coefs in full and reduced model do not matchNA's present in data; consider using ‘na.action = na.exclude’NAs in V(mu)NAs in d(mu)/d(eta)Need more than twice as many observations as variables.No categorical variables found in model. Reverting to S-estimator.No continuous variables found in model. Reverting to L1-estimator.No convergence in %d steps.No weights defined for this object. 
Use type="robustness" argument to get robustness weights.Non-finite coefficients at iterationNot the same method used for fitting the modelsNot the same response used in the fitted modelsNot the same tuning constant c used in the robust fitsNumber of observations in x and y not equalNumber of offsets is %d, should rather equal %d (number of observations)Only ‘test.acc = "coef"’ is currently implementedOptions ‘best’ and ‘exact’ not allowed for n greater than 2*nmini-1 =Please fit the nested models by lmrobPlease report!Predictions on current data refer to _future_ responsesProvide ‘upper’ or ‘lower’ with names()Psi function ‘%s’ not supported yetQn(x) == 0 and tmad(x, trim=%g) == 0Regression Diagnostic plot is not avalable if scale = 0Regression Diagnostic plot is not avalable: option mcd=F was set in ltsReg().Robust GLM fitting not yet implemented for family %sS-estimated scale == 0: Probably exact fit; check your dataSample size n < h(alpha; n,p) := size of "good" subsampleStandardized residuals vs Fitted values plot is not avalable if scale = 0Std.error computation not yet available for the case of ‘weights’The ‘method’ argument is different from ‘control$method’The MCD covariance matrix was singular.The argument ‘acc’ has been renamed to ‘tol’; do adapt your code.The covariance matrix is singular!The default of ‘doScale’ is FALSE now for stability;The first object does not contain the largest modelThe sample size must be greater than 1 for svdThere is at least one constant column. Remove it and set intercept=TRUEThis family is not implementedThis may take aTolerance range must be between 0% to 100%Unexpected ‘exactfit’ codeUnknown setting 'Use ‘main = "short"’ instead of ‘shortMain = TRUE’Use c(minimal slope, b, efficiency, breakdown point) [2 special cases]Use c(minimal slope, b, efficiency, breakdown point) [6 hard-coded special cases]Use fullRank(x) insteadUse lmrob argument ‘setting="KS2014"’ to avoid this problem.Use only with ‘lmrob’ objectsUse only with ‘lts’ objectsUse only with ‘mcd’ objectsUsing the former, method =Weighting methodX'WX is almost singular. Consider using cov = ".vcov.w"X'WX is singular. Rather use cov = ".vcov.w"You cannot specify both ‘main’ and the deprecated ‘shortMain’You should use different ‘eps’ and/or ‘eps1’])values^list\(alpha not inside [1/2, 1]anova.glmrob() only works for ‘glmrob’ objectsanova.lmrob() only works for ‘lmrob’ objectsare disregarded inare disregarded.argument ‘m.cov’ must have numeric components ‘center’ and ‘cov’argument ‘psi’ must be a string (denoting a psi function)argumentsarguments .. inarguments of function 'betaExacto glm(.) error:calculations stopped prematurely in rllarsbicalling predict.lm() ...coefficientcoefficientscontrol is missingcould not compute scale of residualscouldn't find optimizer functioncov.resid must be one ofderiv must be in {-1,0,1,2}deriv must be in {0,1,2}dropped:failed to converge infamily ‘%s’ not yet implementedfinite sample corrections are not corrected for non-default ‘k’fitted probabilities numerically 0 or 1 occurredfitted rates numerically 0 occurredfixingfor fitting methodfor method='function incorrectly specified.glm(.) {inner subsample} error:id.nif a list, weights.on.x must contain a covariance function such as covMcd()ignoring cov.resid == ‘final’ since est != finalillegal ‘family’ argumentillegal ‘singularity$kind’inincompatible dimensionsinit must be "S", "M-S", function or listinitial estim. 
‘init’ not converged -- will be return()ed basically unchangedinitial estimate residuals length differs from final ones. Typically must refit w/ lmrob()instead ofinternal logic error in psi() function name:invalid ‘cov.dfcorr’:invalid ‘init’ argumentinvalid ‘lmrob’ object: no terms componentinvalid ‘method’:invalid ‘posdef.meth’:invalid ‘psi’=%s; possibly use .regularize.Mpsi(%s)invalid ‘seed’. Must be a valid .Random.seed !invalid ‘seed’. Must be compatible with .Random.seed !invalid ‘start’ stringinvalid ‘test’invalid ‘type’invalid ‘variant’:invalid first argumentinvalid tuning parameter names:is not implementediteriter2iterationslmrob object does not have a proper ‘qr’ component. Rank must be zerolmrob object does not have a proper ‘qr’ component. Rank zero?lmrob..D..fit: control is missinglmrob..D..fit: parameter psi is not definedlmrob..D..fit: parameter tuning.psi is not numericlmrob..D..fit: prior estimator did not converge, stoppinglmrob..D..fit: residuals undefinedlmrob..D..fit: robustness weights undefinedlong time!lower must be either of length %d, or length 1maximal number of *inner* step halvings must be > 0maximum number of "kstep" iterations must be > 0maximum number of iterations must be > 0mc(): not ‘converged’method for psi functionmethod=‘%s’ is only applicable for binomial family, but family=""missing or negative weights not allowedmodels were not all fitted to the same size of datasetmust be in {1,..,n < 2 * p, i.e., possibly too small sample sizen <= p -- you can't be serious!n == p+1 is too small sample size for MCDneed ‘model’ or ‘x’ component for robust Mahalanobis distancesneed non-negative number of nodesneed non-robust working residuals for this model typenegative eigen([negative scale ‘s’neither breakdown point ‘bp’ nor efficiency ‘eff’ specifiedno intercept in the modelno valid subsample found in LTS - set ‘nsamp’ or rather use lmrob.S()non-function specified as optimizernon-implemented test method:non-trivial ‘offset’ is not yet implementednon-trivial prior ‘weights’ are not yet implemented for "BY"not correctly supported yetnot implementednot of length 1not yet implementednot yet implemented for large vectorsnsamp > i_max := maximal integer -- not allowed;number of offsets is %d, should equal %d (number of observations)number of rows in ‘x’ and length of ‘object$rweights’ must be the samenumber of subsamples must be > 0only M and D are steps supported after "init" computationoptim(.) non-convergence:optimizer function must use (at least) formal parametersoptimizer="optimx"optimxoption ‘cov.hubercorr’ is ignored for cov.corrfact = "asympt"or c(0, a,b,c, max_rho) as from .psi.const(or c(b, c, s) as from .psi.const(original detmcd() wrongly declared c-step convergence (obj=%g, i=%d, j=%d)out ofpackage must be loaded in order toparameter ‘tuning.psi’ is not numericparameter names must appear in ‘formula’parameter psi is not definedprediction from a rank-deficient fit may be misleadingprofile() method not yet implemented for "nlrob" objects. 
Use method = "Wald".provide better scale; must be all positiverecomputing robust Mahalanobis distancessaving the robust distances ‘MD’ as part ofscale ‘s’ is zero -- returning initial ‘mu’scale MAD is zero for this sampleset options(mc_doScale_quiet=TRUE) to suppress this (once per session) messageset to i_max =setting ‘y = FALSE’ has no longer any effectsigma0 =singular fit encounteredsome eigenvalues are negativesome terms will have NAs due to the limits of the methodspecifying ‘weights’ is not yet supported for methodspecifying both ‘acc’ and ‘tol’ is invalidstart ‘theta’ has still NA's .. badly singular xstepssubsample size h < n/2 may be too smallsubsets; computing thesetestthe following arguments to ‘anova.glmrob’ are invalid andthe following arguments to ‘anova.lmrob’ are invalid andtuning constant ‘cc’ is not numerictuning constants for "lqq" psi: both ‘eff’ and ‘bp’ specified, ignoring ‘bp’tuning constants for ggw psi: both eff and bp specified, ignoring bptuning parameter (chi/psi) is not numerictype ‘pearson’ is not yet implementedunable to find constants for "ggw" psi function: %sunable to find constants for "lqq" psi function: %sunable to find constants for psi functionunknown ‘wgtFUN’ specification:unknown setting for ‘subsampling’: %sunknown split typeunsupported psi function -- should not happenupdate(*, setting = ) is not allowedupper must be either of length %d, or length 1usevalue of acc must be > 0value of the tuning constant c (‘const’) must be > 0value of the tuning constant c (cw) must be > 0value of the tuning constant c (tcc) must be > 0veryweights should not be both close to 0 and close to 1!weights.on.x needs %d none-negative valuesx is not a numeric dataframe or matrix.x is singulary is not a numericy is not onedimensional}robustbase/po/0000755000176200001440000000000014555212560013046 5ustar liggesusersrobustbase/po/update-me.sh0000754000176200001440000000152612533543452015272 0ustar liggesusers#!/bin/sh # #__>> Keep in sync with ~/R/Pkgs/Matrix/po/update-me.sh <<__ # ## Script for updating package-specific *.pot files ## written such that it should work for any package # R=${R:-R} thisdir=`dirname $0` ; cd $thisdir; thisdir=`pwd` echo "R = '$R' (`$R --version | head -1`) preliminary thisdir='$thisdir'" pkgDIR=`dirname $thisdir` pkg=`basename $pkgDIR` echo ' --> pkgDIR='$pkgDIR' ; pkg='$pkg echo "require('tools'); update_pkg_po('$pkgDIR')" | $R --slave 2>&1 | tee update.log ## -------------------------------- as of R 3.0.0 echo 'end{make pkg-update}' ; echo '' echo 'Test with (e.g.)' echo ' LANGUAGE=de R --no-environ --no-save' ; echo '' echo 'and then something like' echo ' ... [TO BE ADDED] ...'; echo '' echo 'Commit with something like' echo " svn ci -m'translation updates' po inst/po"; echo '' robustbase/po/robustbase.pot0000644000176200001440000000720414441677743015762 0ustar liggesusers# SOME DESCRIPTIVE TITLE. # Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER # This file is distributed under the same license as the robustbase package. # FIRST AUTHOR , YEAR. 
# #, fuzzy msgid "" msgstr "" "Project-Id-Version: robustbase 0.99-0\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2023-06-09 09:18+0200\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" #: lmrob.c:249 msgid "DGELS could not determine optimal block size, using minimum" msgstr "" #: lmrob.c:277 #, c-format msgid "DGELS: illegal %i-th argument." msgstr "" #: lmrob.c:283 #, c-format msgid "" "DGELS: weighted design matrix not of full rank (column %d).\n" "Use control parameter 'trace.lev = 4' to get diagnostic output." msgstr "" #: lmrob.c:306 #, c-format msgid "DGEEQU: illegal %i-th argument" msgstr "" #: lmrob.c:309 msgid "" "Fast S large n strategy failed. Use control parameter 'fast.s.large.n = Inf'." msgstr "" #: lmrob.c:311 #, c-format msgid "DGEEQU: column %i of the design matrix is exactly zero." msgstr "" #: lmrob.c:316 #, c-format msgid " Skipping design matrix equilibration (DGEEQU): row %i is exactly zero." msgstr "" #: lmrob.c:477 msgid "m_s_subsample() stopped prematurely (scale < 0)." msgstr "" #: lmrob.c:575 lmrob.c:576 lmrob.c:614 lmrob.c:615 lmrob.c:642 lmrob.c:643 #: rob-utils.c:62 #, c-format msgid "Argument '%s' must be numeric or integer" msgstr "" #: lmrob.c:597 lmrob.c:628 #, c-format msgid "'deriv'=%d is invalid" msgstr "" #: lmrob.c:657 msgid "Argument 'cc' must be numeric" msgstr "" #: lmrob.c:658 msgid "Argument 'ipsi' must be integer" msgstr "" #: lmrob.c:670 #, c-format msgid "rho_inf(): ipsi=%d not implemented." msgstr "" #: lmrob.c:700 #, c-format msgid "normcnst(): ipsi=%d not implemented." msgstr "" #: lmrob.c:730 #, c-format msgid "rho(): ipsi=%d not implemented." msgstr "" #: lmrob.c:749 #, c-format msgid "psi(): ipsi=%d not implemented." msgstr "" #: lmrob.c:767 #, c-format msgid "psip(): ipsi=%d not implemented." msgstr "" #: lmrob.c:789 #, c-format msgid "psi2(): ipsi=%d not implemented." msgstr "" #: lmrob.c:1223 #, c-format msgid "rho_ggw(): case (%i) not implemented." msgstr "" #: lmrob.c:1264 #, c-format msgid "Error from Rdqags(psi_ggw*, k, ...): ier = %i" msgstr "" #: lmrob.c:1730 #, c-format msgid "fast_s_with_memory() returned invalid code %d" msgstr "" #: lmrob.c:2226 #, c-format msgid "S refinements did not converge (to refine.tol=%g) in %d (= k.max) steps" msgstr "" #: lmrob.c:2384 #, c-format msgid "m_s_descent(): Problem in RLLARSBI (RILARS). KODE=%d. Exiting." msgstr "" #: lmrob.c:2428 msgid " M-S estimate: maximum number of refinement steps reached." msgstr "" #: lmrob.c:2515 msgid "subsample(): could not find non-singular subsample." msgstr "" #: lmrob.c:2564 msgid "" "Too many singular resamples. Aborting subsample().\n" " See parameter 'subsampling; in help of lmrob.config()." msgstr "" #: lmrob.c:2617 #, c-format msgid "find_scale(*, initial_scale = %g <= 0) -> final scale = 0" msgstr "" #: lmrob.c:2632 #, c-format msgid "" "find_scale() did not converge in '%s' (= %d) iterations with tol=%g, last " "rel.diff=%g" msgstr "" #: rob-utils.c:63 rob-utils.c:64 #, c-format msgid "Argument '%s' must be numeric or integer of length 1" msgstr "" #: rowMedians.c:44 msgid "Argument 'x' must be a matrix." msgstr "" #: rowMedians.c:48 msgid "Argument 'naRm' must be either TRUE or FALSE." msgstr "" #: rowMedians.c:73 msgid "Argument 'x' must be numeric (integer or double)." 
msgstr "" robustbase/po/R-de.po0000644000176200001440000005602314441677743014217 0ustar liggesusersmsgid "" msgstr "" "Project-Id-Version: robustbase 0.93-9\n" "POT-Creation-Date: 2023-06-09 09:09\n" "PO-Revision-Date: 2023-06-09 09:16+0200\n" "Last-Translator: Martin Maechler \n" "Language-Team: not-existing \n" "Language: de\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" msgid "some terms will have NAs due to the limits of the method" msgstr "Einige Terme werden NAs enthalten wegen Beschränkung der Methode" msgid "y is not onedimensional" msgstr "y ist nicht eindimensional" msgid "Number of observations in x and y not equal" msgstr "" msgid "no intercept in the model" msgstr "Kein Achsenabschnitt ('intercept') im Modell" msgid "All observations have missing values!" msgstr "Alle Beobachtungen haben fehlende Werte!" msgid "Implosion: sigma1=%g became too small" msgstr "Implosion: sigma1=%g ist zu klein geworden" msgid "Convergence Achieved" msgstr "Konvergenz erreicht" msgid "No convergence in %d steps." msgstr "Keine Konvergenz in %d Schritten." msgid "maximum number of \"kstep\" iterations must be > 0" msgstr "Maximale Anzahl von \"kstep\" Iterationen muss > 0 sein" msgid "maximal number of *inner* step halvings must be > 0" msgstr "Maximale Anzahl von *inneren* Schritthalbierungen muss > 0 sein" msgid "value of the tuning constant c ('const') must be > 0" msgstr "Die Tuning Konstante c ('const') muss positiv sein" msgid "non-trivial prior 'weights' are not yet implemented for \"BY\"" msgstr "" "nicht-triviale apriori Gewichte ('weights') sind noch nicht implementiert " "for \"BY\"" msgid "'start' cannot yet be passed to glmrobBY()" msgstr "" msgid "'offset' is not yet implemented for \"BY\"" msgstr "" msgid "'weights.on.x = \"%s\"' is not implemented" msgstr "" msgid "glm(.) {inner subsample} error:" msgstr "" msgid "betaExacto glm(.) error:" msgstr "" msgid "value of acc must be > 0" msgstr "" msgid "number of subsamples must be > 0" msgstr "" msgid "maximum number of iterations must be > 0" msgstr "" msgid "value of the tuning constant c (cw) must be > 0" msgstr "" msgid "Currently, only family 'poisson' is supported for the \"MT\" estimator" msgstr "" "Momentan ist nur die 'poisson' Familie unterstützt beim \"MT\" Schätzer" msgid "All weights must be positive" msgstr "Alle Gewichte müssen positiv sein" msgid "non-trivial 'offset' is not yet implemented" msgstr "nicht-trivialer 'offset' ist noch nicht implementiert" msgid "NAs in V(mu)" msgstr "NAs in V(mu)" msgid "0s in V(mu)" msgstr "Nullen ('0') in V(mu)" msgid "'start' must be an initial estimate of beta, of length %d" msgstr "'start' muss eine Startschätzung sein von beta, der Länge %d" msgid "optim(.) non-convergence:" msgstr "" msgid "'X' must have at least two columns" msgstr "'X' muss mindestens zwei Spalten haben" msgid "sigma0 =" msgstr "" msgid "==> scaleTau2(.) 
= 0" msgstr "" msgid "invalid first argument" msgstr "ungültiges erstes Argument" msgid "'formula' missing or incorrect" msgstr "'formula' fehlt oder ist ungültig" msgid "'coef' must not be negative" msgstr "'coef' darf nicht negativ sein" msgid "More dimensions than observations, currently not implemented" msgstr "Mehr Dimensionen als Beobachtungen; momentan nicht implementiert" msgid "More dimensions than observations: not yet implemented" msgstr "Mehr Dimensionen als Beobachtungen; das ist noch nicht implementiert" msgid "Matrix 'x' is not of full rank: rankM(x) =" msgstr "" msgid "< p =" msgstr "" msgid "Use fullRank(x) instead" msgstr "Verwende stattdessen fullRank(x)" msgid "" "** direction sampling iterations were not sufficient. Maybe try increasing " "'maxit.mult'" msgstr "" msgid "the following arguments to 'anova.glmrob' are invalid and" msgstr "" msgid "dropped:" msgstr "" msgid "anova.glmrob() only works for 'glmrob' objects" msgstr "" msgid "'Anova Table' for a single model object not yet implemented" msgstr "" msgid "Not the same response used in the fitted models" msgstr "Nicht dieselbe Zielvariable verwendet in den angepassten Modellen" msgid "models were not all fitted to the same size of dataset" msgstr "Modelle wurden nicht alle auf die gleich grossen Datensätze angepasst" msgid "Not the same method used for fitting the models" msgstr "Nicht die gleiche Methode verwendet um die Modelle anzupassen" msgid "Not the same tuning constant c used in the robust fits" msgstr "" msgid "Models are not nested!" msgstr "" msgid "Models are not strictly nested" msgstr "" msgid "This family is not implemented" msgstr "" msgid "some eigenvalues are negative" msgstr "" msgid "non-implemented test method:" msgstr "" msgid "for fitting method" msgstr "" msgid "the following arguments to 'anova.lmrob' are invalid and" msgstr "" msgid "For test = 'Deviance', the estimator chain has to end with 'M'" msgstr "" msgid "The first object does not contain the largest model" msgstr "" msgid "All models are refitted except the largest one" msgstr "" msgid "anova.lmrob() only works for 'lmrob' objects" msgstr "" msgid "'Anova Table' for a single model not yet implemented" msgstr "" msgid "invalid 'test'" msgstr "" msgid "Please fit the nested models by lmrob" msgstr "" msgid "NA coefs in full and reduced model do not match" msgstr "" msgid "test" msgstr "" msgid "not yet implemented" msgstr "" msgid "deriv must be in {0,1,2}" msgstr "" msgid "deriv must be in {-1,0,1,2}" msgstr "" msgid "'x' must be a numeric matrix" msgstr "" msgid "The sample size must be greater than 1 for svd" msgstr "" msgid "'wgtFUN' must be a function or a string specifying such a function" msgstr "" msgid "invalid 'seed'. Must be compatible with .Random.seed !" msgstr "" msgid "Invalid number of trials nsamp =" msgstr "" msgid "!" msgstr "" msgid "n <= p -- you can't be serious!" msgstr "" msgid "n == p+1 is too small sample size for MCD" msgstr "" msgid "n < 2 * p, i.e., possibly too small sample size" msgstr "" msgid "Sample size n < h(alpha; n,p) := size of \"good\" subsample" msgstr "" msgid "subsample size\t h < n/2 may be too small" msgstr "" msgid "'wgtFUN' must be a function or one of the strings %s." msgstr "" msgid "Unexpected 'exactfit' code" msgstr "" msgid ". Please report!" msgstr "" msgid "illegal 'singularity$kind'" msgstr "" msgid "Options 'best' and 'exact' not allowed for n greater than 2*nmini-1 =" msgstr "" msgid "" ".\n" "Using default." 
msgstr "" msgid "'nsamp = \"best\"' allows maximally" msgstr "" msgid "" "subsets;\n" "computing these" msgstr "" msgid "This may take a" msgstr "" msgid "very" msgstr "" msgid "long time!" msgstr "" msgid "Invalid number of trials nsamp=%s. Using default nsamp=%d." msgstr "" msgid "nsamp > i_max := maximal integer -- not allowed;" msgstr "" msgid "set to i_max =" msgstr "" msgid "Use only with 'mcd' objects" msgstr "" msgid "x is not a numeric dataframe or matrix." msgstr "" msgid "argument 'm.cov' must have numeric components 'center' and 'cov'" msgstr "" msgid "Data set and provided center have different dimensions!" msgstr "" msgid "The covariance matrix is singular!" msgstr "" msgid "id.n" msgstr "" msgid "must be in {1,..," msgstr "" msgid "}" msgstr "" msgid "For tolerance ellipses the dimension 'p' must be 2!" msgstr "" msgid "'hsets.init' must be a h' x L matrix (h' >= h) of observation indices" msgstr "" msgid "'full.h' is true, but 'hsets.init' has less than n rows" msgstr "" msgid "'hsets.init' must be in {1,2,...,n}; n =" msgstr "" msgid "More than h of the observations lie on a hyperplane." msgstr "" msgid "" "original detmcd() wrongly declared c-step convergence (obj=%g, i=%d, j=%d)" msgstr "" msgid "Invalid scalefn='%s': must be function or a valid string" msgstr "" msgid "'%s' must be a function, numeric vector of length p, or NULL" msgstr "" msgid "provide better scale; must be all positive" msgstr "" msgid "More than half of the observations lie on a hyperplane." msgstr "" msgid "'%s' is not a valid family (see ?family)" msgstr "" msgid "Robust GLM fitting not yet implemented for family %s" msgstr "" msgid "'weights' must be non-negative" msgstr "" msgid "" "Number of offsets is %d, should rather equal %d (number of observations)" msgstr "" msgid "'weights.on.x' must be a string, function, list or numeric n-vector" msgstr "" msgid "'start' must be a numeric vector, NULL, or a character string" msgstr "" msgid "invalid 'start' string" msgstr "" msgid "For method 'cubif', use glmRob() from package 'robust'" msgstr "" msgid "method='%s' is only applicable for binomial family, but family=\"\"" msgstr "" msgid "invalid 'method':" msgstr "" msgid "setting 'y = FALSE' has no longer any effect" msgstr "" msgid "" "No weights defined for this object. Use type=\"robustness\" argument to get " "robustness weights." msgstr "" msgid "need non-robust working residuals for this model type" msgstr "" msgid "invalid 'variant':" msgstr "" msgid "Weighting method" msgstr "" msgid "is not implemented" msgstr "" msgid "" "if a list, weights.on.x must contain a covariance function such as covMcd()" msgstr "" msgid "weights.on.x needs %d none-negative values" msgstr "" msgid "All weights.on.x must be none negative" msgstr "" msgid "'offset' not fully implemented" msgstr "" msgid "illegal 'family' argument" msgstr "" msgid "start 'theta' has still NA's .. 
badly singular x" msgstr "" msgid "Cannot find valid starting values: You need help" msgstr "" msgid "family '%s' not yet implemented" msgstr "" msgid "NAs in d(mu)/d(eta)" msgstr "" msgid "Non-finite coefficients at iteration" msgstr "" msgid "Algorithm did not converge" msgstr "" msgid "fitted probabilities numerically 0 or 1 occurred" msgstr "" msgid "fitted rates numerically 0 occurred" msgstr "" msgid "Only 'test.acc = \"coef\"' is currently implemented" msgstr "" msgid "value of the tuning constant c (tcc) must be > 0" msgstr "" msgid "Std.error computation not yet available for the case of 'weights'" msgstr "" msgid "negative scale 's'" msgstr "" msgid "scale 's' is zero -- returning initial 'mu'" msgstr "" msgid "scale MAD is zero for this sample" msgstr "" msgid "Qn(x) == 0 and tmad(x, trim=%g) == 0" msgstr "" msgid "calculations stopped prematurely in rllarsbi" msgstr "" msgid "(probably because of rounding errors)." msgstr "" msgid "unknown split type" msgstr "" msgid "No categorical variables found in model. Reverting to S-estimator." msgstr "" msgid "No continuous variables found in model. Reverting to L1-estimator." msgstr "" msgid "M-S estimator did *not* converge" msgstr "" msgid "'psi' should be one of %s" msgstr "" msgid "invalid 'psi'=%s; possibly use .regularize.Mpsi(%s)" msgstr "" msgid "Unknown setting '" msgstr "" msgid "'. Using defaults." msgstr "" msgid "update(*, setting = ) is not allowed" msgstr "" msgid "Initial estimator '%s' not supported; using S-estimator instead" msgstr "" msgid "" ".vcov.avar1 can only be used when initial estimator is S; using .vcov.w " "instead" msgstr "" msgid "only M and D are steps supported after \"init\" computation" msgstr "" msgid "%s-step did NOT converge. Returning unconverged %s-estimate" msgstr "" msgid "" "initial estim. 'init' not converged -- will be return()ed basically unchanged" msgstr "" msgid ":.vcov.w: cov.hubercorr must be logical (or NULL)" msgstr "" msgid ":.vcov.w: cov.corrfact must be one of" msgstr "" msgid ":.vcov.w: cov.dfcorr must be one of" msgstr "" msgid "ignoring cov.resid == 'final' since est != final" msgstr "" msgid "cov.resid must be one of" msgstr "" msgid ":.vcov.w: cov.xwx must be logical (or NULL)" msgstr "" msgid "parameter psi is not defined" msgstr "" msgid "parameter 'tuning.psi' is not numeric" msgstr "" msgid ":.vcov.w: scale missing, using D scale" msgstr "" msgid "option 'cov.hubercorr' is ignored for cov.corrfact = \"asympt\"" msgstr "" msgid ":.vcov.w: unsupported psi function" msgstr "" msgid "(tau / hybrid / tauold): tau not found in 'obj'" msgstr "" msgid ":.vcov.w: Caution. Some psi'() are NA" msgstr "" msgid ":.vcov.w: unsupported psi for \"hybrid\" correction factor" msgstr "" msgid "invalid 'cov.dfcorr':" msgstr "" msgid ".vcov.avar1() supports only SM or MM estimates" msgstr "" msgid "" "initial estimate residuals length differs from final ones. Typically must " "refit w/ lmrob()" msgstr "" msgid "X'WX is almost singular. Consider using cov = \".vcov.w\"" msgstr "" msgid "X'WX is singular. 
Rather use cov = \".vcov.w\"" msgstr "" msgid "fixing" msgstr "" msgid "negative eigen([" msgstr "" msgid "])values" msgstr "" msgid "" ".vcov.avar1: negative diag() fixed up; consider 'cov=\".vcov.w.\"' " "instead" msgstr "" msgid "invalid 'posdef.meth':" msgstr "" msgid "'control$n.group' must be larger than 'p' for 'large_n' algorithm" msgstr "" msgid "'groups * n.group' must be smaller than 'n' for 'large_n' algorithm" msgstr "" msgid "'control$n.group' is not much larger than 'p', probably too small" msgstr "" msgid "invalid 'seed'. Must be a valid .Random.seed !" msgstr "" msgid "C function R_lmrob_S() exited prematurely" msgstr "" msgid "S-estimated scale == 0: Probably exact fit; check your data" msgstr "" msgid "lmrob..D..fit: control is missing" msgstr "" msgid "lmrob..D..fit: prior estimator did not converge, stopping" msgstr "" msgid "lmrob..D..fit: robustness weights undefined" msgstr "" msgid "lmrob..D..fit: residuals undefined" msgstr "" msgid "lmrob..D..fit: parameter psi is not defined" msgstr "" msgid "lmrob..D..fit: parameter tuning.psi is not numeric" msgstr "" msgid "control is missing" msgstr "" msgid "'control' is missing" msgstr "" msgid "internal logic error in psi() function name:" msgstr "" msgid "Please report!" msgstr "" msgid "argument 'psi' must be a string (denoting a psi function)" msgstr "" msgid "tuning constant 'cc' is not numeric" msgstr "" msgid "Coefficients for" msgstr "" msgid "function incorrectly specified." msgstr "" msgid "" "Use c(minimal slope, b, efficiency, breakdown point) [6 hard-coded special " "cases]" msgstr "" msgid "or c(0, a,b,c, max_rho) as from .psi.const(" msgstr "" msgid ", cc)." msgstr "" msgid "Use c(minimal slope, b, efficiency, breakdown point) [2 special cases]" msgstr "" msgid "or c(b, c, s) as from .psi.const(" msgstr "" msgid "Coef. for Hampel psi function not of length 3" msgstr "" msgid "Coef. for psi function" msgstr "" msgid "not of length 1" msgstr "" msgid "tuning constants for ggw psi: both eff and bp specified, ignoring bp" msgstr "" msgid "neither breakdown point 'bp' nor efficiency 'eff' specified" msgstr "" msgid "unable to find constants for \"ggw\" psi function: %s" msgstr "" msgid "" "tuning constants for \"lqq\" psi: both 'eff' and 'bp' specified, ignoring " "'bp'" msgstr "" msgid "Error: neither breakdown point nor efficiency specified" msgstr "" msgid "unable to find constants for \"lqq\" psi function: %s" msgstr "" msgid "method for psi function" msgstr "" msgid "not implemented" msgstr "" msgid "tuning parameter (chi/psi) is not numeric" msgstr "" msgid "need non-negative number of nodes" msgstr "" msgid "unknown setting for 'subsampling': %s" msgstr "" msgid "number of rows in 'x' and length of 'object$rweights' must be the same" msgstr "" msgid "'%s' must be a number or a function of %s which returns a number" msgstr "" msgid "Detected possible local breakdown of" msgstr "" msgid "-estimate in" msgstr "" msgid "coefficients" msgstr "" msgid "coefficient" msgstr "" msgid "'" msgstr "" msgid "," msgstr "" msgid "." msgstr "" msgid "KS2014" msgstr "" msgid "Use lmrob argument 'setting=\"KS2014\"' to avoid this problem." msgstr "" msgid "arguments .. in" msgstr "" msgid ")$" msgstr "" msgid "^list\\(" msgstr "" msgid "are disregarded." msgstr "" msgid "Maybe use lmrob(*, control=lmrob.control(....) with all these." 
msgstr "" msgid "'weights' must be a numeric vector" msgstr "" msgid "number of offsets is %d, should equal %d (number of observations)" msgstr "" msgid "The 'method' argument is different from 'control$method'" msgstr "" msgid "Using the former, method =" msgstr "" msgid "incompatible dimensions" msgstr "" msgid "missing or negative weights not allowed" msgstr "" msgid "singular fit encountered" msgstr "" msgid "init must be \"S\", \"M-S\", function or list" msgstr "" msgid "" "Length of initial coefficients vector does not match rank of singular design " "matrix x" msgstr "" #, fuzzy msgid "invalid 'init' argument" msgstr "ungültiges erstes Argument" msgid "arguments" msgstr "" msgid "are disregarded in" msgstr "" msgid "" "Failed to compute robust Mahalanobis distances, reverting to robust " "leverages." msgstr "" msgid "lmrob object does not have a proper 'qr' component. Rank zero?" msgstr "" msgid "lmrob object does not have a proper 'qr' component. Rank must be zero" msgstr "" msgid "invalid 'lmrob' object: no terms component" msgstr "" msgid "unsupported psi function -- should not happen" msgstr "" msgid "weights should not be both close to 0 and close to 1!" msgstr "" msgid "You should use different 'eps' and/or 'eps1'" msgstr "" msgid "calling predict.lm() ..." msgstr "" msgid "prediction from a rank-deficient fit may be misleading" msgstr "" msgid "Predictions on current data refer to _future_ responses" msgstr "" msgid "" "Assuming prediction variance inversely proportional to weights used for " "fitting" msgstr "" msgid "Assuming constant prediction variance even though model fit is weighted" msgstr "" msgid "'weights' as formula should be one-sided" msgstr "" msgid "Use only with 'lts' objects" msgstr "" msgid "Index plot of standardized residuals is not avalable if scale = 0" msgstr "" msgid "" "Standardized residuals vs Fitted values plot is not avalable if scale = 0" msgstr "" msgid "" "Diagnostic plot is not available for univar\n" "iate location and scale estimation" msgstr "" msgid "Regression Diagnostic plot is not avalable if scale = 0" msgstr "" msgid "" "Regression Diagnostic plot is not avalable: option mcd=F was set in ltsReg()." msgstr "" msgid "The MCD covariance matrix was singular." msgstr "" msgid "'id.n' must be in {1,..," msgstr "" msgid "alpha not inside [1/2, 1]" msgstr "" msgid "unknown 'wgtFUN' specification:" msgstr "" msgid "'wgtFUN' must be a function or a string specifying one" msgstr "" msgid "y is not a numeric" msgstr "" msgid "There is at least one constant column. Remove it and set intercept=TRUE" msgstr "" msgid "'qr.out = TRUE' for univariate location is disregarded" msgstr "" msgid "Need more than twice as many observations as variables." msgstr "" msgid "x is singular" msgstr "" msgid "no valid subsample found in LTS - set 'nsamp' or rather use lmrob.S()" msgstr "" msgid "NA coefficient (at %s) from \"best\" subset" msgstr "" msgid "Invalid number of trials nsamp=" msgstr "" msgid "! Using default." msgstr "" msgid "'nsamp' options 'best' and 'exact' not allowed for n greater than" msgstr "" msgid ". Will use default." msgstr "" msgid "Maximum 5000 subsets allowed for option 'best'." 
msgstr "" msgid "Computing 5000 subsets of size" msgstr "" msgid "out of" msgstr "" msgid "The default of 'doScale' is FALSE now for stability;" msgstr "" msgid "" "set options(mc_doScale_quiet=TRUE) to suppress this (once per session) " "message" msgstr "" msgid "mc(): not 'converged'" msgstr "" msgid "in" msgstr "" msgid "iter" msgstr "" msgid "iterations" msgstr "" msgid "*and*" msgstr "" msgid "'reflect part' in" msgstr "" msgid "iter2" msgstr "" msgid "; try enlarging eps1, eps2 !?" msgstr "" msgid "optimx" msgstr "" msgid "package must be loaded in order to" msgstr "" msgid "use" msgstr "" msgid "optimizer=\"optimx\"" msgstr "" msgid "couldn't find optimizer function" msgstr "" msgid "non-function specified as optimizer" msgstr "" msgid "optimizer function must use (at least) formal parameters" msgstr "" msgid "Provide 'upper' or 'lower' with names()" msgstr "" msgid "parameter names must appear in 'formula'" msgstr "" msgid "lower must be either of length %d, or length 1" msgstr "" msgid "upper must be either of length %d, or length 1" msgstr "" msgid "'%s' must be character string or function, but is \"%s\"" msgstr "" msgid "Psi function '%s' not supported yet" msgstr "" msgid "Initialization 'init = \"%s\"' not supported (yet)" msgstr "" msgid "unable to find constants for psi function" msgstr "" msgid "" "As \"sigma\" is in 'pnames', do not use it as variable or parameter name in " "'formula'" msgstr "" msgid "Method" msgstr "" msgid "not correctly supported yet" msgstr "" msgid "'formula' should be a formula of the type 'y ~ f(x, alpha)'" msgstr "" msgid "specifying both 'acc' and 'tol' is invalid" msgstr "" msgid "The argument 'acc' has been renamed to 'tol'; do adapt your code." msgstr "" msgid "" "'start' or 'lower' or 'upper' must be fully named (list or numeric vector)" msgstr "" msgid "specifying 'weights' is not yet supported for method" msgstr "" msgid "For method = \"%s\", currently 'psi' must be specified via 'control'" msgstr "" msgid "'start' must be a named list or numeric vector" msgstr "" msgid "'scale' must be NULL or a positive number" msgstr "" msgid "Do not use '%s' as a variable name or as a parameter name" msgstr "" msgid "'weights' must be nonnegative and not contain NAs" msgstr "" msgid "NA's present in data; consider using 'na.action = na.exclude'" msgstr "" msgid "could not compute scale of residuals" msgstr "" msgid "failed to converge in" msgstr "" msgid "steps" msgstr "" msgid "type 'pearson' is not yet implemented" msgstr "" msgid "invalid 'type'" msgstr "" msgid "'cov' must be a p x p matrix" msgstr "" msgid "for method='" msgstr "" msgid "', 'parm' must be specified as an integer" msgstr "" msgid "" "profile() method not yet implemented for \"nlrob\" objects.\n" " Use method = \"Wald\"." msgstr "" msgid "Computing profile confidence intervals ..." msgstr "" msgid "" "\"boot\" method not yet implemented for \"nlrob\" objects.\n" " Use confint(*, method = \"Wald\")." msgstr "" msgid "Use only with 'lmrob' objects" msgstr "" msgid "'which' must be in 1:5" msgstr "" msgid "'id.n' must be in {1,..,%d}" msgstr "" msgid "recomputing robust Mahalanobis distances" msgstr "" msgid "need 'model' or 'x' component for robust Mahalanobis distances" msgstr "" msgid "saving the robust distances 'MD' as part of" msgstr "" msgid "Tolerance range must be between 0% to 100%" msgstr "" msgid "arguments of function '" msgstr "" msgid "' are (" msgstr "" msgid ") but should be (" msgstr "" msgid ")." 
msgstr "" msgid "invalid tuning parameter names:" msgstr "" msgid "instead of" msgstr "" msgid "You cannot specify both 'main' and the deprecated 'shortMain'" msgstr "" msgid "'shortMain' is deprecated and will get defunct." msgstr "" msgid "Use 'main = \"short\"' instead of 'shortMain = TRUE'" msgstr "" msgid "not yet implemented for large vectors" msgstr "" msgid "finite sample corrections are not corrected for non-default 'k'" msgstr "" msgid "'weights' must have same length as 'x'" msgstr "" msgid "Dimension {= ncol(x)} must be 2!" msgstr "" msgid "Initial set" msgid_plural "Initial sets" msgstr[0] "" msgstr[1] "" msgid "For method = \"%s\", argument %s is not made use of" msgid_plural "For method = \"%s\", arguments %s are not made use of" msgstr[0] "" msgstr[1] "" robustbase/po/de.po0000644000176200001440000001160614441677743014016 0ustar liggesusers# Uebersetzung von robustbase.pot auf deutsch. # Copyright (C) 2021 Martin Maechler # This file is distributed under the same license as the robustbase package. # Martin Maechler , 2021. #, fuzzy msgid "" msgstr "" "Project-Id-Version: robustbase 0.93-9\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2023-06-09 09:18+0200\n" "PO-Revision-Date: 2023-06-09 09:26+0200\n" "Last-Translator: Martin Maechler \n" "Language-Team: not-existing \n" "Language: de\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" #: lmrob.c:249 #, fuzzy msgid "DGELS could not determine optimal block size, using minimum" msgstr "" "Es gab ein Problem beim Bestimmen der optimalen Blockgrösse, wir verwenden " "das Minimum" #: lmrob.c:277 #, c-format msgid "DGELS: illegal %i-th argument." msgstr "DGELS: ungültiges %i. Argument" #: lmrob.c:283 #, c-format, fuzzy msgid "" "DGELS: weighted design matrix not of full rank (column %d).\n" "Use control parameter 'trace.lev = 4' to get diagnostic output." msgstr "" "DGELS: Die gewichtete Design-Matrix hat nicht vollen Rang (Spalte %d).\n" "Benütze die Kontroll Option 'trace.lev = 4' um diagnostischen Output zu erhalten.\n" #: lmrob.c:306 #, c-format msgid "DGEEQU: illegal %i-th argument" msgstr "DGEQU: ungültiges %i. Argument" #: lmrob.c:309 msgid "" "Fast S large n strategy failed. Use control parameter 'fast.s.large.n = Inf'." msgstr "" "Die 'Fast S large n' Strategie hat versagt. Verwende Kontrollparameter 'fast." "s.large.n = Inf'." #: lmrob.c:311 #, c-format msgid "DGEEQU: column %i of the design matrix is exactly zero." msgstr "DGEEQU: Spalte %i der Design Matrix ist exact Null." #: lmrob.c:316 #, c-format msgid " Skipping design matrix equilibration (DGEEQU): row %i is exactly zero." msgstr "" "Äquilibrierung der Designmatrix weggelassen (DGEEQU): Zeile %i ist exakt " "Null." #: lmrob.c:477 msgid "m_s_subsample() stopped prematurely (scale < 0)." msgstr "m_s_subsample() hat vorzeitig angehalten (scale < 0)." #: lmrob.c:575 lmrob.c:576 lmrob.c:614 lmrob.c:615 lmrob.c:642 lmrob.c:643 #: rob-utils.c:62 #, c-format msgid "Argument '%s' must be numeric or integer" msgstr "Argument '%s' muss numerisch ('numeric' oder 'integer') sein" #: lmrob.c:597 lmrob.c:628 #, c-format msgid "'deriv'=%d is invalid" msgstr "'deriv'=%d ist ungültig" #: lmrob.c:657 msgid "Argument 'cc' must be numeric" msgstr "Argument 'cc' muss numerisch sein" #: lmrob.c:658 msgid "Argument 'ipsi' must be integer" msgstr "" #: lmrob.c:670 #, c-format msgid "rho_inf(): ipsi=%d not implemented." msgstr "rho_inf(): ipsi=%d nicht implementiert." 
#: lmrob.c:700 #, c-format msgid "normcnst(): ipsi=%d not implemented." msgstr "normcnst(): ipsi=%d nicht implementiert." #: lmrob.c:730 #, c-format msgid "rho(): ipsi=%d not implemented." msgstr "rho(): ipsi=%d nicht implementiert." #: lmrob.c:749 #, c-format msgid "psi(): ipsi=%d not implemented." msgstr "psi(): ipsi=%d nicht implementiert." #: lmrob.c:767 #, c-format msgid "psip(): ipsi=%d not implemented." msgstr "psip(): ipsi=%d nicht implementiert." #: lmrob.c:789 #, c-format msgid "psi2(): ipsi=%d not implemented." msgstr "psi2(): ipsi=%d nicht implementiert." #: lmrob.c:1223 #, c-format msgid "rho_ggw(): case (%i) not implemented." msgstr "rho_ggw(): Fall (%i) nicht implementiert." #: lmrob.c:1264 #, c-format msgid "Error from Rdqags(psi_ggw*, k, ...): ier = %i" msgstr "Fehler von Rdqags(psi_ggw*, k, ...): ier = %i" #: lmrob.c:1730 #, c-format msgid "fast_s_with_memory() returned invalid code %d" msgstr "" #: lmrob.c:2226 #, c-format msgid "S refinements did not converge (to refine.tol=%g) in %d (= k.max) steps" msgstr "" #: lmrob.c:2384 #, c-format msgid "m_s_descent(): Problem in RLLARSBI (RILARS). KODE=%d. Exiting." msgstr "" #: lmrob.c:2428 msgid " M-S estimate: maximum number of refinement steps reached." msgstr "" #: lmrob.c:2515 msgid "subsample(): could not find non-singular subsample." msgstr "" #: lmrob.c:2564 msgid "" "Too many singular resamples. Aborting subsample().\n" " See parameter 'subsampling; in help of lmrob.config()." msgstr "" #: lmrob.c:2617 #, c-format msgid "find_scale(*, initial_scale = %g <= 0) -> final scale = 0" msgstr "" #: lmrob.c:2632 #, c-format msgid "" "find_scale() did not converge in '%s' (= %d) iterations with tol=%g, last " "rel.diff=%g" msgstr "" #: rob-utils.c:63 rob-utils.c:64 #, c-format msgid "Argument '%s' must be numeric or integer of length 1" msgstr "Argument '%s' muss numerisch oder Ganzzahle der Länge 1 sein" #: rowMedians.c:44 msgid "Argument 'x' must be a matrix." msgstr "Argument 'x' muss eine Matrix sein." #: rowMedians.c:48 msgid "Argument 'naRm' must be either TRUE or FALSE." msgstr "Argument 'naRm' muss entweder TRUE oder FALSE sein." #: rowMedians.c:73 msgid "Argument 'x' must be numeric (integer or double)." msgstr "Argument 'x' muss numerisch ('integer' oder 'double') sein." robustbase/po/R-robustbase.pot0000644000176200001440000005327014441677743016165 0ustar liggesusersmsgid "" msgstr "" "Project-Id-Version: robustbase 0.99-0\n" "POT-Creation-Date: 2023-06-09 09:09\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" "Plural-Forms: nplurals=INTEGER; plural=EXPRESSION;\n" msgid "some terms will have NAs due to the limits of the method" msgstr "" msgid "y is not onedimensional" msgstr "" msgid "Number of observations in x and y not equal" msgstr "" msgid "no intercept in the model" msgstr "" msgid "All observations have missing values!" msgstr "" msgid "Implosion: sigma1=%g became too small" msgstr "" msgid "Convergence Achieved" msgstr "" msgid "No convergence in %d steps." 
msgstr "" msgid "maximum number of \"kstep\" iterations must be > 0" msgstr "" msgid "maximal number of *inner* step halvings must be > 0" msgstr "" msgid "value of the tuning constant c ('const') must be > 0" msgstr "" msgid "non-trivial prior 'weights' are not yet implemented for \"BY\"" msgstr "" msgid "'start' cannot yet be passed to glmrobBY()" msgstr "" msgid "'offset' is not yet implemented for \"BY\"" msgstr "" msgid "'weights.on.x = \"%s\"' is not implemented" msgstr "" msgid "glm(.) {inner subsample} error:" msgstr "" msgid "betaExacto glm(.) error:" msgstr "" msgid "value of acc must be > 0" msgstr "" msgid "number of subsamples must be > 0" msgstr "" msgid "maximum number of iterations must be > 0" msgstr "" msgid "value of the tuning constant c (cw) must be > 0" msgstr "" msgid "Currently, only family 'poisson' is supported for the \"MT\" estimator" msgstr "" msgid "All weights must be positive" msgstr "" msgid "non-trivial 'offset' is not yet implemented" msgstr "" msgid "NAs in V(mu)" msgstr "" msgid "0s in V(mu)" msgstr "" msgid "'start' must be an initial estimate of beta, of length %d" msgstr "" msgid "optim(.) non-convergence:" msgstr "" msgid "'X' must have at least two columns" msgstr "" msgid "sigma0 =" msgstr "" msgid "==> scaleTau2(.) = 0" msgstr "" msgid "invalid first argument" msgstr "" msgid "'formula' missing or incorrect" msgstr "" msgid "'coef' must not be negative" msgstr "" msgid "More dimensions than observations, currently not implemented" msgstr "" msgid "More dimensions than observations: not yet implemented" msgstr "" msgid "Matrix 'x' is not of full rank: rankM(x) =" msgstr "" msgid "< p =" msgstr "" msgid "Use fullRank(x) instead" msgstr "" msgid "** direction sampling iterations were not sufficient. Maybe try increasing 'maxit.mult'" msgstr "" msgid "the following arguments to 'anova.glmrob' are invalid and" msgstr "" msgid "dropped:" msgstr "" msgid "anova.glmrob() only works for 'glmrob' objects" msgstr "" msgid "'Anova Table' for a single model object not yet implemented" msgstr "" msgid "Not the same response used in the fitted models" msgstr "" msgid "models were not all fitted to the same size of dataset" msgstr "" msgid "Not the same method used for fitting the models" msgstr "" msgid "Not the same tuning constant c used in the robust fits" msgstr "" msgid "Models are not nested!" 
msgstr "" msgid "Models are not strictly nested" msgstr "" msgid "This family is not implemented" msgstr "" msgid "some eigenvalues are negative" msgstr "" msgid "non-implemented test method:" msgstr "" msgid "for fitting method" msgstr "" msgid "the following arguments to 'anova.lmrob' are invalid and" msgstr "" msgid "For test = 'Deviance', the estimator chain has to end with 'M'" msgstr "" msgid "The first object does not contain the largest model" msgstr "" msgid "All models are refitted except the largest one" msgstr "" msgid "anova.lmrob() only works for 'lmrob' objects" msgstr "" msgid "'Anova Table' for a single model not yet implemented" msgstr "" msgid "invalid 'test'" msgstr "" msgid "Please fit the nested models by lmrob" msgstr "" msgid "NA coefs in full and reduced model do not match" msgstr "" msgid "test" msgstr "" msgid "not yet implemented" msgstr "" msgid "deriv must be in {0,1,2}" msgstr "" msgid "deriv must be in {-1,0,1,2}" msgstr "" msgid "'x' must be a numeric matrix" msgstr "" msgid "The sample size must be greater than 1 for svd" msgstr "" msgid "'wgtFUN' must be a function or a string specifying such a function" msgstr "" msgid "invalid 'seed'. Must be compatible with .Random.seed !" msgstr "" msgid "Invalid number of trials nsamp =" msgstr "" msgid "!" msgstr "" msgid "n <= p -- you can't be serious!" msgstr "" msgid "n == p+1 is too small sample size for MCD" msgstr "" msgid "n < 2 * p, i.e., possibly too small sample size" msgstr "" msgid "Sample size n < h(alpha; n,p) := size of \"good\" subsample" msgstr "" msgid "subsample size\t h < n/2 may be too small" msgstr "" msgid "'wgtFUN' must be a function or one of the strings %s." msgstr "" msgid "Unexpected 'exactfit' code" msgstr "" msgid ". Please report!" msgstr "" msgid "illegal 'singularity$kind'" msgstr "" msgid "Options 'best' and 'exact' not allowed for n greater than 2*nmini-1 =" msgstr "" msgid ".\nUsing default." msgstr "" msgid "'nsamp = \"best\"' allows maximally" msgstr "" msgid "subsets;\ncomputing these" msgstr "" msgid "This may take a" msgstr "" msgid "very" msgstr "" msgid "long time!" msgstr "" msgid "Invalid number of trials nsamp=%s. Using default nsamp=%d." msgstr "" msgid "nsamp > i_max := maximal integer -- not allowed;" msgstr "" msgid "set to i_max =" msgstr "" msgid "Use only with 'mcd' objects" msgstr "" msgid "x is not a numeric dataframe or matrix." msgstr "" msgid "argument 'm.cov' must have numeric components 'center' and 'cov'" msgstr "" msgid "Data set and provided center have different dimensions!" msgstr "" msgid "The covariance matrix is singular!" msgstr "" msgid "id.n" msgstr "" msgid "must be in {1,..," msgstr "" msgid "}" msgstr "" msgid "For tolerance ellipses the dimension 'p' must be 2!" msgstr "" msgid "'hsets.init' must be a h' x L matrix (h' >= h) of observation indices" msgstr "" msgid "'full.h' is true, but 'hsets.init' has less than n rows" msgstr "" msgid "'hsets.init' must be in {1,2,...,n}; n =" msgstr "" msgid "More than h of the observations lie on a hyperplane." msgstr "" msgid "original detmcd() wrongly declared c-step convergence (obj=%g, i=%d, j=%d)" msgstr "" msgid "Invalid scalefn='%s': must be function or a valid string" msgstr "" msgid "'%s' must be a function, numeric vector of length p, or NULL" msgstr "" msgid "provide better scale; must be all positive" msgstr "" msgid "More than half of the observations lie on a hyperplane." 
msgstr "" msgid "'%s' is not a valid family (see ?family)" msgstr "" msgid "Robust GLM fitting not yet implemented for family %s" msgstr "" msgid "'weights' must be non-negative" msgstr "" msgid "Number of offsets is %d, should rather equal %d (number of observations)" msgstr "" msgid "'weights.on.x' must be a string, function, list or numeric n-vector" msgstr "" msgid "'start' must be a numeric vector, NULL, or a character string" msgstr "" msgid "invalid 'start' string" msgstr "" msgid "For method 'cubif', use glmRob() from package 'robust'" msgstr "" msgid "method='%s' is only applicable for binomial family, but family=\"\"" msgstr "" msgid "invalid 'method':" msgstr "" msgid "setting 'y = FALSE' has no longer any effect" msgstr "" msgid "No weights defined for this object. Use type=\"robustness\" argument to get robustness weights." msgstr "" msgid "need non-robust working residuals for this model type" msgstr "" msgid "invalid 'variant':" msgstr "" msgid "Weighting method" msgstr "" msgid "is not implemented" msgstr "" msgid "if a list, weights.on.x must contain a covariance function such as covMcd()" msgstr "" msgid "weights.on.x needs %d none-negative values" msgstr "" msgid "All weights.on.x must be none negative" msgstr "" msgid "'offset' not fully implemented" msgstr "" msgid "illegal 'family' argument" msgstr "" msgid "start 'theta' has still NA's .. badly singular x" msgstr "" msgid "Cannot find valid starting values: You need help" msgstr "" msgid "family '%s' not yet implemented" msgstr "" msgid "NAs in d(mu)/d(eta)" msgstr "" msgid "Non-finite coefficients at iteration" msgstr "" msgid "Algorithm did not converge" msgstr "" msgid "fitted probabilities numerically 0 or 1 occurred" msgstr "" msgid "fitted rates numerically 0 occurred" msgstr "" msgid "Only 'test.acc = \"coef\"' is currently implemented" msgstr "" msgid "value of the tuning constant c (tcc) must be > 0" msgstr "" msgid "Std.error computation not yet available for the case of 'weights'" msgstr "" msgid "negative scale 's'" msgstr "" msgid "scale 's' is zero -- returning initial 'mu'" msgstr "" msgid "scale MAD is zero for this sample" msgstr "" msgid "Qn(x) == 0 and tmad(x, trim=%g) == 0" msgstr "" msgid "calculations stopped prematurely in rllarsbi" msgstr "" msgid "(probably because of rounding errors)." msgstr "" msgid "unknown split type" msgstr "" msgid "No categorical variables found in model. Reverting to S-estimator." msgstr "" msgid "No continuous variables found in model. Reverting to L1-estimator." msgstr "" msgid "M-S estimator did *not* converge" msgstr "" msgid "'psi' should be one of %s" msgstr "" msgid "invalid 'psi'=%s; possibly use .regularize.Mpsi(%s)" msgstr "" msgid "Unknown setting '" msgstr "" msgid "'. Using defaults." msgstr "" msgid "update(*, setting = ) is not allowed" msgstr "" msgid "Initial estimator '%s' not supported; using S-estimator instead" msgstr "" msgid ".vcov.avar1 can only be used when initial estimator is S; using .vcov.w instead" msgstr "" msgid "only M and D are steps supported after \"init\" computation" msgstr "" msgid "%s-step did NOT converge. Returning unconverged %s-estimate" msgstr "" msgid "initial estim. 
'init' not converged -- will be return()ed basically unchanged" msgstr "" msgid ":.vcov.w: cov.hubercorr must be logical (or NULL)" msgstr "" msgid ":.vcov.w: cov.corrfact must be one of" msgstr "" msgid ":.vcov.w: cov.dfcorr must be one of" msgstr "" msgid "ignoring cov.resid == 'final' since est != final" msgstr "" msgid "cov.resid must be one of" msgstr "" msgid ":.vcov.w: cov.xwx must be logical (or NULL)" msgstr "" msgid "parameter psi is not defined" msgstr "" msgid "parameter 'tuning.psi' is not numeric" msgstr "" msgid ":.vcov.w: scale missing, using D scale" msgstr "" msgid "option 'cov.hubercorr' is ignored for cov.corrfact = \"asympt\"" msgstr "" msgid ":.vcov.w: unsupported psi function" msgstr "" msgid "(tau / hybrid / tauold): tau not found in 'obj'" msgstr "" msgid ":.vcov.w: Caution. Some psi'() are NA" msgstr "" msgid ":.vcov.w: unsupported psi for \"hybrid\" correction factor" msgstr "" msgid "invalid 'cov.dfcorr':" msgstr "" msgid ".vcov.avar1() supports only SM or MM estimates" msgstr "" msgid "initial estimate residuals length differs from final ones. Typically must refit w/ lmrob()" msgstr "" msgid "X'WX is almost singular. Consider using cov = \".vcov.w\"" msgstr "" msgid "X'WX is singular. Rather use cov = \".vcov.w\"" msgstr "" msgid "fixing" msgstr "" msgid "negative eigen([" msgstr "" msgid "])values" msgstr "" msgid ".vcov.avar1: negative diag() fixed up; consider 'cov=\".vcov.w.\"' instead" msgstr "" msgid "invalid 'posdef.meth':" msgstr "" msgid "'control$n.group' must be larger than 'p' for 'large_n' algorithm" msgstr "" msgid "'groups * n.group' must be smaller than 'n' for 'large_n' algorithm" msgstr "" msgid "'control$n.group' is not much larger than 'p', probably too small" msgstr "" msgid "invalid 'seed'. Must be a valid .Random.seed !" msgstr "" msgid "C function R_lmrob_S() exited prematurely" msgstr "" msgid "S-estimated scale == 0: Probably exact fit; check your data" msgstr "" msgid "lmrob..D..fit: control is missing" msgstr "" msgid "lmrob..D..fit: prior estimator did not converge, stopping" msgstr "" msgid "lmrob..D..fit: robustness weights undefined" msgstr "" msgid "lmrob..D..fit: residuals undefined" msgstr "" msgid "lmrob..D..fit: parameter psi is not defined" msgstr "" msgid "lmrob..D..fit: parameter tuning.psi is not numeric" msgstr "" msgid "control is missing" msgstr "" msgid "'control' is missing" msgstr "" msgid "internal logic error in psi() function name:" msgstr "" msgid "Please report!" msgstr "" msgid "argument 'psi' must be a string (denoting a psi function)" msgstr "" msgid "tuning constant 'cc' is not numeric" msgstr "" msgid "Coefficients for" msgstr "" msgid "function incorrectly specified." msgstr "" msgid "Use c(minimal slope, b, efficiency, breakdown point) [6 hard-coded special cases]" msgstr "" msgid "or c(0, a,b,c, max_rho) as from .psi.const(" msgstr "" msgid ", cc)." msgstr "" msgid "Use c(minimal slope, b, efficiency, breakdown point) [2 special cases]" msgstr "" msgid "or c(b, c, s) as from .psi.const(" msgstr "" msgid "Coef. for Hampel psi function not of length 3" msgstr "" msgid "Coef. 
for psi function" msgstr "" msgid "not of length 1" msgstr "" msgid "tuning constants for ggw psi: both eff and bp specified, ignoring bp" msgstr "" msgid "neither breakdown point 'bp' nor efficiency 'eff' specified" msgstr "" msgid "unable to find constants for \"ggw\" psi function: %s" msgstr "" msgid "tuning constants for \"lqq\" psi: both 'eff' and 'bp' specified, ignoring 'bp'" msgstr "" msgid "Error: neither breakdown point nor efficiency specified" msgstr "" msgid "unable to find constants for \"lqq\" psi function: %s" msgstr "" msgid "method for psi function" msgstr "" msgid "not implemented" msgstr "" msgid "tuning parameter (chi/psi) is not numeric" msgstr "" msgid "need non-negative number of nodes" msgstr "" msgid "unknown setting for 'subsampling': %s" msgstr "" msgid "number of rows in 'x' and length of 'object$rweights' must be the same" msgstr "" msgid "'%s' must be a number or a function of %s which returns a number" msgstr "" msgid "Detected possible local breakdown of" msgstr "" msgid "-estimate in" msgstr "" msgid "coefficients" msgstr "" msgid "coefficient" msgstr "" msgid "'" msgstr "" msgid "," msgstr "" msgid "." msgstr "" msgid "KS2014" msgstr "" msgid "Use lmrob argument 'setting=\"KS2014\"' to avoid this problem." msgstr "" msgid "arguments .. in" msgstr "" msgid ")$" msgstr "" msgid "^list\\(" msgstr "" msgid "are disregarded." msgstr "" msgid "Maybe use lmrob(*, control=lmrob.control(....) with all these." msgstr "" msgid "'weights' must be a numeric vector" msgstr "" msgid "number of offsets is %d, should equal %d (number of observations)" msgstr "" msgid "The 'method' argument is different from 'control$method'" msgstr "" msgid "Using the former, method =" msgstr "" msgid "incompatible dimensions" msgstr "" msgid "missing or negative weights not allowed" msgstr "" msgid "singular fit encountered" msgstr "" msgid "init must be \"S\", \"M-S\", function or list" msgstr "" msgid "Length of initial coefficients vector does not match rank of singular design matrix x" msgstr "" msgid "invalid 'init' argument" msgstr "" msgid "arguments" msgstr "" msgid "are disregarded in" msgstr "" msgid "Failed to compute robust Mahalanobis distances, reverting to robust leverages." msgstr "" msgid "lmrob object does not have a proper 'qr' component. Rank zero?" msgstr "" msgid "lmrob object does not have a proper 'qr' component. Rank must be zero" msgstr "" msgid "invalid 'lmrob' object: no terms component" msgstr "" msgid "unsupported psi function -- should not happen" msgstr "" msgid "weights should not be both close to 0 and close to 1!" msgstr "" msgid "You should use different 'eps' and/or 'eps1'" msgstr "" msgid "calling predict.lm() ..." 
msgstr "" msgid "prediction from a rank-deficient fit may be misleading" msgstr "" msgid "Predictions on current data refer to _future_ responses" msgstr "" msgid "Assuming prediction variance inversely proportional to weights used for fitting" msgstr "" msgid "Assuming constant prediction variance even though model fit is weighted" msgstr "" msgid "'weights' as formula should be one-sided" msgstr "" msgid "Use only with 'lts' objects" msgstr "" msgid "Index plot of standardized residuals is not avalable if scale = 0" msgstr "" msgid "Standardized residuals vs Fitted values plot is not avalable if scale = 0" msgstr "" msgid "Diagnostic plot is not available for univar\niate location and scale estimation" msgstr "" msgid "Regression Diagnostic plot is not avalable if scale = 0" msgstr "" msgid "Regression Diagnostic plot is not avalable: option mcd=F was set in ltsReg()." msgstr "" msgid "The MCD covariance matrix was singular." msgstr "" msgid "'id.n' must be in {1,..," msgstr "" msgid "alpha not inside [1/2, 1]" msgstr "" msgid "unknown 'wgtFUN' specification:" msgstr "" msgid "'wgtFUN' must be a function or a string specifying one" msgstr "" msgid "y is not a numeric" msgstr "" msgid "There is at least one constant column. Remove it and set intercept=TRUE" msgstr "" msgid "'qr.out = TRUE' for univariate location is disregarded" msgstr "" msgid "Need more than twice as many observations as variables." msgstr "" msgid "x is singular" msgstr "" msgid "no valid subsample found in LTS - set 'nsamp' or rather use lmrob.S()" msgstr "" msgid "NA coefficient (at %s) from \"best\" subset" msgstr "" msgid "Invalid number of trials nsamp=" msgstr "" msgid "! Using default." msgstr "" msgid "'nsamp' options 'best' and 'exact' not allowed for n greater than" msgstr "" msgid ". Will use default." msgstr "" msgid "Maximum 5000 subsets allowed for option 'best'." msgstr "" msgid "Computing 5000 subsets of size" msgstr "" msgid "out of" msgstr "" msgid "The default of 'doScale' is FALSE now for stability;" msgstr "" msgid "set options(mc_doScale_quiet=TRUE) to suppress this (once per session) message" msgstr "" msgid "mc(): not 'converged'" msgstr "" msgid "in" msgstr "" msgid "iter" msgstr "" msgid "iterations" msgstr "" msgid "*and*" msgstr "" msgid "'reflect part' in" msgstr "" msgid "iter2" msgstr "" msgid "; try enlarging eps1, eps2 !?" 
msgstr "" msgid "optimx" msgstr "" msgid "package must be loaded in order to" msgstr "" msgid "use" msgstr "" msgid "optimizer=\"optimx\"" msgstr "" msgid "couldn't find optimizer function" msgstr "" msgid "non-function specified as optimizer" msgstr "" msgid "optimizer function must use (at least) formal parameters" msgstr "" msgid "Provide 'upper' or 'lower' with names()" msgstr "" msgid "parameter names must appear in 'formula'" msgstr "" msgid "lower must be either of length %d, or length 1" msgstr "" msgid "upper must be either of length %d, or length 1" msgstr "" msgid "'%s' must be character string or function, but is \"%s\"" msgstr "" msgid "Psi function '%s' not supported yet" msgstr "" msgid "Initialization 'init = \"%s\"' not supported (yet)" msgstr "" msgid "unable to find constants for psi function" msgstr "" msgid "As \"sigma\" is in 'pnames', do not use it as variable or parameter name in 'formula'" msgstr "" msgid "Method" msgstr "" msgid "not correctly supported yet" msgstr "" msgid "'formula' should be a formula of the type 'y ~ f(x, alpha)'" msgstr "" msgid "specifying both 'acc' and 'tol' is invalid" msgstr "" msgid "The argument 'acc' has been renamed to 'tol'; do adapt your code." msgstr "" msgid "'start' or 'lower' or 'upper' must be fully named (list or numeric vector)" msgstr "" msgid "specifying 'weights' is not yet supported for method" msgstr "" msgid "For method = \"%s\", currently 'psi' must be specified via 'control'" msgstr "" msgid "'start' must be a named list or numeric vector" msgstr "" msgid "'scale' must be NULL or a positive number" msgstr "" msgid "Do not use '%s' as a variable name or as a parameter name" msgstr "" msgid "'weights' must be nonnegative and not contain NAs" msgstr "" msgid "NA's present in data; consider using 'na.action = na.exclude'" msgstr "" msgid "could not compute scale of residuals" msgstr "" msgid "failed to converge in" msgstr "" msgid "steps" msgstr "" msgid "type 'pearson' is not yet implemented" msgstr "" msgid "invalid 'type'" msgstr "" msgid "'cov' must be a p x p matrix" msgstr "" msgid "for method='" msgstr "" msgid "', 'parm' must be specified as an integer" msgstr "" msgid "profile() method not yet implemented for \"nlrob\" objects.\n Use method = \"Wald\"." msgstr "" msgid "Computing profile confidence intervals ..." msgstr "" msgid "\"boot\" method not yet implemented for \"nlrob\" objects.\n Use confint(*, method = \"Wald\")." msgstr "" msgid "Use only with 'lmrob' objects" msgstr "" msgid "'which' must be in 1:5" msgstr "" msgid "'id.n' must be in {1,..,%d}" msgstr "" msgid "recomputing robust Mahalanobis distances" msgstr "" msgid "need 'model' or 'x' component for robust Mahalanobis distances" msgstr "" msgid "saving the robust distances 'MD' as part of" msgstr "" msgid "Tolerance range must be between 0% to 100%" msgstr "" msgid "arguments of function '" msgstr "" msgid "' are (" msgstr "" msgid ") but should be (" msgstr "" msgid ")." msgstr "" msgid "invalid tuning parameter names:" msgstr "" msgid "instead of" msgstr "" msgid "You cannot specify both 'main' and the deprecated 'shortMain'" msgstr "" msgid "'shortMain' is deprecated and will get defunct." msgstr "" msgid "Use 'main = \"short\"' instead of 'shortMain = TRUE'" msgstr "" msgid "not yet implemented for large vectors" msgstr "" msgid "finite sample corrections are not corrected for non-default 'k'" msgstr "" msgid "'weights' must have same length as 'x'" msgstr "" msgid "Dimension {= ncol(x)} must be 2!" 
msgstr "" msgid "Initial set" msgid_plural "Initial sets" msgstr[0] "" msgstr[1] "" msgid "For method = \"%s\", argument %s is not made use of" msgid_plural "For method = \"%s\", arguments %s are not made use of" msgstr[0] "" msgstr[1] ""