robustbase/NAMESPACE

useDynLib(robustbase, .registration=TRUE)

if(FALSE) {##MM
    stopifnot(require(codetoolsBioC), require(robustbase))
    findExternalDeps("robustbase")
}

importFrom("grDevices", dev.interactive, extendrange)
importFrom("graphics", abline, axis, box, legend, lines, matplot, mtext,
           panel.smooth, par, plot, points, strheight, text, title)
importFrom("stats", aggregate, alias, as.formula, binomial, coef, cor, cov,
           cov.wt, cov2cor, delete.response, deviance, dnorm, dpois, family,
           fitted, fivenum, formula, gaussian, glm, glm.fit, hatvalues,
           integrate, is.empty.model, lm.fit, lm.wfit, mad, mahalanobis,
           median, model.frame, model.matrix, model.matrix.lm, model.offset,
           model.response, model.weights, na.fail, na.omit, na.pass,
           napredict, naprint, naresid, nlminb, nls, nls.control, nobs,
           optim, optimize, pbinom, pchisq, pf, pgamma, pnorm, poisson,
           ppois, predict, printCoefmat, pt, qchisq, qnorm, qpois, qqline,
           qqnorm, qt, quantile, resid, residuals, residuals.lm, setNames,
           splinefun, symnum, terms, uniroot, var, vcov, weights,
           .checkMFClasses, .getXlevels,
           ## S3 generics (*not* shown yet by findExternalDeps() above):
           anova, case.names, confint, dummy.coef, logLik, profile,
           variable.names
           ) ## ^^^^ MASS has a bit more; take it as example
if(getRversion() >= "3.1.0")
    importFrom("stats", .lm.fit, confint.lm, dummy.coef.lm)
if(getRversion() >= "3.3.0") {
    importFrom("stats", sigma)
} else {
    export(sigma)
    S3method(sigma, nls)
}
importFrom("utils", globalVariables, str)
## MASS has many rlm S3 methods; should we use some of them - for rnls() ?!
## we also currently use MASS::cov.rob(); but probably only temporarily
importFrom("methods", is, new, setClass, setMethod, slot, "slot<-",
           ## needed, e.g. for 'robustlmm' to work w/o warning:
           as, "as<-", coerce, "coerce<-")
importFrom("DEoptimR", JDEoptim)

## Functions defined in this package
export(Sn, Qn, Qn.old, s_Sn, s_Qn, s_mad, s_IQR,
       summarizeRobWeights, wgt.himedian, h.alpha.n,
       covMcd, .MCDcons, .MCDcnp2, .MCDcnp2.rew, .MCDsingularityMsg,
       robMD, mahalanobisD, # <- still "internal"
       ltsReg, tolEllipsePlot, ## RENAME ?!? :
       covPlot, ltsPlot, ## NO! ddplot, distplot, chi2qqplot
       rrcov.control,## << RENAME --- FIXME
       huberM, colMedians, rowMedians,
       covOGK, covGK, hard.rejection, scaleTau2,
       covComed, ## comedian, COM, -- not yet
       smoothWgt, .wgtFUN.covMcd, .wgtFUN.covComed,
       psiFunc, huberPsi, hampelPsi,
       ## Not yet:  ## tukeyPsi, # = biweight / bisquare
       tukeyChi, tukeyPsi1, # TODO deprecate: see ./TODO
       Mpsi, Mchi, Mwgt, MrhoInf, .Mpsi, .Mchi, .Mwgt, .MrhoInf, .Mwgt.psi1,
       .Mchi.tuning.default, .Mpsi.tuning.default, .regularize.Mpsi,
       .Mchi.tuning.defaults, .Mpsi.tuning.defaults,
       .psi2ipsi, .psi.const, .psi.ggw.findc, .psi.lqq.findc,
       lmrob, lmrob.fit, lmrob.fit.MM, lmrob..M..fit, lmrob..D..fit,
       lmrob.S, lmrob.lar, lmrob.M.S, lmrob.control,
       .lmrob.hat, # was lmrob.leverages()
       splitFrame, outlierStats,
       mc, # Mia Hubert's medcouple
       adjbox, adjboxStats, adjOutlyingness, fullRank,
       glmrob, nlrob, nlrob.control
     , glmrobMqle.control, glmrobBY.control, glmrobMT.control
     , BYlogreg ## R/BYlogreg.R FIXME: add to glmrob() and "deprecate"
     ## , glmrobMT ## R/MTestimador2.R
     , estimethod
     ## related to detMCD() and to be used in rrcov etc:
     , r6pack, doScale
     , rankMM, classPC, .signflip
      )

## S3 methods for ``our own'' S3 generics:
S3method(ltsReg, default)
S3method(ltsReg, formula)
S3method(adjbox, default)
S3method(adjbox, formula)

## Register all the methods for S3 generics elsewhere
## in case namespace is loaded but not currently attached.
S3method(anova, lmrob)
S3method(anova, glmrob)
S3method(alias, lmrob)
S3method(case.names, lmrob)
S3method(confint, lmrob)
S3method(confint, nlrob)
S3method(dummy.coef, lmrob)
S3method(estimethod, nlrob)
S3method(family, lmrob)
S3method(hatvalues, lmrob)
S3method(kappa, lmrob)
S3method(labels, lmrob)
S3method(model.matrix, lmrob)
S3method(nobs, lmrob)
S3method(nobs, lmrob.S, nobs.lmrob)# use the same as "lmrob"
S3method(nobs, mcd)
S3method(residuals, lmrob)
S3method(variable.names, lmrob)
S3method(weights, glmrob)
S3method(weights, lmrob)
S3method(weights, lmrob.S, weights.lmrob)# use the same as "lmrob"
S3method(weights, nlrob, weights.lmrob)# use the same as "lmrob"
S3method(formula, nlrob)
S3method(fitted, nlrob)
S3method(plot, lts)
S3method(plot, mcd)
S3method(plot, lmrob)
S3method(predict, lmrob)
S3method(predict, glmrob)
S3method(predict, nlrob)
S3method(print, glmrob)
S3method(print, lts)
S3method(print, mcd)
S3method(print, nlrob)
S3method(print, lmrob)
S3method(print, lmrob.S)
S3method(print, summary.glmrob)
S3method(print, summary.lmrob)
S3method(print, summary.nlrob)
S3method(print, summary.lts)
S3method(qr, lmrob)
## lmrob uses residuals.default
S3method(residuals, nlrob)
S3method(residuals, glmrob)
S3method(summary, glmrob)
S3method(summary, lmrob)
S3method(summary, lts)
S3method(summary, mcd)
S3method(summary, nlrob)
S3method(sigma, lmrob)
S3method(sigma, nlrob)
S3method(vcov, glmrob)
S3method(vcov, lmrob)
S3method(vcov, nlrob)
S3method(print, summary.mcd)

## S4 Generics {only those defined in this package}:
export( chgDefaults )

exportClasses(
    "functionX",   # function(x, ): VECTORIZED in 'x'
    "functionXal", # a functional: function of tuning par.s only
    "psi_func"     # containing rho(), psi(), psi'(), ...
)

exportMethods( chgDefaults, plot )
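## Illustration (not part of the package sources): a minimal sketch of how a few
## of the entry points exported above are typically called.  The toy data below
## are made up for demonstration only.
library(robustbase)
set.seed(1)
x <- c(rnorm(95), rnorm(5, mean = 10))       # univariate sample with 5 gross outliers
Qn(x)                                        # robust scale estimate
huberM(x)                                    # Huber M-estimate of location (and scale)
adjbox(x)                                    # skewness-adjusted boxplot
X <- cbind(x1 = rnorm(100), x2 = rnorm(100))
covMcd(X)                                    # robust multivariate location/scatter (MCD)
d <- data.frame(x = 1:30, y = 2*(1:30) + rnorm(30))
d$y[c(3, 17)] <- d$y[c(3, 17)] + 50          # contaminate two responses
summary(lmrob(y ~ x, data = d))              # MM-type robust linear regression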

robustbase/demo/determinMCD.R

library(robustbase)
source(system.file("xtraR/test_MCD.R", package = "robustbase"))#-> doMCDdata()

##' This version of domcd() runs *both* "Fast" and "deterministic" MCD
##' @title covMcd() "workhorse" function -- *passed* to and from doMCDdata()
##' @param x data set: n x p numeric matrix
##' @param xname "promise" which will be substituted() and printed
##' @param nrep number of repetitions: only sensible for *timing*
##' @param time
##' @param short
##' @param full
##' @param lname optional:
##' @param seed optional:
##' @param trace optional:
domcd.2 <- function(x, xname, nrep=1,
                    do.exact = NULL, # <- smart default, globally customizable
                    time  = get("time",  parent.frame()), # compromise
                    short = get("short", parent.frame()), # compromise
                    full  = get("full",  parent.frame()), # compromise
                    lname=20, seed=123, trace=FALSE)
{
    if(short && full) stop("you should not set both 'full' and 'short' to TRUE")
    force(xname)# => evaluate when it is a data(<>, ..) call
    n <- dim(x)[1]
    p <- dim(x)[2]
    metha <- "FastMCD"
    methb <- "detMCD"
    if(is.null(do.exact)) {
        nLarge <- if(exists("nLarge", mode="numeric"))
                      get("nLarge", mode="numeric") else 5000
        do.exact <- choose(n, p+1L) < nLarge
    }
    set.seed(seed); mcda <- covMcd(x, trace=trace)
    set.seed(seed); mcdb <- covMcd(x, nsamp="deterministic", trace=trace)
    if(do.exact) {
        methX <- "exactMCD"
        set.seed(seed); mcdX <- covMcd(x, nsamp="exact", trace=trace)
    }
    mkRes <- function(mcd) sprintf("%3d %3d %3d %12.6f\n", n,p, mcd$quan, mcd$crit)
    xresa <- mkRes(mcda)
    xresb <- mkRes(mcdb)
    if(do.exact) xresX <- mkRes(mcdX)
    if(time) {
        tim1 <- function(meth)
            sprintf("%10.3f\n", system.time(repMCD(x, nrep, meth))[1]/nrep)
        xresa <- paste(xresa, tim1(metha))
        xresb <- paste(xresb, tim1(methb))
        if(do.exact) xresX <- paste(xresX, tim1(methX))
    }
    if(full) {
        header <- get("header", parent.frame())
        header(time)
    }
    ## lname: must fit to header():
    x.meth <- paste(xname, format(c(metha, methb, if(do.exact) methX)))
    cat(sprintf("%*s", lname, x.meth[1]), xresa)
    cat(sprintf("%*s", lname, x.meth[2]), xresb)
    if(do.exact) cat(sprintf("%*s", lname, x.meth[3]), xresX)
    cat("Best subsamples: \n")
    cat(sprintf(" %10s: ", metha)); print(mcda$best)
    if(identical(mcdb$best, mcda$best))
        cat(sprintf(" %s is the same as %s\n", methb, metha))
    else {
        cat(sprintf(" %10s: ", methb)); print(mcdb$best)
        cat(sprintf(" Difference %s - %s:", methb, metha))
        print(setdiff(mcdb$best, mcda$best))
    }
    if(do.exact) {
        if(identical(mcda$best, mcdX$best))
            cat(sprintf(" %s is the same as %s\n", methX, metha))
        else if(identical(mcdb$best, mcdX$best))
            cat(sprintf(" %s is the same as %s\n", methX, methb))
        else {
            cat(sprintf(" %10s: ", methX)); print(mcdX$best)
        }
    }
    if(!short) {
        cat("Details about", metha,": ")
        ibad <- which(mcda$wt==0)
        names(ibad) <- NULL
        nbad <- length(ibad)
        cat("Outliers: ",nbad,"\n")
        if(nbad > 0) print(ibad)
        if(full){
            cat("-------------\n")
            print(mcda)
        }
        cat("--------------------------------------------------------\n")
    }
}

doMCDdata(domcd = domcd.2)
warnings() ## in one example n < 2 * p ..
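## Illustration (not part of the demo file): domcd.2() takes 'time', 'short' and
## 'full' from the caller's frame via get(*, parent.frame()), so a direct call
## outside doMCDdata() needs those bindings first.  The 'delivery' data set is
## merely one plausible choice for such a quick check.
time <- FALSE; short <- TRUE; full <- FALSE
data(delivery, package = "robustbase")
domcd.2(as.matrix(delivery), xname = "delivery")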
###' Test the exact fit property of CovMcd --------------------------------

##' generate "exact fit" data
d.exact <- function(seed=seed, p=2) {
    stopifnot(p >= 1)
    set.seed(seed)
    n1 <- 45
    x1 <- matrix(rnorm(p*n1), nrow=n1, ncol=p)
    x1[,p] <- x1[,p] + 3
    n2 <- 55
    m2 <- 3
    x <- rbind(x1, cbind(matrix(rnorm((p-1)*n2), n2, p-1), rep(m2,n2)))
    colnames(x) <- paste0("X", 1:p)
    x
}

plot(d.exact(18, p=2))
pairs(d.exact(1234, p=3), gap=0.1)

for(p in c(2,4)) for(sid in c(2, 4, 18, 1234)) {
    cat("\nseed = ",sid,"; p = ",p,":\n")
    d.x <- d.exact(sid, p=p)
    d2 <- covMcd(d.x)
    ## Gave error {for p=2, seeds 2, 4, 18 also on 64-bit}:
    ## At line 729 of file rffastmcd.f
    ## Fortran runtime error: Index '6' of dimension 1 of array 'z' above upper bound of 4
    print(d2)
    if(FALSE) ## FIXME fails when calling eigen() in "r6pack()"
        d2. <- covMcd(d.x, nsamp = "deterministic", scalefn = Qn)
    stopifnot(d2$singularity$kind == "on.hyperplane")
}
## TODO: also get examples of other singularity$kind's

robustbase/demo/00Index

determinMCD    Compute (and compare) the deterministic MCD for "standard datasets"

robustbase/ChangeLog

NOTE: All newer changes are in ./inst/NEWS.Rd --> `news(package = "robustbase")`
----    ---------------

2014-12-12 Martin Maechler
  * R/mc.R (mc): fix for limit case where x[] contains +/-Inf: pass +/- Inf to C
  * src/mc.c (mc_C_d): and replace them by 'Large' = +/- DBL_MAX/4

2014-12-11 Martin Maechler
  * man/adjOutlyingness.Rd: fix another old FIXME:
  * R/adjoutlyingness.R (adjOutlyingness): cupper=0, clower=0 now computes
    the classical ("symmetric") outlyingness W/O mc() call.

2014-12-03 Martin Maechler
  * DESCRIPTION (Version): 0.92-3
  * R/comedian.R (covComed): new from Maria Anna, tweaked by Valentin and
    modified considerably by MM.
2014-11-22 Martin Maechler * DESCRIPTION (Version): 0.92-2, released to CRAN on 2014-11-22 2014-11-18 Martin Maechler * DESCRIPTION (Version): 0.92-1, released to CRAN on 2014-11-18 2014-10-24 Martin Maechler and 2014-10-19 Valentin Todorov * DESCRIPTION (Version): 0.92-1 * R/covMcd.R, detmcd.R: added Deterministic MCD as an option to covMcd (nsamp="deterministic"), function .detMcd() * tests/tmcdd.R: new tests, for covMcd() with nsamp="deterministic" * tests/tmcdd.Rout.save: new tests, for covMcd() with nsamp="deterministic" * inst/xtraR/test_MCD.R: modified to test covMcd() with nsamp="deterministic" * NAMESPACE: export the functions r6pack() and doScale() to be used in rrcov for the deterministic S- and MM-estimates * src/rffastmcd and init.c, R/covMcd: fix a bug in nsamp="exact" 2014-10-17 Martin Maechler * R/adjoutlyingness.R (adjOutlyingness): reverse the defaults of 'clower' and 'cupper' and fix +/- swap; see new note in man/adjOutlyingness.Rd * man/adjOutlyingness.Rd: the wrong defaults came from *.ssc file from Antwerpen 2014-06-30 Martin Maechler * src/rllarsbi.f (rlLARSbi): got rid of warnings -- and many GOTOs 2014-06-11 Martin Maechler * DESCRIPTION (Version): 0.92-0 * R/nlrob.R (.nls.get.start, nlrob): now works with indexed vector parameters -- FIXME: at ETH have testing code + example data -- unfinished!-- -- on nb-mm3 not 2014-04-30 Martin Maechler * DESCRIPTION (Version): 0.91-1, released to CRAN on 2014-05-01 2014-04-24 Martin Maechler * DESCRIPTION (Version, Date): 0.91-0, released to CRAN 2014-04-24 2014-01-30 Martin Maechler * DESCRIPTION (Version): 0.90-1, released to CRAN on 2014-01-30 2013-05-22 Martin Maechler * R/lmrob.MM.R (Mpsi, Mchi, Mwgt): now all work via .Call(): is 9 x faster for n ~ 4000, and a few NAs. 2013-05-18 Martin Maechler * R/lmrob.MM.R (MrhoInf): new, providing rho(Inf), i.e., the scaling constant (rho |--> rho~ = chi) * R/lmrob.MM.R (Mpsi, Mchi, Mwgt): renamed, exported and documented, from former hidden lmrob.psifun(), lmrob.chifun(), lmrob.wgtfun(). * TODO: now has an extensive section on psi/rho/chi etc 2013-03-27 Martin Maechler * DESCRIPTION (Version): 0.9-8, released to CRAN on 2013-06-14 2013-03-06 Martin Maechler * DESCRIPTION (Version): 0.9-7, released to CRAN on 2013-03-06 2013-03-01 Manuel Koller * R/lmrob.R (lmrob): Adding weights and offset arguments. 2013-02-26 Manuel Koller * R/lmrob.R (class lmrob): Objects of class lmrob now store the robustness weights in $rweights (used to be in $weights). * R/lmrob.R (weights.lmrob): specialized weights() function for lmrob objects. Returns prior weights by default. Robustness weights are available via the "type" argument. 2013-02-14 Manuel Koller * R/lmrob.R (lmrob): setting class "lmrob" also for unconverged initial estimates. * R/lmrob.R (print.summary.lmrob, print.lmrob, print.lmrob.S): improved handling of unconverged estimates and exact fits. 2013-02-07 Manuel Koller * R/lmrob.R (lmrob): compatibility to lm: 'assign': labels of terms used for fitting. * R/lmrob.R (summary.lmrob): compatibility to summary.lm for: 'df': degrees of freedom, a 3-vector (p, n-p, p*), the last being the number of non-aliased coefficients (used to be just (n-p)). 'aliased': named logical vector showing if the original coefficients are aliased (was not returned before). 
2012-09-11 Manuel Koller * DESCRIPTION (Version): 0.9-4, released to CRAN on 2012-09-11 2012-05-14 Martin Maechler * R/lmrob.MM.R (.vcov.avar1): fix typo in "posdefify" 2012-03-05 Martin Maechler * R/lmrob.MM.R (.vcov.avar1): "posdefify" FIXME (negative eigen values) 2012-02-27 Martin Maechler * DESCRIPTION (Version): 0.8-1-1, released to CRAN on 2012-03-02 2012-02-24 Manuel Koller * R/lmrob.R (lmrob): added init argument: string, function or list. * R/lmrob.MM.R (lmrob.fit): added init argument. * man/lmrob.Rd: Added documentation about init argument. * man/lmrob.fit.Rd: see above. 2012-02-22 Martin Maechler * R/nlrob.R (print.summary.nlrob): use full call instead of just formula. --> shows non-default psi() 2012-02-19 Martin Maechler * R/nlrob.R (nlrob): now use nls(...., weights = *, ..), no longer need hack "put everything to the RHS". 2011-12-23 Martin Maechler * R/nlrob.R (nlrob): for zero weights 'w', the residuals accidentally where NaN. 2011-12-12 Manuel Koller * DESCRIPTION (Version): 0.8-1 * src/mc.c (mc_C_d): fixed bug in iteration; see svn log -r 272 & 274 2011-12-09 Martin Maechler * R/adjbox.R (adjboxStats): fix the sign-error thinko for the case mc < 0. * man/adjboxStats.Rd: document; test the reflection invariance now. 2011-12-08 Martin Maechler * R/adjbox.R (adjbox.default): new 'doReflect' argument; default 'TRUE' ensures that 'adjbox(x)' behaves symmetrically in 'x'. * DESCRIPTION (Version): 0.8-0, released to CRAN on 2011-12-09 2011-10-24 Martin Maechler with Andreas in train to Fribourg * R/glmrob.R (residuals.glmrob): provide a version of residuals.glm() which works * NAMESPACE: residuals.glmrob 2011-10-11 Manuel Koller * DESCRIPTION (Version): 0.7-8, released to CRAN on 2011-10-26, r270 * man/lmrob.Rd: See also extended with lmrob..M..fit. * man/lmrob..M..fit.Rd: Extended example with function lmrob.custom. * R/lmrob.MM.R (lmrob.tau): moving calculation of h to the inside of the function, so that obj$qr is not required. * R/ltsPlot.R (ltsPlot, myqqplot): removed superfluous title(). 2011-10-11 Martin Maechler * src/rfltsreg.f: add 'implicit none' and declare everything; new gfortran gave (correct) warnings. * R/nlrob.R, man/nlrob.Rd: finally fix "fitted.values" * tests/nlrob-tst.R (new): start more testing of nlrob()! 2011-08-09 Manuel Koller * R/plot.lmrob.R (plot.lmrob): Fixing bug reported by Andreas Papritz. is.null(x$x) is always FALSE since there is always x$xlevels present. * inst/doc/lmrob_simulation.Rnw (f.gen): load(file) was missing the proper path. 2011-05-24 Manuel Koller * DESCRIPTION (Version): 0.7-6, released to CRAN today * inst/doc/lmrob_simulation.Rnw: cleaned up chunk headers. * inst/doc/Makefile: added Makefile that runs qpdf after texi2pdf. 2011-05-23 Manuel Koller * DESCRIPTION (Version, Date): 0.7-5 * man/lmrob..D..fit.Rd: updated reference to Koller and Stahel 2011 (now with volume, issue and page numbers). * man/lmrob.control.Rd: updated reference. * man/lmrob.Rd: update reference. * inst/doc/lmrob_simulation.bib: updated reference. * inst/doc/estimating.functions.R (robustness.weights.lmrob.S): fixed typo. 2011-03-17 Manuel Koller * DESCRIPTION (Version, Date): 0.7-4 * inst/doc/lmrob_simulation.Rnw: Fixed problem with paths in source(), save() and load(). 2011-03-08 Manuel Koller * DESCRIPTION (Version, Suggests): 0.7-3 and added xtable and ggplot2 to Suggests. * inst/doc/lmrob_simulation.Rnw: re- or moved dependencies that are not required or just needed for data generation. * inst/doc/error.distributions.R: removed dependency on skewt. 
2011-02-10 Manuel Koller * DESCRIPTION (Version): Version: 0.7-2 * inst/doc/lmrob_simulation.Rnw: updated title. * inst/doc/lmrob_simulation.bib: updated references. * man/lmrob.control.Rd: as above. * man/lmrob.Rd: as above. 2011-01-28 Martin Maechler * R/glmrobMqle.R (Huberprop2, ...), * R/psi-rho-funs.R (huberPsi): replace pmin2/pmax2 by pmin.int()/pmax.int(). 2011-01-26 Manuel Koller * R/psi-rho-funs.R (psiFunc, chgDefaults): default arguments are now also set for the E... slots * tests/psi-rho-etc.R: updated comments * tests/psi-rho-etc.Rout.save: updated accordingly 2011-01-20 Manuel Koller Updated reference to Koller and Stahel 2011 paper. * man/lmrob.control.Rd: updated reference. KS2010 -> KS2011. * man/lmrob.Rd: see above * inst/doc/lmrob_simulation.bib: updated ks2011 reference. * inst/doc/lmrob_simulation.Rnw: see above * R/lmrob.MM.R (lmrob.control): setting argument: KS2011 instead of KS2010 2011-01-18 Manuel Koller * tests/psi-rho-etc.R: added tests to document bug in psi_func class and pmin2/pmax2 functions. * tests/psi-rho-etc.Rout.save: results as they should be * inst/doc/lmrob_simulation.Rnw: added table with tuning constants used in simulation. 2010-12-04 Martin Maechler * R/qnsn.R (Qn): even better finite sample correction, from an analysis of a Qn simulation. (Qn.old): provide, if needed for back-compatibility. 2010-12-02 Valentin Todorov * R/ltsReg.R, man/ltsReg.Rd, tests/LTS-specials.R: the raw weights 'raw.weights' added to the returned object * R/ltsReg.R, src/rfltsreg.f, man/ltsReg.Rd: options nsamp="exact" and nsamp="best" fixed; Added 'trace' parameter 2010-12-02 Martin Maechler * tests/mc-strict.R (adjOutlyingness): adapt tests to the fixed mc() code; using some tolerance... still need to be tested on Win/Mac/.. * R/nlrob.R (summary.nlrob): do not compute 'se' if not converged; update help page; (print.summary.nlrob): print "non-convergence" * man/summary.nlrob.Rd, man/predict.lmrob.Rd, ..: update and trivial changes. * R/qnsn.R (Qn): fixed the consistency constant (thanks to Peter Ruckdeschel), slightly adapting the finite sample factors for n = 2:9. 2010-12-02 Manuel Koller * DESCRIPTION (Version): 0.6-9 * R/mc.c: fixed several bugs, consequent use of eps[0] and eps[1] to avoid numerical problems, fixed memory corruption bug that caused segfaults from time to time. * tests/mc-etc.R: see below * tests/mc-etc.Rout.save: see below * tests/mc-strict: updated tests and results * man/mc.Rd: see below * R/mc.R (mc.default): updated eps1 and eps2 defaults. * inst/doc/lmrob_simulation.Rnw: minor cosmetic changes * inst/doc/graphics.functions.R: added some helper functions to ease working with color palettes used by ggplot2. 2006-10-27 Andreas Ruckstuhl and 2010-10-28 Martin Maechler * DESCRIPTION (Version): 0.6-8 * R/lmrobPredict.R, R/glmrobPredict.R: new more careful prediction methods, now documented in * man/predict.lmrob.Rd, man/predict.glmrob.Rd: and no longer in * man/summary.lmrob.Rd: 2010-10-13 Manuel Koller * DESCRIPTION (Version): 0.6-7 * R/lmrob.MM.R (lmrob.psifun, lmrob.rhofun, lmrob.wgtfun): as.double caused input to loose dimension attribute, fixed. * inst/doc/lmrob_simulation.Rnw: Updated vignette title. Enhanced aggregation procedure. * inst/doc/lmrob_simulation.Bib: Fixed entry KS2010, no more trouble with bibtex. * inst/doc/aggr_results.Rdata: updated simulation results. n = 400 missing for now. * inst/doc/error.distributions.R: added cskt distribution: centered skewed t-distribution. 
* inst/doc/simulation.results.R: small changes in proclist generation. 2010-10-11 Manuel Koller * DESCRIPTION (Version): 0.6-6 (Depends): Depends on R 2.9.0 (use of grepl) * man/lmrob.Rd: Design Adapted Scale estimate renamed to Design Adaptive Scale estimate * man/lmrob.fit.Rd: ditto * man/lmrob..D..fit.Rd: ditto 2010-10-08 Manuel Koller * DESCRIPTION (Version): 0.6-5 * inst/doc: added directory. * inst/doc/lmrob_simulation.Rnw: vignette containing simulation study of Koller and Stahel (2010). * inst/doc/lmrob_simulation.bib: bibtex file for vignette * inst/doc/graphics.functions.R: R code used in vignette, graphics helper functions. * inst/doc/error.distributions.R: R code used in vignette, custom distribution functions. * inst/doc/simulation.functions.R: R code used in vignette, functions used in simulations. * inst/doc/estimating.functions.R: R code used in vignette, extra estimating functions. * inst/doc/asymptotic.max.bias.Rdata: Cache of calculations, to speed up vignette making. * inst/doc/aggr_results.Rdata: Cache of calculations, to speed up vignette making. * .Rbuildignore: Omitting temporary Sweave output files from the build. * src/init.c: added function to process simulation output (used in vignette). * src/lmrob.c (R_calc_fitted): ditto * src/robustbase.h: ditto 2010-10-08 Manuel Koller * DESCRIPTION (Version): 0.6-4 * inst/CITATION: Added my name. * man/lmrob.control.Rd: Added more information to "setting"-Argument. * man/lmrob.Rd: ditto. Added example for "setting" argument. * R/lmrob.R (lmrob): Moved call of lmrob.control to function code. * R/lmrob.MM.R (lmrob.control): Changed setting KS2010: uses lqq instead of ggw. Fixed bug when using unknown setting. * R/lmrob.MM.R (lmrob.fit): Check for initial estimator. Issues warning if neq S. 2010-08-13 Martin Maechler * DESCRIPTION (Version): 0.6-3 * R/lmrob.MM.R (lmrob.psifun, ..., ghq): cosmetic changes; cleaner * src/lmrob.c (rho, psi, psip, ...): cleaner; partly faster; also ok for x = +-Inf.... * tests/lmrob-psifns.R: test psi(), rho(), etc -- also nice plots: tests/rob-psifns.pdf after .. check. 2010-08-13 Manuel Koller * R/lmrob.MM.R: changed the naming of lgw to the more accurate lqq "linear, quadratic, quadratic" (by construction of psi prime). * tests/lmrob-psifuns.R: changed the naming of lgw to the more accurate lqq * man/lmrob.control.Rd: ditto * man/summary.lmrob.Rd: updated documentation to account for "cov" argument of vcov. * R/lmrob.R (lmrob): warning and check for method argument improved. 2010-08-05 Manuel Koller * DESCRIPTION (Version): 0.6-2 * R/lmrob.MM.R (lmrob.const): replaced lmrob.ggw.const by lmrob.const, which now supports ggw and lgw. (lmrob.control, lmrob.lgw.findc, lmrob.conv.cc, lmrob.psi2ipsi): support for lgw psi function. (lmrob.efficiency, lmrob.bp): functions to calculate the efficiency and breakdown point of an M-estimator. (lmrob.tau, lmrob.tau.fast.coefs): Updated constants in all.equal check for Hampel psi function. Updated constants for all the supported psi functions. (lmrob..D..fit): switched order of updating covariance matrix and updating control$method. This caused the correction factors to be set incorrectly in some cases. * src/lmrob.c (normcnst, rho_lin, psi_lin, psip_lin, wgt_lin) (rho, psi, psip): Added support lgw psi function. * man/lmrob.control.Rd: updated to account for lgw. 
* tests/lmrob-psifns.R: added tests lgw psi function * tests/lmrob-psifns.Rout.save: updated accordingly * tests/lmrob-methods.Rout.save: updated, since constants for calculation of taus changed slightly * R/lmrob.R (vcov.lmrob): added possibility to calculate covariance matrix with another estimation method. 2010-07-12 Manuel Koller * DESCRIPTION (Version): 0.6-1 * src/lmrob.c (normcnst, rho_ggw, psi_ggw_vec, psi_ggw, psip_ggw) (wgt_ggw): Added support for custom constants for psi ggw. * R/lmrob.MM.R (.vcov.w): Modified constants for ggw psi function. (lmrob.control): Added method to calculate constants for ggw psi function. (lmrob.conv.cc, lmrob.ggw.mx, lmrob.ggw.ms, lmrob.ggw.ac) (lmrob.ggw.bp, lmrob.ggw.finda, lmrob.ggw.findc, lmrob.ggw.const): Methods to calculate constants for ggw psi. (lwgt): control argument optional. * tests/lmrob-psifns.R: added tests for custom tuning constants * tests/lmrob-psifns.Rout.save: added tests for custom tuning constants * experi-psi-rho-funs.R: added ggwPsi function, standardized s.t. Dpsi(0) = 1. Putting this into R/psi-rho-funs.R results in an error while "preparing for lazy-loading". 2010-06-25 Manuel Koller * DESCRIPTION (Version): 0.6-0 * NAMESPACE: dropped lmrob.control.sfs * R/lmrob.R (lmrob): "method" argument * man/lmrob.Rd: "method" argument * R/lmrob.MM.R (lmrob.control, lmrob.control.sfs): argument setting="KS2010" replaces lmrob.control.sfs. Setting a method involving a D step, sets default psi to ggw. * man/lmrob.control.Rd: "setting" argument * tests/lmrob-methods.R: "methods" and "psi" argument * tests/lmrob-methods.Rout.save: call output in summary changed * tests/lmrob-psifns.Rout.save: call output in summary changed * man/lmrob.control.Rd: Documentation details for tuning constants and alternative constants for .85 efficiency at the normal. 2010-06-24 Manuel Koller * DESCRIPTION (Version): 0.5-1.1 * tests/lmrob-psifns.R: added Hampel psi function example and updated coefficients of ggw. * src/lmrob.c: replaced "gws" with "ggw" in function names, "gwgt" with "welsh", replaced pow() with R_pow(), implemented support for coefficient vectors. Fixed problem with "ggw" on 32-bit machines. * R/lmrob.MM.R: changed the way coefficients for psi functions are handled: coefficient vectors are now supported. * R/lmrob.MM.R (lmrob..M..fit): class attribute for init entry in lmrob..M..fit was missing. 2010-06-23 Manuel Koller * tests/lmrob-methods.R: tests for compatibility between manual construction of different methods with specifying method argument in lmrob. * tests/lmrob-methods.Rout.save: expected results * tests/lmrob-psifns.R: tests for the support of different psi functions. * tests/lmrob-psifns.Rout.save: expected results * tests/lmrob-data.Rout.save: updated results to reflect slightly modified summary output (more options) * man/lmrob.Rd: updated to reflect changes in options * man/lmrob..D..fit.Rd: added documentation * man/lmrob..M..fit.Rd: updated example * man/lmrob.control.Rd: updated to reflect changes in options * man/lmrob.fit.Rd: updated from lmrob.fit.MM to reflect changes in options * man/lmrob.fit.MM.Rd: moved to lmrob.fit.Rd * man/lmrob.S.Rd: coef -> coefficients in example * R/anova.lmrob.R (length.tl, anovaLmrobPair, anovaLmrobList): Added checks to ensure soundness of result for methods other than "MM". 
2010-06-21 Manuel Koller * R/lmrob.MM.R (lmrob.control, lmrob.control.sfs, lmrob.fit.MM) (.vcov.w, .vcov.avar1, lmrob..M..fit, lmrob.S, lmrob..D..fit) (lmrob.kappa, lmrob.tau, lmrob.hatmatrix, lmrob.leverages) (lmrob.psi2ipsi, lmrob.psifun, lmrob.chifun, lmrob.wgtfun) (residuals.lmrob.S, lmrob.E, ghq): Support for psi functions other than bisquare as well as SMDM estimates. * R/lmrob.R (lmrob): Support for psi functions other than bisquare as well as SMDM estimates. Replaced lmrob.fit.MM by lmrob.fit. * src/robustbase.h: declarations of new functions in lmrob.c * src/init.c: definitions of new functions in lmrob.c * src/lmrob.c: added support for psi functions other than bisquare, including vectorized accessor functions ("R_psifun", "R_chifun" and "R_wgtfun"). "wgt" is now always used to calculate robustness weights. Function to calculate scale estimate iteratively ("R_find_D_scale"). * NAMESPACE: added lmrob.fit, lmrob..D..fit and lmrob.control.sfs to export. * DESCRIPTION: added my name to the list. 2010-04-01 Martin Maechler * DESCRIPTION (Version): 0.5-1 2009-11-19 Martin Maechler * R/covMcd.R (.fastmcd): nLarge = 100'000 (instead of previous 5000), for now. nsamp <= integer.max is asserted now. * R/covMcd.R (covMcd): nmini = 300 is now an optional argument. * R/rrcov.control.R (rrcov.control): 'nmini = 300' ... * R/ltsReg.R (ltsReg.default): .fastmcd(..., nmini = 300) * src/rffastmcd.f (rffastmcd): new argument nmini 2009-11-18 Martin Maechler * DESCRIPTION (Version): 0.5-0-1, released to CRAN * tests/mc-strict.R: don't use the longmemo example for now. (platform dependence !) 2009-11-11 Martin Maechler * tests/glmrob-1.R: add moe explicit glmrob() tests; including the simple puzzling 1-outlier problem. * R/glmrobMqle.R: do use getRversion() ! 2009-06-27 Martin Maechler * data/wagnerGrowth.rda: add the "wagner data" (continuous + categorical) * man/wagnerGrowth.Rd: ditto 2009-06-05 Martin Maechler * R/glmrob.R (glmrob): add the option start = "lmrobMM", using a *robust* start for {essentially} glm(). * man/glmrob.Rd: add an example for that <<<<<______________ FIXME 2009-06-04 Martin Maechler * DESCRIPTION (Version): 0.5-0; *not* released * tests/glmrob-1.R: more glmrob() examples 2009-06-03 Martin Maechler * R/glmrobMqle.R (mFormat): make 'trace=TRUE' also depend on getOption("digits"). * R/glmrobMqle.R, R/glmrob.R: changes from Andreas Ruckstuhl, to support family = "Gamma". Unfortunately, these at first also very slightly change binomial, poisson. ==> few small changes by MM. * R/glmrob.R (glmrob): allow y (in "y ~ ...") to be *factor* 2009-01-17 Martin Maechler * DESCRIPTION (Version): 0.4-5 : bug-fixing release * tests/tmcd.R: add a regression test for the bug * src/rffastmcd.f: rfrdraw() w/o 'seed' * src/rfltsreg.f: ditto 2009-01-10 Martin Maechler * src/rf-common.f (rfrangen): drop unused 'seed' argument (rfrdraw): ditto * src/rffastmcd.f: get rid of TABs (gfortran -Wall complains) * man/exAM.Rd, man/possumDiv.Rd, ...: Rd_parse fixes 2008-11-28 Martin Maechler * R/OGK.R (covOGK): also return n.iter and weight.fn. * man/covOGK.Rd: note about the "silly" weight.fn 2008-10-15 Martin Maechler * R/lmrob.MM.R (lmrob.S): fix error message s/larger/smaller/ thanks to Keith Ponting. 2008-10-01 Martin Maechler * src/wgt_himed_templ.h: fix leading comment 2008-08-29 Martin Maechler * DESCRIPTION (Version): 0.4-3 for CRAN release * tests/mc-strict.R: define 'isMac' and tweak the tests; thanks to reports from Rory Winston. 
2008-08-09 Martin Maechler * NAMESPACE: import stats::cov {since others hide it!} 2008-08-05 Martin Maechler * R/huber.R (huberM): replace `s' by 's' in errors and warnings. * R/qnsn.R, R/plot.lmrob.R, R/ltsReg.R, R/ltsPlot.R: ditto 2008-08-04 Martin Maechler , really from Valentin Todorov * R/ltsReg.R (ltReg.default): if(mcd) call covMcd() with correct alpha * R/covMcd.R (covMcd): drop (n-1)/n correction, as cov.wt() contains that per default (in R, not in S-plus!). * R/tolEllipse.R (tolEllipsePlot): no text() for id.n == 0 * DESCRIPTION (Version): 0.4-2 released to CRAN. 2008-08-04 Martin Maechler * src/wgt_himed.c: renamed wgt_himed.c_templ to * wgt_himed_templ.h: 2008-08-02 Martin Maechler * DESCRIPTION (Version): 0.4-1 ready for release to CRAN. * R/ltsReg.R (ltsReg.default): don't add artificial "Y" y-name * src/lmrob.c (rwls): first call to sum_rho() is only needed for tracing (was used for lambda iterations). * R/glmrob.R (glmrob), * R/glmrobMqle.R (glmrobMqle): introduce 'trace' option to trace the robustness iterations {*not* part of control: does *not* influence result} * man/CrohnD.Rd: new data example robust poisson regression * data/CrohnD.rda: 2008-01-25 Martin Maechler * R/covMcd.R (.fastmcd): fix nsamp="exact" to use "all" * src/rffastmcd.f: krep=0 <==> nsamp="exact" : all := TRUE; new argument i_trace; and use intpr() and dblepr() * man/covMcd.Rd: pass 'trace' to .fastmcd() and Fortran * src/rf-common.f (rfncomb): give "error" message for very large 'comb' * tests/tmcd.R: add test for nsmap = "exact" 2008-01-05 Martin Maechler * man/ambientNOxCH.Rd: new data set from René Locher, * data/ambientNOxCH.rda: showing some lmrob-nonconvergence 2007-12-22 Martin Maechler * src/lmrob.c (rwls): if (trace_lev >= 3) show beta vector. 2007-12-13 Martin Maechler * man/summarizeRobWeights.Rd: add toy example * R/lmrob.R (summarizeRobWeights): cosmetic change; notably finishing line when weights were practically 0/1. * tests/MCD-specials.Rout.save {adapt from change of 11-07} 2007-11-07 Martin Maechler * R/covMcd.R (covMcd): if we have singularity, also say so, even if trace is FALSE. * R/covMcd.R (singularityMsg): for "on.hyperplane", concatenate long coefficient vector. 2007-10-25 Martin Maechler * man/pulpfiber.Rd: New data set (p=8 = 4 + 4) * data/pulpfiber.tab: from "Robust Multivariate Regression (2004)". * tests/lmrob-ex12.R: use versions of predict[.lmrob]() 2007-10-24 Martin Maechler * man/condroz.Rd: set latin1 encoding (and fix typo) * R/lmrob.R (predict.lmrob): define predict() and model.matrix() methods, working via "lm" methods. * NAMESPACE: export vcov() * man/summary.lmrob.Rd: some docu of new methods 2007-08-02 Martin Maechler * R/adjoutlyingness.R (adjOutlyingness): small improvements, getting rid of loops; fix an obvious typo (in original code). Leave away the Inf and NaN that from abs(Y[] - m) / ( tup | tlo) 2007-07-21 Martin Maechler * DESCRIPTION (Author): add Tobias * src/rmc.c (h_kern): revert to absolute test; many more experiments * tests/mc-etc.R (x3): a smallish "extreme" case of "non-convergence" 2007-07-20 Martin Maechler * tests/mc-etc.R: new testing of mc() and related * src/rmc.c (mc_C_d): changed work[] and weight[] to 0-indexing; this is hopefully the last bug ... * R/adjbox.R (adjbox.formula): use adjbox(), not boxplot()! 
* man/adjboxStats.Rd: added 2007-07-20 Tobias Verbeke * R/adjbox.R: Skewness-adjusted boxplot ported from Matlab LibRA * man/adjbox.Rd: * data/los.rda, man/los.Rd: * data/condroz.rda, man/condroz.Rd: two dataset related to medcouple 2007-07-19 Martin Maechler * R/adjoutlyingness.R: new (also from the Antwerpen MC collection) * DESCRIPTION (Depends): R >= 2.3.1, so we can use * NAMESPACE (useDynLib): .registration = TRUE * R/*.R (.C, .Fortran): now can use name variable instead of string and drop 'PACKAGE = ".."' 2007-07-18 Tobias Verbeke and Martin Maechler * src/rmc.c, src/robustbase.h (rmc): code for medcouple (MC); needs debugging: infinite loops and segmentation faults * R/mc.R (mc): new mc() for MedCouple 2007-07-16 Martin Maechler * DESCRIPTION (Version): 0.4-0 - definitely made progress * R/plot.lmrob.R (plot.lmrob): recompute robust Mahalanobis distances and cache them with the object. 2007-07-10 Martin Maechler * R/lmrob.R (summarizeRobWeights): new defaults; work but warn when 'eps' and 'eps1' lead to weights both close to 0 and 1. * R/covPlot.R: don't warn for which = "all" * man/covPlot.Rd: example 2007-06-28 Martin Maechler * data/kootenay.tab: add "famous" data set * man/kootenay.Rd: * data/cushny.R: add the other "famous" simple data set * man/cushny.Rd: 2007-06-20 Martin Maechler * DESCRIPTION (Version): 0.2-8 ready to be released * src/lmrob.c (rwls): patch from Matias Salibian: Don't do any "lambda iterations" anymore, they are remnants from old "experiments". 2007-06-19 Valentin Todorov * R/covMcd.R: usage of simulated finite sample correction factors fixed: * - case p=1 fixed * - simulated corrections used only when the Pison et.al. * formula is definitely wrong (negative or very large) 2007-06-09 Martin Maechler * tests/lmrob-data.R, *.Rout.save: new consistency tests for lmrob() * R/biweight-funs.R (tukeyPsi1, tukeyChi): renamed from lmrob.Psi() and lmrob.Chi(); also renamed * man/tukeyPsi1.Rd, man/tukeyChi.Rd: updated, also mentioning "psiFunc" * src/lmrob.c (rwls): now controlling *relative* error in MM iterations, i.e., convergence happens when ||b1 - b2|| < eps * ||b1||. The above 'eps' is now part of lmrob.control() instead of hard-wired in C code. * R/lmrob.MM.R (lmrob.MM): new argument 'trace.lev' * src/robustbase.h (R_lmrob_MM): add trace_lev argument 2007-06-08 Martin Maechler * src/lmrob.c (refine_fast_s): slight change in warning() [make message nicer for future R >= 2.5.1] 2007-04-21 Martin Maechler * R/ltsReg.R: replace 'quan' by 'h' internally * R/covMcd.R: ditto * R/covMcd.R (h.alpha.n): renamed from "quan.f"() and now exported: * NAMESPACE: and hence * man/h.alpha.n.Rd: documented, and linked to from here * man/ltsReg.Rd, man/covMcd.Rd: 2007-04-18 Valentin Todorov * R/covMcd.R: use simulated finite sample correction factors {FIXME!} 2007-04-11 Martin Maechler * R/ltsReg.R (ltsReg.default): 'ans$X' now has "intercept first" to match coefficient vector. 
* R/ltsReg.R (summary.lts): no need to reorder 'R' (cholesky) anymore 2007-04-02 Valentin Todorov * R/covMcd.R: the (repeated) calculation of the consistency * correction factor for the raw and reweighted MCD covariance * replaced by a call to a function MCDcons() 2007-03-30 Valentin Todorov * R/covMcd (print.mcd): solve a conflict with fastmcd() in package robust: both return an object of class "mcd" 2007-03-28 Martin Maechler * R/covMcd.R (.fastmcd): subsample size myk <- p+1 (was 'p') 2007-03-27 Martin Maechler * R/covMcd.R: cosmetic changes; comments * src/rffastmcd.f: ditto * tests/tmcd.R: added very small sample (n < 2p) examples. 2007-03-26 Martin Maechler * R/ltsReg.R (print.summary.lts): signif.stars: instead of FALSE, use same default as for lm(.). * man/summary.lts.Rd 2007-03-26 Martin Maechler * R/covMcd.R (covMcd): be more tolerant about small n, notably n < 2p, now only requiring n >= p + 2 * src/rf-common.f (rfnbreak): really unused * src/rfltsreg.f, src/rffastmcd.f: comment use of rfnbreak() 2007-03-24 Martin Maechler * R/covMcd.R (covMcd): ans$raw.cov should be matrix even for p==1 2007-03-21 Martin Maechler * R/ltsReg.R (ltsReg.default): fix long-standing ("rrcov") bug of wrong coefficient order in ltsReg(x,y, intercept=FALSE). * tests/LTS-specials.R: add regression-test 2007-02-08 Martin Maechler * R/covMcd.R (singularityMsg): moved the remaining cases out of covMcd(). 2007-01-24 Martin Maechler * R/covMcd.R (covMcd): save 'singularity' info as list, and don't append it as string to 'method'. (singularityMsg): new utility used for printing singularity of MCD (print.mcd): using strwrap() instead of "\n" such that print() obeys options("width"). 2006-12-28 Martin Maechler * DESCRIPTION (Version): 0.2-7 released to CRAN * src/rfltsreg.f: get rid of warnings, notably some extraneous args. * src/rffastmcd.f: ditto * inst/test_MCD.R (mortality, mort3): add example {near singular} * tests/tmcd.R: test "near singular" example, using new tolSolve: * R/rrcov.control.R (rrcov.control): new tolSolve = 1e-14 * R/covMcd.R (covMcd): use 'tolSolve' instead of 1e-10 for mahalanobis' solve(*, tol). 2006-12-21 Valentin Todorov * R/ltsPlot.R (ltsPlot): for "rqq", use *standardized* residuals 2006-10-20 Martin Maechler * src/rfltsreg.f (rfltsreg): more comments; and minor cleanups * R/ltsReg.R (.fastlts): slightly simplified 'nsamp' checking and setting * DESCRIPTION (LazyData): yes * man/heart.Rd: mention survivals' "heart" data 2006-10-18 Martin Maechler * src/rfltsreg.f: less "if() l1,l2,l3" ; better indenting; etc worked pretty hard on C translation but that still seg.faults (!) 2006-10-04 Martin Maechler * INDEX: added a "manual" INDEX which has all the data sets at the end. * R/OGK.R (covOGK): more default arguments: n.iter = 2, and weight.fn = hard.rejection * R/OGK.R (s_mad, s_IQR): more "scale functions" for (O)GK * tests/OGK-ex.R: added examples for new s_*() scales; * tests/OGK-ex.Rout.save: new 2006-10-03 Martin Maechler * R/qnsn.R (s_Qn, sSn): new (trivial) wrapper functions for easier use in covOGK(). * man/Sn.Rd, man/Qn.Rd: ditto * man/covOGK.Rd: example with the above 2006-09-30 Martin Maechler * DESCRIPTION (Version): 0.2-6 * R/covPlot.R (covPlot): added 'labels.id', 'cex.id' and 'label.pos' arguments, "parallel" to plot.lm() and improved the labeling accordingly. 
* man/covPlot.Rd: 'ask=TRUE' gives problems in R <= 2.3.1 2006-09-29 Martin Maechler * DESCRIPTION (Version): 0.2-5 * data/radarImage.rda: add 'radarImage' data set from MMY-book * man/radarImage.Rd: ditto * data/toxicity.rda: added 'toxicity' data set (from MMY) * man/toxicity.Rd: ditto * src/rf-common.f (rfishsort): swap integer declaration order 2006-09-23 Martin Maechler * man/covPlot.Rd: document covPlot() as well, since that is more generally usable. * src/rf-common.f (rfrangen): declare unifrnd() as double prec.; also in other places; now using 'implicit none' to catch such things more easily. * R/tolEllipse.R (tolEllipsePlot): classic=TRUE: do not prepare a side-by-side plot, since we *over*plot. Rename 2nd argument from 'mcd' to 'm.cov', since it really only needs to have a mean ('center') and covariance component. 2006-09-05 Martin Maechler * DESCRIPTION (Version): 0.2-3 uploaded to CRAN * tests/tlts.R: rename test functions and move to new file * inst/test_LTS.R: * tests/tmcd.R: rename test functions and move to new file; do not 'time' them, since we want to use *.Rout.save files. * inst/test_MCD.R: * src/Makevars: add long overdue $(FLIBS) 2006-09-04 Martin Maechler * R/ltsReg.R (ltsReg.default): add argument name: sort(*, partial = ) * R/covMcd.R (covMcd): ditto 2006-06-27 Martin Maechler * DESCRIPTION (Depends): no longer depend on "MASS" which is suggested. * NAMESPACE, man/summarizeRobWeights.Rd: export and document summarizeRobWeights(). * R/lmrob.R (summarizeRobWeights): improvement (singular/plural etc) * R/ltsReg.R (ltsReg.formula): add 'subset' etc; and follow guidelines in developer.r-project.org/model-fitting-functions.txt. * tests/tmcd.R: typo; add timing comparison * R/glmrob.R (glmrob): family = "gaussian" now dispatches to lmrob() 2006-06-24 Martin Maechler * R/glmrob.R (print.summary.glmrob): as for lmrob(), now use summarizeRobWeights() and printControl(). * R/lmrob.R (printControl): more flexible * man/glmrob.Rd: update examples with 'weights.on.x' 2006-06-23 Andreas Ruckstuhl * R/glmrobMqle.R (wts_HiiDist): *row*Sums()! 2006-06-13 Martin Maechler * DESCRIPTION (Version): 0.2-2 * src/Makevars (PKG_LIBS): need this (in particular for Windows) 2006-06-08 Martin Maechler * src/lmrob.c (refine_fast_s): made sure 'conv *is* called as *Rboolean 2006-06-07 Martin Maechler * src/lmrob.c (refine_fast_s): print warning in case of non-convergence * DESCRIPTION (Version): 0.2-1 - pre-release * R/lmrob.MM.R (lmrob.control): decrease refine.tol "back" to 1e-7 * man/lmrob.control.Rd: ditto * src/lmrob.c (rho_biwgt): fix embarassing sign typo/thinko 2006-06-06 Martin Maechler * DESCRIPTION (Version): 0.2-0 "pre-released" to Andreas & Matias * src/lmrob.c (rwls): return *max_it = #{iterations used} * R/lmrob.MM.R (lmrob.MM): return #{iterations} back to R level * R/lmrob.R (summary.lmrob): and make sure they are printed. 2006-06-05 Andreas Ruckstuhl * R/anova.lmrob.R (anova.lmrob): new, for model comparison both "Wald" and "Deviance" tests. * man/anova.lmrob.Rd: docu + example 2006-05-31 Martin Maechler * R/covMcd.R (covMcd): use R' RNG * R/covMcd.R (print.summary.mcd): is new; also print.mvd and summary.* are now here, and improved. 
* src/rfltsreg.f (rfltsreg): use R's RNG * src/rffastmcd.f (rffastmcd): ditto * src/rf-common.f (rfrangen): use unifrnd(), and comment out the previous uniran() * src/R-rng4ftn.c: use R's RNGs also for Fortran, in 2006-05-29 Martin Maechler * src/lmrob.c: use R's unif_rand() instead of C's rand() * R/lmrob.MM.R: set/save R's .Random.seed, possible from 'seed' 2006-05-29 Martin Maechler * DESCRIPTION: version 0.1-7; the last one with C's rand() * R/lmrob.MM.R: it's "$seed" from init.S , not M(M)-estimate 2006-04-25 Martin Maechler * R/biweight-funs.R (lmrob.Psi): unification, rescaling, such that Psi'(0) = 1 2006-04-22 Martin Maechler * R/lmrob.MM.R (lmrob.S): new (control) option 'best.r.s = 2'; was hardcoded in C * src/lmrob.c (R_lmrob_S): 'best_r' is now argument; further, seed_rand is passed to the fast_s*() sub functions, so we can call them from R {and decide ourselves "if fast". * src/rf-common.f: new file for functions identical in rffastcmd.f and rfltsreg.f. 2006-04-21 Martin Maechler * data/NOxEmissions.Rd: add the large dataset from René Locher * man/NOxEmissions.Rd: 2006-04-21 Martin Maechler * R/lmrob.MM.R (lmrob.MM): return robustness weights as 'wt' 2006-04-18 Martin Maechler * R/lmrob.MM.R (lmrob.control): change default for 'compute.rd' to FALSE ==> robust (Mahalanobis) distances are *not* computed by default. This prevents singularity errors which happen frequently as soon as X contains (large) factors (with few levels). 2006-04-15 Martin Maechler * man/pension.Rd: add plots and simple 1st aid trafo (!) * R/glmrobMqle.R (glmrobMqle): weights.on.x = "hat" couldn't have worked(!) - still needs work __TODO__ * R/lmrob.MM.R (lmrob.S, lmrob.MM): simplified computations achieving ~ 10% faster execution for a (n,p) = (500,20) example. 2006-04-01 Martin Maechler * src/lmrob.c: new lmrob() code from Matias' roblm package * R/lmrob.*.R: renamed s/roblm/lmrob/ but also refactored * man/lmrob.*.Rd: and added a bit * TODO: section on lmrob() 2006-03-20 Martin Maechler * DESCRIPTION (Version): 0.1-4 * data/vaso.rda: one value, vaso[32,2], had a typo; it was '0.3' but should have been '0.03' 2006-03-16 Martin Maechler * R/OGK.R (scaleTau2): add consistency correction (for OGK). * man/scaleTau2.Rd: added * R/glmrobMqle.R: ni=0 special casing (not finished yet). * tests/binom-ni-small.R: more testing of ni=0 2006-03-14 Martin Maechler * TODO, Done, DESCRIPTION: updated, ready for release 2006-03-14 Andreas Ruckstuhl * R/anova-glmrob.R (anova.glmrob): new function * man/anova.glmrob.Rd: and documentation, replacing previous modsel.*() 2006-02-24 Martin Maechler * R/print.lts.R: moved to this file (and simplified slightly) to * R/ltsReg.R (ltsReg): more cleanup; fix (y ~ 1) and (y ~ 0) properly 2006-02-23 Martin Maechler * tests/LTS-specials.R: new file 2006-02-22 Martin Maechler * tests/MCD-specials.R: new tests * tests/huber-etc.R: tests for huberM() moved from pkg 'sfsmisc' 2006-02-21 Martin Maechler * R/glmrob.R (summary.glmrob): "Std. Error" (with blank!) 2006-02-18 Martin Maechler * R/glmrobMqle.R (glmrobMqle): make it work for ncoef == 0 * tests/glmrob-specials.R: new tests * R/glmrob.R (print.summary.glmrob): the same as print.glmrob + more 2006-02-17 Martin Maechler * R/glmrob.R (vcov.glmrob): added (print.glmrob): fix typo * tests/binom-ni-small.R: add from Martin's old "robGLM1" package * R/glmrob.R (glmrob): '...' passed to glmrob.control(...) 
2006-02-17 Valentin Todorov * FIXED - .fastmcd and .fastlts no more return everything * fixed problems in ltsReg in case of location model Y~1 (i.e. x is missing in ltsReg.default()) * ltsReg & covMcd - added options 'best' and 'exact' for nsamp * ltsReg & covMcd - added parameter for controlling the small sample correction factors - use.correction * rrcov.control - added parameter for controlling the small sample correction factors - use.correction * ltsReg & covMcd - output of the consistency and the small sample correction factors 2006-02-09 Martin Maechler * DESCRIPTION (Version): 0.1-2 ready for CRAN "baby release" * man/vaso.Rd: fix longstanding typo: con*s*triction * R/glmrob.R (glmrob): Oops! the *.control() function must only have one "." and must be *called* 2006-02-08 Martin Maechler * R/glmrob.R,......: added 'glmrob' and 'nlrob' from Andreas Ruckstuhl 2006-02-02 Martin Maechler * R/covMcd.R: all moved from 'rrcov' after Valentin's * R/ltsReg.R: "green light" .......... 2006-01-25 Martin Maechler * DESCRIPTION (Version): 0.0-1 "prerelease" ready * man/psi_func-class.Rd: version "checked in" and ready for pre-release * man/chgDefaults-methods.Rd: ditto 2006-01-17 Martin Maechler * NAMESPACE: added name space * R/OGK.R (covOGK): added this; even though, the default scale estimate is *not* consistent * tests/tests-OGK.R: minimal 'test' 2006-01-16 Martin Maechler * man/starsCYG.Rd: clean up of documentation for the Rousseeuw data * man/aircraft.Rd: from Valentin * .... --------------- all these are for those things from 'rrcov' ------------------- 2005-12-28 Valentin Todorov * 0.2-11 * added more data sets from Rousseeuw & Leroy: telef, lactic, pension, pilot, cloud, education, airmay * fixed codoc discrepancies in the data sets stars and wood * ltsReg & covMcd - added control parameter for the small sample correction factors * ltsReg & covMcd - output of the consistency and the small sample correction factors 2005-10-24 Valentin Todorov * 0.2-10 * minor corrections in the help of covPlot * fixed bug in covPlot in case of class=TRUE * tolellipse - both robust and classical ellipse are superimposed * added directory inst/bm containing benchmarks comparing covMcd and ltsReg to the corresponding functions in MASS, S-PLUS and Matlab 2005-10-17 Valentin Todorov * 0.2-9 * minor corrections in the help of covMcd * minor correction in ltsReg (false corrected to FALSE) * covMcd - fixed the limitation on the number of variables <= 50 * ltsReg - fixed the limitation on the number of variables <= 50 * added function summary.mcd which prints (additionally to the output of print.mcd) the correlation matrix (if requested), the eigenvalues of the covariance or correlation matrix and the robust distances. 
  * added control object for the estimation options rrcov.control and used
    in covMcd and ltsReg

2005-09-20 Valentin Todorov
  * 0.2-8
  * ltsReg: added formula interface
  * ltsReg: added generic functions summary.lts and print.summary.lts
  * ltsReg: fixed a problem with reordering of the coefficients even in case
    without intercept

2005-04-16 Valentin Todorov
  * 0.2-7
  * ltsReg: fixed a bug related to nsamp -> it was hard-coded = 500 in Fortran
  * ltsPlot: default for id.n changed - instead of 3, now it is the number of
    identified outliers
  * ltsPlot: help enhanced
  * covMcd, covPlot, tolellipse: tol.inv parameter changed to tol, according
    to the change in mahalanobis() in 2.1.0

2004-12-26 Valentin Todorov
  * 0.2-6
  * a bug in ltsReg (rsquared) fixed
  * fixed CRLF line endings in FORTRAN sources
  * fixed a problem in covMcd: in case of p=1 and cov=[0], the cov matrix was
    a double instead of a matrix, which resulted in errors in subsequent
    calls (e.g. determinant(mcd$cov) expects a matrix)
  * fixed a problem in ltsReg when p==1 and Intercept==FALSE - the vectors of
    coefficients ans$coefficients and ans$raw.coefficients were of size 2
  * error handling added in ltsReg in case of scale=0

2004-09-16 Valentin Todorov
  * 0.2-5
  * several errors in doc fixed (ltsPlot.Rd, covPlot.Rd, aircraft.Rd)

2004-09-16 Valentin Todorov
  * 0.2-4
  * added Regression Diagnostic Plots - function ltsPlot()
  * ...added Normal QQ Plot of the residuals
  * ...added Standardized Residuals versus index plot
  * ...added Standardized Residuals versus fitted values plot
  * ...added Regression diagnostic plot
  * ltsReg: the response variable Y added to the result object
  * covMcd: fixed a bug related to nsamp -> it was hard-coded = 500 in Fortran
  * covMcd: fixed a bug - in case of alpha=1
  * added S3 methods plot.mcd and plot.lts
  * the S3 methods print.mcd and print.lts moved to separate R files
  * added the stars data set (Hertzsprung-Russell diagram)

2004-07-13 Valentin Todorov
  * 0.2-3
  * improved documentation of the datasets
  * added datasets aircraft and delivery
  * added Covariance Plots - function covPlot()
  * ...added Distance Plot - function distplot()
  * ...added Distance-Distance Plot - function ddplot()
  * ...added Chisquare QQ-Plot - function chi2qqplot()
  * ...added Tolerance Ellipse Plot - function tolellipse()
  * added function print.lts (for ltsReg result); included in the test tlts.R
  * added function print.mcd (for covMcd result); included in the test tmcd.R

2004-06-26 Valentin Todorov
  * 0.2-2
  * fixed bug in Fortran: rfltsreg.f, xrfnbreak()
  * Depends >= 1.8 (it was >= 1.9, because of the bug above)
  * Parameter controlling whether to perform intercept adjustment at each
    step added to ltsReg; its default value set to FALSE

robustbase/.Rinstignore

inst/doc/Makefile

robustbase/data/ -- binary R data sets (*.rda, not representable as text):
  milk.rda, salinity.rda, phosphor.rda, heart.rda, foodstamp.rda,
  radarImage.rda, starsCYG.rda, aircraft.rda, CrohnD.rda, ambientNOxCH.rda
ϊ?6[ ȯ\^;2ޭ#1Pg]n'G N5.ᾀ&m%0Ll_pNz7S_rNY~y1v=}ֻ`p؁$X$9zJfPֻK >'2鯰t ㊣^`'NkS|!'6d^&?&;o )'DPJ؀srH:u# Ro;S ϊ}q/R6b'Rkf%76 tkc#P,H}] }li ̋.C&-H6\j01]%.)f~!og'.c͠sykPs$Ƀ\O)eGApJmȳ>ODp^)\۔[ tʜ_f8t8m Ȟ좪'@˖PY(;0rº]@aEUFݐɦ=3@.Kھ4&: *NR>2 wͤ<4b3.eKi}WlS'tBʤҭqr>k>pzcDž:}d~2rjFz!3? 7o49Mx^SW vIh1w[bҦ!TIIbҏ 3 )::6 rN9z@HP(UF^B݄jKzhz]3Q ߿}}FOS,܆]乗U9:t{C?D}~Cr*}84nPy,WxE#vV@i C² Dυ6P rzC}c' 1[QCV(%[S"GV=l0ir*C֑ۡQW>umjc,v}DP?XCe5wO=O5rU0n9|FjW@o&9P>| f⠬iH`A)~>)vCa~HP l|jNJZf4芧8B{V{[u9 2x?<5D֜24&Xq&ǘJVx1K!z.AXmzT{(|LLE*a؝ie8\}Źe@Dt_h1m> I, 4<ϪGPvU0W18ldflAfR*6礸"8uDF?auh{^F 5P=F.tFs5r^L<%6gb7,صcBac9eC,7qj ^pSk B,]shK< @v1Kۗyjg/5iF|k2<ȃi,[.k<>PHdihO0a^Br^3O?cU< = <*3g~&A$C?ݭ5q'g}Evvʆ gqX}rs-҈~-530w'tp2]YZE'ҷRh~{z.dzZħ O桵:~<_sCVqf>w̫^EftX: _54 (*!;~`3XCP+m>}J r|4hH؂ ?Nb !0}! JZhs qs/TC||Uȧ͜Qt쏤CzK:MW~m֘?b}Y33&?`mkl3i> gҎܳ '† @xx ~ԵRՂ' =W;䍞o7y6 ǝ.ae^r8}JR;7@g=^K֐90\&Nw3t^cP2f2ޘqq/ teGARj?$B@i鮰P?g հ;y@kȭxуa[ӳPx&ruA8JatLy]5; Cc*?A\nPB]eX -ׂKy]8LC1ڴ *U@:#A&֯)$};zGx9@~5C>oN H[8/[dAv<Ɨr̾CUO\Ǝ&?w2j }cuu9}( }O m AW+7!\K'T2 xb9dNnPMry2;xeOf󎰲E}c>@ tЯ@:{ oSCC2pTkﲁ_nN-%2 1Z]ؿ'Jᆈ裗$`мW~rJvN.%3DrPVQ9'̓| |`YZ޿|T OO⮂wo:NV(\M/z> ;<#-ơ*S-P6NM:yCL0H8tE?FKwMTd((qbvA5Nfȅͽ: 5n(턡ePPo^ATPW%$rf更pM#r8 N*&v~,6mnmzլ3]E/C(i7PtT[k܃tN:DHӯWća!Afݖu@rF&Ӵ?7F,޽k\m,z&FN_}ݭzPܿ6zm`ij a°ݡ1bԺʔ js DRf<P=ΦƾR0{W,rMY Vo/`|d/O{p=@Yؑ81TJ>B 6yGtU(SZosWoBXGʘzlWV+ w-| s̆)ؔ Xp?"vI~QW]'z]dŠ^?e` e3`}~lsp4:2crn~D̤yQV@kmS;]! ^n۶:o6Q&α1rH^:kyZ{E&0n]as}su% < oo\Oivs]#4(5L%?Qmc=S}P9PL_tywd(rL &>DZ4cq&w>Km~(OC?{Ka-4@S33& ef5V-U㖇EU> ^c rnoT'tE.jNUT[k83Ga-za{!g5&惘ChCo:os2 5Da_s65$BNqЫM9dN?^V ]C'(\ (R}lѷP{dy?J*y u淞RZٲOw)ELנilt<zz@Ț[czQs?(ceA*A>SjqP-iA NYuケ>A-6tN(tޫ6&Cv"Ut aݡ!./A~3=ΌnlbvKPxI4h,F zpݯW7cGA tνvJD -z.n@Dfb<rsV2q&&n B'c?3@ۃz6 ǚt',O=pcϡ&yuS3>.ejѫڡ3"F |\ׂuj-^~* :WE׀?} =4p@Mj}d_!AS7HW]\׵LHn򡳫ߴH+8p7儧z.LdOJg=G  ¥Nw[P@ՈVH\] GW %"aJۄʂTx/#dXf>JIZmx+HG/J]im97L!5f,2uo6)kt7# B+5c $kՃPd;} ĺ͇ΕzǃrKYFp/d#}VPZ{U.- l%M]Ԗ.$t1^ ߛm2@HQvP?'@;"N-Y yF4$nZ= \ 7V7Bf(ՄLM$$Ss(~"Ta-6oMz^q;9g{F7nB~aNϗ7mw9j h)ӊ)Urk %_R9ސҼN+L BxL0CZjӁsgppWؽn,j8d* $'[3CoeAr-gnϣ_<&ˠ93@YʐɎԼXgH "%w6> 7b { ߵ{'w5 OY rr(O 9އ%)s9/R?\`x[H=rm {q8tyv}IJԛ@?e HԎ?:Ug oA7P_/008J;\]D@khrఝ;lt myXeщ m $E[w үCKYNB#?9J,KjlsbfOm.H;|}}|'wq@!#̜yyGzBKC XsN9^8."}OC<w}{](s} 㽼}4ƫ+B9D-g[>@/{im|Yhx3J9soB٫ؽkSM^3z;|9+O ٹDDBOʾ2dRe#uzSܭ.Ӈn9CIWq(UA5y}/L6FrNL'AP>Wpr=ħQg}3R|V}wgwl: 4=^!*Cٹ} _RC1i[ :6axOjXt LBgL5w9Q oEi;{)f퍲jj/,5^)dWgi={27tv0nc Awajs7rօ¤z`zm/lk> |5Qm5+-JqՊ h  vg>g{\Y}o3@0ٿ 4W Ue#eB9%O]BzϾSSt1/0:D+̷2.y]¼?ςF vēV0KFf%?isOJ>حӱzvZtJޠw/'~KP-v>MX{#}3] I@bq{䯛3sZ:17]աq=ԓ;m"`Q2 +.tNJ9.2 {rp8mPl_0܃E s!\׷^w{(,؉CL!n]:ii2$^9|}{߷qrmmE$𭀧kp=:G+YH*u*ݢ2 ;4} ^~0_W϶Npgທ`~ .?Ea#%zC;\UkapV*eB9w@KxN^."7/f;_CyprEUf?^*rQgX]? 8BLMHᄎ(oX}9?7^wi}I [JFvq7 - 7X@+<Z:CK^; :jYϠFb e} F3w&s _wHc y+gݓ3TI¹SM `5 踼n^"=amJ/>!ۄ.J98ZwOqݏasrwn txa {,Mv`Y,>^sl')eݙ&vAuE&ͯOoNne] &=QU1OJE]A9ʾ2XyH/yl.lnx,཰hRk%yrR1v"c'7"un~2_|3$& [e%tjBhw'_˾1 mru3%j)*0pʖB G,j{>?J+xa>V Ɖ")0ӋޤFnGj6qSy,U0+,6 > `Qt:7g LF:4[ci,L:%:/Lew [Uo8Qw*Uk+>;@h?:iQ e֢e |y+ 0\#u`xXa>(;4 )7Aym?<^[=lHDWvR%CSPoxQ U)I!yIb2V ̲}eH-㛮nAȾuv{寤w? 
KxcRG@iefKJːʶ6R<u ^1hr &i,px>O ]ucnX΍5~,<9sZlzv'UT^ߺGZACWVyݯ-~sV.mX0Ű ,edzvA<=k$UBS@u$p.gR_+/3R&G_]PȻs_ఌܻ 3-]|xN|0S/3 :'?1P94otUijlaz+[@;?a?vNN$f:;[[Y;G\\HF t1Sb&<\}Ы¢c}?p)3w9.iJVnjopaxs6[>%lJY{Q6MRӸ`Exlx+Ze }\dPB~s:i:K< *%%D)ufk[z۞сVBU;A):fFգ; !ܵrkx(j6>Ӏ`ӛ6jhkj『G4>?aiZ#/a/s57Qa݅äj%6{5~u?O\q ?<;40<0Qd '.@K M?<\_ @(]A1LOm{%P$m\òǘ;kanzy,̊OpyB3Љ9rDvS(rpkYql8W[=XŲpֽءOp_S{ Ѧ#BtVߘ0Gy5ޡVSS@Jqi8 u ;G(Y@ "~~ltf⡉4=fMF"(h5tv_lK^s9vkq˹>HbaEh$ tӓn<?Z_aL’J(2)P{ak섾ku%=2@<;_; wAKJLoljk?$>b]Tuݕʾ5nܢEy{w^i6: %,H&=}?V%jLJ9BWiL"5G|Ȗ-v8u(4ynyAiG2nBin邷'B ~jzUjaTv &(Hٹǿ}|>YM{/A<1&Hh[_|lw[LAܩswlc!Rr]hZ} #.++B3@3f&1ŽBk=]A9MJrh]#}]3rꔌUJlqAV#~=% $@[Q/ܻ ?@休"ugǯ`۫LACS(ʽJzRHjR#dH5Rhsv0w݊T  <& s,[ZsoP}q("u+HÕ om2}>g1$aqh EgwAáʜΛ(u>_8~;鮜HCcYS42;4RoO3ύ[A$?iEWqrZܐ^/Y3 EpJ1(SW\~1o!j QEd(ךز T؍P(Lj<ߗ{Q: iSIV槳A GW-b)i(I98wduϔm2~=(OGW:ۼL\M͐}V~A{n7iei"P{{[<{^O/VG.c!Ϙ m\3?e+Ͽjbޖ;ydE-GEFR?z̮ԝ\)~-?;4˾oT2"YV4UEdr( v_|M(]>tF7ZUڛC.~Vb %Cu]L$s2%=)$0p(鸬X,W:i$X_R z=SFJ^ac'Be,V]+$d2?mU5㏴nJ?1 }?#B MX^r:m J$Y7AmÍ{ n䮺(LcnmsJ0H'>nrK2yzVۜaZl-W8(IuM@C oF:ݫ'j:hovXyԧuȒ\d}צr(m>`?.Xyts=,¡o]K#vȒ E1H;A=vZggP>Y}+A[;i}R'$ 9V9JB]nh$2浮b5BX W3=>i+_R_B{G f9;) Ɨ:x„P1o^S۔́8a,uWb;T`C:W_e^C o.&q rٰ~/!UK{uD<ߝ"Bӫr)AĶtaϵGan"v]%`6HZO~|_=st/10/ ayʝw&?^&K“bEyBLE`+psL$tSu$Y~hVޯV˨~0z9}w'`XnE-3%缔=ۭIԗ0(u']o,p\+ UDJ#">x>vpng@~3Pvt*jJa:VTUM q+* bƗ~6cpg]>?t v'vBVٚ^.R.i`Z8tuPfoE6"브3fw`6%eʭBPjYUB,lFVmUj~8}s>y<}] `y6*S^3vI=k)`Ux6>,`oTAtc-q/Ykl <oa1ͳ|p8k> a鷫rޗ[g>^P lĆm քQlCI5pLK\'1JQ5&غQ$@٬a`ޣxc8qtR_5YLmmG ȕAILByxe!(rW5P[5#+L|Q$ZU9uR ~֓^~{Oud 5C`=>ULg3WhuaPK=\a2(vXŸlvv`.~c幂ݾ1Ϻ^ýM`L 4VM^H)ks0 04t40jg(U-B&bsoL%h њOTʒ@M?:.[Cw 3VnpRc ۖDKMǿYy~JRT ΩuU'Byi<^*%޺Wh ޕ'm%cwN[=C;5*3P;FuV7D|:QAa9˘}Um$5#ۄQEGBXl||J^\*Cs}kvr=$ {A/Z!Y/MP^H\꒵үJ0ɼ]WrsmL; }c(w4V?HԷv )emY0U=^F!1O>z!`TT6˴jS˞>LJ/ȃJt a/ELXA[@IWiArN\N?Wdx6"?_#CŤ ?N4Taa%,Jd4Š[ ~ܛ͊Gba`e RR(?!ݳ;f =2XqAS#0(*Y V@{qVQ63+geLaiqJYCѶ`F*6S±wC,K7~4?epM;`sjrd#7¨[/sb@>V2ܣovydG#(Ŋ#@[Hb%͓˱Ρ5kJϩãlrm0L&[Bvi2r<𶛅Z Mo3N4qIOUݮq5TЫGjzݢid8F)_NN\X W&˕u$qlXj;fu}fYT߼};2:#~$L%1]YGOU=v`ω8a;PRXk]DNͣ.wv(xF^ku{)t80Q0176|7^``Cͻ3Óp;^f"&>ə`LxBj8wUK(F=Q) 4 ܌)5c-k{`\Ӓ9'AݹTo0#+hXX'-[jf+R uqG-u|lZKf]Kf̰4v=̃:w8v Q^9(< 1APKGDŐg}`'|p·NY"lge +`Z|Op6y&BxY!}zD~/}{*Cj hI)a`vbqLlSb_?Q]h p z;kNo,.VO]G(EDDEDHpXoB?x__CPYh26"xE~Eˢ/c#P|MU2bgte?$^ C(?3D?+M0 0B\}3k\robustbase/data/steamUse.rda0000644000176200001440000000135713212607767015633 0ustar liggesusersU=LSQ~寀bhAJ˟8F%& ,/jHpqa1&:h U'cb(qx;߭wϹy-ݘ17*3bFĦKaDRV3rghdwkv' ^p;MʽQ-><݋S~y%ޥ/67'ؗ{4d ~Y 쳾oH,(wEFP%Fs?r?p1g&CB"Eg&[pV9+$K!R/nK9RFLa]i9KiϾ-_a$^O8v«0 wB a o.N(Ć785V =#;ͧ4sgP7k"-mٵ[Μ%.FO{s}Lo7Ξ ~m6 `_'q[shB.̮{7p.ynn?o ЇO1 !.xY{X|rol:Vrobustbase/data/epilepsy.rda0000644000176200001440000000220411754203337015661 0ustar liggesusersŖoU8qĉS5$i@!MR) ;BX!$rS'i!*_ )@x !Xe#|sgή1`㙄BfZ݋V1A <(u4x< z`\/Wkux|>7' | ng - v@\ρ+y` ok1>kp |~VI \KG t;Fe!s8qˌ NZ2%c}xϪ9eLYyb-9m̿{wVdžzk6=Cr=yUkJfx9k^OZ1G̰e37aa{)<N2s]sMs,RGk+rbϦ,p]jgs5 RWMv }/294dz Vo=I cWfp>hN[W[ Otjp:;/VWvsk#fxFi6JSKC7xH[(cQGSÒ~×+֣DR=(MQ@]IH!RHb! 
lh1*vS!RJԊj ZR-!H:R4e }ߒٴ{hXSYFMI #縞nH ;I3*s7_fĉj^ ^sЙDNQ=ؗS?0w|rg]隅K'ؿQ,SߐVQo[}=KMztJװHC|~9=PqNסO?uฬa:?=?қu|U_Dx_Z@d 7:vzz1Yҹ2`7惸j޷ &^މwH;=ʡ;,۽/d~7++Ǿ fXQ`W?<1WP\){NqbA,pUOv_#.ԵWI;.xS=ԹW: (&h-~P:p[=W7uڑj|7knu~w(7x()WqO,;x H[ĵ/l0%fouBdϓ+- ai+3IoIW/!:9)`Zs夔v%wKP}3[Ob[ߨ;)EEwռ8s9u A5yM9'[uj4B| q[x`6<~i$^-pޟ[Eg[_6Wo|6G^G<}}} q~g+˟G/*70~vWw>k԰̋;nn^됊ݻ> +`mt|FU{rcvbsfw~˰V8Q\arobustbase/data/cushny.R0000644000176200001440000000007210647075756015010 0ustar liggesuserscushny <- c(0, 0.8, 1, 1.2, 1.3, 1.3, 1.4, 1.8, 2.4, 4.6) robustbase/data/wood.rda0000644000176200001440000000150211754203337014777 0ustar liggesusers]TkHTAf%eb&Y=)-3:,LH1+WMDLm4HJʬ$zE'QjZaoz>{5QwJ͌so̜;ܻ.")%E@VGTdq(//C'L+0w|E^D{F1i?RLTwg)}rO^kZivW?S>q(*WX:RC+W\ם\D@}aJ tvNMמq³;|b4lH\qkI c7J vc-+-d;/-xwWoH=ݑt:um‘lO4p> mjj=44Ožmz&*r^oFoGө;?fœkH z?|]Lgj0T(s|B&_9'nX!;%a-,j) rg6r%!Inik PxJ _|G}`$Qz}/gFV㢫7m/XB*%R,O0C]>0{3" .Vio8_{6DZ; a>I`F[y_jnˢ z)jب bο0z:$\!UPAnd&UWeQAnBC/+~ie6ُ}#r[~Ç*h+(## KP0SCRx}&hLe< ,CCCV~robustbase/data/education.rda0000644000176200001440000000160011754203337016001 0ustar liggesusersYlLQϽmZK[S[EŨۡSHLi-F;!$ OHy!O"BHx ^k$ ;_y<|}=k*pRV_a͏ AѦ6<$R3!@>{S`X tW9P@0 FPY0 a4L4B=ZqL0}hU`&0 {Tz‚SZ;Fd*VIT#:7*/((QUTJFˤr]pIEڍBnJ*HjQtqD#Q_Trs VJHT'<(AݠjQ_#JC2oHa7Z+K%W/E_}C‰HnltBIs1Am?a%$0~!aRZUC hn݁/pE/8N5AL}{fw? 9i3;}Dl=nð2b ؏41XLx71hc"If2OfP2X9![0!ه K_rGq#g>>ww{ȱ޳Bn8or{YZU5XU@w%Ϛ Ns_,5=ʠB0̹xFVZO|8Zc)6Z?-9 J6C{^0k[U? tްW>gC} _)<𤷿9Ԡ7=f-Kȶk)n2ĢD[PP"Ldw ))7w0Y'+w>O<gܫ7wrobustbase/data/SiegelsEx.R0000644000176200001440000000010210357044573015350 0ustar liggesusersSiegelsEx <- data.frame(x = c(-4:3, 12), y = c(rep(0,6), -5,5,1)) robustbase/data/delivery.rda0000644000176200001440000000057311754203337015661 0ustar liggesusers r0b```b`@& `d`a)9eE @>H/K1;lP1&(-Ĭ@,\Hj8|6$3bRPyz]L@Y!@:^@:/T @(u[XHkTCc`N:u0pzAP?qA"n$qP3&|E˶`u.S:m(m|B[@PZo18CPڱ u0|^`5CW؋y` :$1/9gF}Hfn*I90`\)%ziE9`b ?R@robustbase/data/NOxEmissions.rda0000644000176200001440000044544210441333762016442 0ustar liggesusersw Vy3$2.EH(%M-i($"(ICEC) Jw>Oy}#s|]uce&###'#'/?(QT,V [:LFF/P_22:=FcyQ`EQbcTFaFaX fpca!0|0a4Faf ea 0: 3atf1HgQ 3a0Ìeq 3a&0!LdI c0 c0 #b1Ø19X0d0Sƒa1aaƚalƖAƞaf:`G0Ɖaf6ø0+a 0 0 0ƛa3Y0Ƈa3/1g a3Lì` 0A aV3L0Ä0H(ìa0Y0f=l` 0&a630f;`H0f7a 0&a390&a3ec($01Id$90'$Übd90) 0g&a2H:Üc d,f sar&a.2%0W"a25g0 san2Lb S0waJa.T0L%c*0 a2#y05 a23e:y0/%übzy0oa2;id aa>3iaVicv0 0] 0= az;` a~3L3`A,2 #0r #0 0J 0* 0j 0,a3 a 1 0Ð c>F0 Z 3a20f0 3af$3(0cƀa280Ɛa&2$1bc1aS1aƜa,f2La c0F00V c06 c0v c0 3af0#dY 0 3a\ƕa0蕂_&???-obfMѡѡ:4G$th!f#k]XwaYL'MBFCSt(Bbth!f!kFȚf!kFȚf!kȚ1f#kȚ f k&Ț f"kȚ)f"kȚ)f"k"dM5&BDȚY!k"dM51&FȚY#kbdM51f!kfcSǦȏM"?6E~l)cSǦcMyy"?6E~l)cSǦȏM"?6E~lXXXXXXXXXXXXttXX RF3Yش`K \Ҿ,ǭ= UOxH/I-S3cSlb 엘q6]7\_pKnexp^8UwBϵ}{h.o+O]o Spޮs+V*a胊Ǭ@̾fP Tn5,K`k?ʘ'Tug  /#;kc(:=Iʬo9j\k}Ծ$NwϢJCMMv`"#:kz:Ȑ׉X z5*ž{Uws"\ O:@ 6z9EOs(ǧj/,9*~W]MߨZxگu%nc@+.Mj9ߔ@C6 S }&9G H}9g6O򷻙t/Oqz DÃmԀouS,eUZ %9h\⚢UtalmHh*^xқU%@^r޺[T{kr?@fH)^~H `{#fҬnA|)Aad۳O~Į^g] 8s#定\|R |%g>fcǀQkV{Q[?䗽/qhy1ȟg7_^?) ܭ8x<~qa3^-+싃 `Ee%qvމݟ@v)3%.]:? ڋ[nUͮ@MҾӏ;l>ʕ; xX%Ck[2[i*qќrEflޠtڎi-)X4oZ*'vXW={NoYog+tT~ x e~J懍^Ze-V,Whr@\68gO0/BySzw%P;Gބ콷ۖe'^q/pSxݵmZ&SoP'kxPX*Ƚ kntJ2hR2-Ɓ{r-2ѻo/>+^ަ;?ݐgg[_WmdJ`}թQ눷 yv5wOZ6:惺9B1+*mv{} 3_DnbɖsLЃ~9gRS."\?YԪd?kn2rB?Ao+?㯬y.FѼ'>߫SV˃Jw-L{b=˫F'Y].NTSH\#` )Z-=to/\Οܯ^OYd/]'ܓ\fhi1mq g Z YlյYᱽNi[ơ|_483WJ,A^ѾqOK- J7vRn^XZ#FSNfU=\4Zx< vI^0 ^.K@-y(q ' u {FWsr!ȍQp }=V7 ߷`Y. 
,_Op@*uz5WИFhX2~Sϳ8%AEJN&D98Yֲx5 ]T߱6w–Z+UwnoȭH SvJs;sQ`^͋| X|[m$( zC g}# xK 6I}ovFK:XU.BIyS|X/: 7@vٔz1@nab76Soۺ&y ϩN#kW;7lMŗOODݙǵnʑg6F%OwRQp ?qsL,Udͱ C~^+5Gѳ]&ɉYpT L<j R:,<à4n^x(g qU%W x"Uq.n/a1X 5I6ĆX3?zOnu ږ>^Cּ{+r{^OGfyo6{ BڦBޞ@r1,MϹwɊONS2q kcdTysoՀ){eb i46l2F]h4 >mU>qw~Uu<@8#iS p\8'K <u5痁 ՠɎ@$s(mm,i'?YH, [d3JZwݿӖzk}TvwʹV 3.YSKrA!PH ? q@|@o]GAc'ȃSkWv7jBCw m^ރ0;w\>.x{] Bw9KM:NT<ԊS<@1spa:ڿr.ےG-|XZyĤqO{{#q:@ aO ;0ǀ%:ɿ[kmٍ:K8ɒk5?儩 wxo;?~iwv@;Je׾k!o =:2[:4(wڎ0o'H[y3vN5 xSuXƒҮTv3mɻ@"Zdo!|ig5A+u5vV<dFY#@ftWb|`8M41l_3A=Eϒ7Si[iuG_(=054v_9t |  ^>=nVwá%'l_ )F3Vv5k0opqI]44w;N]4]WQ-W{z`TW s2~l@m<'=1ͣaof J H2(pj'>tbnIn<:~tj]LFLy95 jm z$oJe2\L]oOJo<+1,n”Rq;&yBD޹E$EI@8ӞIfŽIkegVsf0F Sw2p  UsVq)cc/qtYy4JuA9X뙾mzkH$>*[ϑ 4?o? گto٨#Uw5fBͳ-%])_$ygQ<4}e^hWw1ubϻ}eX`c(f]8_5Ve`Il7t܃ r08svMd4pdCh^kN'lԎɥۏ,鮲և_xc$}f$7/0Է tIWcThvvv5?/xsyj@۾haaMg^S9+U~eTРY[3xo\9E)JWW𶃆N^ |]n*Θ$ :Ϳ6i^#*G(g|{-iʀk;a >fݤiY}yb՟2drQ[a;M5m5=.F<-~F%z?kyE[=;Of=~k'գj1e㷠x>G? Ua R;{:v&Z m=JH+;+?+̏z\E:7zWGl_*Xo]rNn-zz;Z J7%>x_)ڵ\y@9RͭccKv_  _pwA/jFmlI( =\Uv#\o6;D6f_ dA+a#կoM' d@C4ʗskA:t[59p\ ">KN"|f TEh@Rv7~޻KP?.wݑS0_\$+oYA`D|s_ S@c0A0oԩ_@9<ҥEM|(k61mϜSTa{<˓sM3xj !gϙ#. sCɯ h]RpwXG׵k@5#j֥QNy7imޗgP5d : 蛝^`CnS7A ҡ|끨Y,7TlGߠL~1xԳy*|7{.!|M7nRVix{^1jܞ|yBnoI&PoR}XmO1T48 .c?}B*p~hA=W鏓6Á(J}^xUD}:9\ů#ֶNM p\O_^^ oo\^IΌI??{?eoP"Cy7/I: n|w<6"#]jvHC W;Jt\4[: 2r]JgX%xVyyJ+Lk7a x m+S?r-]_`[UC̈́)Y> L/W\ éZ=u Sx5Zg^3 xɷ9)>#m#oj v9gen {G yۜgeJƝAqÆQ?QwHb'I~ay޴K~zlZl$~ڨŒ^}/p6=E[<:6{|ɑqܦjL{ `o.vRQG{:mɴ}:Y|1 C緽;w唹 Q`UdF*~wַQ9m!g^67,^Xҕ/?ol.޳Qi@ r}f//O٘`1uMC/T>=׸u{|P7_i|@'ٵgO\Axu์o!mG`N|O&wҴ}woہWexi;:f^J'gǙp>fwu0.=ȈO|cƖD>BޭԀ +~; 2ڻ&vCwQqяj:;vuIP/mJҗtKW SK%3]DO:ogsM=9  S>D/iX;AR|ݔHϏN9cRX86G7j(ÑmsQ2{|6f }ypao{|,l ~fyxrs+Sy~̧wSUJ FTL<'U;Uok**?xi7%:\yZln퓜R<a_3e:gR4{;Pq }ttf~s|Ψ8`|LI JmK&N_SڞC?jGmM2×gtz\K1?=?iG~qD`R[??DЗ#~麖R# gE ۧwp= ;ª_3ɿFqחÝ:9 ްE6o2- 8?&7Wr{&i$jMB;$۞lA/(,| uY_@;S? S2|]MO9sq&t]o1XGfA(oXGgQ-b!X%nMZEtEL2vʃ/K{&Hϓ7kI#EߦSqSt;@xwzU??OucAshT]XH{昖,g.'%Y,.KW]j!,k 4Y; u@ٸ(z{iL ǞrɊcȼsYcSMA[ 'iyZq>Nijn4;w.1Ö;gkﱆ@Yp3:f|OEm Oz˿ni3&RN[9.kÎxҹ?~us^@Spj/Vٷҩq"G.tmҎhWw^:ُ?wҹ Pnwm~`St݁/ݸxؐl; ?MRR)Mh]P]ً~[t%SoxúJ}E\%4GrSPc)gXv'e18;["5 }J!='O&\(A[[T6D>Ub{5Wo+4CL۷'[TזNC1Y.QhG_p otan5 {>J[@A̿}I&gATF{Ո.q鼍0WF[J}[vM.kMÖsjsύ Kaֿ1mct7x9ΊέTGpWx'x {4nH'"Z/cl[ ?ʥ\>UݠocA:&q>Mܵ 8o}aBqV Ĺ B#shs/q#c%_[bQHIEQn*T XkRq4W^-!ب:z .Tٝ87P@Co %cXǹmŅh6*=7HWoJuY X'a`팀0EۅrWx%zcA*~7~(ׇRZ[< >Y lA5 ftR=7ڢx#2; Xx[:N)кOfh*~K{d5R3c >m1 v5Frilp8dqWIpp]ZxMӅրV]N݃ ,/%xO);޵_+ؤLoLT|Wu1eFѼ@-n=W}oƠj/ p.*Jiv׆? ]]T-H~gꥱv3nvh?$ݤGѣEaEuoLWm_p+e|ԁ=!< MAzZ@԰Ԟ )؋ckzj} 'ᆾGp>zW.xu8jUl6ݑoBc ՜j_NIjr*{NeaTH?Kڭ)pƭ/:AKN2x ;c6},;{#@y1j*=Lp4E@ϹqѣNoy$W@嬝diĎyKs f~W~%:l[/^|c בoL%Ip>*XY'Apwɤ9^tx7g  &^sCA"AxAY hT{kBG6֣eZ߱5n {:3їŜ*ۗ4+mOSNrņ};}g*Xp:{d4}:dg+&H guT%eD7/ݼTv _{B_h 9 ^>Ta]@~9SO|>=fZ? 3<1fg\~3i|;c<AU^CWwn]No);oOσbVnC\gx;j]|^` ]CtT$:s+<>#z}Z-i>`Gk׼I*A תNay;I0=O5}B秒zpyA?=O+5?EsޮfA巕{8ޚw7bqs/Sr ʟM>֏u7g^FWϼe\0*_1W떴_0r|h4@ZUX%m0z=m=Q,NZ3IL֌bNk*㤭t=pϿ0g{];DdL❧]+5%G{£*;,yǔ. 
O)^ )밊o?ԛ 8}$% -@>_=B#8n44 AqQiDzמĮ;טPû;WZTHjOQ4<S@ ?ZyA >{͑11 j qa;ʳm|Fecк/h]ncNFalP9ԥ0"T_2ߓd5sɸLۛiFFC{PI{յYYt^gTi1]*gHs(dY~6 ] {QΧ}bҼ~`|Q3wռ4ɬ1]+\X PoM:Zke 6Fr|˽ܓ` ꑗ|"A)Z xiJvβ<{^uE:{Oz-Z[v?#s8:Ϛϩhٿto# z_9wOQѷZ\jsN{/m={R@ZsgTo=VN&Vyt?Ϧ/#gx*/&Ҽ|rHT@dՕwI~7+?}RPp7YL=c>[W}nTWVU\xg_V7<Ǣ sG-oi֟HRk4΃>}72[ر`U1q8R:KKԳʲl$rIpZҝ s8>_#+|Tp deunoTگ"3Le-H޽ T)9 tNKJ wVqZն[zKdN1|E:oK$o+OKsƸW׾PW n3ݴG'*-IY%9Ď}Xҷm'ݲi8_YMixLj{T~TfAT*/^X\R>KU;;k})˥U)KMVu=,P/ww|^ z~)I_A^`k @5Yo9ܹ{zV4xw~2KMsmWqHN)~>b+r| L|NM3dԓ;!=zur`zkn潗&Q5{M֮t ;;CT\ |[R2)S4/'D.V^PxcO>ցTXZB3CRʟZՋ@mSi ]] dfv<D j7bL[Zթ[Ap?C|񈚽/wȗO7SqCl[ߕ{ wg{RyӒuYTy(zV;a5 $i"Gj%Z-4Sً~LCޑ3t%>ɧbYAx`ɽ rC&N_f?@t=Wz,_x#=[Bs3e&:J}d<1g!{Q4l 'Ƭ8y1t]۲H(+M6MCWbR};|{z.Grk7b-mb7VT<;b,QAĨ-@|rzZ ]i Br~?Z(^H8}J8 姽Kw~AO=}xVEW+;w\}2ns>r읯%Iо#_>jhN ̳̿hpˀ#=Nh嵭I ~ {GqJqT\:",] IҾs2s-EEH[3y}@VdVsB3}܇@_H:!]9GVz<ZFlAW=a/yeAWwB_%$3>>A_/|1g,c3+zD.\sFe3͚ub9qT52g #q890=qT^$`9X**i$t(Q(@KTFKi&B%E[(R! Iyqn_z|>x Y>2ӾK{B r_:emNlOr#`c07 JGP1q3 W,A_T@g_&C>`JFZ?ܖ\*{{nũs߃"56e~P](}ߙu^|Hc:Y9E11C}7YOa.=`oObzH7~9T|!tte+oGkX6n+{CsEf۰K@$ CW'݌1x׾?'MӱD-J&t ʬSZ xX車]C:CM3 祫tP51xӈ~现rV?X\J >DͱRǬiEقڛ=)p*q9W;"fǵ4}X]ZW9<^8.:V_ ,3,tꔓv/Mh J#s *w35Flc}9YEyȝN~hH}rͶo坥m̤[W"kJS]_EJM W$ȊyBaR'ɻmשwjZ5uL(^њjp12ffy;+hN}+/_(O ԯ>^QZ(Mo vz 1tq*PGzͬKU'.Оѝ7t4oz9tN}pe3l ̻NO gƷH3Ss:֋ۂɓCy=l<4ĪzHӲ&`0A'c\I{wLz$l-cP'GIu|_F$_PׇϷ8Q]&.mmD:[k>>Qu> k}~:OX?xC3FGrF>vNm|9*p(|:~tZ)'`}@ϻA)dHdp.?btՂxհ6A`iN0ע{lE`g/LkOygzn002|U ۸>:$%*{w5H= >IP'|`$gpaXң]oYQ=]˷~{~0^6xfsXBǏC&}==1٠jy1;/s|U%ZPA~eEbymXt {ꃑ9ַhά|c}oi>'_U)@5lݫk$"}X_\xalzqi//Vp*^cV=vO7Rd福Ź' ^O\ 8O4BW[6zPj0E һx""oL}S6^_* ޻']L}헏T4vS{}u[[uu HVPCP(00Sg*ͬ~UO) Z>O:GG|uq8n8Y2:v坾AzݦUc3߄<Ƒ7?_̡rףE\Lmϒ].ߤt"ue0Kuly'>>F~?= [roNM#ӗ?z#R } +ߓE8f}MSzN~A>~UVY5:E]p0 we}:+u=I碞d/@ETP=/x.;t ۥS?.ﭞϠj8f8 Q?y=eP @ߥ5;u 33<QZ:sB`MK /YYt>_͕G2kGî;W<d3ً' O.[i X(mh~oA lL_7QLI>[Zf~D6W_C0#5;OA-9YKZQRԣ AwXjt0~G>mD3zקfQ`|CFF?>.ι:|;B fx:*,ΪW=-ZF3o.i96鞘>x3^vDҍg϶ ˑGE_Q]g=Zl% TJWoxK<_G(Op˶gEj PEPc\kǩ y"9[^*̌8(W&P7B#acNܖxPmv; }FY6:OK&.緁NY{=1PjLW(u3v!]1wCc;T_ :kYT׊Ta Ho;==/~?T⋋5"7GO#4 77,kUuerH:IǛ|*UwyE*19#Sgn/qT?T/湼;gdn(t(my± 0`Hj y~*XLjAfso{3R=_xgukz`4sS߀pgHO\*g^yȵ}>-r˙o1 'm2}S' y ]fGyv7\4Qwj|+בf!ijAXQv2'phk`#l/Zܙ*_k*,$8{OrI*53t_yT28G_M~ 0S^GkKHZ[IGTN=VD{o' >cbN )Oi6K΃AŪWQ_6ppm$^@uwVƴpg-j9LNZ)F>XD)~&l]h9u& 0PG:ծT yꃉ}qu vsD!:>iX}.8"ڱ.p`RA LQwU*sE[6F%hl I.}e Ku~͊c~^asEZPh 7IbUW)X Wũj+b\lfazP+Hses6y Oye gTM^jDjanxy[7!XЏjghu%°>ȹ'e~׎H']z< 3z}L}^vsuGQV/Y~':!`u7Ƚ*F}7Y%֙{Tݰ^}}.J +IW; oXЕwA.8k]靺K`IzoGb֟jSf] ?MuE~ui'JHou {Cݴdf_# EiTl^q!rȯ;r8jIM}a=at@Ƽk l‡9U3A0>O^ʙS}c/IO}g߳'鑾/`WWQ*W/\3'˞\:[UI4V%M:gARl)~ r]ʹ%5S9kn=¹ntO؂{&H>=[In~It  ڦw\g}g/19{td%yYS%eNIO8jW"M?[{p͜ڬlfVa Tuuk*Sߍ{,pKݺ:oV%{269{4/o mVo(dpK+&}B|A#nm$>3S Fyjڒu[L |F׊'&q$b}Ͽ!)9Ww7+[B\ 1Q]U9N<7C0C-"do3+5F˾9ZsfdF`^eӞ^rUov3~}vm]ȍwM=iKU |Eɞˏ}V^_48Ǽ&h>*yo*HY=tB_(aCIGOیz ㇌ +v'f# 5=^sioA:C*Q:U&FW'8G~OOG<-Nro$^shރ'/f }{vm=?X>3#?O~%)_4#gT [fvʈ't5q1C%JBw^⁈sj?IgNWf#O.q+q tI&O؇sm6Ѱ͜8`mN?/ˑlV }loVtzW!x*T.Jlo&흺kwF+#vMن5V#w]tjTLjym_i$XޏN?|PmaPO=qXΠwdҜِ9w 8%~<ɧ9_mU {{#͹wg;ݽJU$;!B'<􉪴3|zi{*-Z^gᜄzBCf,qw:9|lwOl (6 v'iAü zŹstܶswu|j{haOz1OQo6?438 FZR#sD dwjڛ%y*kx趘X7" D=My\Sq~&?dl@>O']˧x|?M{WttojO O{,|I^9nNu;0|Fu҃W Ww|h7?\kWKsO\qPs6ֱLL?W;PQ_\:L:}*~"ʽMd57vg%Ω>;$>M{?uS^:aqrgdǝ eHu羜?;++スr!mFTU|?T⇉w]g $֓AkA7x Ƴwа \pp^K`>rQO{_܁ľW6vKȇRz?>e wW1HX2_p_hV=upZolA;~+:c뾤6%F X1lGoX0pL)-;SomgrpO~{&mzlj?,5 @s#GϫMy8F}4:5'cCXf%K,霓r['n3>B};M)\=VzLM>zfwp;Opy[OҞ3HYu.u)qtyb_sNKw8VCZΧ=]Z[k@3|'nQ)t,p:\^uwT1F&nدIy~Xc=N7^_v}nA}faKmJQ-ouʾLVy$~s_]f506e^)qiܴGxn9 LӮ]'$}3MG[Ϸ|N~uQ]4}Ey )GM~m^'ǩڃAvU[Mv8P@ω|z bwe_mYDodR=nz|鞣N//[rͣvrU`#=`{]ȫCdJhcfjX4>?\Q2.Mi)g~>9R )3r.äZع^( L*|iG@g"w}WGı CA98z88>WShXnjKzfSՃǤ%;vm)U9q=VIy2}b2 
3GGCdC}ǹ'C_Lb}8 Ͼ25ȃ2s霤l1ՐKS} q=w.KVul.0SΒ/Kr\USsdrecX_[?XoyNsu\`܂x,'A]IyyԼÛ[jYlQ& ۼ.~['K}8'`:xXvYuMzftcv,y:]GWklA}o$>kZ=yŕa#9T߉-x~ ƌ[`u(,ó7(y3l!O9S7Xo+H|$ ?>E>Ig.>슴J #w}SA\矖n|E| I+s_q~3/}ÔDžJk;|o?bC oyڳf3;m14'ڛ`.:gŽn~Nc8`݊~x̫iWsϧ94֔y~}Aׂ͐N4Ds}}5 +M:4pBӉagz fǝm V&=vۑXa+}n'Gl~2Xpo243?V6j$3:.ZI%v}`/"99^_v0!_TLyUkwbt#u^kckFܓs!'cpӢbs_[=RqtΘ7}j )ieJcn.$}~>Ugn0S cX8`G(Y雮kJn^C&WNjrPImf0EW6~' O1x߹dcR`'%kSP;Í8BΥ[,)[GVϗmJmM#f\(Yc{sZ{.PTD#ܔxikVӞ5xO(kpNfLS_7<_4'=cLSdC(.p8(Ľ3D߆'Ė*c7ifkEą>\+rӜgXe׬Vf=i{zgImEc HEni.8zOO8ӥw>~.X8E (żI\؛jF4b=J^')3kա>Ψ/sƼ~~}ađ}Cm ]wjǵ8#ƤϓK]x¿h{:ꞏ-ȓa>nx8K'yJUT07hsw95hnI)t)6ӒM~g[OŎ@ȃ|/Z6/xs";/-s;C˸e)WvIϿ'u!3v6Tx?QbbDn%rY_Qեa;~ ԟoLNsԵXÄB:^﨧uy(j+夲G+ @Q0zoW5MƅW#Kc- wTGtN&-!kbAOJxOG:qHq5Hs&-߻ܫkAeۇ4զ= 9xu#,3E _mKSud,~ X |Q\u2!'}$< m+}=Gٓ?u5, ZͩB~>&݋LW8݁_YpOyzsSUcT\?fPL0a:(<"bR\%_`罙/(gLS<Σq!.ϰZe}騗B `۳ثl.59n*u籼E#[`(pHuq^:C'scf,!WIj1gϊR,/X3R)񍗗vgu$%4?qn{;z:7$$ KqQ3/<:Jԃu3֕=G-xE>k. u"w0ĕTnzn qrGm>u5?9}`_Cy"oC6PNJeaf0#7 &ޛ|^C]?FODwOy,rU G? [w)Yi:_9?UBY;P鴦 >?wFxל߳/,uQ(kyB>}XlА0aZ!m*aȟ2ܸ nL^ȇ[A)cFҼGhur[4e{^ÿt\wٷ-؊~Iq|scUS)k7.梛>NKGrl-宋<!_]OP/ސT7)cos7_:9޳k:Yy2#SuMxZL}_XF̋.}f$Q崠MP'PIƼ`Rz;jLr ![cS(C*oo.赁ێh>~բuۀ.u~w2H IhiGQAnHg(׎(@䓃+Wd䘶@}wº 9-A˟Q8G.:E5tЧy' 5qެuIbG }hE~ :x]<QDiH9 C/MIIa.=piaj|Nʌd }JZC?jX!斒 EItܼ֒wKU%Gh8pw|m(yd|/&`~rpϲOӯ#^oo4y糦[zFkem۲PΊzk7=f+mxio|fqTfP5(ftI5e|,ml9m_ѲTZ#@=KmҴI |Hkܻ!uorn"? gi9 x)I}M PX=y{з~pa`̻׋pQ3i/}6`V\NXFNr;i/4ب Jo&0կU7/q!rOK9FGn7h 04w, ao<4[0{i}mG|"Q#cVnagUVߊlW_OLy}]췖K]5̉g? ҷmWGstCc%N ߈7d.YYGrЏ%~R%cIKA˿M6*XP(bPo%.=xxMoA7lYZ]^Y$M-0hN: ٜp]󏯿< x_vm1ylrcf}1<^(F m͉ =$;/}F/L{oʵEB_tӢ`s^ҁfxQ9kN8E6sF^!=E":¤O,`EnN'09rǶa꓉k9h`/%: yLx:;511\T&wTw㾇r\E_0 ռ2>.=OAuI/ww\G EĜ[(AssSq|L\En_ԫC1[^aߤUmU2uDM堄3>өuOf=*#Yxǧr"_w|Xڶ/8f[໚%,>ocĽTcJ W0V6mc#I`S:Mv/vQ/v;۪z>uW7~MvdWv.x-ftϓ|z }DjGWRɾߤɠ x_8j,(7U|"7g{{26_zP-OLv R3 A24 gQrC_Ξ~J~N#\=R.+O{7goIƫlROwQ Fnޓuh>5d廲:duD9iY?4z`–9һn뫐qȻE8L}oߠ2yvJx:~1YvWL ܒs߃_^T-<w;ZI:pq>VTΑ?4.?{)Ҫo7Oosl :04n"W%Ͻ<Z8MȑbnY lf.qp%ٴT#=r2ipy>v[9>zЫGy ׾mj +Iˉ*zЇPә-n~p<|dg݋@nc~.L\=ou}Xhn687ecn1 /^=w~k0X*0~ S 4^o-sv3M_@a/˸ﺜt{Ե⾏r{ucA9L߰&y̳rȗ#/Fk3?~Siy x>`Qjes?9^ѪQ shPҝTH!]nN'C@_QF5}|l̏GޒbhpUu +Klϛl}٪wΥl-W49ҠܘB|CoH<+*7Uh5g q״9wf[ _Rnբw}r7/(ޜJovUh{x>4bγ_8ذm\A}E*@~ΒE9UUuVqʇZzn^7eՊw]Aã7ԭ}2(γc.?^:.c?R A!]=PMv[6`= ~| 9s|x#>S8>kJ69L(iE ʾ;<6^&) +m2%ɠnv&0.nn"0aL#Ɉ/uE/K΁LܻRC._FbPMcxа mK SsO ;}olfϕ~ݟaH9P_v4ryD\ѡ7ՀqXVTwʬ6`w|S)(\=*/Q}[(Q=^k!rp?.~ryşVӞ˙1[{J "_9r}Noe}w@9s'W@9iW{_dEgHs)~ _V#8eq,([e.֑ B9'ԋMO;7vdMXPq;Hml@hfv`ʻq}䭷ϭ9ncS7#Ryc5iAcZ~کb(OI{wE,dk+ȇ@&tJ@ 3&l(H{s8O9mk /[:9-U)[Ǿk]g>ߞ21A~煺[jKM_o(\ԏ_ 8GUv)[2twk+`A͟c-XesGwҷc~~7Egw;>蝹>db=g; ?u)QQAg햘D\?rSS<6jϠӴ_\J _aw W~:upo /;]ݛx12[LִWɷMn?akO`wG7`w6g3UPO{WvK̷v:Z t/~~)/ j y~=(|?E˙GHd;-cO/1E6:!Ftyv^U,^꫟UB~=Fq|M>ćǽჺeI)wvJ⾛rX* k hNyXөza{OƮ-S6MW ~9[?07KnYJyREv'Q~ 6$ʵ0RZܹ Z%h˭4uĿ-wViΎ~L͟g/ ۂI/%t?VZG&/ufg2_޽OtlNxj/xOI.^ N_qqޚof_8骔=6TX_jvB{ED-"?WHjnweZf({'ߔnsKx_cR?u|C^ü,yLK{P rH ͹?Iq a"I{&җր@~=#3o"[?bmU~ ξjNydžݙZLrXE~p>qU&n ]ƹ9e40m/oN~}_N~E8_۰͔9he$pYu2 T8z( ꀏ2!:N_HI񶴧Pq hIq~r|4[9*ǜ߈󫈽j'K/sdf+lvbEE^u 9QA{#ܣ2q,ER{{cV>:J:Qp^vQ|?\EC%P.Ѩ?O̷E=%TjX#_kJ?G=n1y }-۲@yx@{>/$ :(zǧee[ܟyfv%O'NO)D C]fw|> ;DYwA?-G YC-ZE7)xn7ZIy(8aO.PϷzzЪᎠ,-{(﷚S^x?BjwpGxX̹k?a P/3pY?涌FoSAqbīƜUŮW9xvГ| '|~Ŋ!/>OuoρnO1 7J-@{F?G%SwYM'uA9ݕBie_U?;Y>g?b^={P?Z YoB;9dP~3W,t0u3.;,V (_ =~6{X]Sx@u(!y߅CYatgWMV>(-;@^y#ZuG]|c@ ]sm>gsr"{Uw ށ}^'y1cagXoܗ|];?Y5G8GFRo_ o7B&4J٧|Ƃ۞% Hi9+_dnG5WOIOʤ:&P7VȪvҿ+1y-·mksA1I`WsLXpaVC,r_P:3?߽nM[0 6m /N[6I0mwov_ekx^'eۑk\kNq:Pn^eSyZ-ch/N<^-[xޖ]TZݢh\q= ]}fp7=(+>uh">|עԱAai |Sq!b#k˜X^P\ e*t/iɂ:u/,t{F"ou1 sy]k _Mvq$zA~杧GeYxz::ܑ5\[Vp?MVLVXb1[5*g&o8:|{v(>t"DO߱y H̟-&;eϣG^p^ x}~ 0>"^=R9?c?t'y^m7,SX}2=shu~ͦ&~ Sug&# hL漶*m:xjB9+5'r)F)ߐ7?~NL~s͉5,lM>%ۧ`CcK;0w/s?| <> 
xoA1JCk`gt0Κ}᜔&@&}ŧ;Q^$W;繁uq@.sKI ay5 ˇ'^*[ÖZNQ)QC~q^#eṁ{<{9ajn}Vm!18U7>QxUf 'uO2Tvnss7:W ѣ\vϛU/, ?킙!B6Df5 {C<΀2T*l)G^یt5pgl|^D)\ӆt8"?/[n* J sY>Ĭ:~0?B&}4t@FX_ Gk/9,A$ N^Ux}< dw 1@oJYuwl`:ukPNТ~#֑W89#A2ʖ߻S J#q=k;c%(s Z?ɗUc *Q~:޳ o;ݷol;2%ywvzx1;ǎYB0'h(M`ț1N~ȵeww(l)qboī sey)txCvUi"k2N]rCkp ΢ u`꙾ ӳ\5[xt~>hK{>SGGG眒5HylçA7jЉ|bNGc9`>8:Ʊ1Ef;6̍|ߨwOw̦Zk\2̍eWdۆJUq>g4sBdq :7|Nw`)PKguX %ُgf -q2 22B;)JV-J$+DȨ$RB~uz^>xqs:.тfu eiπ{¯g<g찟l_?>Nj}tO=rlCD~bYD^?pXAZK lz>'Z1]G B.~yʢɞ'oY(05O*aAr˭2️в39_}Po>R*9d(b-m̉L1k2o8P.> t_ rM^v'>ʝB>F\g݋N|/4Fy:>GMЇH6wBfrfb~?E?_}p~1LXglZdU Gɩ'~yhDjhUwΈ嗺Ӿ:#ta+̙ߝ}ة""C;%1O(l#՜>y9>~iAhycœ#͘+)B$Hr:h-M ^%cNX5e i<HRr {B/g}"pCDl,huVh|sQ=^Vb#~oRy>G+k7+9FbNsV2˰kK%.^_S^D;| @x,EPف6m!wo'ǁ}ͻ`F},Oj@[d~G~S|1ǃI86ӂ?Fy]~`޶n&'&˧v115zy,S U>؍6i8p&}I"@zZ7Wzw-u$?|!8Wuy*sey~ Q\>3y!h\w66@kz2uSO(1C_ Kޱ@4 3ړg:F?~l:z~s fh^Q櫵ޞ&O:]}B{<\e7/5uBerX`eH&Us*$'b~H/"$O gɜㅳ|ԯ"- |Qf*J(~^]໥W%eKc0@jٴŠE94Nc''"ioxfDf 9eLN^wOt9'Y#~^dM͓^@L %&ZZfg57mϖu'椩|92X@?vXA==8Rh‹9F+i9Cb΋+略J%HJl't&CIZ >^3͊\}xp'Po<9G>FHn]CDsx, IY!|Fq`A!}[ ٽ|*?GZOӐ=[z{J%-I &FX">9J =~B./+RO1bM4c+u-@#~i|J?#Ϯwʢ|aLm8O-E暼G=-QxI'h+,Q~xo@ۚ^hT*o:%PşKxFPyI= B+)`H>*vfX`GRIzzo$]v"@S yh#Gv"jS MZJHBW&|gR|\}ǔ7n0XbInǮgex<=4*ճW,݇@⧖sМڬ&߈O\}VXJB:+g۠(C^sd:Snq1O2szH^~W 8s DViK!>q=d^cfiwhmN{׺M]c㎗A->7M,fYOv?#R[_OMO5@nI9a |mNg|vM}G/H6sOX^ZX | -_[]%ܮiCsX;ozFd>1+u@r@bnT P嶛"m@NN7ZqډKFG2hB>&7W{ϩ5ÖW,3xlsnʕd@ዻ bgw8[ypOLjQ*&οDRX^5{.U_B'nz^:F=$}Qr@׵k |plۢ@9 ';tۘD)>Iw !) _;Mb9gBUK8w .j"p06q c xД¯mm>Nf>5GJ_9N~Vs_@/ܾ/0h:k3@p2eȚ^9ݸP_CbUD-pT>}cղ>3PKʔd&Cg'-s%w /v*>jȁ@A$SjDKk([U3Ͳ%2\]xBY,OV,DbCqMѪ.CxM+L%a/݊=d_ֻ|@ݰ=ڍ_+v\yׁ|R;K̀`cl45i5 2-X@ϷXϴaT6x9}`Ƒh(Ϫz(9 ljE}qdązje}>Ї4˛z!17BFHTjs3o&>Q|A6˛H]}y(xj|DžhOc/U{i`JX̭J#{eDcnrⰣ|@q kemuK$Z1@հ3snN'(H5'|p}~ ܯś_{ݒ#~X \E{O`+|E@k+-s5Tň;G.9\M{n冐(̔jZe}zZ;=:@MG^o3Ĝ])=?_:6|hn|V҉8ڗo Wj夀nw!ʗK~=#?EDݲ??~Ŋ|/S:} t z??8o V\vq ׇ]GRhW}:sϏq] 9 3 %$W5 c5Pkn[:[r%P/=O[侯ӸޗL ihfnS֟9h|5e=fC{ y!)[]D!LI/XtW g_#.-/Ӹ] MOzn?ۄp=m맏@Hr3ЏMZF>(L:Ys}5 l:>ԸЭ(ίmZ,[Vc|9oDR/G }w;(Ϥ_4OS]@T4s2(?l}~M"dj{u/K"˽q uG1=N?9Ac~w>n*ӈ۴?!ω䶡t;8i"heO=ǀVrRfC;*±ާ]WnMh.By0 J֔((q^˙-7'O(ЗR1bŢ,Һ9/p$%|q?-ܨjDYZYj\5w7.ħ&R#ȰcLr]}„ :/ B)j5M ;?B1K Dz?H"w:H-=A֫5TA۵\x$ۗ{ 5|߷RA3e}BZl)^7j' !%sFoc"99o8ǝ(Rs / (DύΟ~y~G +N$˨Mr/\T-o)猡8xybz3KyQ}fN@YRCH)%i]O[_䬚uWB̞3Es:#mP >o+ U_@ ILNQyL7C@Wq_wKZV{?9:{-qDIan+y"(:7vH+ _tr]n>Zޯ}@|N oC G@wf?DZxxxxx(\p&3Q*@$|jftDdܰFX73-4>0Sq{SꥺJN} 5m potZ,PGIX >qEfLo#05Br#)pR[/腍,0We[(ZcY]a@1BW._ħ+-2v>Z| `dxwgQManzJVgP*Z \>'O'h˙Mi& ǧݓ}(8j;r%`u+VU ,`*0e^A|/D?T  >ޮL'[f?hŻ?]ڴϨR5U"DDrGgkW#11'XG\8KsY03[< $]wlƩo@J=@RURQJz|}_׏ Wι=3u'n%82sX[mض*)SNnG@O%DWF}et ^Ӯ ՞jiHx`mǼom49"-/DžC偮Е!\4}/վ%{jy~O_8d3gYMpsC|{ĔѻVf(v11s 7XAoؾGq}d;-tCvev` f1^V~ 0ec,QZx9M2C[$Gݥpz%{6U _]IOb^ݦ4xo ;>}5jR^N⚙\}QɘJv5?;Xcu&&gr8|`ccVNo#pW]M7 F6  It}ԃVp\@Y u{/yk%UZe>ϰF@EـܒHBgo'iZXIlU=ؒ m]U~u$ 3=yu[0k3t޹yy31#@+G9 aU׀xb}`' 'z6ps(ۯ%:d԰ '~8,*݁S7q^ܢ\ZK#TW$u#ku89*1oցeA:{7Эg1`XY}}A5@+3(cLyNJjˀkQ$p0G+uWISnQ=7G]+C̕1xͽ(G>?&e/ŸK4w{6Ck p +'2Hܟl!` ƦxFP>el"V!ݧ6h_4>Qo-B|k%@6/Onx ~fx.%1NI]Տ/,'l~ "As§4] (?Y^҂!~VGٵ>0c4`+a` };`h ϟAoP;,իna@^/JN HN޳'?,Cu-z84Mjmۂ7٘,XSR옜#qqtU=6=?{aC7q:#sOYxjF&njIS@~u*' SoTDpNޛ ,7K?`@N]A݂(m,(9@Eȷ>^ C<~pjvzL$)JM/: \'Vv?*p_2΃2;^xݖh)fRJ.`Efnx<{tтRT<1r8 q*WkPj>6*;ێDr 9e2@N>q+PŢN VD40cC$) L?R$66zOݎ}Fա:~FeX^byfDیi7`wQP~;{֓P7Ǘ ϮB8~)kQ-O,4Z7w?}x`oEaAyO*E7znn׿- ;|}E׬7ϥX4k\:!=#VOvW=}iҕ{"Mu#%`H>+/ áǻ.[~yd&h-69B`2P<2oj~@sg;n](]UII0kiz沤Z&H*c}A>G8w` N6=n8MC8qMTW5 FNZwOluzHl偦.c~- ٨uYޔC@zc=ᆙWkWN\=3{EZ%ȃ2.,u/t{5{6Z{P~56. 
X mB8 17oPǯqOM?\XwGnB=.=~^Păzҗ?JYہd7I H]9 e X,KvJ h9mڸqU0`9Ru(w\~g}*vk^GJ<9n B^}';(<؏s HHz.GSp>pJ fGSp~E@5v5P߳m c@ Q4PwȟJӝV^.+I EU1IKICOS z{wU*?Gv͑G ݞNG"ya}8(.!7 xձcTXى.]Ƽ٦]Gν hhO <r{D;H}}EMUSkZ~NI?C65h< X3jWzsUv>)>e3.̣_?/.OA'{ÛQ} CyAQt=%?O䜵@-^-R (-ނCa@>0ʫ:"@w|ll9< qޏ|`jQ\.~/ݯmոGoG8~c`.OԔ\rJQz_yYQ`眀&ۋa **ra!0/1\轔e@Kˉ츗WBm@h;̮鈠E˗/Q~+Zq_ Ss^;ǫ B\߳LsI\w֥7jhW>Yhbdz^8Ř+S}}z? i@Az`(Oľ̎SܿMjf|y^eW1UIn@{įդ օnH6`}'W< ,WWxO,%|ʃ@~/?Md]VG H|dn;@IySCĻېD`q&^#5.D@k)tnםn]Y|^s:5Nv#>pw?'M RnM~ -Vbg.t X5g4X@e~pԪR@>)jk-zR4Gz+Vq95z;8Kq3-Q}V5WP> pʫ֢7ɞኺ [];!(|d8ߵE}()K>θqu&<-{lPiT4,CQJR PhyK""~qP|7kB6İ?%ޏכdg&/@GvP ʋSuJ:!?·G[qJ pceLhEw_Ҟ.D-B탤8zOV,5,G煙E;Pskg`x^o>ͧJCIbNPouoLG^mVE_l3kใ~%\/nLZA $俑L^((nѕsp!WN!ߝOˁc6>ZnR`F3Nks4S7a10b}ˆϔW$ƫV7a ɢnrf$jxC Uʃ|ԅ.~VUs`ܿqEO&M55 K}"!syv󊞽|58Ł8/. ~xO`DF lzBhf~M~i/_;1?X'H>>vl;KGszљ'iaƚ m-0F86:LV u7c;Xr2pc_q^U`.^G\ʟ3](_dGyy ̒GqJZRXt =n®@4k#ܘOf 䗶 MY!"Axv-!h@yߋ`.on4Ԉ^ #(F9]^ W?/ é}_Vw4rWSp$UdzoqdT ᰄ5R| FrrKi|e-Sкh0X8pE2}oЯ] ԿI@>9cxMbwhe{:;^q-GH(jËox^e/ Dϗ{t=kؓ~ya*w_ڙ3_B^ z?>!Q}ȫ:uM] V~aG,_ ѕ/Ut]qX5~~,&Jh`HuС56_,pi?P"xhl?:q#=W<ϻQ[tn&c{/` z:iJߟ݈wdl _N\oms^/Ďρ!}6 .f֣O/m8kP0Z-}"F[%zy|bi@㙔sJCy揭} 6I׎0`+\'@zKcv;]X~TV$4kj:hjS}{(]2;k;t?:VoAxZEXk#k,Eģie0^ j@'sJGGqsgxYLF /XQ9PߜnRNHݯ+pYNW/샑$sw($vy cYp+c g|}7!3Mh.)Axoؓ5Y\Zâ\K{ARCngvEP;vY[(12NEu 2 N<1J Z@v`cb6!-O-ᰩ_}Ij ;D@!H\1QA%s?uOվ!uet)Z+ "xpmΨۗpu %w; XEa-OW8:W8ti\}cePw }|n?VQ9OovDskT>,/ XςQbZ~x#/گ;wu#;O>E̍{?7+^nxY XU?ސ <(_ t5VEڿ:5zHtUYkik߃XTj<6\g@IwTQsj:qzueWbG }m~+r*HܐE^6S}[:`Aͻ|omz>f>Cy֤D{0Pxͻwl .IIJrKꃕځ)ypO5]<ljT?OG߾~6;bޮ^ݺ3z6s×Y(jӉvm wKsu .8p灀v+t8(oZS/A:f>kZ]O< Er^GYƵ@{z^1 aDEN}ɲ|`>_% ??hݻ[2 KGwiEL=q wb:RuF)`K={+~hv#;& `fNn(Fyg8lOrv@1lr5GyR8i */f# mE@?ymq3`|·?u``Ig5%r`[6XŻρ5sIX_iֻ k75 1ShD&OCt&ayERgPlY_ob7A{0s~oӓUOoD|o+`3[3$!1L-yڂ;>F`A/eQ߂{)~~dM7xns7/t1waThzQ1[`l`ңSY|1Sw $3wOx(U_Zym閙;70`/0MN7%i_uYHʋ,ή7f+ ;]ػ-O 2Q~vTᎾ%Q 9y֭M@h;9n`wXVU`uRL>j2lJoC]ˋhZ4wcҒ`-8[K>MCqbr-`'9/UO}3<ʷf [UY5525;d2e73͟|, k鿝{ I7O;0~3lơDɥ,攺Y}nx8fKQ~QPI^ܯ+2C>@=L6LY\X䘷ܳs)=Oz>`Gdo{}Ҋ!bM)UXf PiIń@T]PHJ|cGL+)< ܎KN<*6e]0*廓o`P;57PIkd= mic.?'PϨ^W~uj0T="T T-BK ;?`Tqpƞj=x]%PZ;K,Rs#ܕT2?'6 dX*zT|qKw%{Gn_%'%OR::ފRZVٸbAG];o]$&8#"?GVZp`FZꟊ؁3ᚂ@sƃP[H:IaU`}C#h5n( YpTn -CXr p\žl#1`gWnƂn6kD(z^S)ֺ2IكہtyFmo@][!ծEh3:gkMI}"sE`/9%Nh~L hS&{}~uw3c6OY -g.' py+$ t)P!}ExFܶΠ_޲BoEw}n:_:{y?;`o7Gqⷺ XX]YضA1ϽƘ zZud]_y~Xi8nE2*|l"I(}EIJ)2(DDҠȊ$BFv%e%23">r9z0DyP_׸j}el=j(!V|-˫c*VKx eFm"_x[R|@Zkd4ЬK֟XY r}w@a!+ݹ?_I+uT ӮN,UƼ+& koD>qvP5䰦0=%^ {̑4܋Mm{&i}ViSTHFyѴ@;Ss}Ȳ?*`(p?_w) s}wH`.DnYd=HI=@>Xy?e܀Y_q#km%m( g?`erWr^2o͋72^ϛ-G^2w8) r ;H}׎60[: b}635竡3 f|Pg.n+~% ۟,J+<`h EcXDW_hjYctDGA;hʋ I⥺^9PuuO;P̓yu +kǯy\խ[oLc߯kpW% ~)Δe6!c3mx>gҖ 5Z| H+HX--raZb#Z9p>^_$0Wewh.^UVEhgۀ*@Y?jݛQGC_[5|g\!|tU#%џ4 f;q}dlp} "w֩$Ի"w5ubꝺ)lWvŹ.*\=[>зCewgwj^\(^\3к-1KZllQ"@/.O\2wS @<֖ kOp)x[/ 2 sfO|ߴT(w]rs>" +Gxws5ڃ aX*J\?bso~9q~hs^|_Բ1,p*u%3ݣggC Ng` tae"dtL=7ԙPUcǙ*E,i;ӾVޫ^/z7>j<+iHQ+9ʟs!#7ѥ/tN ǦdÏ*N|;% Dˌ>ld|g{IyYX[-' s ~vɿ%3}՟~ 8SZig|{}GrC'ҘEoN0l*eløw@op[&0vyL6Z]w5pV..@_ )Wy&q? 
46Y$ߺĽ4 %C 3ke5򁤁_ ?."YȟzN.+'rF` ̾2}\UyDh ׁ7:K6Hͻ{=M̞Vnҙ^bߏ.&t_)9"9]Rg`)%?qf.6mRȧK,Kumetuѐ_qa`?eɿgj?02{a8l5Iw*@&{ZCx额m78j?NK ?m1/ ?9m7r𮘖?B;sunq-:G&jGYSᵂ.ԟsۋa7.!_A3.wmZ@0~s GdzPg=H h&@( #w@|\O ]ߗ '{^9|$Ux{i!谌ɣ8qK1>8y>q6; ZߦnzU]sYCڼs5 k-f 1ع ^ֵw/Qn,ky,OR ӏo~ط@FR)_wzWTgo_6gTQ'TrK #gIe/u[ 1p^|,x2B~>#BF5/d}b4b99E3 ,-7J!pے3 ?~:# 4벿*#L_|p(r*7;aI;D/} X*Ÿ@ft.V8){ -:=rRy[{}v9$XbgMZC(ro=1`ǖe@560G~0}Zex*ps;ࣱ* K, d@> Ĉ@8x5|%2rO:߇T"B 6_:]I8pl&/:۟3(eYONkkZn}0Y@}і;?5 hڏXܞ(ok >3jǬ :=R@qX/]5 π\|z76'~2ue)>d x8 =$H@ 8F|Twp?˼+s0_ yn>t>Q]ރ~tѹjl'/A>{({, ~>=_sx6  ?v^q{h{yʤ_bmG j.M$MܯVp/0];(]+fbLj /D?ط8J&3@T| C#-u&K_9v@+Pn_&.r?oepNo>r'uvPv[u6/*ܮpYwPG =T3`;:RTR(t`:͊ ÃV肹W}{c}u+ LOc#PY#rdwpe5!r7 %qäU\a7n|C@􈫸QPaWi™@Ԇbޒ mGƽ?poRү/&О,Z@kg{̍N1m䑮64 ?3=ȕN˘>VM>>6{j ˁ駅ᄡ g=CW\zU:|3tDIY>!=Wy{\^jO}x?( yvp[O^`~1ѯAFɲn[1]rf0g+54ne;n{0/`a@_wG/m/z'pmw8XEoVK7VLsZvSY'qr (F"^78{ EAzobq=*WT}\oD=7jЗ 櫢˝=sXfeEe6~Dp/Ԣvqc!3 [-I`^~QX$LkF7@?|0< smS`B?ƿvao0kedߝrD]jewyi^MȠG2@g/ՙ?}kvE湜p_/&J@Q`oݠ?ͼu^'t-K4>g̻P fKU?O-xl}."Yʍ@l:J"$_8Ijao&3:;!W m7>?؏G'.z߱3C\<ffHW|_,/}czg<@|+yerWL?7 ?O/Ma^9 @B%rαf]^jc2r;ߋ'}%#o$/Wr;;6VO\KH5"rzO`||o)߅g&AY"Ou"bxe5oAyo/YzyCv>^x/M[)PON=srޓ@]+󁟶4PC~@{')P"dn9tu} ` ù{o=m,[Azga'|2)? 2z1 l|ϧ,.LX Bshk䉤T3P;-ۀP }eeƐ3.klQׇMy3ND7 >jb j=9*4#f|/04 އ}^E1g:?ƗO~g/ptwWޱ=@lZ}OMwY|BY!>WS@Tރ@(/C;ئw \|p%R.x]inUG+xuVt"fXUo J#[TA` 8"(Н%Ya7?r0]' pS`#7h NWxBkpZVl`|GF %e\Hw讛NY\@4{X68g 98s7=E־bl{ڔJ`Et@zO@Ӫmiy ;G@>Z|oҔEE_+煿*[u5Pkn`kۈΖNj=@ǔIbyۃ~~aGV_Q!s ~Yn}k^릸kpXi7A<.M?2 "MNIn4S@b[u=oMp_iS@ [=tρOЗFyjTa_N;xyDZvܖ.F?6u~ڂ9F|`~ Pt #ꀻC0W0_ )ԟSok3m#@W(H5-Rֲ3|Tiy8NewcbgوR0v+oջIcOm9yXiqnw+&%l>}+xr}k< `/J\$@ E1xXdG2Y&v%޲0 S^^@9jrݷgmwL/{3Ll sXcaT)a$MCz%39q伐Muq-|Uޖ6Xa>؃ f2ƀYF-L>:$Et1.0ªO"NOpiL=K10rO$nK.WX; ;mnde܄qMˁ싖mzdg߮1x|{go7o7ۧ_jbd&^~VH..#p~ NZ>-}X[ZGqjr_;.'VZ. tE*[^_t{fbohPy( +RKN(WcN8;̓Re燽{Sj3>:oo2GQ,dT8"^A7[] + ̘Q pr7\Ae : o@{&$)7HoL)o;G NQ.^+Kuz/ɞk^=yzٔQ߁F7?떷等z4xrO툅$ꗶш,P]:m*Vl@^xh+!-} XY.s>LS+ZGڵ}uZg/4ϥ=@O:B~^U5u#7?%9\zlog-Qcjڒ&~ W>%~9M975)~Qqi{VtA9AUW|X{#}@/;= hrtqzA_'Ѝ]ҶaݯuҁԖľJsnqtfO>A޼W%=w>gKǿ T+ 䋺JNdM:CkN ruLۡrYg圁!R4LƾMboji{S:uy uDFniԮ )SqFCɎ!>vH57_ Xo|> ;S}'.Hjt=-Dݑf܇`/1fi*'7y0E_;cD pWY|-pVOV{3+5%?37 3h ʞd;yߔzfC T30ˍ|'=z[S֨_2ӛc֬"}y㯘x а"SuY@nqc赩,sb׺nO@Zky ߳۽[I92e }U7+@=I8h<-+A:;0Vc|:NOqtmbdy}s7]zk,P8 O旓L79Ԑޫ.8zE׏z[F\SPGrO}TߛY>b?AfԁB<iotea/c35D]bNFMQݡa MQKn8+z$1_=:rg3ϱ?KW.n2QG>ى;d}]BQ7PNPR}:07>QdVvU*:@"h"W)k"i(O -)-U0:jWk!-ϼ%B~[; bT8.&Cg9b,0J/@z씭D9ͣ|mŷm8!r$r7O\us"z@FtL*ڱbz75ePU?'QJ>wY5|_E/Wbn\QpTGAI`>y8)lk_À1Y;.wӢ]miqcjՕ "\~,r7T"G230/fQ_)X톋0Ue֊_Q:)nZq+!&MU{ ng!k^kʏ94͑ O/ k|3LW·"ȵߕ{wn9p^Mξ:m~i5`<X ?EFYgӈy&^Z<cTV^#Kأo2ܫp-?[CRX ktٯB?h5嵧,q P}΍^^ NjV@zv"Ofz0 -Ԥ # >_ [D߯%-]V?12x+8gGza&+i50K}G&qOO^ĮCd)YfnyE ^71:6F˜;ƵY;}#rri$aQi@@igrwI㡺%=}^g,I`Wj*&Cv`L.X4hI[Xto?%@FG+K\[5o9H_ 9,=G;]73!an MBp2]A w||- 3 \BSymkߒ4J EX*}'ѺLbV~LZ=]rHY]IcZ^O?9:~9JDr'Oޯ_6@^~!}Y-UN?ebgL Ma[e76#nI@]eY`_*]*6\"kǼvuq8S?A~Nn|K/nFjv[]Qܟqܮo.6_(V4g_̀tGڋ@3vp'OV߶j|]*aז* #^z"7Xk)~>>W̵" ߙ9>kbJa^4lʏH:1"P*s"_V.pg]̑ 4Eqۿo>- ږ@TItFá@{Y&uysv+x~ྐu/Bޱy> @]UI. o򒻙v^iq+\݇L^zw-+Z~i3︗*H6IJp#ԛg;en(F93PY}^Pg7x.A]6-m"  9w89rüù 8;w[yփho_{.V}QZcmbՆ1٢ZWGXۃ924p:c@ʅ%U c~ox(crޔ_pw7?Yj򤺠\3pK]k}}ډu>;g9 곷"7ח.wK֮bV,g!r\u |{?[۔+$Rggہq0WQf9ˡuܥ-܁{pv)rY) :>=oĸglEp%@^L6>=TCTLl>pi\onΘP敭{^Om嶞m3@>ґ:O6Eȫ.Ǥ{qC^Ļ^;; %3/muf;0o {9ҝU %x\5z5@ 89.TaQu 'eyG-O?ҾB;W}}G;H_8RS)] Aq}Z3u` l{-|HQydku+vCɰL|㍯ 16ձjglP5@ͨʗ?zaS,jMuHsrV)1gzA>Jpr:0tWES**Wrsd8r^bpO$QOpwo]qjѺy:RP ·M0>-09W)!^~[@'ײGü>;ρ+hD2{5b7'jpc>ۛWfWGLD}`EJĠBK0 : 5 88ls!:Awkgp5g%MmD7xc=|F @֭ŀj:3U6G6g?GS+w? 
wbe}uG|𖫝@'0s\) lK4w'/Bry-ʕ)`6~W0~dU]u'='S s3?@iO!m6^e'U}򨣉dG|aH²j?pGqN>\:Vvߎ:yB}@M4 A+{= =/ޟ^{s1Ro ,o5ws 們ɨƀ虇[/c!a]ˁS4A_6&rSÄ#x赧gmPt5(/9!XX$Q#uG?wPA'y}!wߩsA7]}+';1XJы/GWhJX{`*?-Beyzw; ف9V0e9p?"/r:_6G)RPu@^\`䪱;̎hU1x`y 8*!):C]Njb Cr(),Fe1c?|:/TPAI{&eo#xZ{U2N' LmjE]}bU4F0P ٻeqΎoXpZ~Q։`) vߞ0)#{&{D/Q@yZ XŶ,/%"@Wn;v m^+[#e #ݗ΋^,Vt'X꟝#xuGoe]) Mg{U@}6B/yG@;"?sr<~G@ +l<_x*D.[v>*'Ήa,\t+0,Y?۸Hygrնz*-o`8Y X\Yp 0&1]QOs{~ywCxo$18 'OI"Ͻ^m :yk>x .ma<\douCkpM&37|nK hy·o<+F@}JIʟ)?ӎǓKl(t}ܴ}zϤm*z};-ׄ=gi-E[MܐDͯ|@'u<ޡ?]=bg%D͔$Bzce }.)JӺ gOjiu HJ K=^ .GW` wl'T}JeͮɆ@˳t s7/AΧ@xy-|>kOC8009)*sAZ|ߪ< .$--wyVBn-O0i琓+Vy#o"?L ?4iN=ʁ5>P OoFۿ>43>ۣRiB곹a[2*(E }~8OOnƧI?u%X;bBr -(Uxݛ)Bw0- T7okYaݽwY [5YV+_ \JGL'[bC`?wWv5%_NX ˫Mb_iu ƚꚊqG#܋QCjqvjJx;5i>LwU 􊣏,ÀWvĒ.j:i0p4#d۳sY` 6?C`iɊ ہUW@TlCu/TW.U3Fl[ 䛜N@(ed@oa@]!9} U^@$m}Hs"pŽת7x'{+$#O~Sunֻ5̹KBs|w?R@,kM+m`tOMC v;tźu^=5ݮΒ{9G?!,~:~: h ͦǀ~xGjȟ$4M^w)|:l(;9b7#W ԟE6չuWe<怑-{^ͷ)96t5zo#83laazN_uƹTy풇]D+9,>\*߅KJ\r鼔cOs蘃χz.ɧb>OHrjѼ, n8R\ݺ?nN `9_u[5Fєg`8H? #tw*d>-Y;t?w@겥 [ yĢΥ;(f=wr c{i{XZ1+oHA-rNgc@mKxXo\_~\tٱ7][79 _> +<,mWqX9%G{K~nHƫ@Z:V䨶 p;klvX=tO̿mI5@qXW}k* [ZgHN_@hu-BY iRI} vRMh`{]Q7ǾNʝ_I+^df;ZfWY#uJ:srE˖9Qr'N|e>MU\E[> ȣ+x~yOx.vsg*B^X(Mc+@[&8f=,\ SK@t4zGAqٿ8[okLVve6ZMn@Ϸ|?ߓT =i7$ej'쏪") X;KwK,Y^`NnR{bf}9 %>bsIVٿar [>SRG4{ TK5ͻSJ~pb 5#߼\fTL 1uM jWeȖ vC 켩\CZ jpANߟ{-^[ZK\iϹ(պ jN9*˅޵j]@Bv '~`Ǣ@li P\Ko8e(ƼWQ'_;L*1{Ⱥe #1U_A]eHɱz? TdnyyNNk SOFr[qVebX`O3+f҂v6o C@+S}F4V ?|Nɑ&nm1 u?!=4:y])wo\^ڱ7~l7=fB #Ϻ|+{̅$?G?i $Y JGڥ7;[I6mYѳ4UIUG`o:WSđ:~߶1KnXs{,Hs_7ƹ-٘;:-\O5}E0*y.%4r7?C_cׁ@,6|u#1 FlK5jeUGZfi4` Y͎%T`ܒ(+_xa!vu>ti\eAOh]wTʐ~uSY]Ar}b8(;(OfQX,>{@7l`_.| hkcep٭EW{+zsq&i 7rLD/||c?aq˝?MX75Kv99@ܙr#u훝~NOH 9Mvp %^baZw LhOh26͎uC]]]_Nfʴ7ȝ#|}Z(5n̠ls!9N7D_Z{_%@<|x(OYNuϲ Ϻآ!]#_\=9OβbgOK# :Y8Vv[8Q5f*k|SPub Wg@&z^S}ݡcZ ys٭ '~rd9ꬅU.:)'8oM>C?݋>|`r<h 7M )Z~)t7"7lqKnGL.wA >Ϳzӵ o*VdΗXљTGg_)>cbUvY쏄մǨW_B͘=T v"7w=(q\ofiͺA;+a7#imitY:]$E{%&c3|Y9tЍ@ի+*էTo8rG ~ 4$Np-~OLPX6]>DfQ227`fdGs}r`} 1}⼥="(JUzo2#)Koۏ"!^`Myv'~ΪgTA!w䩩zϛ"mT}fg>oF=;o]DkR @r_jA_;}M8삄M<-px@kl&h䨒ip1 hʺ"X8o ?'g^Ecgdb=/@;t0uɸ2sdFM n1-)J~+ !ǝݤ-y$@=/x>r*UTVaq-_ПK=]L=?aOhIbSf$ }̍VF·;mď"6YT5G CrUtwGͶA_B/ok%^y̏78;K rW,G2c6vstއ;\OGrJ4d^)3MzX #ho,vi{\#]ZZ4oẃvgDc pj#䑻 u Tm?IbvmYbŏmEF]3¼7qk_hV)6S쯥t¾e۪b)<̀ ѣh, }V|qmж+n0T?dc&%X)hW5Op ǜ:MzCq!Y}oqb 6%j`o bK|Oz~)>}hw % {CzXS̫5wW_$|VkB+%|/vƺ}zu0?\@WW.޿}d%p44;|C^5R&}YC;Lw:Tlzt5u.t{h>{jWLv ǻ9'9MƬ_ L/D OCB`^EE[-O{\`[:W6-8k|w|80'Dz1[Lh5g 5ڏnqOQtnɘ]ȑG2G% @*,oۊ{3̗@ݧ>y߷?\?7$eXKܪg3p;Omݶo0ݮbS[PyzցEV jbt̕~p??̭LH, fNrZ:et+&w_~~Dp7!`9D'[}4QIe{*z`ԝ+]{)JL&r;kum1.KH.\uqȓAح@vH9+OZc.ǷYKfP 4ِޔ:γ=@d̿AJO@c?賞׋y%h 2mY9@;r;ꥴ^@(^v&~Ns(_ 䏮GCPG?JoDn H/Q*orU KGvȁ*+)G췽{o͘7% jζmW@ `VF%@,}@ {=T.:EdB)'n uίد]k8> bGE1w?iP윶rM;-[䲺i?͘Í ̜Cۼ. 'ȢSqs_;_㱼ȷ!CcC1 ZTK;W¯fԟʏ~a@[]E1oC#v]_I>] 9|5?- p[@uE}4qΡX)?u[ ALQ[KL軿,D^"mݥoWkd~ \zrNE@tzC`!K r'!,ulM,0>Vm-AlYbѯ # :i@N?w0Hc:fŶcX x1Q/;5a^qVI5=䵣]tsXB6oX|k"{؟*r˼'{dؠ^>Q7=_eЅקӛcޏ|]*0sjoӫ\}<@t R~ֵ\r}C;|#\5?JHr*r[\ᘓU'/x@.N | ܃$7֑ b]}޿i!_ťK{Ni+Z/9-g 梱Ub~ciu/p Uf↋h+:?vWvͪ;o?_cX/o$:'A Eg_m\%yqAղ XO8,L'JCxun5ТRhM$XÆ,sрNy7nNgHt,\{M]VE箕unᘭ>kyg9rDNoDz<;_*_߷uk_)/dH2!`h2w?3KD9P)bn PGqɷuޮ)xU O|UX#?v|h?ؖd| -\hSP Em\.U5|2ho) z4ߎ'K-{#g G0}-jҁN}6hnOnB>2y_U:~Zg`OtKڛ~ hLȍw3px|a El}۱lBߨ{:\1?wͲRQM=}_&+{9`H*i,U\'V,cwS cJ?cq Ж|W2 oz H-Ӷk-E.Zyc'?!fl~%<#_,uaEMW9 ̜)̵4} v٦7^/B=}u 0[ dEiˇ_sڏΏVr9-#hKΎCʕX, zK8D 4 F<ۋu5|˱@Z↻qGo+ne#C΁eȱwo|'Д>.ê36Nb9]y JWxFfw'tOoP>/L眼}ہѽlg YG 1>>u@@g}NBNhܟ[EhRM7-cޕ*`Nm|kOf-}=㼺RCN=૷]Rz|R_l @+>XEJPpWuK֘LۅsXphVQ}`{p3k~2yy>HMԒ9஗N؈7vP}H1ݚطe9X;x_}yz/NJdI5c=41n/=H O9.9+2xenw,~}lE=9 8}Q y3\%U۪nܟ{F+_anP. 
WUʣ1qmme`J5LKi@?mrtO;@cR{KoP6B\X Ğ]Wig @n*1;&U=:S[S'^<-|+yaIagu'|pF-o9*;QNOIyJ*x`)p~?9Z@zH7<‹r WGz'-u_T#UN珁+Ӯ?Ӕ ec.4(juS}cP7M?2"σ X$>Oic]؋`RO? /ʀY~]0؍[\RN X5~KH\}00L?IF΋E'Ѷrσ@` ܰ c~Qo1~`=RHvj$mDJnc9J6$Ԥw.u~+m<.@Ŵg޻Ħ$m mNKfu+sn`Znx{0&#%O$|C'`5=LZ3U|@oq˛FGwT?`^J7#'ᛛԶ r[Ja%s>,!2$prrȃ'Ś;$|"AΣp"ůտ:E*xle`(˹~O 5@j? #?үhg//G.Ųˍ;V4Rl@k.~@9sK%H}fgþͻX1}bMcB͝y׽U3n~\;2a`6S5A-⟺/oLvM.О>:!!@xr;P=HXs P9e˛}bإQ6] [8/7I@Y8Xu]\!ݨ[iT]S|q禝㖺9&EϑN`n: _]mv|10;܍[.ϚBbb@ 4d-}JX UO)dzgdW[O~u%=Ҹ;P_xe?\x[{Bӧ3֨ȥNiy%> `蒂s`P?$!` 7=-5bK ln\Njq@1q5֠)TY i&@r&%nVR9:i3 o\}keUj1_$ba|v|W~F?K|?ՎHt8OYt۹IJI| LS{v6Ψ`n?5jGO`Q*Lsૌ#<7Sy L!ƱbܷVXc > Br\|J ÓBZPO,>@}yss |ʳqRT(Ti3+q̭wkpAھs \{0?1C5X̖>PTɗ:Jsfځc| dy))Byg D:1` WE[f&>>`jQ+gI5-@ HyB >a^sq<_{)?=3Qo-gMotsKIw} .VEb".-7iO'^X:t-WY. 0Ή s#&в[A=@[ִb(m[~@?}_}Aj'd d۾m;y!>nb!:p7'Jļ`l ̮SS[dү@xDf=1/kWأk$(@4|W#uAr:j}Xߊ/"џ}~1ތ: %3ihw4,摣 26hpjpƴ.׏fwZE 8]D.5^\@XwN9Db6nG?Q# J[6UnIL0>ʆ#F_Sk_@d9o3plA ~<'19Ύ) OލStA}vڦ1Fֿ_Hj \qaRը%ɟ0 df㆓+1d%mC]n5wm] g]ȟw~n 4ժߨvK}u,Pgέ7.-/|J:3*6/I@^| s1gׯv O?x^C^;7yjkwɸ6xeO,T[R}u;_1ON^ٟø{):$Y$ |bv5`Z<%J+^XZ@nUS'7;{_q ~~%ˇ懲X߆Bdi?D`e0p2nvՄ`,0<@i8ۆy%{ˬJ7,DZPGfU9׍O4=&~>$lÿlogw-z8w=D}Z2xgRV-~81 k_n,`1 be"s>^٘UdUƩU Z;ŁtO9#=myGkTtQ;@: 9s 3BOT-:# IMlmR.vлO'|rW)ѾL+@>z>p.e5cj<1C\ǫk\;i{6Sn|L11yϓ& BoVWoSZs9o[uSFC`]+%Xb/nG20tǁJw:V 4݇AVH%#-ty&`z;v9 3a6YǮbk:s0J?unKj @Svd"RZJW_Yp_u SiρFX[F:sܦ?|g @_<.hbIsrýP/ j~1 V?D}ajfpފ̉;mا6v#n/A]:6&Ǟ|~} ~ =:rqU\fuƞV;?3 ?a (+sL}ya`puYyCfK~Z\n::,TL?֘ P~E9&i aor\YE ^ d\gϝpWԵ ,<^j_,xQrY;@>9>\9z>ygFy_XS`xCE}_RV5\@ݹ_}2qjJI`kLBwX reRǜ7돕F }d'{rTJ[} _PO b}$Zj=`S1)ݓB;P׬Bvq]|l)N;*9w W3h&f~`$F5=@_]`C!yﺻo-<8o_HѪcB0{}{RF& )/gVo]Wkf7{%?DaED! %lN"O+dP8p>]M:  NQ?9 [T5T#OY_XKx_1rb]y\ոΜ<&]Oc^ȼ -f_\ۈy%Ecqom rZ@P}|}8~K`Y= <䍋сzb66r /lR|f5]Wu t'[6P/J*u؆7jKtp~쟼p݅ר*[yp1ב9/QܽR1 | dSG,?l,?=c 2]Hmc$I+B8}HX[|ï1ew}Xw竒@Z-4C j?ǣoIJ7+ŝ/؎4aU?| ?Ь?Jqk]w4cs/iz>+BbN&mӞ:?^1"Z7.b@%S2 1AkZ`<G5ug#M|Jg0,8]tGoQ`: K>7ce{`8W`56^ܹf+=컣6pF,A.R}ْu-czՐ}0^ zSw|AVpy,l̯K60/ǸՕ$iwѥ0xonJ@JR`^OKwr@,ȇ; G/\'Rԣ$#/<,Zf헵= j4^na=3$~-Σs=U|n rM,GX]aW:՟u߀.RE+ (7?7N4l@+RA[ DN"Hg& >"ZT}ٮqJb eUȅs{8D]'2ZS&˔URw"ӰsFhMpxኇ"Q dV!FSu n!qu@ߴ mekLQzX o9eߍh=`zL:o},ХhuoxS<⣚ \7 %|@n{LK4r&ڲ9\W#4XwT3c>b"C>JUZ\?ck I+9}Mh uemƙw@]=*e+!sI 0Ə5 MDZPu~=p>|:@ʨSevD} _9FM7/8,m|eA+{+ꪀ9|0D)D"sr„n`<^G\ ʃwRf* yp:n+(|KFYV+ԕݱPg*mpm1o'ORJxkcI&Q=u u(c#Oب 6n8nݿiscʋi۰Nc\MS:Ҥ{βvHJbv`>aXwHЪ`?v_Whhm^[gnŊC@W Ԟ۞g2 )X% ߍI͔Њ^{7wɫv>#^Qz#$ZP;F2,Emya@٩w#8?ra;zPy]6?{WEް!4P'edA]_HsV 2O 3,p'JKYeg8̊qeq oV,9ݢȡlk8J~ w yb[T#sOŘckOW_C-U"Kdb*j%H{ӁdZSᐝ+殯ˁ&4,$( =~:upԇon .(V<[eQ?=KҘj_|U==BHU;'?{0ntgQ+~Q9$~'XcY*.XK'>ܧõCBX{G,bG+f Xj5$b~ɑ0 5n8am 7#j\E.jÜF," fCTʮY [J=_ߧZ2pcr>Lr@lc\8m2O.} _?J z9M D5ltJ ǚx.X¹MDhy?2p8am\R[bn|e}jB6rPEq2 vZටFDsd0~-zy 96y ܿ5kbPT3# *pDĹƀ*F §>r rV`ܳ/VrП2>ܝȬҔy%X9w`VrO n?hoex$o Nyyw'0/fY`eY?yﺰ+G D9qaOD=Uոh_z ?Y)tZ}Es(WHgWP霅rhЁGo0 z\@꜎X^p]u@X%ސ:6,o[Ev8~ cbO-< Ӗ9^@/2!2$--Ԏl^M- !܊jqsQV`7V+ yK@xy;Xdx>ؼ:ZX~z8ރ05[=L%;rq>Z6G~utn{:EWMxs20N|@rlr ;{Th?6~)E+'ꀀ}/n@#rZ15olz7bP_=F>֒|?{~ܖgGtNt~lxc9\#ƇOB-8Qǔ@,@ _hu|PQC]̗٫ZcWn?]܋,RZ9\HcS}`7>),CnW( CjV h{x׽JRZ䈋%kcJʗŦK [S̿0QFt LK8[\p}앓zy6mtPvEԩ❳.&ș}bQt=N/ y2ʵI@lm^~U"ս90+9rtm0Te8 t:}ƵиZӾ_z(c?+e_wK~\KCKpAo|rU%gߑؿ#^ph?Knx (\C2y`g7|aQ0_F{^Li@oS> t%< 퇛P x`.8~rx d#E\J>zwU` -7ֈ` *B\goKz>pxhB7jq_KfXrŝ/AG3<8r/}t#qXYnHfr0whv9"7[5|"v2}D4L4z sc#Ok/߇vУ福نOU|I1AWX*מ I̤2#RBvH$+4HRJFH"EiH}z:vsnq1{m(מB6KLqz !'/=)C}(Rт' voKkm 5b2:}.h~}tGR8+MC{~g2n`Tii;Hj̯D~+&m@toEmJ[V7hg߈Ew~Pq[@V /LL:uLJa𒮵![;ܪ9s?Nܹ8_zX퇃ce8qOo{̳]# 9=O ס~?v^Gm߲UA5=V;]ATB;Ԥi Lmx`݊퍐;@h*]< S֯܀.(& VʏATJo*=Zle69pF-7l+C5[o#5i慆0T'߲ϼڭxTyNwO3WrmɎ/!ݽL ꟼ^a5磁ױHω,%>ts=^L,'UgW.To+? }⿂x!J|?<-sY} "~u}q"  Ԋ>ӨNZts'}UW&DnT! 
eUz%@_%`M9]vʻE' PjUڏڀj/`] c83RxcIJ)Y+y b%sP~*M|v} );Ngiz1ҡ N~(PεZ/ N?ң3{jmK\A__pۯfFqgA:?:FGPSci~ >|}M͠YZp踍s1fh>"JDp׍ߝQFޱs˫4xY/d}G* ׺>9ls]Ss:5nǴ!]tCu8c5y$"ȎOmQj*,AIM<R˨NK\k|TNg^#T! heȇ3v?P[ :\EƏN]J'\^?<׮lx+IvMT/qT?h07ĵ*5BjfeP[yWK6oZ .@>u4H)#}Z :z8յwlm#oޥIq@yt‡y{-#?Un0ʯ;G߀S6az\'){ĀYHVeze_Zz;Qm(s4nAżF)`臹UzR=?n^mjcoEn@o|͡Q] ]yda$(9f7-!З ݭ[/=o(G/Q0FxΔD*-e PGy{poU?mx >S(!D[+"Pm{z<Ү@_wŽ(kθt38w:Pb"l<)^ fw':! K@P٭vs7 So't.߬z:ڋWυ!xy ű{!@s(G:AFKE(r/ h@{YsV(NVY9LHu1CT<= Sa|^ˊIUăi`l. Wz8ЧtKNpy\uE&;O< fp1] W!́Fdpm:Osj/]qUJms+wot00 zLCP>9TƁt@Wn R5\*#S])q;{m[n˿ @j귥.;$3 0.jak:gϑק?ZF>gܥ~'}\]hN&O>ؖt(aJ6k:8 L8WKgTv\aѓu{鐉|^Q\9"cfާHsGZ`8l*Cno+yLz3G>B;ׄ!ڀWpþUwVzm#G"VB H {*a*goƦP(< jRdb?OOuI?2˵*CvoQ9,< 8QF o8m1sh3N΄=SQHj喑Q_ۿz8tݕ}~UkkQ/+끋sF_b'ՀA#iaΊFmhq ;o;/=z:,]S{v3o+nl^=aEf$ =+F!yBD'}LM&4PÊz\c~h. ~F+ٸ8[rMH>>>Ws|K8+z=@?$^cct>Di wutM u?iOwWÁ0p˃n9 }P3@8/tGb%;Мas{'m1E.Ir h}rP\3+6ЋtHOC,5fͧTT snm~s!3"sHOtr5ܮh>^Γc̒\fO/<uHQ5ɺfͼ;coB|ԯRvzyS)hܩ]@?P+ 6M6| Js@kȋ1._ml9zqg\_6#6.O V-J%'keu~H;vF6G)hZIJ*- e{&7|p P/q琁SYdmOL7Zj>95H=NΈ3 g,egt"Yy'}$"pf6Kkڇ7Uѹ@T=teZ]mL/M$xHokx=7觇4 ^{Of_5)?G+[K%4j$%o?Ҳ[᳆szZSo}e #y5ʣ[<|MNbD%#ne )=tw$vς-? $au^+/ V 2y<GM܁. f*j/8oz |B@Vsm91o AhTYM/;DZ@Xi_cH~hd{(]ut W >~OM+Y+x-Ҹ0ʴU5Pm-F11 jLg>{K =lwS0^`~,û :[c'\n߯Z_(xS$}V$m-W.Gxu>qݏ -hglJ~,qP>D8vD%P'DAⷢi~F~d=ƿƒlЯi(0;^ٗf_[5yY&_dDe'27LGvxΎV vm/HT"SDr-2OjZeLdiv ,6D|+ҪI 7?i!]PZ߮Csc0r@:0R>65zpmQ}f⤗ ;^E~eG/۶-K)ܗ3fXB)j4h4nF aRGuXauQ]}~7q~[z:(#_>,=׶l*w^A8VĹs2J$9=qm俱WЕ[GZ.#I1ΗJ"`F _Y̸[ Os8흤1˶@Il@$#PѾÂc!w6Z7 pgi&.z4RK4?J1?nX.hL esi]~KPu2 d^nB ţwThmڎpI(024S@{hC]#Vd%ȫ/kt񃿩x <ox{㽪9 (|Ut{ΌͲ%0dm#q#Sc&7sy !~CVl"#->y?胪wf[ng~K w:=Q˾9-sj%+ofjx/\KApd9v OBۮ@4 KE1.}yrFD|/w UXGπ=&h#ߏJyt9d8鎠9Vpb…]Z'Dnd(;[d՝s@]sy,l%rOgM@^Fc *qWv!^^7t-w~iSsDX0=:#Tսo! 36D :|#vfʵN[`]]m  (!?T7>84px`h=s@:6wL= H.@j03,>ݵ,j[t~ZDMnO؍p$2˕&Ұ}@KgbV7j!uS8{+V!z<(pzk-:ZnS@?xF|h k+"$mʳZjp2t5p<] V͛yZ6i[_:jg.AŹ|Y_Wt #|u08jL ᕨn$N䏃L=D|5T@ze ;\vbu*0ukvͣgL<0p#f_QOp3~&*h>~g艎ylLzvs( kRztEz둕$wQ6>Kg ̃יPAcGe,_os''6a+'=LCM;cPBH{Hw_A:mnA g"#>4Ge? ,CNX1%#.J nh 0iż7kw.07ܨ O74Z|ݍ+=@6!W\%wifCޟy :G>m˺l e{wá]SC7H>L6-Ht-xHu (3pn-ẁ8"6 X{͍@t<[mÀjG jr DzdaSr FxU:3us.TW wOˁ|Z>Q^œ, n f~"?`*?(bOT>AYLk!}Q0*xC#ߝ{<0ⶕ fCxio^>wZ 12_M4Z_4X5[USV2Oۣ-hx-Cgra:7@URW7 PY~z'~\;$Dc?Ȕ6EŶh#IU-]}3.M%4_7?lBxj2ͷl@s"Oĭ/!ofo/o8TZo3#(M; tΉ@ϴ\ / $@:C+umUs@:@|j| ymʍ))o癔/߫ C (f BaBVfG8=r S qB;7ҭq w.=Gkvy^G҂|5t+ K؀Ot- O$c?"@vJ+Y #^+ķe(@=jh"DmCiKнH9!>>VJZNCD<$7|S A`>ОY ǘHGM<7W}w*I[@O*rZaOPFyPꣷӣ@3z-vhe:h.y^h۔F $`(z"T?\;80q7WcK:Lސ!I<:?_1jNeP3f3*/+yxWPND_D&;pvemT'mRl;Ą8o2o/]^fe4"3ِHl;߬Bs!`[dG;tтH'"O(蛵V& o>?*9]_.nw5׭sA\aw֞'z;ln,+Wc"'$ ཌྷ |+WSܳܰb`]y?Tx> J `|ѿtp_8ұ-qAo"]X"0$#g#- PT^w%Շ7xMHkW46n t sI 7D: G-]jDVyL{}=mǺ!?G|b[Oo~?1+T\pJMɵq@I:<< =[B[2Sֶks?n/Jbb@z"$6Hs3'踚}/ YߟJ5'm`L4OUWDz좏@W3ˑ#dI^W}ynJ OlXd@J=hn }N | $Z6 c֛lí{jُ,v|?#n*lȌ};43yvsiC (9\@ (wyS(Hyh_'_<K.SZO H_/*e"8s:'u-[g:uVmA|;]?yn)ta/}8-wpe:3~Y5ͫwKg!+O˻D COw =*bxPm)>L7nhΛ{z)-1["Q*]E ] lm6 [d­˅@UP ۝[{5Z.~s9!8aA4ߜe}*G0y}(=oز}QN[r^ osqGW:"蘬ݼw57 O(:[e,н%DyiuS  '0ZZܥxctOo,}Aք=~x;M6@[wcs+tjO߭?E>Ũ6ܛ˃~=c:ˑosjXk33! 
_?@E?m|N>:]bߊ7|R񢱗EA{")^1hs#|kʍ>?yxM`Zz LL5-ҫOL] \Ӻtb+I^6O y|3ҭF-q#8\I ϗwmJA =byI9aG]$sBHUjWK%XG~Ρ݅ L=3N_t g+Zc}Mqwr%@t+rD;wva٦}{Z$B2'GBTl[tӯ̮x]kX=2/~ #=-dc5nyQ 69'9k/U[2O.@O89&hj0eWDv( f5f:0N}v^%a@|QVZR74SzՇ@=θDn4~:p;xGmXLh1kR]嬗C|6.EQԷt/"|R 08(h_pE?j ,]7ft'hB/Zҏ}$;@  #h~9ޝ(R@|{OpJbՓ@Zng>NW;xQ.I=x ETf~AEB^8ND~Vq#G2Dtz/w3o6溁ϾÜ ydzs?@7V$0 Tbj[Xh ![4" %ߐ~ɩ@x(=&5RGEB4?z;mqfCi8q-7_||zAğKP~Ö_L0`;Y鶼j>cEu wz,f#1 `*"Gڅ=>c@:]VW$' QFr9 Y'1,Dhcs|L 5}枠*@V]twʗJζGqE&`^_30.\s* #/pU؀7>x'*ͭ39I.n>s,>ctCQ˱@.Q٨ d[n@u#GBY@h:׶K vL1,UDr  | ͩS`ރ{.y T6` ad UI4|z?W迈?邴$)[xd4@-2Ze'oqGQҊe~@85AL8+ǁpCEeLY;)mkkpպBxF-/E|.3c(۲@xWC4x9΢WCNP;{Elpқu>u-~:f3Њ{~YQD+%RrC)HZ)l^w7MKnq3Wb[\SkCE0*IOxġ9զbL"Sxmk/>F~}O z}E~f_`fy_pH v u%KߏC ??ַ|=RPj ]u"֌$> sD{z>=a {cD> :0׫ʿ ptb10ϨmX|ŝ(4_"i[դ.GW[8ݾmN!$(}~|*G3#`JZdmA{k{2Ȫf م7tD ͯRj k\e@P#$_4 tjۑ`ns -JМt#h[~/̷tO6tnBuQPf͹2֧6*I(GPK'3&[$M}i3Y@EY ݞs 3kÿ^LJ'qL}CߧԪ2_-GZm`jlE:'fHWff~LMH J>ukD֞ Gxnֲ= (|,CӼ80^CsaB+^Ut7EB\vbMkP;'~բ/ -ʖ#l ~U!1Ee7 O`b=#n)`iù}Ӯ@y)O=bڔ^ bc:6Z)qOׂg f9tl9Xض>?{>Sďx#@Ӈh- 2߉)nc@IƩ`x4i;(F$k[tB4P Pn>IxrbP ݪ@0:(3na0qrK<б2u?QTU~˥H2`l3_{\6FO2`P j%#_40V]@68|Zf\/ͼMyw ;D8/),A/ >jXsBz8;W_>[qRHbsY`|P5F@V0j/ĮoƟ:o0(g3H2M!RT[49QWrnQ ;ϵ^@;>{虌f|)3P 0NjRAfLg ?X#~+`xJ'y̦jQu{jjI{ W`xW֯xvqW;bz3068^ 'x s\$Dxmz04 @̷O58H_FSB%99U C%*{ 'Vʥ']-ޛηں,Ϧ/٘tU @T|OݴJ&t_k׼HxZ)Qyb[N=F (sa3/>y}nA _~||wlY_q6wvsppsp0nOqoKa 584p5^yBAQυOLd ۬ϒGkֱlz5LlkeX.~KͰF7LV`htܩF.}Id_xVpt5qvS`tMcTjwMMB k8#ƉFcdMݫ̚LN:ɚZTRkd*aupddgͱV.=jv6,M{rPauZ 'I!emb=(n5.7ĚJ`MuҎ'VJW.kʹOEͱu^=C་k_Y=1&o \cljp/=;zit$i\XrZNஐ>pvi-Gm>N6ZZEw~*=<Î fއ; ~rf\Fk*F7qM-|Ϸ5Zt8kDց'f5~"OظGz5Vw4E-͚Ŀ)j9٩Y߆FYC}C^Nׄ(v~X}fzY*]( }ʮG?^E({u^ )4K'Q#VY4m`-*d͜P{ʞl= sAJ)q.Bmq}o~Im2`?|:ZЬ_5wAa/G6y:\vaͅۆe'skփ'Ł+:٭4* ,\ Ng>e,۵xpe}N4f]inV4԰go}H:bqV{C6>bs0a6{X0h~6c1q}8))1)X\X`p;pkX>VdŚd~9p4?d]p1ffu7{sk$$=k42p { qJ[Pu5:'~Kŏ X aK|3Bm)i5sE'y VÎ#,Vhvv0j2[qgϰ-/XcD[1~._26[x{4)_ǣ;xpMrk +㋴2Tro'_d}[qտa lҾ͗bx(E#6>cO6;.#X F?巫]`MEz֔iqzw+kRǎ+cռCj܏f[~6d[JX:-,Ưo.0_t?փp g1p z<|/Ksb#d|FxV @#d c^E_ +>x3YCqk"U]UL^7 ?WHH>ٸ!gK=GT} GH &ce~|h]!Tiaë煻@CRpf;rtzwf=4LXM5߱9AbBpWŖl^ӲNo7lOOa:bdQ;-ܥ==gb?lsk|l?0&k6Ͽb>/غgla812zPNٱmٸl?L ;}LG`ѩ{MQ sjek$y~:jkdÅ5so) @o[G,O0+7VڜAM*y8Xzp  O~=?֧Ána?1+B|s2Մ?X_O4q 3 q<a>_M-E|VtjIM Z,G+"^MIy>#ׁ}ܣCgrs_{wfπ7I7p_$73壤7v^Ҿ֒"?zku{]ǝݯi-=}_W }⧿/~;gSB߈硗O'Q ;U'u~O~73Ѿ܇8F]1;>7>mwG?ܷ/y}ѓ'ʟ3߾ᤩO=O_{{?z{N|n</;ُnO.og;o=sSG-~bC.x1" '?[qWk?G;mqd>#nj7|-ݰ/Y?NW?q<7^\ k7=grV]g/sD`7?_ wo㗳+'/߻gn6yكkG=*ܿpNlF}ø:n{7WEaӋ:߷?~?v$05۴wys#}{ť WO?O\n'^%7u⧗˨OuGX:GUI=C?vy CKkܷ{ًCx\ևo#M ~3>/#?_so,i.{=#5N?.tп~[w/Ojd ]W8<|~z;?V3׾~Z s6gN=f?kxק[f?s/E̲s?8g葿0<eoڲ}꧗_8ڂN>bG3'M.x]3/|oO5azuN9.[ US~M?v i|#hoy+ʻ󋫍r ɓCb~{?U8 xL^6qfK.s>:w?G?+vZ[-_P?zg ߈1ylo/Olyev{NuyAB =tAS/]P!x?&-7Xu1!<t}|)7]pװ~W>S y[ƆQ#=y"%|O'`4>y_V/t?xύ^ùh7}~f|=B2;}_XwN{~xLy?i3q|?wr/6鵔/E?μ+.s~!?W}g9Owxmoq~ޟg{KXx7蟟~B^x2Ͻ+ۼKo@y6KpogG^%'[}[ȳ|%ޟq9.6'}67d?ućN97p#x< q.e׾k;?j'y|z |͈;|/'j[[1)gz9GqF z^HuD^ys#܆ݼyΖW'.A\{WO=kɽV ?y>­f~RKtnW_?]߁{q<΅x9B <oxFu:㘍?uAw_XqW'?}5"?`_'O?>^zcW~}/Ї+~#?vw3秝| {vaW/މErJ?N}꿇߹}Qkf<7#ت/2l]?/X7Kκ~kgbo?3wnq ~}jow<n?0v_~ ~kEȏ݌snwόmuVﲼWiO@B9e{W?[?8o؝ٳz[ד%<+fJw+\xz/y6;OE> {op_n8| %Gpt8> ;}ᄧpzϛqt3|?~<= 3=?Gxn_aw/Gv{=\o{Ol/f>?!qsAShw|8p~yl@=M'dX>2v6gNq h3_+ػȋ@^psB\ <C:Aҏs_(M>noz~pOBy*?c<^!xM_2~G7MuW/'>yWWj8z(o7_x!}<P&o/p%C:µ' |y#wzCQvcW烐~ov 9x0x<:)Czy\s?pq/vM^7{ϙy_?S@>{ ;ż.|<_{ :d޵/b ZS'`'9&w\mLlv1 y|aG. -zKQG->~S__'&࿃x -!.  
' ą ??/%\78/?k>uS?~z?\~8㾕?\zq/wCn\U}7j=zPm=nB*?}7#>^~O|xzѻ>Gqhā!:0 |6}prG$ɼr|<qWrQoC~Yq5]̳ QL~{=v]NτxWr<#OϑV{ulο)y]7v>!ox 1nqI >gO=n C;<#;w~<_u?k?u>xg%kxY<^;ޥw879 xC∮ȟ3sżEۇ] {p=ߎz?|>o߅8 ];[/^|,į`߀e;Oprx9`7|/_y x# q\?=0<;籯aүE?+COÎYǬu=QY /κQ/8 Co~x>(>z C7'oc9x?~Пv ;~ ~Nø B>#x|~1^h7q_Ϗ_7Ge~B^y_//M8_#WA ~yr 8&J!% # Sɢ>קO_.}<e#.^E< ī۰IAPV#yUW'u/G9! yCq?u <|^OsA[]|A?ϘF_K>a_kmQCᏐv]nE ?du 1^Q<o3/wx2^'qyEſ'=qa #Wy<zxr y/ >z/$P?! TS=?1&̿ސov>ȇŽ$$ډyv؉;'xU?B 'v ׯj@GH%~T?%+{`3NbOe9 {Ƕ5/N]mggtwQ?' YOκe}؟g>?>kz_Kb!/}B=σA'gyקΧ8'q%0Ϸ/3OqWgq5E@ y߇zoe S+A*xq=ȯ(W@}".Ctί-y vFO-_< 98gؙWm/^z!u}WG}{ =nL/r;t簋/e^/~9OϺ%׷{yŹ~!h#~{M=;~੡n'>1Nx ;|t5o\ֿ%yw ^3p}^B>Y]Zϡx[G'vyF>}js^s@ yuWlv  _ aӬ> ?T7=?هvt:qwA(?./8WqG8={9eNe%b_N=|q=8ٿ.qo=A y޹F;#:xW-G%9/8sѿyEqn|zKB><_KhWɷ8YzyO0|WY̌络|XeO*+#!g/_2 ^<_ς/4/!# _-qiOƺ?`XogƼn\̾'E |:?/<?KP~&kӟ'yL'9 gYܨgq~?։@4=9Ggyxxnƹ:O={+qSr31< ڈ@mOCWg}ù3gr< ̀'N|ӮiWzG׹h/p=9Y{~a^)D}]X4˿7?*Q_A ;qdK{snG" }^ g~{pq'N=~8gĹ{?(}'Oy(</'?5]9{z<~ľ!o}9'[@s"+'>^Lj"ΓW^'b=]ùtEs= 2!~8O7ϛՅo4XBu.x. ~b}R~+'yE*pP зFo7B/uqA~}|GZ'C0?fAAo݂8I3}o}a:?`gfhP8!|} q;%-'=6>1SeoO^v. S|=.W\#w+&g Iy 8G8OD ~"/>7jcP_J^,@.kaW|݉xas`!~+o_D| CvE8aסO n@_|3%⍌'"x S|א~p蓋8xcX8~*YGU\{2xY?\CGw?QD~}6o1jxysnRO=BHq;_/a(<߫@B'5ul'=}WɻC&>ۛ3Gp|:Ebǰg:{?Oi^+{=^$<?ہf?(zއvO̧OB{[FVhOPP?q" qR"6?d]#2CN+ŻC >{WN<.-7'E]߲~8Ϲx?RqaG }u'^Awq4y%n?1N~'n7p#y},?֭g-7C?lq^O̸%5EK$~6.s؎CǑy";2qc7 a0gG܊wJ^?ǿ/B~mzf.y8O Cϐ[kȋ~=ϧ\de< x8>`~ ϗϗ?@?I?|3^uecV>G|!̿y6J@~qgo=F^@~5 G/^}\\`g}DxWOYXoyخ8:ݯ |DdO`]h^5އs]~x! /[p3W|⸷fqďXBސ7~S3|~ F=x>puӆ9WC|}Y9G`^ ;nc?f<ށǼ7C` ynwqoxP\_<ۍ~lx~a9/b}}E8}?e!:p.󓐇b*-C^xϺ{z 벏gE8 wݿ881W<ܞg_D7 9ϰ|{/ ̑/&;c?A~s=}`7z)Y|c1s%ou\g_<߇g< AB>] G+5!ݟe*'~j_)8C;yD_CNgYԝx賂:븞zA% x}[eQ?a%?b/|^yגa= ^s}<ۅyx}u?ߨE/.'óC/<>|>[9Wyds\w:ln>/sǓ|(KЗ?7௻~8uy99 |gG~[ k蝼"yߟks}ͽއ:qx?Xd&KCa~ϽbP] d_QԹ0j^:EA8z\~!?? E=)mG+P_xW C>CA/\Ѿm˄?x5o&ͯBsoBgB=6?p1?y\l5y~q\<ܬ/z_%̷o Ч~m9}˹PÇnϘ\>Y;׉G:o_|+zoio8.xl)7~6+kԏ@ !/5ˎ G|3`7%+=Îg%=D]Igb{g+)|Y 7,OA^.A)ǂ.'㐗qk_} %߰>x 1?"o&/?[{#C'ϸ?|@=sd=yux8΃1I{E?ۿ?AV',~Ǯ;w}/ סO?ow?//0/89;q$O<<1mo@0#_+O/ 9nNYGN9sN>+P"q>)"@~'y0쫂z .a^ 4Ѽ>?pS_uء~OOğx;Q{9??W.Mk|hE/o{`9c߆Fy/Jw͹e\EfSM@~ u"^`+yc? ̽>~^lvEw?/8=G7Kxϖ:s#D)-ҬwEWqNo8_-a\q7g+9}ѧ"{ 4:{e|狻ü(PynsΔ+lk+m*/uWn3_?\k~j/=vybe}?m~fG{ ~*T|pv%x^Nj?y$>`Ɖ_u|?ߜ P7@D>q^L?v;&\"ĭ'xPy" !yu_ [q>?s<ƾE`u1'Cu# =/2piz~:>/)yc7yQo B8ڿ8Ա#^> e6oc~u.;q| _ ec9g-鑧C6 B}|/O|(uB܏|Bο|>>V6scFQ3N)^}qi|oΧ'xv?W"x\ &ϧW |w ǟ7qqXrc.=(xgeџ9e>y%38(J+pn/?9ϋWy ݈fsWfb[{Գ1F >q!ăs>,]pısNA=;|ev,Ǔ?kO~~^&. uW=ށ~ 5sFqyTYq9Y9./"5ᓇ x,cy0__cjkET|?F= 4z:_#{yao?e9s|\N3|% AG:>?C={q#_y {V9P9.ӡO۝'բ/9[@ޅ|U̯:g> D.Η7sCM'8ߗui#s=>jeSޯ~f.LP/Aρ_? }yŽ> ~+<﹜3`?l7~osy޸|WEw!>菑$ y_G<r܇p.'#dss}y.sg]apg;~DueY~y]_QOu忏s\f[9}}yخ|'u6zO뜧Ⱥ̟q}~eyy$벏,^K ^~={f?ׯ??!? >s σ3}wyr?|{_C]ۡxAƇBC }Bx8{ Aލr|I?)/wd}sbu }|򾒹>,ϺXAb^H:O5x /?s}Y'z|:9E #F ܞ#n>ruqMs9g|\n~o~_g=9cy{ p6x~?/ ?G[9cF9 C}\N٧sv?}Ȳ~ny|Ns(\ؿ.[CZ#~y'q!|&D,yI+B]"ϱ?7 y_A;įC}&*Јwz/? %Uʸpp`]x-yB?y>g!3>#Cr~w=A? ~Yg9_?:&qxz}58yf`^cns$3_򮗭?\ȻŹCh7f.8Rs!?zr#b>sw_NNӃ|t?&_3L0?qNso9z*S׽y㣜uņ/D}>=Vy椂߀r79Jc!utϏnd'?33Y.7vjK9Ҙ c x2. 
}o~ۇ>}o~ۇ>}o~ۇ>}o~ۇ>}o~ۇ>}o~ۇ>}o~ۇ>}o~ۇ>}o~ۇ>}o~ۇ>}o~ۇ>}o~ۇ>}廿?~ן~~>x 2 robustbase/data/exAM.tab0000644000176200001440000000016610353034413014655 0ustar liggesusers x y 1 3 2 2 4 4 3 5 6 4 7 7 5 7 8 6 5 8 7 4 8 8 5 9 9 6 11 10 8 13 11 8 15 12 14 6 robustbase/data/hbk.rda0000644000176200001440000000134311754203337014576 0ustar liggesusersukQ7D'OERUS<E9r "q7 dgߛy3ߙypwv$IBxԒF.d٣R^{a,GXYXOauRv軲"U9⵪FH+?\~JfS>cͿȾ#?VC7}?%qk5& I|CI -#?R7[݉ا~š;6jyǿzWæ曗}eߓ)7$oInKG+]hKRIs:S~Ȯ]\~qx|~4aTz/MuIs@/Ippr=8|:fGA0]sjsj'qO߁=jGFW'l3ҾÀ3Gt_6ϔ0Bn/'r*$Z|T8HP5y$x m2"TD$6Rrobustbase/data/Animals2.tab0000644000176200001440000000267110353034413015474 0ustar liggesusers "body" "brain" "Mountain beaver" 1.35 8.1 "Cow" 465 423 "Grey wolf" 36.33 119.5 "Goat" 27.66 115 "Guinea pig" 1.04 5.5 "Dipliodocus" 11700 50 "Asian elephant" 2547 4603 "Donkey" 187.1 419 "Horse" 521 655 "Potar monkey" 10 115 "Cat" 3.3 25.6 "Giraffe" 529 680 "Gorilla" 207 406 "Human" 62 1320 "African elephant" 6654 5712 "Triceratops" 9400 70 "Rhesus monkey" 6.8 179 "Kangaroo" 35 56 "Golden hamster" 0.12 1 "Mouse" 0.023 0.4 "Rabbit" 2.5 12.1 "Sheep" 55.5 175 "Jaguar" 100 157 "Chimpanzee" 52.16 440 "Rat" 0.28 1.9 "Brachiosaurus" 87000 154.5 "Mole" 0.122 3 "Pig" 192 180 "Artic fox" 3.385 44.5 "Owl monkey" 0.48 15.5 "Roe deer" 14.83 98.2 "Verbet" 4.19 58 "Chinchilla" 0.425 6.4 "Ground squirrel" 0.101 4 "Artic ground squirrel" 0.92 5.7 "African giant pouched rat" 1 6.6 "Lesser short-tailed shrew" 0.005 0.14 "Star-nosed mole" 0.06 1 "Nine-banded armadillo" 3.5 10.8 "Tree hyrax" 2 12.3 "N.A. opossum" 1.7 6.3 "Big brown bat" 0.023 0.3 "European hedgehog" 0.785 3.5 "Galago" 0.2 5 "Genet" 1.41 17.5 "Grey seal" 85 325 "Rock hyrax-a" 0.75 12.3 "Water opossum" 3.5 3.9 "Yellow-bellied marmot" 4.05 17 "Little brown bat" 0.01 0.25 "Slow loris" 1.4 12.5 "Okapi" 250 490 "Baboon" 10.55 179.5 "Desert hedgehog" 0.55 2.4 "Giant armadillo" 60 81 "Rock hyrax-b" 3.6 21 "Raccoon" 4.288 39.2 "E. American mole" 0.075 1.2 "Musk shrew" 0.048 0.33 "Echidna" 3 25 "Brazilian tapir" 160 169 "Tenrec" 0.9 2.6 "Phalanger" 1.62 11.4 "Tree shrew" 0.104 2.5 "Red fox" 4.235 50.4 robustbase/data/bushfire.rda0000644000176200001440000000075111754203337015643 0ustar liggesusersuOHQfWiB5$kNx.ZwAiD52bAKuHO OI] CtCX~q:|޼ A>RE+ ]J^Ow(+Gp})F WD=c/ "V/{qcx(-8/p#)eu$˕JH~OZׁ {]kXdX3徧 qM=ơ?usץUe||'g{gOs ^iIRnSwTz[.{Ƭn5{wokVф}!ܳUVNuRwZKjǽ[^R0}sŃ?|\[Ɔv4=SlbgNL6XdYgy`ۼj ,hQ=mcja0%{ϿSN8hh 4Ww>}q];e{ͳVnǝi ~TOm2v-Y"i|bewDgh#H'JT~MD[JT\$} ̙/*^HEo0T8W7m춨Cwp|jEXrT}*`k\U52,yF59U2PTTcsʗbIAm`,N!2>x]ʗWhh$kф5RX9qeB g9~tF1(İk>:<+<щ{ĵbI*Z_3WTRғTXyA'1G9hd aFeu8ؼ&3;%TE}HgXEGjZQXv.Tɀ N.ܫcL_e_5q6ZǩQrKӖޡެ_nsw`uTi0X?f~8W?8tX5e݈<^tй׳_yзjDQ& eZY:ejt'd?(<]ƊW- Ck8gQTtxJ@bgOV?J74hyFGa菙9 ͘ qԁX?]Ph߆:96xn]@|ݱMZ)nԓPž֠^UCg#;qϹuox~ @W&(b MmwsH.#:xx}Y1{r=_R6<~E{( H|}#PbW :Ex?}+on~~! x$+2X( ׂ}"O#QGrn< N Ox3\!UHP=.K;?DԟrJ)/ L_x)O(@) E7>Ox9Krʍ;y!95y'rVrCN> !n>|Lg|@9ҟdO qo_(Wt/W/S4~-U4@ y8uowu 57%WhW'ϤyP-Ro2 Srv͉:!uG;Sr~j[+99sO`??{ uAWv~ٿ;;;]7~`蹍WqѶ}m}]r#3robustbase/data/pilot.rda0000644000176200001440000000031711754203337015161 0ustar liggesusers]? AQq (FA$Y I L7ý͟( lQ޼ tzz9nǫ:#"JT%&db=_v"gH$, uQip;n84{袆 h"\tZueNqJ{3sYRT>~CUp;mI)ߌX`r8GzBBO@4 mh\"Iك*͎d?5|Oߊ9+7S_ky꺜g1O_RCgm?V; jט{^5+3WZ`y^ ;7 /LŽPȜAZ {f@$<7ui_uDAT`d{{4oMҹw{:wYS~ ?Է}arobustbase/data/condroz.rda0000644000176200001440000000247311754203337015515 0ustar liggesusers[h\eǏIDEQ "QYD,6{=9B %Xo (RE">R!E#(z $s ߙo$k`x07ד}\b^6Zan9ofWe'+^>!h`RB^אWmcg -i8m6:L=Gp yC 6t롏nExZ Ŕ8Һ]i-+y{iaaOb`GGk2>5"ZoX^Zw ^\k4w9/Jμ%xV=>qkznc! 
zH>N 0;üiY+]'?+4WMz>6|?˞*3I%U0ۻO%|MпBOeߋJ=Dn<x>q>sm8)H֐׾|`/F@nᯅ]K"{'d5>}xym?ӯes;}9M_!5]y8=WOf]'}}u9< k["o yRbuy9o-Xw/^l7)3zzi|:slJ^ќr?]7zG5^!~E߀O=pOE_ù٤n'ü?W_<5:+[Deh9ۜ mK`/RH=֩,_fqD踌ǽԕ{0OC{GyD /ʼ}O<~Gp:/%/O X;$-v8f. ;&\S*+s`qaz+(VQN=.wp#p@U^>H}/S-7'uIh֊f' G;gcm ):iz8^w.8vN)~ǩ쑾&o+||?׉=ru2{:cD^G/λTċUij_TNWoAy70b<Hq790R#>p8w<8E,JV\G"nMaGc OAF "N7k—^`%&p8zc4aݛa߄~ s؏raxq@X>Yx3}ŏ4d,#|:ExC2$o@~+Q~R_cq%;Dhfs!݋xB pQSկ՛Dy2S$L(EOsS:~[R4(cRMJsJy{Tͽ$Tnfq=ybK%+SUmK6*wJuι6 bVC L^DU>~hrG[$-q= @y8`x<%qJFHMB|(م gNP(CHH!\O[s “þX&V;/םw}pjA? ę7D?H:8D~:4rl(INDLЙ266oӛg$sH|9%1#wm;q9}J.W?>V robustbase/data/carrots.rda0000644000176200001440000000054011754203337015505 0ustar liggesusers r0b```b`@& `d`aɉEE% 9 b ib fbN(f UŬP169\P gaDbQb- 60k">:VQ ŀX@|*#m\4Ѵ'R}80590q`V=h`#D3(Ƀ-',5LNpe59'n`ZbrI~]y^bn*L9 ,y&'M)/Í䧧r!2G+%$Q/hEz0AA$Fw66Erobustbase/data/cloud.rda0000644000176200001440000000037311754203337015142 0ustar liggesusers r0b```b`@& `d`aҬ9) `E @ b3A@ ف *り3C@t0 , 8`c 6@*oCiS<yv>s| j5TʷZ d= H-JN+ILO8- ?33DaRKҊf),/׃ |&Ha rrobustbase/data/coleman.rda0000644000176200001440000000120011754203337015440 0ustar liggesusers]T=hSQ~kBcR@ j!i &EźE$"Bn sqX]D| p}sZaJ`#`p#>n a_i֫ k0<#sH > dn!}2 .wOɿ}E} r'RWd<{ 8ב|I2 _s}-sҌqx.|a[Zlg^("t}q2 IbgT$kjc1>~z&gcK97N+-S/3?<:8w2n.#Ni?\ҝ@aΛx7Ҩῦ!<sOxiփS{J#!DD'Q !_zFe]h|8^:>㿇9Qcyԑϱ]@g'AQ*`l#-t¿8=QzP9Xz9V lW8/sє{p6Vꖭjgն ny=K;?9robustbase/data/alcohol.rda0000644000176200001440000000254711754203337015462 0ustar liggesusersV{LSW/Z鄁GPbRs X t } XdsL4L" KAtۘs:{fK7w~;91+mVP(7\lF(O"BQ]h*64X+xj\nkZ顖VPkB}6J-G4ͿZ.Ook̢I3 mBԠ,mi:<5ז?;:WѥMԷS"-v/ڧ=j>O&=t ;=mD-OCesɖe&s¼wC>}U؇<-۠^kD\7`CڜX}~_-À5-Q'3/U$,J_?A"] #5t2 H'_./drՒlyqq~&ӈt;:}5PF= QxFu*Zh>'F;Y~h,_?`=^} `b bUxNjxI(|E?/`䂴:64C=CR.} ޣduIܑ8O?N༄߷'O;gIУxKGe%t.ٞq́XܼԦǘaibywZ=`:9N+􅵽_\ϕM#c7'!o랋,V5 C[kZR /;uMcVjY|܊?w&NbC*b'yZs|ewۿDBE\GڱܘؠٕOAѐl\3|^ {[m߾ȵkVF ټ0`h2 1"r v/.0EL塌&ABGaJMb])%J8> robustbase/data/possumDiv.rda0000644000176200001440000000333011754203337016021 0ustar liggesusersZKsG}豲cBr‡PvUN\#*qI2)n8;?ͬ4kIMh^)~랈ߤqI*Yfӛ˓GdMAQ)jĴ(VWRMFL뀍x%y7BF :b2sܰELxb+s6^¸ھv|;O@ݢuLtT}ct#Zs큁MvGkภ(ϖ1ܞη= fw/-"-żBd@죑$鿫kͱ _Lp؃`']o)c6J(\S,*MXMug ;s>bVeߩk{R5'enAiڗ261ޑB[X|3"xnK5grŘϵՓj g:b^?5ȇ|ѽHȋOzNcGJu/q&~s^{t]ySjGpy{Gmյ=wέWGRJJ 3K0-)7YAT7{듎I'쪮ХE0Ό.|F3 Cww@p&rdgW>DZ:2)cI5 fp7"T}0{sXK;iϱ!KB >6<>Ӿ3B2cq|\W:9";)<)~>CǾ UGuAew8G2'P`yi/7{x:쫎Cc#]bc|!3hjEG ZE[ )lwl{$;N}ꪽ Bl0|.]`-L('"gy[pB?2hTlg2z_Muvt1zFW1Wc76Zxg&E3y8|擁27' ִb#ǞȾ5+">V62`HkOo QZ_4gNy>_$soe)\/Oipv78;}~nPH@Uǿ/Gp '㏣t<?&7oBlvsy=cf #ϻptal==sc(ŏNg:3s:6gG;xH_Cr\C16ڕPxwp>= @ 68^~Xɡ~@!Bw9dJk6ѡIգ9^|]ޯgJSOF _fAq{^;f 4;gww;W^t9!?bwAOIԓ7;WC錕Nb lN}N:<[,DC|bG -|P|F6,^'i8Pstxjv xE-sSa6 TT+&}`ڄ L ?WJbI^Z4[9`6\ c Ґrobustbase/data/kootenay.tab0000644000176200001440000000032510647075756015676 0ustar liggesusers Libby Newgate 1931 27.1 19.7 1932 20.9 18.0 1933 33.4 26.1 1934 77.6 15.7 1935 37.0 26.1 1936 21.6 19.9 1937 17.6 15.7 1938 35.1 27.6 1939 32.6 24.9 1940 26.0 23.4 1941 27.6 23.1 1942 38.7 31.3 1943 27.8 23.8 robustbase/man/0000755000176200001440000000000014124272431013176 5ustar liggesusersrobustbase/man/nlrob-algos.Rd0000644000176200001440000001571413441022761015714 0ustar liggesusers\name{nlrob-algorithms} \alias{nlrob.algorithms} \alias{nlrob.MM} \alias{nlrob.tau} \alias{nlrob.CM} \alias{nlrob.mtl} \title{MM-, Tau-, CM-, and MTL- Estimators for Nonlinear Robust Regression} \description{ \describe{ \item{"MM":}{Compute an MM-estimator for nonlinear robust (constrained) regression.} \item{"tau":}{Compute a Tau-estimator for nonlinear robust (constrained) regression.} \item{"CM":}{Compute a \dQuote{Constrained M} (=: CM) estimator for 
nonlinear robust (constrained) regression.} \item{"MTL":}{Compute a \dQuote{Maximum Trimmed Likelihood} (=: MTL) estimator for nonlinear robust (constrained) regression.} } } \usage{ ## You can *not* call the nlrob(*, method = ) like this ==> see help(nlrob) ## ------- ===== ------------------------------------------ nlrob.MM(formula, data, lower, upper, tol = 1e-06, psi = c("bisquare", "lqq", "optimal", "hampel"), init = c("S", "lts"), ctrl = nlrob.control("MM", psi = psi, init = init, fnscale = NULL, tuning.chi.scale = .psi.conv.cc(psi, .Mchi.tuning.defaults[[psi]]), tuning.psi.M = .psi.conv.cc(psi, .Mpsi.tuning.defaults[[psi]]), optim.control = list(), optArgs = list(...)), ...) nlrob.tau(formula, data, lower, upper, tol = 1e-06, psi = c("bisquare", "optimal"), ctrl = nlrob.control("tau", psi = psi, fnscale = NULL, tuning.chi.scale = NULL, tuning.chi.tau = NULL, optArgs = list(...)), ...) nlrob.CM(formula, data, lower, upper, tol = 1e-06, psi = c("bisquare", "lqq", "welsh", "optimal", "hampel", "ggw"), ctrl = nlrob.control("CM", psi = psi, fnscale = NULL, tuning.chi = NULL, optArgs = list(...)), ...) nlrob.mtl(formula, data, lower, upper, tol = 1e-06, ctrl = nlrob.control("mtl", cutoff = 2.5, optArgs = list(...)), ...) } \arguments{ \item{formula}{nonlinear regression \code{\link{formula}}, using both variable names from \code{data} and parameter names from either \code{lower} or \code{upper}.} \item{data}{data to be used, a \code{\link{data.frame}}} \item{lower, upper}{bounds aka \dQuote{box constraints} for all the parameters, in the case "CM" and "mtl" these must include the error standard deviation as \code{"sigma"}, see \code{\link{nlrob}()} about its \code{\link{names}}, etc. Note that one of these two must be a properly \dQuote{named}, e.g., \code{names(lower)} being a \code{\link{character}} vector of parameter names (used in \code{formula} above). } \item{tol}{numerical convergence tolerance.} \item{psi, init}{see \code{\link{nlrob.control}}.} \item{ctrl}{a \code{\link{list}}, typically the result of a call to \code{\link{nlrob.control}}.} \item{tuning.psi.M}{..}% FIXME \item{optim.control}{..}% FIXME \item{optArgs}{a \code{\link{list}} of optional arguments for optimization, e.g., \code{trace = TRUE}, passed to to the optimizer, which currently must be \code{\link[DEoptimR]{JDEoptim}(.)}.} \item{...}{alternative way to pass the \code{optArgs} above.} } \value{ an \R object of \code{\link{class}} \code{"nlrob."}, basically a list with components %% FIXME } \details{ Copyright 2013, Eduardo L. T. Conceicao. Available under the GPL (>= 2) Currently, all four methods use \code{\link[DEoptimR]{JDEoptim}()} from \CRANpkg{DEoptimR}, which subsamples using \code{\link{sample}()}. From \R version 3.6.0, \code{\link{sample}} depends on \code{\link{RNGkind}(*, sample.kind)}, such that exact reproducibility of results from \R versions 3.5.3 and earlier requires setting \code{\link{RNGversion}("3.5.0")}. In any case, do use \code{\link{set.seed}()} additionally for reproducibility! } \author{ Eduardo L. T. Conceicao; compatibility (to \code{\link{nlrob}}) tweaks and generalizations, inference, by Martin Maechler. } \source{ For \code{"MTL"}: Maronna, Ricardo A., Martin, R. Douglas, and Yohai, Victor J. (2006). \emph{Robust Statistics: Theory and Methods} Wiley, Chichester, p. 133. } \references{ \describe{ \item{"MM":}{ Yohai, V.J. (1987) High breakdown-point and high efficiency robust estimates for regression. \emph{The Annals of Statistics} \bold{15}, 642--656. 
} \item{"tau":}{ Yohai, V.J., and Zamar, R.H. (1988). High breakdown-point estimates of regression by means of the minimization of an efficient scale. \emph{Journal of the American Statistical Association} \bold{83}, 406--413. } \item{"CM":}{ Mendes, B.V.M., and Tyler, D.E. (1996) Constrained M-estimation for regression. In: \emph{Robust Statistics, Data Analysis and Computer Intensive Methods}, Lecture Notes in Statistics 109, Springer, New York, 299--320. %% not yet -- e.g. tuning constants for Welsh: %% Edlund, O. and Ekblom, H. (2005) %% Computing the constrained M-estimates for regression. %% Computational Statistics Data Analysis \bold{49}(1): 19--32. } \item{"MTL":}{ Hadi, Ali S., and Luceno, Alberto (1997). Maximum trimmed likelihood estimators: a unified approach, examples, and algorithms. Computational Statistics & Data Analysis \bold{25}, 251--272. Gervini, Daniel, and Yohai, Victor J. (2002). A class of robust and fully efficient regression estimators. The Annals of Statistics \bold{30}, 583--616. } }%describe } \examples{%% for more, --> ../tests/nlregrob-tst.R DNase1 <- DNase[DNase$Run == 1,] form <- density ~ Asym/(1 + exp(( xmid -log(conc) )/scal )) pnms <- c("Asym", "xmid", "scal") set.seed(47) # as these by default use randomized optimization: fMM <- robustbase:::nlrob.MM(form, data = DNase1, lower = setNames(c(0,0,0), pnms), upper = 3, ## call to nlrob.control to pass 'optim.control': ctrl = nlrob.control("MM", optim.control = list(trace = 1), optArgs = list(trace = TRUE))) ## The same via nlrob() {recommended; same random seed to necessarily give the same}: set.seed(47) gMM <- nlrob(form, data = DNase1, method = "MM", lower = setNames(c(0,0,0), pnms), upper = 3, trace = TRUE) gMM summary(gMM) ## and they are the same {apart from 'call' and 'ctrl' and new stuff in gMM}: ni <- names(fMM); ni <- ni[is.na(match(ni, c("call","ctrl")))] stopifnot(all.equal(fMM[ni], gMM[ni])) \dontshow{ if(doExtras <- robustbase:::doExtras()) { gtau <- nlrob(form, data = DNase1, method = "tau", lower = setNames(c(0,0,0), pnms), upper = 3, trace = TRUE) ## these two have "sigma" also as parameter : psNms <- c(pnms, "sigma") gCM <- nlrob(form, data = DNase1, method = "CM", lower = setNames(c(0,0,0,0), psNms), upper = 3, trace = TRUE) gmtl <- nlrob(form, data = DNase1, method = "mtl", lower = setNames(c(0,0,0,0), psNms), upper = 3, trace = TRUE) stopifnot(identical(sapply(list(gMM, gCM, gmtl), estimethod), c("MM", "CM", "mtl"))) }% doExtras }% dontshow } \keyword{robust} \keyword{regression} \keyword{nonlinear} robustbase/man/lmrob.M.S.Rd0000644000176200001440000000676613710236747015225 0ustar liggesusers\name{lmrob.M.S} \alias{lmrob.M.S} \title{ M-S regression estimators } \description{ Computes an M-S-estimator for linear regression using the \dQuote{M-S} algorithm. } \usage{ lmrob.M.S(x, y, control, mf, split = splitFrame(mf, x, control$split.type)) } \arguments{ \item{x}{numeric matrix (a \code{\link{model.matrix}}) of the predictors.} \item{y}{numeric vector for the response } \item{control}{ list as returned by \code{\link{lmrob.control}}.} \item{mf}{a model frame as returned by \code{\link{model.frame}}.} \item{split}{(optional) list as returned by \code{\link{splitFrame}}.} } \details{ This function is used by \code{\link{lmrob}} and not intended to be used on its own (because an M-S-estimator has too low efficiency \sQuote{on its own}). An M-S estimator is a combination of an S-estimator for the continuous variables and an L1-estimator (i.e. 
an M-estimator with \eqn{\psi(t) = sign(t)}) for the categorical variables. The S-estimator is estimated using a subsampling algorithm. If the model includes interactions between categorical (\code{\link{factor}}) and continuous variables, the subsampling algorithm might fail. In this case, one can choose to assign the interaction to the categorical side of variables rather than to the continuous side. This can be accomplished via the control argument \code{split.type} or by specifying \code{split}, see \code{\link{splitFrame}}. Note that the return status \code{converged} does not refer to the actual convergence status. The algorithm used does not guarantee convergence and thus true convergence is almost never reached. This is, however, not a problem if the estimate is only used as an initial estimate as part of an MM or SMDM estimate. The algorithm sometimes produces the warning message \dQuote{Skipping design matrix equilibration (dgeequ): row ?? is exactly zero.}. This is just an artifact of the algorithm and can be ignored safely. } \value{ A list with components \item{coefficients}{numeric vector (length \eqn{p}) of M-S-regression coefficient estimates.} \item{scale}{the M-S-scale residual estimate} \item{residuals}{numeric vector (length \eqn{n}) of the residuals.} \item{rweights}{numeric vector (length \eqn{n}) of the robustness weights.} \item{control}{the same list as the \code{control} argument.} \item{converged}{Convergence status (always \code{TRUE}), needed for \code{\link{lmrob.fit}}.} } \references{ Maronna, R. A., and Yohai, V. J. (2000). Robust regression with both continuous and categorical predictors. \emph{Journal of Statistical Planning and Inference} \bold{89}, 197--214. } \author{ Manuel Koller } \seealso{ \code{\link{lmrob}}; for a description of the available split types, see \code{\link{splitFrame}}. \code{\link[robust]{lmRob}} in package \CRANpkg{robust} uses a version of the M-S algorithm automatically when the formula contains factors. Our version, however, follows Maronna and Yohai (2000) more closely. 
} \examples{ data(education) education <- within(education, Region <- factor(Region)) flm <- lm(Y ~ Region + X1 + X2 + X3, education) x <- model.matrix(flm) y <- education$Y # == model.response(model.frame(flm)) set.seed(17) f.MS <- lmrob.M.S(x, y, control = lmrob.control(), mf = model.frame(flm)) ## The typical use of the "M-S" estimator -- as initial estimate : fmMS <- lmrob(Y ~ Region + X1 + X2 + X3, education, init = "M-S") } \keyword{ M-S } \keyword{ robust } \keyword{ regression } robustbase/man/summary.lts.Rd0000644000176200001440000000657110607131625015775 0ustar liggesusers\name{summary.lts} \alias{summary.lts} \alias{print.summary.lts} \title{Summary Method for LTS objects} \usage{ \method{summary}{lts}(object, correlation = FALSE, \dots) \method{print}{summary.lts}(x, digits = max(3, getOption("digits") - 3), signif.stars = getOption("show.signif.stars"), \dots) } \arguments{ \item{object}{an object of class \code{"lts"}, usually, a result of a call to \code{\link{ltsReg}}.} \item{correlation}{logical; if \code{TRUE}, the correlation matrix of the estimated parameters is returned and printed.} \item{x}{an object of class \code{"summary.lts"}, usually, a result of a call to \code{summary.lts}.} \item{digits}{the number of significant digits to use when printing.} \item{signif.stars}{logical indicating if \dQuote{significance stars} should be printer, see \code{\link{printCoefmat}}.} \item{\dots}{further arguments passed to or from other methods.} } \description{ \code{summary} method for class \code{"lts"}. } \details{ These functions compute and print summary statistics for weighted least square estimates with weights based on LTS estimates. Therefore the statistics are similar to those for LS but all terms are multiplied by the corresponding weight. Correlations are printed to two decimal places: to see the actual correlations print \code{summary(object)$correlation} directly. } \value{ The function \code{summary.lts} computes and returns a list of summary statistics of the fitted linear model given in \code{object}, using the components of this object (list elements). \item{residuals}{the residuals - a vector like the response \code{y} containing the residuals from the weighted least squares regression.} \item{coefficients}{a \eqn{p \times 4}{p x 4} matrix with columns for the estimated coefficient, its standard error, t-statistic and corresponding (two-sided) p-value. 
} \item{sigma}{the estimated scale of the reweighted residuals \deqn{\hat\sigma^2 = \frac{1}{n-p}\sum_i{R_i^2},}{% sigma^2 = 1/(n-p) Sum(R[i]^2),} where \eqn{R_i}{R[i]} is the \eqn{i}-th residual, \code{residuals[i]}.} \item{df}{degrees of freedom, a 3-vector \eqn{(p, n-p, p*)}, the last being the number of non-aliased coefficients.} \item{fstatistic}{(for models including non-intercept terms) a 3-vector with the value of the F-statistic with its numerator and denominator degrees of freedom.} \item{r.squared}{\eqn{R^2}, the \dQuote{fraction of variance explained by the model}, \deqn{R^2 = 1 - \frac{\sum_i{R_i^2}}{\sum_i(y_i- y^*)^2},}{% R^2 = 1 - Sum(R[i]^2) / Sum((y[i]- y*)^2),} where \eqn{y^*}{y*} is the mean of \eqn{y_i}{y[i]} if there is an intercept and zero otherwise.} \item{adj.r.squared}{the above \eqn{R^2} statistic \dQuote{\emph{adjusted}}, penalizing for higher \eqn{p}.} \item{cov.unscaled}{a \eqn{p \times p}{p x p} matrix of (unscaled) covariances of the \eqn{\hat\beta_j}{coef[j]}, \eqn{j=1, \dots, p}.} \item{correlation}{the correlation matrix corresponding to the above \code{cov.unscaled}, if \code{correlation = TRUE} is specified.} } \seealso{ \code{\link{ltsReg}}; the generic \code{\link{summary}}. } \examples{ data(Animals2) ltsA <- ltsReg(log(brain) ~ log(body), data = Animals2) (slts <- summary(ltsA)) ## non-default options for printing the summary: print(slts, digits = 5, signif.stars = FALSE) } \keyword{regression} \keyword{robust} robustbase/man/summary.glmrob.Rd0000644000176200001440000000475411721663343016463 0ustar liggesusers\name{summary.glmrob} \alias{summary.glmrob} \alias{vcov.glmrob} \alias{print.summary.glmrob} \title{Summarizing Robust Fits of Generalized Linear Models} \description{ The \code{summary} method for class \code{"\link{glmrob}"} summarizes robust fits of (currently only discrete) generalized linear models. } \usage{ \method{summary}{glmrob}(object, correlation = FALSE, symbolic.cor = FALSE, \dots) \method{vcov}{glmrob}(object, \dots) \method{print}{summary.glmrob}(x, digits = max(3, getOption("digits") - 3), symbolic.cor = x$symbolic.cor, signif.stars = getOption("show.signif.stars"), \dots) } \arguments{ \item{object}{an object of class \code{"glmrob"}, usually, a result of a call to \code{\link{glmrob}}.} \item{correlation}{logical; if \code{TRUE}, the correlation matrix of the estimated parameters is returned and printed.} \item{symbolic.cor}{logical. If \code{TRUE}, print the correlations in a symbolic form (see \code{\link{symnum}}) rather than as numbers.} \item{\dots}{further arguments passed to or from other methods.} \item{x}{an object of class \code{"summary.glrob"}.} \item{digits}{the number of digits to use for printing.} \item{signif.stars}{logical indicating if the P-values should be visualized by so called \dQuote{significance stars}.} } \details{ \code{\link{summary.glmrob}} returns an object of \code{\link{class}} \code{"summary.glmrob"}. Its \code{\link{print}()} method tries to be smart about formatting the coefficients, standard errors, etc, and gives \dQuote{significance stars} if \code{signif.stars} is \code{TRUE} (as per default when \code{\link{options}} where not changed). } \value{ The function \code{\link{summary.glmrob}} computes and returns a list of summary statistics of the robustly fitted linear model given in \code{object}. The following elements are in the list: \item{...}{ FIXME } %% MM thinks that summary should possibly just *add* to the original %% object. 
} %%\references{ ~put references to the literature/web site here ~ } \author{Andreas Ruckstuhl} %%\note{ ~~further notes~~ } %% \seealso{\code{\link{glmrob}}; the generic \code{\link{summary}} and also \code{\link[stats]{summary.glm}}. } \examples{ data(epilepsy) Rmod <- glmrob(Ysum ~ Age10 + Base4*Trt, family = poisson, data = epilepsy, method= "Mqle") ss <- summary(Rmod) ss ## calls print.summary.glmrob() str(ss) ## internal STRucture of summary object } \keyword{regression} \keyword{nonlinear} \keyword{robust} robustbase/man/phosphor.Rd0000644000176200001440000000166513312375575015352 0ustar liggesusers\name{phosphor} \alias{phosphor} \docType{data} \title{Phosphorus Content Data} \description{ This dataset investigates the effect from inorganic and organic Phosphorus in the soil upon the phosphorus content of the corn grown in this soil, from Prescott (1975). } \usage{data(phosphor, package="robustbase")} \format{ A data frame with 18 observations on the following 3 variables. \describe{ \item{\code{inorg}}{Inorganic soil Phosphorus} \item{\code{organic}}{Organic soil Phosphorus} \item{\code{plant}}{Plant Phosphorus content} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection.} Wiley, p.156, table 24. } \examples{ data(phosphor) plot(phosphor) summary(lm.phosphor <- lm(plant ~ ., data = phosphor)) summary(lts.phosphor <- ltsReg(plant ~ ., data = phosphor)) phosphor.x <- data.matrix(phosphor[, 1:2]) cPh <- covMcd(phosphor.x) plot(cPh, "dd") } \keyword{datasets} robustbase/man/radarImage.Rd0000644000176200001440000000420313713564014015524 0ustar liggesusers\name{radarImage} \alias{radarImage} \docType{data} \title{Satellite Radar Image Data from near Munich} \description{ The data were supplied by A. Frery. They are a part of a synthetic aperture satellite radar image corresponding to a suburb of Munich. Provided are coordinates and values corresponding to three frequency bands for each of 1573 pixels. } \usage{data(radarImage, package="robustbase")} \format{ A data frame with 1573 observations on the following 5 variables. 
\describe{ \item{\code{X.coord}}{a numeric vector} \item{\code{Y.coord}}{a numeric vector} \item{\code{Band.1}}{a numeric vector} \item{\code{Band.2}}{a numeric vector} \item{\code{Band.3}}{a numeric vector} } } % \details{ % } \source{ The website accompanying the MMY-book: \url{https://www.wiley.com/legacy/wileychi/robust_statistics/} } % \references{ % ~~ possibly secondary sources and usages ~~ % } \examples{ data(radarImage) plot(Y.coord ~ X.coord, data = radarImage) ## The 8 "clear" outliers (see also below) ii8 <- c(1548:1549, 1553:1554, 1565:1566, 1570:1571) outF <- 1+(seq_len(nrow(radarImage)) \%in\% ii8) pairs(radarImage[, 3:5], main = "radarImage (n = 1573)", col = outF, pch=outF) ## Finding outliers ----------------------------------------- set.seed(1) system.time(cc.ri <- covMcd(radarImage))# ~ 0.1 sec ## check for covMcd() consistency: iiO <- as.integer( c(262, 450:451, 480:481, 509, 535, 542, 597, 643, 669, 697, 803:804, 832:834, 862:864, 892, 989, 1123, 1145, 1223:1224, 1232:1233, 1249:1250, 1267, 1303, 1347, 1357, 1375, 1411, 1419:1420, 1443, 1453, 1504, 1510:1512, 1518:1521, 1525:1526, 1543:1544, 1546:1555, 1557:1558, 1561:1562, 1564:1566, 1569:1571, 1573)) length(iiO) # 73 -- other seeds sometimes give 72, rarely 71 "outliers" isO <- cc.ri$mcd.wt == 0 stopifnot(identical(iiO, which(isO)), identical(ii8, which(cc.ri$mah > 100)), length(intersect(cc.ri$best, iiO)) == 0) cc <- c(adjustcolor("black", 0.4), adjustcolor("tomato", 0.8)) pairs(radarImage, main = "radarImage (n = 1573) + Outliers", gap=0, col = cc[1+isO], pch = c(1,8)[1+isO], cex = 0.8) } \keyword{datasets} robustbase/man/splitFrame.Rd0000644000176200001440000000543413626040715015605 0ustar liggesusers\name{splitFrame} \alias{splitFrame} \title{ Split Continuous and Categorical Predictors } \description{ Splits the design matrix into categorical and continuous predictors. Categorical variables are variables that are \code{\link{factor}}s, \code{\link{ordered}} factors, \emph{or} \code{\link{character}}. } \usage{ splitFrame(mf, x = model.matrix(mt, mf), type = c("f","fi", "fii")) } \arguments{ \item{mf}{model frame (as returned by \code{\link{model.frame}}).} \item{x}{(optional) design matrix, defaulting to the derived \code{\link{model.matrix}}.} \item{type}{a character string specifying the split type (see details).} } \details{ Which split type is used can be controlled with the setting \code{split.type} in \code{\link{lmrob.control}}. There are three split types. The only differences between the types are how interactions between categorical and continuous variables are handled. The extra types of splitting can be used to avoid \emph{Too many singular resamples} errors. Type \code{"f"}, the default, assigns only the intercept, categorical and interactions of categorical variables to \code{x1}. Interactions of categorical and continuous variables are assigned to \code{x2}. Type \code{"fi"} assigns also interactions between categorical and continuous variables to \code{x1}. Type \code{"fii"} assigns not only interactions between categorical and continuous variables to \code{x1}, but also the (corresponding) continuous variables themselves. } \value{ A list that includes the following components: \item{x1 }{design matrix containing only categorical variables} \item{x1.idx }{logical vectors of the variables considered categorical in the original design matrix} \item{x2 }{design matrix containing the continuous variables} } \references{ Maronna, R. A., and Yohai, V. J. (2000). 
Robust regression with both continuous and categorical predictors. \emph{Journal of Statistical Planning and Inference} \bold{89}, 197--214. } \author{ Manuel Koller } \seealso{ \code{\link{lmrob.M.S}} } \examples{ data(education) education <- within(education, Region <- factor(Region)) educaCh <- within(education, Region <- as.character(Region)) ## no interactions -- same split for all types: fm1 <- lm(Y ~ Region + X1 + X2 + X3, education) fmC <- lm(Y ~ Region + X1 + X2 + X3, educaCh ) splt <- splitFrame(fm1$model) ; str(splt) splC <- splitFrame(fmC$model) stopifnot(identical(splt, splC)) ## with interactions: fm2 <- lm(Y ~ Region:X1:X2 + X1*X2, education) s1 <- splitFrame(fm2$model, type="f" ) s2 <- splitFrame(fm2$model, type="fi" ) s3 <- splitFrame(fm2$model, type="fii") cbind(s1$x1.idx, s2$x1.idx, s3$x1.idx) rbind(p.x1 = c(ncol(s1$x1), ncol(s2$x1), ncol(s3$x1)), p.x2 = c(ncol(s1$x2), ncol(s2$x2), ncol(s3$x2))) } robustbase/man/steamUse.Rd0000644000176200001440000000622313312375575015267 0ustar liggesusers\name{steamUse} \title{Steam Usage Data (Excerpt)} \alias{steamUse} \docType{data} \encoding{utf8} \description{ The monthly use of steam (\code{Steam}) in a factory may be modeled and described as a function of the operating days per month (\code{Operating.Days}) and mean outside temperature per month (\code{Temperature}). } \usage{data("steamUse", package="robustbase")} \format{ A data frame with 25 observations on the following 9 variables. \describe{ \item{\code{Steam}:}{regression response \eqn{Y}, the pounds of steam used monthly.} \item{\code{fattyAcid}:}{pounds of Real Fatty Acid in storage per month.} \item{\code{glycerine}:}{pounds of crude glycerine made.} \item{\code{wind}:}{average wind velocity in miles per hour (a numeric vector).} \item{\code{days}:}{an integer vector with number of days of that month, i.e., in \eqn{28..31}.} \item{\code{op.days}:}{the number of operating days for the given month (integer).} \item{\code{freeze.d}:}{the number of days below 32 degrees Fahrenheit (\eqn{= 0}\enc{°C}{'C} (C=Celsius) \eqn{=} freezing temperature of water).} \item{\code{temperature}:}{a numeric vector of average outside temperature in Fahrenheit (F).} \item{\code{startups}:}{the number of startups (of production in that month).} } } \details{ No further information is given in Draper and Smith about the place and exact years of the measurements, though some educated guesses should be possible; see the examples. } \source{ Data from Draper and Smith, 1st ed, 1966; appendix A. A version of this has been used in teaching at SfS ETH Zurich, since at least 1996, \url{https://stat.ethz.ch/Teaching/Datasets/NDK/dsteam.dat} The package \CRANpkg{aprean3} contains all data sets from the 3rd edition of Draper and Smith (1998), and this data set with variable names \code{x1 .. x10} (\code{x9} being \code{wind^2}, hence extraneous). } \references{ Draper and Smith (1981) Applied Regression Analysis (2nd ed., p. 
615 ff) } \examples{ \dontrun{ if(require("aprean3")) { # show how 'steamUse' is related to 'dsa01a' stm <- dsa01a names(stm) <- c("Steam", "fattyAcid", "glycerine", "wind", "days", "op.days", "freeze.d", "temperature", "wind.2", "startups") ## prove that wind.2 is wind^2, "traditionally" rounded to 1 digit: stopifnot(all.equal(floor(0.5 + 10*stm[,"wind"]^2)/10, stm[,"wind.2"], tol = 1e-14)) ## hence drop it steamUse <- stm[, names(stm) != "wind.2"] } }% dont data(steamUse) str(steamUse) ## Looking at this, cbind(M=rep_len(month.abb, 25), steamUse[,5:8, drop=FALSE]) ## one will conjecture that these were 25 months, Jan--Jan in a row, ## starting in a leap year (perhaps 1960 ?). plot(steamUse) summary(fm1 <- lmrob(Steam ~ temperature + op.days, data=steamUse)) ## diagnoses 2 outliers: month of July, maybe company-wide summer vacations %% no longer visible: summary(fmF <- lmrob(Steam ~ ., data=steamUse)) ## KS2014 alone seems not robust enough: summary(fm.14 <- lmrob(Steam ~ temperature + op.days, data=steamUse, setting="KS2014")) pairs(Steam ~ temperature+op.days, steamUse) } \keyword{datasets} robustbase/man/covMcd.Rd0000644000176200001440000002654713710054521014714 0ustar liggesusers\name{covMcd} \title{Robust Location and Scatter Estimation via MCD} \alias{covMcd} \alias{print.mcd} \alias{.MCDcons} \alias{.MCDcnp2} \alias{.MCDcnp2.rew} \alias{.MCDsingularityMsg} \alias{.wgtFUN.covMcd} % \concept{High breakdown point} \description{ Compute the Minimum Covariance Determinant (MCD) estimator, a robust multivariate location and scale estimate with a high breakdown point, via the \sQuote{Fast MCD} or \sQuote{Deterministic MCD} (\dQuote{DetMcd}) algorithm. } \usage{ covMcd(x, cor = FALSE, raw.only = FALSE, alpha =, nsamp =, nmini =, kmini =, scalefn =, maxcsteps =, initHsets = NULL, save.hsets = FALSE, names = TRUE, % full.h = save.hsets, seed =, tolSolve =, trace =, use.correction =, wgtFUN =, control = rrcov.control()) } \arguments{ \item{x}{a matrix or data frame.} \item{cor}{should the returned result include a correlation matrix? Default is \code{cor = FALSE}.} \item{raw.only}{should only the \dQuote{raw} estimate be returned, i.e., no (re)weighting step be performed; default is false.} \item{alpha}{numeric parameter controlling the size of the subsets over which the determinant is minimized; roughly \code{alpha*n}, (see \sQuote{Details} below) observations are used for computing the determinant. Allowed values are between 0.5 and 1 and the default is 0.5.} \item{nsamp}{number of subsets used for initial estimates or \code{"best"}, \code{"exact"}, or \code{"deterministic"}. Default is \code{nsamp = 500}. For \code{nsamp = "best"} exhaustive enumeration is done, as long as the number of trials does not exceed 100'000 (\code{= nLarge}). For \code{"exact"}, exhaustive enumeration will be attempted however many samples are needed. In this case a warning message may be displayed saying that the computation can take a very long time. For \code{"deterministic"}, the \emph{deterministic} MCD is computed; as proposed by Hubert et al. (2012) it starts from the \eqn{h} most central observations of \emph{six} (deterministic) estimators. } \item{nmini, kmini}{for \eqn{n \ge 2 \times n_0}{n >= 2 n_0}, \eqn{n_0 := \code{nmini}}, the algorithm splits the data into maximally \code{kmini} (by default 5) subsets, of size approximately, but at least \code{nmini}. When \code{nmini*kmini < n}, the initial search uses only a \emph{subsample} of size \code{nmini*kmini}. %% FIXME? 
-- more accurately ==> ../src/rffastmcd.f The original algorithm had \code{nmini = 300} and \code{kmini = 5} hard coded.} \item{scalefn}{for the deterministic MCD: \code{\link{function}} to compute a robust scale estimate or character string specifying a rule determining such a function. The default, currently \code{"hrv2012"}, uses the recommendation of Hubert, Rousseeuw and Verdonck (2012) who recommend \code{\link{Qn}} for \eqn{n < 1000} and \code{\link{scaleTau2}} for larger n. Alternatively, \code{scalefn = "v2014"}, uses that rule with cutoff \eqn{n = 5000}.} \item{maxcsteps}{maximal number of concentration steps in the deterministic MCD; should not be reached.} \item{initHsets}{NULL or a \eqn{K x h} integer matrix of initial subsets of observations of size \eqn{h} (specified by the indices in \code{1:n}).} \item{save.hsets}{(for deterministic MCD) logical indicating if the initial subsets should be returned as \code{initHsets}.} \item{names}{logical; if true (as by default), several parts of the result have a \code{\link{names}} or \code{\link{dimnames}} respectively, derived from data matrix \code{x}.} \item{seed}{initial seed for random generator, like \code{\link{.Random.seed}}, see \code{\link{rrcov.control}}.} \item{tolSolve}{numeric tolerance to be used for inversion (\code{\link{solve}}) of the covariance matrix in \code{\link{mahalanobis}}.} \item{trace}{logical (or integer) indicating if intermediate results should be printed; defaults to \code{FALSE}; values \eqn{\ge 2}{>= 2} also produce print from the internal (Fortran) code.} \item{use.correction}{ whether to use finite sample correction factors; defaults to \code{TRUE}.} \item{wgtFUN}{a character string or \code{\link{function}}, specifying how the weights for the reweighting step should be computed. Up to April 2013, the only option has been the original proposal in (1999), now specified by \code{wgtFUN = "01.original"} (or via \code{control}). Since \pkg{robustbase} version 0.92-3, Dec.2014, other predefined string options are available, though experimental, see the experimental \code{.wgtFUN.covMcd} object.} \item{control}{a list with estimation options - this includes those above provided in the function specification, see \code{\link{rrcov.control}} for the defaults. If \code{control} is supplied, the parameters from it will be used. If parameters are passed also in the invocation statement, they will override the corresponding elements of the control object.} } \details{ The minimum covariance determinant estimator of location and scatter implemented in \code{covMcd()} is similar to \R function \code{\link[MASS]{cov.mcd}()} in \CRANpkg{MASS}. The MCD method looks for the \eqn{h (> n/2)} (\eqn{h = h(\alpha,n,p) =} \code{\link{h.alpha.n}(alpha,n,p)}) observations (out of \eqn{n}) whose classical covariance matrix has the lowest possible determinant. The raw MCD estimate of location is then the average of these \eqn{h} points, whereas the raw MCD estimate of scatter is their covariance matrix, multiplied by a consistency factor (\code{.MCDcons(p, h/n)}) and (if \code{use.correction} is true) a finite sample correction factor (\code{.MCDcnp2(p, n, alpha)}), to make it consistent at the normal model and unbiased at small samples. Both rescaling factors (consistency and finite sample) are returned in the length-2 vector \code{raw.cnp2}. The implementation of \code{covMcd} uses the Fast MCD algorithm of Rousseeuw and Van Driessen (1999) to approximate the minimum covariance determinant estimator. 
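As a small illustrative sketch (only a sketch; these are exactly the exported helper functions mentioned above, and the values shown are computed, not hard-coded): \preformatted{
  n <- 100 ; p <- 3 ; alpha <- 0.75
  (h <- h.alpha.n(alpha, n, p)) ## subset size h = h(alpha, n, p) used by the MCD
  .MCDcons(p, h/n)              ## consistency factor applied to the raw covariance
}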
Based on these raw MCD estimates, (unless argument \code{raw.only} is true), a reweighting step is performed, i.e., \code{V <- \link{cov.wt}(x,w)}, where \code{w} are weights determined by \dQuote{outlyingness} with respect to the scaled raw MCD. Again, a consistency factor and (if \code{use.correction} is true) a finite sample correction factor (\code{.MCDcnp2.rew(p, n, alpha)}) are applied. The reweighted covariance is typically considerably more efficient than the raw one, see Pison et al. (2002). The two rescaling factors for the reweighted estimates are returned in \code{cnp2}. Details for the computation of the finite sample correction factors can be found in Pison et al. (2002). } \author{Valentin Todorov \email{valentin.todorov@chello.at}, based on work written for S-plus by Peter Rousseeuw and Katrien van Driessen from University of Antwerp.% no E-mails for spam-protection Visibility of (formerly internal) tuning parameters, notably \code{wgtFUN()}: Martin Maechler } \value{ An object of class \code{"mcd"} which is basically a \code{\link{list}} with components \item{center}{the final estimate of location.} \item{cov}{the final estimate of scatter.} \item{cor}{the (final) estimate of the correlation matrix (only if \code{cor = TRUE}).} \item{crit}{the value of the criterion, i.e., the logarithm of the determinant. Previous to Nov.2014, it contained the determinant itself which can under- or overflow relatively easily.} \item{best}{the best subset found and used for computing the raw estimates, with \code{\link{length}(best) == quan = \link{h.alpha.n}(alpha,n,p)}.} \item{mah}{mahalanobis distances of the observations using the final estimate of the location and scatter.} \item{mcd.wt}{weights of the observations using the final estimate of the location and scatter.} \item{cnp2}{a vector of length two containing the consistency correction factor and the finite sample correction factor of the final estimate of the covariance matrix.} \item{raw.center}{the raw (not reweighted) estimate of location.} \item{raw.cov}{the raw (not reweighted) estimate of scatter.} \item{raw.mah}{mahalanobis distances of the observations based on the raw estimate of the location and scatter.} \item{raw.weights}{weights of the observations based on the raw estimate of the location and scatter.} \item{raw.cnp2}{a vector of length two containing the consistency correction factor and the finite sample correction factor of the raw estimate of the covariance matrix.} \item{X}{the input data as numeric matrix, without \code{\link{NA}}s.} \item{n.obs}{total number of observations.} \item{alpha}{the size of the subsets over which the determinant is minimized (the default is \eqn{(n+p+1)/2}).} \item{quan}{the number of observations, \eqn{h}, on which the MCD is based. If \code{quan} equals \code{n.obs}, the MCD is the classical covariance matrix.} \item{method}{character string naming the method (Minimum Covariance Determinant), starting with \code{"Deterministic"} when \code{nsamp="deterministic"}.} \item{iBest}{(for the deterministic MCD) contains indices from 1:6 denoting which of the (six) initial subsets lead to the best set found.} \item{n.csteps}{(for the deterministic MCD) for each of the initial subsets, the number of C-steps executed till convergence.} \item{call}{the call used (see \code{\link{match.call}}).} } \references{ Rousseeuw, P. J. and Leroy, A. M. (1987) \emph{Robust Regression and Outlier Detection.} Wiley. Rousseeuw, P. J. and van Driessen, K. 
(1999) A fast algorithm for the minimum covariance determinant estimator. \emph{Technometrics} \bold{41}, 212--223. Pison, G., Van Aelst, S., and Willems, G. (2002) Small Sample Corrections for LTS and MCD, \emph{Metrika} \bold{55}, 111--123.% ~/save/papers/robust-diverse/Pison_VanAelst_Willems.pdf Hubert, M., Rousseeuw, P. J. and Verdonck, T. (2012) A deterministic algorithm for robust location and scatter. Journal of Computational and Graphical Statistics \bold{21}, 618--637. } \seealso{ \code{\link[MASS]{cov.mcd}} from package \CRANpkg{MASS}; \code{\link{covOGK}} as cheaper alternative for larger dimensions. \code{\link[robustX]{BACON}} and \code{\link[robustX]{covNNC}}, from package \CRANpkg{robustX}; } \examples{ data(hbk) hbk.x <- data.matrix(hbk[, 1:3]) set.seed(17) (cH <- covMcd(hbk.x)) cH0 <- covMcd(hbk.x, nsamp = "deterministic") with(cH0, stopifnot(quan == 39, iBest == c(1:4,6), # 5 out of 6 gave the same identical(raw.weights, mcd.wt), identical(which(mcd.wt == 0), 1:14), all.equal(crit, -1.045500594135))) ## the following three statements are equivalent c1 <- covMcd(hbk.x, alpha = 0.75) c2 <- covMcd(hbk.x, control = rrcov.control(alpha = 0.75)) ## direct specification overrides control one: c3 <- covMcd(hbk.x, alpha = 0.75, control = rrcov.control(alpha=0.95)) c1 ## Martin's smooth reweighting: ## List of experimental pre-specified wgtFUN() creators: ## Cutoffs may depend on (n, p, control$beta) : str(.wgtFUN.covMcd) cMM <- covMcd(hbk.x, wgtFUN = "sm1.adaptive") ina <- which(names(cH) == "call") all.equal(cMM[-ina], cH[-ina]) # *some* differences, not huge (same 'best'): stopifnot(all.equal(cMM[-ina], cH[-ina], tol = 0.2)) } \keyword{robust} \keyword{multivariate} robustbase/man/covOGK.Rd0000644000176200001440000001260411114012313014603 0ustar liggesusers\name{covOGK} \alias{covOGK} \alias{covGK} \alias{s_mad} \alias{s_IQR} \alias{hard.rejection} % \title{Orthogonalized Gnanadesikan-Kettenring (OGK) Covariance Matrix Estimation} \description{ Computes the orthogonalized pairwise covariance matrix estimate described in in Maronna and Zamar (2002). The pairwise proposal goes back to Gnanadesikan and Kettenring (1972). } \usage{ covOGK(X, n.iter = 2, sigmamu, rcov = covGK, weight.fn = hard.rejection, keep.data = FALSE, \dots) covGK (x, y, scalefn = scaleTau2, \dots) s_mad(x, mu.too = FALSE, na.rm = FALSE) s_IQR(x, mu.too = FALSE, na.rm = FALSE) } \arguments{ \item{X}{data in something that can be coerced into a numeric matrix.} \item{n.iter}{number of orthogonalization iterations. Usually 1 or 2; values greater than 2 are unlikely to have any significant effect on the estimate (other than increasing the computing time).} \item{sigmamu, scalefn}{a function that computes univariate robust location and scale estimates. By default it should return a single numeric value containing the robust scale (standard deviation) estimate. When \code{mu.too} is true, \code{sigmamu()} should return a numeric vector of length 2 containing robust location and scale estimates. See \code{\link{scaleTau2}}, \code{\link{s_Qn}}, \code{\link{s_Sn}}, \code{s_mad} or \code{s_IQR} for examples to be used as \code{sigmamu} argument.} \item{rcov}{function that computes a robust covariance estimate between two vectors. 
The default, Gnanadesikan-Kettenring's \code{covGK}, is simply \eqn{(s^2(X+Y) - s^2(X-Y))/4} where \eqn{s()} is the scale estimate \code{sigmamu()}.} \item{weight.fn}{a function of the robust distances and the number of variables \eqn{p} to compute the weights used in the reweighting step.} \item{keep.data}{logical indicating if the (untransformed) data matrix \code{X} should be kept as part of the result.} \item{\dots}{additional arguments; for \code{covOGK} to be passed to \code{sigmamu()} and \code{weight.fn()}; for \code{covGK} passed to \code{scalefn}.} %%% covGK(): \item{x,y}{numeric vectors of the same length, the covariance of which is sought in \code{covGK} (or the scale, in \code{s_mad} or \code{s_IQR}).} %%% s_mad(), s_IQR(): \item{mu.too}{logical indicating if both location and scale should be returned or just the scale (when \code{mu.too=FALSE} as by default).} \item{na.rm}{if \code{TRUE} then \code{\link{NA}} values are stripped from \code{x} before computation takes place.} } \details{ Typical default values for the \emph{function} arguments \code{sigmamu}, \code{rcov}, and \code{weight.fn} are available as well, see the \emph{Examples} below, \bold{but} their names and calling sequences are still subject to discussion and may be changed in the future. The current default, \code{weight.fn = hard.rejection}, corresponds to the proposal in the literature, but Martin Maechler strongly believes that the hard threshold currently in use is too arbitrary, and further that \emph{soft} thresholding should be used instead, anyway. } \value{ \code{covOGK()} currently returns a list with components \item{center}{robust location: numeric vector of length \eqn{p}.} \item{cov}{robust covariance matrix estimate: \eqn{p\times p}{p x p} matrix.} \item{wcenter, wcov}{re-\bold{w}eighted versions of \code{center} and \code{cov}.} \item{weights}{the robustness weights used.} \item{distances}{the mahalanobis distances computed using \code{center} and \code{cov}.} \dots\dots \cr \bold{but note that this might be radically changed to returning an S4 classed object!} \code{covGK()} is a trivial 1-line function returning the covariance estimate \deqn{\hat c(x,y) = \left(\hat \sigma(x+y)^2 - \hat \sigma(x-y)^2 \right)/4,% }{ c^(x,y) = [s^(x+y)^2 - s^(x-y)^2]/4,}% where \eqn{\hat \sigma(u)}{s^(u)} is the scale estimate of \eqn{u} specified by \code{scalefn}. \code{s_mad()} and \code{s_IQR()} return the scale estimates \code{\link[stats]{mad}} or \code{\link[stats]{IQR}} respectively, where the \code{s_*} functions return a length-2 vector (mu, sig) when \code{mu.too = TRUE}, see also \code{\link{scaleTau2}}. } \references{ Maronna, R.A. and Zamar, R.H. (2002) Robust estimates of location and dispersion of high-dimensional datasets; \emph{Technometrics} \bold{44}(4), 307--317. Gnanadesikan, R. and John R. Kettenring (1972) Robust estimates, residuals, and outlier detection with multiresponse data. \emph{Biometrics} \bold{28}, 81--124. } \author{Kjell Konis \email{konis@stats.ox.ac.uk}, with modifications by Martin Maechler.} \seealso{\code{\link{scaleTau2}}, \code{\link{covMcd}}, \code{\link[MASS]{cov.rob}}. } \examples{ data(hbk) hbk.x <- data.matrix(hbk[, 1:3]) cO1 <- covOGK(hbk.x, sigmamu = scaleTau2) cO2 <- covOGK(hbk.x, sigmamu = s_Qn) cO3 <- covOGK(hbk.x, sigmamu = s_Sn) cO4 <- covOGK(hbk.x, sigmamu = s_mad) cO5 <- covOGK(hbk.x, sigmamu = s_IQR) %% FIXME: Add time comparison, here or in "vignette", "demo", "... 
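## A rough timing sketch for such a comparison (illustrative only; timings
## vary by platform), using a larger simulated data matrix:
\dontrun{
set.seed(64)
X <- matrix(rnorm(100 * 20), 100, 20)
sapply(list(scaleTau2 = scaleTau2, s_Qn = s_Qn, s_Sn = s_Sn,
            s_mad = s_mad, s_IQR = s_IQR),
       function(sig) system.time(covOGK(X, sigmamu = sig))[["elapsed"]])
}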
data(toxicity) cO1tox <- covOGK(toxicity, sigmamu = scaleTau2) cO2tox <- covOGK(toxicity, sigmamu = s_Qn) ## nice formatting of correlation matrices: as.dist(round(cov2cor(cO1tox$cov), 2)) as.dist(round(cov2cor(cO2tox$cov), 2)) ## "graphical" symnum(cov2cor(cO1tox$cov)) symnum(cov2cor(cO2tox$cov), legend=FALSE) } \keyword{robust} \keyword{multivariate} robustbase/man/functionXal-class.Rd0000644000176200001440000000212612114153713017061 0ustar liggesusers\name{functionXal-class} \docType{class} \alias{functionXal-class} \title{Class "functionXal" of Functionals (of Psi-like functions)} \description{ The class \code{"functionXal"} is a class of functionals (typically integrals) typically of \code{\linkS4class{functionX}} functions. Since the \code{functionX} functions typically also depend on tuning parameters, objects of this class (\code{"functionXal"}) are functions of these tuning parameters. } \section{Slots}{ \describe{ \item{\code{.Data}:}{Directly extends class \code{"function"}.} } } \section{Extends}{ Class \code{"function"}, from data part. Class \code{"OptionalFunction"}, by class \code{"function"}. Class \code{"PossibleMethod"}, by class \code{"function"}. } % \section{Methods}{ % No methods defined with class "functionXal" in the signature. % } \seealso{ \code{\link{psiFunc}()} and the class definitions of \code{\linkS4class{functionX}} and \code{\linkS4class{psi_func}} which has several \code{functionXal} slots. } % \examples{ % ##---- Should be DIRECTLY executable !! ---- % } \keyword{classes} robustbase/man/salinity.Rd0000644000176200001440000000565513710054521015332 0ustar liggesusers\name{salinity} \alias{salinity} \docType{data} \title{Salinity Data} \description{ This is a data set consisting of measurements of water salinity (i.e., its salt concentration) and river discharge taken in North Carolina's Pamlico Sound, recording some bi-weekly averages in March, April, and May from 1972 to 1977. This dataset was listed by Ruppert and Carroll (1980). In Carrol and Ruppert (1985) the physical background of the data is described. They indicated that observations 5 and 16 correspond to periods of very heavy discharge and showed that the discrepant observation 5 was masked by observations 3 and 16, i.e., only after deletion of these observations it was possible to identify the influential observation 5. This data set is a prime example of the \emph{masking effect}. } \usage{data(salinity, package="robustbase")} \format{ A data frame with 28 observations on the following 4 variables (in parentheses are the names used in the 1980 reference). \describe{ \item{\code{X1}:}{Lagged Salinity (\sQuote{SALLAG})} \item{\code{X2}:}{Trend (\sQuote{TREND})} \item{\code{X3}:}{Discharge (\sQuote{H2OFLOW})} \item{\code{Y}:}{Salinity (\sQuote{SALINITY})} } } \note{The \CRANpkg{boot} package contains another version of this salinity data set, also attributed to Ruppert and Carroll (1980), but with two clear transcription errors, see the examples. } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.82, table 5. Ruppert, D. and Carroll, R.J. (1980) Trimmed least squares estimation in the linear model. \emph{JASA} \bold{75}, 828--838; table 3, p.835. Carroll, R.J. and Ruppert, D. (1985) Transformations in regression: A robust analysis. \emph{Technometrics} \bold{27}, 1--12 } \examples{ data(salinity) summary(lm.sali <- lm(Y ~ . , data = salinity)) summary(rlm.sali <- MASS::rlm(Y ~ . , data = salinity)) summary(lts.sali <- ltsReg(Y ~ . 
, data = salinity)) salinity.x <- data.matrix(salinity[, 1:3]) c_sal <- covMcd(salinity.x) plot(c_sal, "tolEllipsePlot") ## Connection with boot package's version : if(requireNamespace("boot")) { ## 'always' print( head(boot.sal <- boot::salinity ) ) print( head(robb.sal <- salinity [, c(4, 1:3)]) ) # difference: has one digit more ## Otherwise the same ? dimnames(robb.sal) <- dimnames(boot.sal) ## apart from the 4th column, they are "identical": stopifnot( all.equal(boot.sal[, -4], robb.sal[, -4], tol = 1e-15) ) ## But the discharge ('X3', 'dis' or 'H2OFLOW') __differs__ in two places: plot(cbind(robustbase = robb.sal[,4], boot = boot.sal[,4])) abline(0,1, lwd=3, col=adjustcolor("red", 1/4)) D.sal <- robb.sal[,4] - boot.sal[,4] stem(robb.sal[,4] - boot.sal[,4]) which(abs(D.sal) > 0.01) ## 2 8 ## *two* typos (=> difference ~= 1) in the version of 'boot': obs. 2 & 8 !!! cbind(robb = robb.sal[,4], boot = boot.sal[,4], D.sal) }# boot } \keyword{datasets} robustbase/man/adjboxStats.Rd0000644000176200001440000000637313721410436015765 0ustar liggesusers\name{adjboxStats} \alias{adjboxStats} \title{Statistics for Skewness-adjusted Boxplots} \description{ Computes the \dQuote{statistics} for producing boxplots adjusted for skewed distributions as proposed in Hubert and Vandervieren (2008), see \code{\link{adjbox}}. } \usage{ adjboxStats(x, coef = 1.5, a = -4, b = 3, do.conf = TRUE, do.out = TRUE, \dots) } \arguments{ \item{x}{a numeric vector for which adjusted boxplot statistics are computed.} \item{coef}{number determining how far \sQuote{whiskers} extend out from the box, see \code{\link{boxplot.stats}}.} \item{a, b}{scaling factors multiplied by the medcouple \code{\link{mc}()} to determine outlyer boundaries; see the references.} \item{do.conf,do.out}{logicals; if \code{FALSE}, the \code{conf} or \code{out} component respectively will be empty in the result.} \item{\dots}{further optional arguments to be passed to \code{\link{mc}()}, such as \code{doReflect}.} } \details{ Given the quartiles \eqn{Q_1}{Q1}, \eqn{Q_3}{Q3}, the interquartile range \eqn{\Delta Q := Q_3 - Q_1}{IQR := Q3-Q1}, and the medcouple \eqn{M :=}\code{mc(x)}, \eqn{c =}\code{coef}, the \dQuote{fence} is defined, for \eqn{M \ge 0} as \deqn{[Q_1 - c e^{a \cdot M}\Delta Q, Q_3 + c e^{b \cdot M}\Delta Q],% }{[Q1 - c*exp(a * M)*IQR, Q3 + c*exp(b * M)*IQR],} and for \eqn{M < 0} as \deqn{[Q_1 - c e^{-b \cdot M}\Delta Q, Q_3 + c e^{-a \cdot M}\Delta Q],% }{[Q1 - c*exp(-b * M)*IQR, Q3 + c*exp(-a * M)*IQR],} and all observations \code{x} outside the fence, the \dQuote{potential outliers}, are returned in \code{out}. Note that a typo in robustbase version up to 0.7-8, for the (rare left-skewed) case where \link{mc}(x) < 0, lead to a \dQuote{fence} not wide enough in the upper part, and hence \emph{less} outliers there. } \value{ A \code{\link{list}} with the components \item{stats}{a vector of length 5, containing the extreme of the lower whisker, the lower hinge, the median, the upper hinge and the extreme of the upper whisker.} \item{n}{the number of observations} \item{conf}{the lower and upper extremes of the \sQuote{notch} (\code{if(do.conf)}). 
See \code{\link{boxplot.stats}}.}
  \item{fence}{length 2 vector of interval boundaries which define the
    non-outliers, and hence the whiskers of the plot.}
  \item{out}{the values of any data points which lie beyond the fence, and
    hence beyond the extremes of the whiskers.}
}
\author{R Core Development Team (\code{\link{boxplot.stats}}); adapted by
  Tobias Verbeke and Martin Maechler.}
\note{The code only slightly modifies the code of \R's
  \code{\link{boxplot.stats}}.
}
\seealso{\code{\link{adjbox}()}, also for references, the function which mainly
  uses this one; further \code{\link{boxplot.stats}}.
}
\examples{
data(condroz)
adjboxStats(ccA <- condroz[,"Ca"])
adjboxStats(ccA, doReflect = TRUE)# small difference in fence

## Test reflection invariance [was not ok, up to and including robustbase_0.7-8]
a1 <- adjboxStats( ccA, doReflect = TRUE)
a2 <- adjboxStats(-ccA, doReflect = TRUE)
nm1 <- c("stats", "conf", "fence")
stopifnot(all.equal( a1[nm1],
                    lapply(a2[nm1], function(u) rev(-u))),
          all.equal(a1[["out"]], -a2[["out"]]))
}
\keyword{robust}
\keyword{univar}
robustbase/man/coleman.Rd0000644000176200001440000000263113312375575015110 0ustar liggesusers\name{coleman}
\alias{coleman}
\docType{data}
\title{Coleman Data Set}
\description{
  Contains information on 20 schools from the Mid-Atlantic and New England
  States, drawn from a population studied by Coleman et al. (1966).  Mosteller
  and Tukey (1977) analyze this sample consisting of measurements on six
  different variables, one of which will be treated as a response.
}
\usage{data(coleman, package="robustbase")}
\format{
  A data frame with 20 observations on the following 6 variables.
  \describe{
    \item{\code{salaryP}}{staff salaries per pupil}
    \item{\code{fatherWc}}{percent of white-collar fathers}
    \item{\code{sstatus}}{socioeconomic status composite deviation: means for
      family size, family intactness, father's education, mother's education,
      and home items}
    \item{\code{teacherSc}}{mean teacher's verbal test score}
    \item{\code{motherLev}}{mean mother's educational level, one unit is equal
      to two school years}
    \item{\code{Y}}{verbal mean test score (y, all sixth graders)}
  }
}
\author{Valentin Todorov}
\source{
  P. J. Rousseeuw and A. M. Leroy (1987)
  \emph{Robust Regression and Outlier Detection}
  Wiley, p.79, table 2.
}
\examples{
data(coleman)
pairs(coleman)
summary( lm.coleman <- lm(Y ~ . , data = coleman))
summary(lts.coleman <- ltsReg(Y ~ . , data = coleman))
coleman.x <- data.matrix(coleman[, 1:6])
(Cc <- covMcd(coleman.x))
}
\keyword{datasets}
robustbase/man/colMedians.Rd0000644000176200001440000000603413710054521015544 0ustar liggesusers\name{colMedians}
\title{Fast Row or Column-wise Medians of a Matrix}
\alias{colMedians}
\alias{rowMedians}
\description{
  Calculates the median for each row (column) of a matrix \code{x}.  This is
  the same as but more efficient than \code{apply(x, MM, median)} for MM=1 or
  MM=2, respectively.
}
\usage{
colMedians(x, na.rm = FALSE, hasNA = TRUE, keep.names=TRUE)
rowMedians(x, na.rm = FALSE, hasNA = TRUE, keep.names=TRUE)
}
\arguments{
  \item{x}{a \code{\link{numeric}} \eqn{n \times p}{n x p} \code{\link{matrix}}.}
  \item{na.rm}{if \code{\link{TRUE}}, \code{\link{NA}}s are excluded first,
    otherwise not.}
  \item{hasNA}{logical indicating if \code{x} may contain \code{\link{NA}}s.
If set to \code{FALSE}, no internal NA handling is performed, which typically
    is faster.}
  \item{keep.names}{logical indicating if row or column names of \code{x}
    should become \code{\link{names}} of the result - as is the case for
    \code{\link{apply}(x, MM, median)}.}
}
\value{
  a \code{\link{numeric}} vector of length \eqn{n} or \eqn{p}, respectively.
}
\section{Missing values}{
  Missing values are excluded before calculating the medians \emph{unless}
  \code{hasNA} is false.  Note that \code{na.rm} has no effect and is
  automatically false when \code{hasNA} is false, i.e., internally, before
  computations start, the following is executed:
  \preformatted{if (!hasNA)    ## If there are no NAs, don't try to remove them
      narm <- FALSE}
}
\details{
  The implementation of \code{rowMedians()} and \code{colMedians()} is
  optimized for both speed and memory.  To avoid coercing to
  \code{\link{double}}s (and hence memory allocation), there is a special
  implementation for \code{\link{integer}} matrices.  That is, if \code{x} is
  an \code{\link{integer}} \code{\link{matrix}}, then
  \code{rowMedians(as.double(x))} (\code{colMedians(as.double(x))}) would
  require three times the memory of \code{rowMedians(x)}
  (\code{colMedians(x)}), but all this is avoided.
}
\author{Henrik Bengtsson, Harris Jaffee, Martin Maechler}
\seealso{
  See \code{\link{wgt.himedian}()} for a weighted hi-median, and
  \code{\link[matrixStats]{colWeightedMedians}()} etc from package
  \CRANpkg{matrixStats} for \emph{weighted} medians.\cr
  For mean estimates, see \code{rowMeans()} in \code{\link{colSums}}().
}
\examples{
set.seed(1); n <- 234; p <- 543 # n*p = 127'062
x <- matrix(rnorm(n*p), n, p)
x[sample(seq_along(x), size= n*p / 256)] <- NA
R1 <- system.time(r1 <- rowMedians(x, na.rm=TRUE))
C1 <- system.time(y1 <- colMedians(x, na.rm=TRUE))
R2 <- system.time(r2 <- apply(x, 1, median, na.rm=TRUE))
C2 <- system.time(y2 <- apply(x, 2, median, na.rm=TRUE))
R2 / R1 # speedup factor: ~= 4   {platform dependent}
C2 / C1 # speedup factor: ~= 5.8 {platform dependent}
stopifnot(all.equal(y1, y2, tol=1e-15),
          all.equal(r1, r2, tol=1e-15))

(m <- cbind(x1=3, x2=c(4:1, 3:4,4)))
stopifnot(colMedians(m) == 3,
          all.equal(colMeans(m), colMedians(m)),# <- including names !
          all.equal(rowMeans(m), rowMedians(m)))
}
\keyword{array}
\keyword{robust}
\keyword{univar}
robustbase/man/summary.mcd.Rd0000644000176200001440000000345410441335140015725 0ustar liggesusers\name{summary.mcd}
\alias{summary.mcd}
\alias{print.summary.mcd}
\title{Summary Method for MCD objects}
\usage{
\method{summary}{mcd}(object, \dots)
\method{print}{summary.mcd}(x, digits = max(3, getOption("digits") - 3),
      print.gap = 2, \dots)
}
\arguments{
  \item{object,x}{an object of class \code{"mcd"} (or \code{"summary.mcd"});
    usually, a result of a call to \code{\link{covMcd}}.}
  \item{digits}{the number of significant digits to use when printing.}
  \item{print.gap}{number of horizontal spaces between numbers; see also
    \code{\link{print.default}}.}
  \item{\dots}{further arguments passed to or from other methods.}
}
\description{
  \code{\link{summary}} method for class \code{"mcd"}.
}
\details{
  \code{summary.mcd()}, the S3 method, simply returns an (S3) object of
  \code{\link{class} "summary.mcd"} for which there's a \code{\link{print}}
  method:

  \code{print.summary.mcd} prints summary statistics for the weighted
  covariance matrix and location estimates with weights based on MCD estimates.
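  For illustration only, a minimal sketch (here \code{cH} stands for any
  \code{\link{covMcd}()} result, such as \code{cLB} in the example below):
  \preformatted{
  cH           ## print.mcd():         robust location and covariance estimate
  summary(cH)  ## print.summary.mcd(): the same, plus further summary statistics
  }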
While the function \code{\link{print.mcd}} prints only the robust estimates of the location and the covariance matrix, \code{print.summary.mcd} will print also the correlation matrix (if requested in the call to \code{covMcd} with \code{cor=TRUE}), the eigenvalues of the covariance or the correlation matrix and the robust (\dQuote{Mahalanobis}) distances. } \value{ \code{summary.mcd} returns an \code{summary.mcd} object, whereas the \code{print} methods returns its first argument via \code{\link{invisible}}, as all \code{print} methods do. } \seealso{ \code{\link{covMcd}}, \code{\link{summary}} } \examples{ data(Animals, package = "MASS") brain <- Animals[c(1:24, 26:25, 27:28),] lbrain <- log(brain) summary(cLB <- covMcd(lbrain)) } \keyword{multivariate} \keyword{robust} robustbase/man/lmrob.fit.Rd0000644000176200001440000000513013212743621015361 0ustar liggesusers\name{lmrob.fit} \alias{lmrob.fit} \alias{lmrob.fit.MM} \title{ MM-type estimator for regression } \description{ Compute MM-type estimators of regression: An S-estimator is used as starting value, and an M-estimator with fixed scale and redescending psi-function is used from there. Optionally a D-step (Design Adaptive Scale estimate) as well as a second M-step is calculated. } \usage{ lmrob.fit(x, y, control, init = NULL, mf = NULL) } \arguments{ \item{x}{design matrix (\eqn{n \times p}{n x p}) typically including a column of \code{1}s for the intercept.} \item{y}{numeric response vector (of length \eqn{n}).} \item{control}{a list of control parameters as returned by \code{\link{lmrob.control}}, used for both the initial S-estimate and the subsequent M- and D-estimates.} \item{init}{optional \code{\link{list}} of initial estimates. See \emph{Details}.} \item{mf}{unused and deprecated.} } \details{This function is the basic fitting function for MM-type estimation, called by \code{\link{lmrob}} and typically not to be used on its own. If given, \code{init} must be a list of initial estimates containing at least the initial coefficients and scale as \code{coefficients} and \code{scale}. Otherwise it calls \code{\link{lmrob.S}(..)} and uses it as initial estimator. } \value{ A list with components \item{fitted.values}{\eqn{X \beta}{X beta}, i.e., \code{X \%*\% coefficients}.} \item{residuals}{the raw residuals, \code{y - fitted.values}} \item{rweights}{robustness weights derived from the final M-estimator residuals (even when not converged).} \item{rank}{} \item{degree.freedom}{\code{n - rank}}% more! \item{coefficients}{estimated regression coefficient vector} \item{scale}{the robustly estimated error standard deviation}% = final.MM$scale, \item{cov}{variance-covariance matrix of \code{coefficients}, if the RWLS iterations have converged (and \code{control$cov} is not \code{"none"}).} \item{control}{}% = control, \item{iter}{}% = final.MM$iter, <<<<<< also 'init.S' ! 
\item{converged}{logical indicating if the RWLS iterations have converged.}
  \item{init.S}{the whole initial S-estimator result, including its own
    \code{converged} flag, see \code{\link{lmrob.S}} (only for MM-estimates).}
  \item{init}{A similar list that contains the results of intermediate
    estimates (not for MM-estimates).}
}
\author{ Matias Salibian-Barrera, Martin Maechler and Manuel Koller}
\seealso{
  \code{\link{lmrob}},
  \code{\link{lmrob..M..fit}},
  \code{\link{lmrob..D..fit}},
  \code{\link{lmrob.S}}
}
\keyword{robust}
\keyword{regression}
robustbase/man/CrohnD.Rd0000644000176200001440000000310613312375575014655 0ustar liggesusers\name{CrohnD}
\encoding{utf8}
\alias{CrohnD}
\docType{data}
\title{Crohn's Disease Adverse Events Data}
\description{
  Data set from a study of the adverse events of a drug on 117 patients
  affected by Crohn's disease (a chronic inflammatory disease of the
  intestines).
}
\usage{data(CrohnD, package="robustbase")}
\format{
  A data frame with 117 observations on the following 9 variables.
  \describe{
    %% FIXME: leave these away -- or explain: -- they code patient sub-groups
    \item{\code{ID}}{the numeric patient IDs}
    \item{\code{nrAdvE}}{the number of adverse events}
    \item{\code{BMI}}{Body Mass Index, i.e., \eqn{weight[kg] / (height[m])^2}.}
    \item{\code{height}}{in cm}
    \item{\code{country}}{a factor with levels \code{0} and \code{1}}
    \item{\code{sex}}{the person's gender, a binary factor with levels
      \code{M} and \code{F}}
    \item{\code{age}}{in years, a numeric vector}
    \item{\code{weight}}{in kilograms, a numeric vector}
    \item{\code{treat}}{how CD was treated: a factor with levels \code{0},
      \code{1} and \code{2}, meaning placebo, drug 1 and drug 2.}
  }
}
% \details{
%  ~~ If necessary, more details than the __description__ above ~~
% }
\source{
  from the authors of the reference, with permission by the original data
  collecting agency.
}
\references{
  Serigne N. Lô and Elvezio Ronchetti (2006).
  Robust Second Order Accurate Inference for Generalized Linear Models.
  Technical report, University of Geneva, Switzerland.
}
\examples{
data(CrohnD)
str(CrohnD)
with(CrohnD, ftable(table(sex,country, treat)))
}
\keyword{datasets}
robustbase/man/smoothWgt.Rd0000644000176200001440000000465412441665126015475 0ustar liggesusers\name{smoothWgt}
\alias{smoothWgt}
\title{Smooth Weighting Function - Generalized Biweight}% The Biweight on a Stick
\description{
  \dQuote{The Biweight on a Stick} --- Compute a smooth (when \eqn{h > 0})
  weight function typically for computing weights from large (robust)
  \dQuote{distances} using a piecewise polynomial function which in fact is a
  2-parameter generalization of Tukey's 1-parameter \dQuote{biweight}.
}
\usage{
smoothWgt(x, c, h)
}
\arguments{
  \item{x}{numeric vector of abscissa values}
  \item{c}{\dQuote{cutoff}, a typically positive number.}
  \item{h}{\dQuote{bandwidth}, a positive number.}
}
\details{
  Let \eqn{w(x;c,h) := }\code{smoothWgt(x, c, h)}.  Then,

  \deqn{% "FIXME": rather use amsmath package \cases{.}
    w(x; c,h) := 0 \ \ \ \ \ \mathrm{if}\ |x| \ge c + h/2,}{%
    w(x; c,h) := 0   if |x| >= c + h/2,}
  \deqn{
    w(x; c,h) := 1 \ \ \ \ \ \mathrm{if}\ |x| \le c - h/2,}{%
    w(x; c,h) := 1   if |x| <= c - h/2,}
  \deqn{
    w(x; c,h) := \bigl(1 - \bigl((|x| - (c-h/2))/h\bigr)^2\bigr)^2
    \ \mathrm{if}\ c-h/2 < |x| < c+h/2,}{%
    w(x; c,h) := (1 - ((|x| - (c-h/2))/h)^2)^2  if  c-h/2 < |x| < c+h/2.}

  \code{smoothWgt()} is \emph{scale invariant} in the sense that
  \deqn{w(\sigma x; \sigma c, \sigma h) = w(x; c, h),}{%
    w(S x; S c, S h) = w(x; c, h),}
  when \eqn{\sigma > 0}{S > 0}.
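  For illustration, a direct (vectorized) \R{} transcription of this
  definition could look as follows; this is only a sketch, the name
  \code{smoothWgtR} is made up here, and the actual \code{smoothWgt()}
  implementation may differ in its internals:
  \preformatted{
  smoothWgtR <- function(x, c, h) {
    u <- (abs(x) - (c - h/2)) / h   # rescaled distance from the "flat" part
    ifelse(u <= 0, 1,               # |x| <= c - h/2 : full weight 1
    ifelse(u >= 1, 0,               # |x| >= c + h/2 : weight 0
           (1 - u^2)^2))            # in between: smooth biweight-type descent
  }
  ## e.g., smoothWgtR(x, c = 1/2, h = 1) reproduces Tukey's biweight,
  ## cf. the example below.
  }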
} \value{ a numeric vector of the same length as \code{x} with weights between zero and one. Currently all \code{\link{attributes}} including \code{\link{dim}} and \code{\link{names}} are dropped. } %% \references{ TODO: Write a small vignette ! %% } \author{Martin Maechler} \seealso{ \code{\link{Mwgt}(.., psi = "bisquare")} of which \code{smoothWgt()} is a generalization, and \code{\link{Mwgt}(.., psi = "optimal")} which looks similar for larger \code{c} with its constant one part around zero, but also has only one parameter. } \examples{ ## a somewhat typical picture: curve(smoothWgt(x, c=3, h=1), -5,7, n = 1000) csW <- curve(smoothWgt(x, c=1/2, h=1), -2,2) # cutoff 1/2, bandwidth 1 ## Show that the above is the same as ## Tukey's "biweight" or "bi-square" weight function: bw <- function(x) pmax(0, (1 - x^2))^2 cbw <- curve(bw, col=adjustcolor(2, 1/2), lwd=2, add=TRUE) cMw <- curve(Mwgt(x,c=1,"biweight"), col=adjustcolor(3, 1/2), lwd=2, add=TRUE) stopifnot(## proving they are all the same: all.equal(csW, cbw, tol=1e-15), all.equal(csW, cMw, tol=1e-15)) } \keyword{arith} \keyword{robust} robustbase/man/outlierStats.Rd0000644000176200001440000001032213162417031016163 0ustar liggesusers\name{outlierStats} \alias{outlierStats} \title{Robust Regression Outlier Statistics} \description{ Simple statistics about observations with robustness weight of almost zero for models that include factor terms. The number of rejected observations and the mean robustness weights are computed for each level of each factor included in the model. } \usage{ outlierStats(object, x = object$x, control = object$control, epsw = control$eps.outlier, epsx = control$eps.x, warn.limit.reject = control$warn.limit.reject, warn.limit.meanrw = control$warn.limit.meanrw) } \arguments{ \item{object}{object of class \code{"lmrob"}, typically the result of a call to \code{\link{lmrob}}.} \item{x}{design matrix} \item{control}{list as returned by \code{\link{lmrob.control}}.} \item{epsw}{limit on the robustness weight below which an observation is considered to be an outlier. Either a \code{numeric(1)} or a \code{\link{function}} that takes the number of observations as an argument.} \item{epsx}{limit on the absolute value of the elements of the design matrix below which an element is considered zero. Either a numeric(1) or a function that takes the maximum absolute value in the design matrix as an argument.} \item{warn.limit.reject}{limit of ratio \eqn{\#\mbox{rejected} / \#\mbox{obs in level}}{# rejected / # obs in level} above (\eqn{\geq}{>=}) which a warning is produced. Set to \code{NULL} to disable warning.} \item{warn.limit.meanrw}{limit of the mean robustness per factor level below which (\eqn{\leq}{<=}) a warning is produced. Set to \code{NULL} to disable warning.} } \details{ For models that include factors, the fast S-algorithm used by \code{\link{lmrob}} can produce \dQuote{bad} fits for some of the factor levels, especially if there are many levels with only a few observations. Such a \dQuote{bad} fit is characterized as a fit where most of the observations in a level of a factor are rejected, i.e., are assigned robustness weights of zero or nearly zero. We call such a fit a \dQuote{local exact fit}. If a local exact fit is detected, then we recommend to increase some of the control parameters of the \dQuote{fast S}-algorithm. As a first aid solution in such cases, one can use \code{setting="KS2014"}, see also \code{\link{lmrob.control}}. 
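  The counting that is reported can also be mimicked \dQuote{by hand}, as in
  the following small sketch (purely illustrative; the simulated data and the
  explicit use of the default \code{eps.outlier} limit are made up here and
  are not part of the package code):
  \preformatted{
  set.seed(17)
  dat <- data.frame(grp = gl(4, 8), y = rt(32, df = 1)) # heavy-tailed errors
  fit <- lmrob(y ~ grp, data = dat)
  rw   <- fit$rweights              # robustness weights of the fit
  epsw <- 0.1 / length(rw)          # default limit, cf. 'eps.outlier'
  tapply(rw <= epsw, dat$grp, sum)  # number of "rejected" obs per level
  }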
This function is called internally by \code{\link{lmrob}} to issue a warning
  if a local exact fit is detected.  The output is available as \code{ostats}
  in objects of class \code{"lmrob"} (only if the statistic is computed).
}
\value{
  A data frame for each column with any zero elements, as well as an overall
  statistic.  Each data frame consists of the names of the coefficients in
  question, the number of non-zero observations in that level
  (\code{N.nonzero}), the number of rejected observations
  (\code{N.rejected}), the ratio of rejected observations to the number of
  observations in that level (\code{Ratio}) and the mean robustness weight of
  all the observations in the corresponding level (\code{Mean.RobWeight}).
}
\references{
  Koller, M. and Stahel, W.A. (2017)
  Nonsingular subsampling for regression {S}~estimators with categorical
  predictors, \emph{Computational Statistics} \bold{32}(2): 631--646.
  \doi{10.1007/s00180-016-0679-x}
}
\author{Manuel Koller}
\seealso{
  \code{\link{lmrob.control}} for the default values of the control
  parameters; \code{\link{summarizeRobWeights}}.
}
\examples{
## artificial data example
data <- expand.grid(grp1 = letters[1:5], grp2 = letters[1:5], rep=1:3)
set.seed(101)
data$y <- c(rt(nrow(data), 1))

## compute outlier statistics for all the estimators
control <- lmrob.control(method = "SMDM",
                         compute.outlier.stats = c("S", "MM", "SMD", "SMDM"))
## warning is only issued for some seeds
set.seed(2)
fit1 <- lmrob(y ~ grp1*grp2, data, control = control)
## do as suggested:
fit2 <- lmrob(y ~ grp1*grp2, data, setting = "KS2014")

## the plot function should work for such models as well
plot(fit1)

\dontrun{
## access statistics:
fit1$ostats            ## SMDM
fit1$init$ostats       ## SMD
fit1$init$init$ostats  ## SM
fit1$init$init$init.S$ostats ## S
}%dont
}
\keyword{robust}
\keyword{regression}
robustbase/man/huberM.Rd0000644000176200001440000000624513710054521014714 0ustar liggesusers\name{huberM}
\alias{huberM}
\title{Safe (generalized) Huber M-Estimator of Location}
\concept{robust location}
\description{
  (Generalized) Huber M-estimator of location with MAD scale, being sensible
  also when the scale is zero where \code{\link[MASS]{huber}()} returns an
  error.
}
\usage{
huberM(x, k = 1.5, weights = NULL, tol = 1e-06,
       mu = if(is.null(weights)) median(x) else wgt.himedian(x, weights),
       s = if(is.null(weights)) mad(x, center=mu)
           else wgt.himedian(abs(x - mu), weights),
       se = FALSE, warn0scale = getOption("verbose"))
}
\arguments{
  \item{x}{numeric vector.}
  \item{k}{positive factor; the algorithm winsorizes at \code{k} standard
    deviations.}
  \item{weights}{numeric vector of non-negative weights of same length as
    \code{x}, or \code{NULL}.}
  \item{tol}{convergence tolerance.}
  \item{mu}{initial location estimator.}
  \item{s}{scale estimator held constant through the iterations.}
  \item{se}{logical indicating if the standard error should be computed and
    returned (as \code{SE} component).  Currently only available when
    \code{weights} is \code{NULL}.}
  \item{warn0scale}{logical; if true, and \code{s} is 0 and
    \code{length(x) > 1}, this will be warned about.}
}
\value{
  list of location and scale parameters, and number of iterations used.
\item{mu}{location estimate} \item{s}{the \code{s} argument, typically the \code{\link{mad}}.} \item{it}{the number of \dQuote{Huber iterations} used.} } \details{ Note that currently, when non-\code{NULL} \code{weights} are specified, the default for initial location \code{mu} and scale \code{s} is \code{\link{wgt.himedian}}, where strictly speaking a weighted \dQuote{non-hi} median should be used for consistency. Since \code{s} is not updated, the results slightly differ, see the examples below. When \code{se = TRUE}, the standard error is computed using the \eqn{\tau} correction factor but no finite sample correction. % and as if \code{s} was not estimated from the data. } \author{Martin Maechler, building on the MASS code mentioned.} \references{ Huber, P. J. (1981) \emph{Robust Statistics.} Wiley. } \seealso{ \code{\link[MASS]{hubers}} (and \code{huber}) in package \CRANpkg{MASS}; \code{\link{mad}}. } \examples{ huberM(c(1:9, 1000)) mad (c(1:9, 1000)) mad (rep(9, 100)) huberM(rep(9, 100)) ## When you have "binned" aka replicated observations: set.seed(7) x <- c(round(rnorm(1000),1), round(rnorm(50, m=10, sd = 10))) t.x <- table(x) # -> unique values and multiplicities x.uniq <- as.numeric(names(t.x)) ## == sort(unique(x)) x.mult <- unname(t.x) str(Hx <- huberM(x.uniq, weights = x.mult), digits = 7) str(Hx. <- huberM(x, s = Hx$s, se=TRUE), digits = 7) ## should be ~= Hx stopifnot(all.equal(Hx[-4], Hx.[-4])) str(Hx2 <- huberM(x, se=TRUE), digits = 7)## somewhat different, since 's' differs ## Confirm correctness of std.error : \donttest{ system.time( SS <- replicate(10000, vapply(huberM(rnorm(400), se=TRUE), as.double, 1.)) ) # ~ 12.2 seconds rbind(mean(SS["SE",]), sd(SS["mu",]))# both ~ 0.0508 stopifnot(all.equal(mean(SS["SE",]), sd ( SS["mu",]), tolerance= 0.002)) } } \keyword{univar} \keyword{robust} robustbase/man/r6pack.Rd0000644000176200001440000000477213710054521014663 0ustar liggesusers\name{r6pack} \alias{r6pack} \title{Robust Distance based observation orderings based on robust "Six pack"} \description{ Compute six initial robust estimators of multivariate location and \dQuote{scatter} (scale); then, for each, compute the distances \eqn{d_{ij}}{d_ij} and take the \code{h} (\eqn{h > n/2}) observations with smallest distances. Then compute the statistical distances based on these h observations. Return the indices of the observations sorted in increasing order. } \usage{ r6pack(x, h, full.h, scaled = TRUE, scalefn = rrcov.control()$scalefn) } \arguments{ \item{x}{n x p data matrix} \item{h}{integer, typically around (and slightly larger than) \eqn{n/2}.} \item{full.h}{logical specifying if the full (length n) observation ordering should be returned; otherwise only the first \code{h} are. For \code{.detmcd()}, \code{full.h=FALSE} is typical.} \item{scaled}{logical indicating if the data \code{x} is already scaled; if false, we apply \code{x <- doScale(x, median, scalefn)}.} \item{scalefn}{a \code{\link{function}(u)} to compute a robust univariate scale of u.} }% args \details{%% --> ../R/detmcd.R The six initial estimators are \enumerate{ \item{Hyperbolic tangent of standardized data} \item{Spearmann correlation matrix} \item{Tukey normal scores} \item{Spatial sign covariance matrix} \item{BACON} \item{Raw OGK estimate for scatter} } } \references{ Hubert, M., Rousseeuw, P. J. and Verdonck, T. (2012) A deterministic algorithm for robust location and scatter. Journal of Computational and Graphical Statistics \bold{21}, 618--637. 
} \value{ a \eqn{h' \times 6}{h' x 6} \code{\link{matrix}} of observation indices, i.e., with values from \eqn{1,\dots,n}{1..n}. If \code{full.h} is true, \eqn{h' = n}, otherwise \eqn{h' = h}. } \author{Valentin Todorov, based on the original Matlab code by Tim Verdonck and Mia Hubert. Martin Maechler for tweaks (performance etc), and \code{full.h}. } \seealso{ \code{\link{covMcd}(*, nsamp = "deterministic")}; \code{\link[rrcov]{CovSest}(*, nsamp = "sdet")} from package \CRANpkg{rrcov}. } \examples{ data(pulpfiber) dim(m.pulp <- data.matrix(pulpfiber)) # 62 x 8 dim(fr6 <- r6pack(m.pulp, h = 40, full.h= FALSE)) # h x 6 = 40 x 6 dim(fr6F <- r6pack(m.pulp, h = 40, full.h= TRUE )) # n x 6 = 62 x 6 stopifnot(identical(fr6, fr6F[1:40,])) \dontshow{ stopifnot(apply(fr6[1:10,], 2L, function(col) c(1,3,6,35,36,38) \%in\% col)) } } \keyword{robust} \keyword{multivariate} robustbase/man/heart.Rd0000644000176200001440000000317713710054521014576 0ustar liggesusers\name{heart} \alias{heart} \docType{data} \title{Heart Catherization Data} \description{ This data set was analyzed by Weisberg (1980) and Chambers et al. (1983). A catheter is passed into a major vein or artery at the femoral region and moved into the heart. The proper length of the introduced catheter has to be guessed by the physician. The aim of the data set is to describe the relation between the catheter length and the patient's height (X1) and weight (X2). This data sets is used to demonstrate the effects caused by collinearity. The correlation between height and weight is so high that either variable almost completely determines the other. } \usage{ data(heart) %> QA bug: would want: %> data(heart, package="robustbase") %> but that gives two warnings } \format{ A data frame with 12 observations on the following 3 variables. \describe{ \item{\code{height}}{Patient's height in inches} \item{\code{weight}}{Patient's weights in pounds} \item{\code{clength}}{Y: Catheter Length (in centimeters)} } } \note{There are other \code{heart} datasets in other \R packages, notably \CRANpkg{survival}, hence considering using \code{package = "robustbase"}, see examples. } \source{ Weisberg (1980) Chambers et al. (1983) P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.103, table 13. } \examples{ data(heart, package="robustbase") heart.x <- data.matrix(heart[, 1:2]) # the X-variables plot(heart.x) covMcd(heart.x) summary( lm.heart <- lm(clength ~ . , data = heart)) summary(lts.heart <- ltsReg(clength ~ . , data = heart)) } \keyword{datasets} robustbase/man/rrcov.control.Rd0000644000176200001440000000767212440116711016310 0ustar liggesusers\name{rrcov.control} \alias{rrcov.control} \title{Control Settings for covMcd and ltsReg} \description{ Auxiliary function for passing the estimation options as parameters to the estimation functions. } \usage{ rrcov.control(alpha = 1/2, method = c("covMcd", "covComed", "ltsReg"), nsamp = 500, nmini = 300, kmini = 5, seed = NULL, tolSolve = 1e-14, scalefn = "hrv2012", maxcsteps = 200, trace = FALSE, wgtFUN = "01.original", beta, use.correction = identical(wgtFUN, "01.original"), adjust = FALSE) } \arguments{ \item{alpha}{This parameter controls the size of the subsets over which the determinant is minimized, i.e., \code{alpha*n} observations are used for computing the determinant. Allowed values are between 0.5 and 1 and the default is 0.5. } \item{method}{a string specifying the \dQuote{main} function for which \code{rrcov.control()} is used. 
This currently only makes a difference to determine the default for \code{beta}.} \item{nsamp}{number of subsets used for initial estimates or \code{"best"} or \code{"exact"}. Default is \code{nsamp = 500}. If \code{nsamp="best"} exhaustive enumeration is done, as far as the number of trials do not exceed 5000. If \code{nsamp="exact"} exhaustive enumeration will be attempted however many samples are needed. In this case a warning message will be displayed saying that the computation can take a very long time. } \item{nmini, kmini}{for \code{\link{covMcd}}: For large \eqn{n}, the algorithm splits the data into maximally \eqn{kmini} subsets of targetted size \code{nmini}. See \code{\link{covMcd}} for more details.}%--- ./covMcd.Rd \item{seed}{initial seed for R's random number generator; see \code{\link{.Random.seed}} and the description of the \code{seed} argument in \code{\link{lmrob.control}}.} \item{tolSolve}{numeric tolerance to be used for inversion (\code{\link{solve}}) of the covariance matrix in \code{\link{mahalanobis}}.} \item{scalefn}{(for deterministic \code{\link{covMcd}()}:) a character string or \code{\link{function}} for computing a robust scale estimate. The current default \code{"hrv2012"} uses the recommendation of Hubert et al (2012); see \code{\link{covMcd}} for more.} \item{maxcsteps}{integer specifying the maximal number of concentration steps for the deterministic MCD.} \item{trace}{logical or integer indicating whether to print intermediate results. Default is \code{trace = FALSE}.} \item{wgtFUN}{a character string or \code{\link{function}}, specifying how the weights for the reweighting step should be computed, see \code{\link{ltsReg}}, \code{\link{covMcd}} or \code{\link{covComed}}, respectively. The default is specified by \code{"01.original"}, as the resulting weights are 0 or 1. Alternative string specifications need to match \code{names(.wgtFUN.covComed)} - which currently is experimental.} \item{beta}{a quantile, experimentally used for some of the prespecified \code{wgtFUN}s, see e.g., \code{\link{.wgtFUN.covMcd}} and \code{\link{.wgtFUN.covComed}}.} \item{use.correction}{whether to use finite sample correction factors. Defaults to \code{TRUE}.} \item{adjust}{(for \code{\link{ltsReg}()}:) whether to perform intercept adjustment at each step. Because this can be quite time consuming, the default is \code{adjust = FALSE}.} } \seealso{ For details, see the documentation about \code{\link{ltsReg}} and \code{\link{covMcd}}, respectively. } \value{ A list with components, as the parameters passed by the invocation } \author{Valentin Todorov} \examples{ data(Animals, package = "MASS") brain <- Animals[c(1:24, 26:25, 27:28),] data(hbk) hbk.x <- data.matrix(hbk[, 1:3]) ctrl <- rrcov.control(alpha=0.75, trace=TRUE) covMcd(hbk.x, control = ctrl) covMcd(log(brain), control = ctrl) } \keyword{robust} \keyword{multivariate} robustbase/man/rankMM.Rd0000644000176200001440000000243313710054521014652 0ustar liggesusers\name{rankMM} \title{Simple Matrix Rank} \alias{rankMM} \description{ Compute the rank of a matrix \code{A} in simple way, based on the SVD, \code{\link{svd}()}, and \dQuote{the same as Matlab}. } \usage{ rankMM(A, tol = NULL, sv = svd(A, 0, 0)$d) } \arguments{ \item{A}{a numerical matrix, maybe non-square. When \code{sv} is specified, only \code{dim(A)} is made use of.} \item{tol}{numerical tolerance (compared to singular values). 
By default, when \code{NULL}, the tolerance is determined from the maximal value of \code{sv} and the computer epsilon.} \item{sv}{vector of \emph{non-increasing} singular values of \code{A}, (to be passed if already known).} } \seealso{ There are more sophisticated proposals for computing the rank of a matrix; for a couple of those, see \code{\link[Matrix]{rankMatrix}} in the \CRANpkg{Matrix} package. } \value{ an integer from the set \code{0:min(dim(A))}. } \author{ Martin Maechler, Date: 7 Apr 2007 } \examples{ rankMM # - note the simple function definition hilbert <- function(n) { i <- seq_len(n); 1/outer(i - 1L, i, "+") } hilbert(4) H12 <- hilbert(12) rankMM(H12) # 11 - numerically more realistic rankMM(H12, tol=0) # -> 12 ## explanation : round(log10(svd(H12, 0,0)$d), 1) } \keyword{algebra} \keyword{array} robustbase/man/NOxEmissions.Rd0000644000176200001440000000371313312375575016102 0ustar liggesusers\name{NOxEmissions} \alias{NOxEmissions} \docType{data} \encoding{utf8} \title{NOx Air Pollution Data} \description{ A typical medium sized environmental data set with hourly measurements of \eqn{NOx} pollution content in the ambient air. } \usage{data(NOxEmissions, package="robustbase")} \format{ A data frame with 8088 observations on the following 4 variables. \describe{ \item{\code{julday}}{day number, a factor with levels \code{373} \dots \code{730}, typically with 24 hourly measurements.} \item{\code{LNOx}}{\eqn{\log} of hourly mean of NOx concentration in ambient air [ppb] next to a highly frequented motorway.} \item{\code{LNOxEm}}{\eqn{\log} of hourly sum of NOx emission of cars on this motorway in arbitrary units.} \item{\code{sqrtWS}}{Square root of wind speed [m/s].} } } \details{ The original data set had more observations, but with missing values. Here, all cases with missing values were omitted (\code{\link{na.omit}(.)}), and then only those were retained that belonged to days with at least 20 (fully) observed hourly measurements. } \source{ René Locher (at ZHAW, Switzerland). %% E-mail to R-SIG-robust mailing list, on 2006-04-20. } \seealso{another NOx dataset, \code{\link{ambientNOxCH}}. } % \references{ % ~~ possibly secondary sources and usages ~~ % } \examples{ data(NOxEmissions) plot(LNOx ~ LNOxEm, data = NOxEmissions, cex = 0.25, col = "gray30") \dontrun{## these take too much time -- ## p = 340 ==> already Least Squares is not fast (lmNOx <- lm(LNOx ~ . ,data = NOxEmissions)) plot(lmNOx) #-> indication of 1 outlier M.NOx <- MASS::rlm(LNOx ~ . , data = NOxEmissions) ## M-estimation works ## whereas MM-estimation fails: try(MM.NOx <- MASS::rlm(LNOx ~ . , data = NOxEmissions, method = "MM")) ## namely because S-estimation fails: try(lts.NOx <- ltsReg(LNOx ~ . , data = NOxEmissions)) try(lmR.NOx <- lmrob (LNOx ~ . , data = NOxEmissions)) }% don't run } \keyword{datasets} robustbase/man/robustbase-internal.Rd0000644000176200001440000000341013710054464017452 0ustar liggesusers\name{robustbase-internals} \title{Internal Functions of Package 'robustbase'} %% Formality: everything in \usage{} "needs" \alias{} \alias{internals} % \alias{print.glmrob} \alias{glmrobMqle} \alias{glmrobMqleDiffQuasiDevB} \alias{glmrobMqleDiffQuasiDevPois} % \alias{robMD} \alias{mahalanobisD} % \alias{doScale} \alias{.signflip} % \description{ These functions are for internal use \emph{or} not yet documented. } \usage{ \method{print}{glmrob}(x, digits = max(3, getOption("digits") - 3), ...) 
%% MM: These are not *needed*, since they are namespace-hidden %% -- OTOH, it shows what we might want to export {and then document} glmrobMqle(X, y, weights = NULL, start = NULL, offset = NULL, family, weights.on.x = "none", control = glmrobMqle.control(), intercept = TRUE, trace = FALSE) glmrobMqleDiffQuasiDevB (mu, mu0, y, ni, w.x, phi, tcc) glmrobMqleDiffQuasiDevPois(mu, mu0, y, ni, w.x, phi, tcc) %%--------From here on, are exported (and MM would want minimal doc, evt.) ------ robMD(x, intercept, wqr, ...) mahalanobisD(x, center, sd) ## Utilities currently for the deterministic MCD only: ## subject to change / be renamed ? doScale(x, center, scale) .signflip(loadings) } % glmrobMqleEpsiB (mu, Vmu, ni, H, K, tcc) % glmrobMqleEpsiPois (mu, Vmu, ni, H, K, tcc) % glmrobMqleEpsiSB (mu, Vmu, ni, H, K, tcc) % glmrobMqleEpsiSPois (mu, Vmu, ni, H, K, tcc) % glmrobMqleEpsi2B (mu, Vmu, ni, H, K, tcc) % glmrobMqleEpsi2Pois (mu, Vmu, ni, H, K, tcc) %% For modsel.* aka anova.glmrob(); later for nlrob() only; %% eliminated, 2013-07-26 {for robustbase 0.9-11}, MM : %% psi.bisquare(u, c = 4.685, deriv = 0, rho = FALSE) %% psi.hampel(u, a = 2, b = 4, c = 8, deriv = 0, rho = FALSE) %% psi.huber(u, k = 1.345, deriv = 0, rho = FALSE) \keyword{internal} robustbase/man/lmrob.Rd0000644000176200001440000003103713537200135014603 0ustar liggesusers\title{MM-type Estimators for Linear Regression} \name{lmrob} \encoding{utf8} \alias{lmrob} % "Link to here", even those are not exported: \alias{.vcov.avar1} \alias{.vcov.w} \description{ Computes fast MM-type estimators for linear (regression) models. } \usage{ lmrob(formula, data, subset, weights, na.action, method = "MM", model = TRUE, x = !control$compute.rd, y = FALSE, singular.ok = TRUE, contrasts = NULL, offset = NULL, control = NULL, init = NULL, ...) } \arguments{ \item{formula}{a symbolic description of the model to be fit. See \code{\link{lm}} and \code{\link{formula}} for more details.} \item{data}{an optional data frame, list or environment (or object coercible by \code{\link{as.data.frame}} to a data frame) containing the variables in the model. If not found in \code{data}, the variables are taken from \code{environment(formula)}, typically the environment from which \code{lmrob} is called.} \item{subset}{an optional vector specifying a subset of observations to be used in the fitting process.} \item{weights}{an optional vector of weights to be used in the fitting process (in addition to the robustness weights computed in the fitting process).} \item{na.action}{a function which indicates what should happen when the data contain \code{NA}s. The default is set by the \code{na.action} setting of \code{\link{options}}, and is \code{\link{na.fail}} if that is unset. The \dQuote{factory-fresh} default is \code{\link{na.omit}}. Another possible value is \code{NULL}, no action. Value \code{\link{na.exclude}} can be useful.} \item{method}{string specifying the estimator-chain. \code{MM} is interpreted as \code{SM}. See \emph{Details}, notably the currently recommended \code{setting = "KS2014"}.} \item{model, x, y}{logicals. If \code{TRUE} the corresponding components of the fit (the model frame, the model matrix, the response) are returned.} \item{singular.ok}{logical. If \code{FALSE} (the default in S but not in \R) a singular fit is an error.} \item{contrasts}{an optional list. 
See the \code{contrasts.arg} of \code{\link{model.matrix.default}}.} \item{offset}{this can be used to specify an \emph{a priori} known component to be included in the linear predictor during fitting. An \code{\link{offset}} term can be included in the formula instead or as well, and if both are specified their sum is used.} \item{control}{a \code{\link{list}} specifying control parameters; use the function \code{\link{lmrob.control}(.)} and see its help page.} \item{init}{an optional argument to specify or supply the initial estimate. See \emph{Details}.} \item{\dots}{additional arguments can be used to specify control parameters directly instead of (but not in addition to!) via \code{control}.} } \details{ \describe{ \item{Overview:}{ This function computes an MM-type regression estimator as described in Yohai (1987) and Koller and Stahel (2011). By default it uses a bi-square redescending score function, and it returns a highly robust and highly efficient estimator (with 50\% breakdown point and 95\% asymptotic efficiency for normal errors). The computation is carried out by a call to \code{\link{lmrob.fit}()}. The argument \code{setting} of \code{\link{lmrob.control}} is provided to set alternative defaults as suggested in Koller and Stahel (2011) (\code{setting="KS2011"}; now do use its extension \code{setting="KS2014"}). For further details, see \code{\link{lmrob.control}}. } \item{Initial Estimator \code{init}:}{ The initial estimator may be specified using the argument \code{init}. This can either be a string, a function or a list. A \emph{string} can be used to specify built in internal estimators (currently \code{S} and \code{M-S}, see \emph{See also} below). A \code{\link{function}} taking arguments \code{x, y, control, mf} (where \code{mf} stands for \code{model.frame}) and returning a list containing at least the initial coefficients as \code{coefficients} and the initial scale estimate \code{scale}. Or a \emph{list} giving the initial coefficients and scale as \code{coefficients} and \code{scale}. See also \emph{Examples}. Note that if the \code{init} argument is a function or list, the \code{method} argument must \emph{not} contain the initial estimator, e.g., use \code{MDM} instead of \code{SMDM}. The default, equivalent to \code{init = "S"}, uses as initial estimator an S-estimator (Rousseeuw and Yohai, 1984) which is computed using the Fast-S algorithm of Salibian-Barrera and Yohai (2006), calling \code{\link{lmrob.S}()}. That function, since March 2012, by default uses \emph{nonsingular} subsampling which makes the Fast-S algorithm feasible for categorical data as well, see Koller (2012). Note that convergence problems may still show up as warnings, e.g., \preformatted{ S refinements did not converge (to refine.tol=1e-07) in 200 (= k.max) steps } and often can simply be remedied by increasing (i.e. weakening) \code{refine.tol} or increasing the allowed number of iterations \code{k.max}, see \code{\link{lmrob.control}}. } \item{Method \code{method}:}{ The following chain of estimates is customizable via the \code{method} argument. % of \code{\link{lmrob.control}}. There are currently two types of estimates available, \describe{ \item{\code{"M"}:}{corresponds to the standard M-regression estimate.} \item{\code{"D"}:}{stands for the Design Adaptive Scale estimate as proposed in Koller and Stahel (2011).} } The \code{method} argument takes a string that specifies the estimates to be calculated as a chain. 
Setting \code{method='SMDM'} will result in an intial S-estimate, followed by an M-estimate, a Design Adaptive Scale estimate and a final M-step. For methods involving a \code{D}-step, the default value of \code{psi} (see \code{\link{lmrob.control}}) is changed to \code{"lqq"}. By default, standard errors are computed using the formulas of Croux, Dhaene and Hoorelbeke (2003) (\code{\link{lmrob.control}} option \code{cov=".vcov.avar1"}). This method, however, works only for MM-estimates (i.e., \code{method = "MM"} or \code{ = "SM"}). For other \code{method} arguments, the covariance matrix estimate used is based on the asymptotic normality of the estimated coefficients (\code{cov=".vcov.w"}) as described in Koller and Stahel (2011). The var-cov computation can be skipped by \code{cov = "none"} and (re)done later by e.g., \code{vcov(, cov = ".vcov.w")}. As of robustbase version 0.91-0 (April 2014), the computation of robust standard errors for \code{method="SMDM"} has been changed. The old behaviour can be restored by setting the control parameter \code{cov.corrfact = "tauold"}.%% FIXME: regr.test for that } }%end {describe} } \value{ An object of class \code{lmrob}; a list including the following components: \item{coefficients}{The estimate of the coefficient vector} \item{scale}{The scale as used in the M estimator.} \item{residuals}{Residuals associated with the estimator.} %loss \item{converged}{\code{TRUE} if the IRWLS iterations have converged.} \item{iter}{number of IRWLS iterations} \item{rweights}{the \dQuote{robustness weights} \eqn{\psi(r_i/S) / (r_i/S)}.} \item{fitted.values}{Fitted values associated with the estimator.} %control \item{init.S}{The \code{\link{list}} returned by \code{\link{lmrob.S}()} or \code{\link{lmrob.M.S}()} (for MM-estimates, i.e., \code{method="MM"} or \code{"SM"} only)} \item{init}{A similar list that contains the results of intermediate estimates (\emph{not} for MM-estimates).} %qr \item{rank}{the numeric rank of the fitted linear model.} \item{cov}{The estimated covariance matrix of the regression coefficients} \item{df.residual}{the residual degrees of freedom.} %degree.freedom \item{weights}{the specified weights (missing if none were used).} \item{na.action}{(where relevant) information returned by \code{\link{model.frame}} on the special handling of \code{NA}s.} \item{offset}{the offset used (missing if none were used).} \item{contrasts}{(only where relevant) the contrasts used.} \item{xlevels}{(only where relevant) a record of the levels of the factors used in fitting.} \item{call}{the matched call.} \item{terms}{the \code{terms} object used.} %assign \item{model}{if requested (the default), the model frame used.} \item{x}{if requested, the model matrix used.} \item{y}{if requested, the response used.} In addition, non-null fits will have components \code{assign}, and \code{qr} relating to the linear fit, for use by extractor functions such as \code{summary}. } \references{ Croux, C., Dhaene, G. and Hoorelbeke, D. (2003) \emph{Robust standard errors for robust estimators}, Discussion Papers Series 03.16, K.U. Leuven, CES. Koller, M. (2012) Nonsingular subsampling for S-estimators with categorical predictors, \emph{ArXiv e-prints} \url{https://arxiv.org/abs/1208.5595}; extended version published as Koller and Stahel (2017), see \code{\link{lmrob.control}}. Koller, M. and Stahel, W.A. (2011) Sharpening Wald-type inference in robust regression for small samples. \emph{Computational Statistics & Data Analysis} \bold{55}(8), 2504--2515. Maronna, R. 
A., and Yohai, V. J. (2000)
  Robust regression with both continuous and categorical predictors.
  \emph{Journal of Statistical Planning and Inference} \bold{89}, 197--214.

  Rousseeuw, P.J. and Yohai, V.J. (1984)
  Robust regression by means of S-estimators,
  In \emph{Robust and Nonlinear Time Series},
  J. Franke, W. Härdle and R. D. Martin (eds.).
  Lecture Notes in Statistics 26, 256--272,
  Springer Verlag, New York.

  Salibian-Barrera, M. and Yohai, V.J. (2006)
  A fast algorithm for S-regression estimates,
  \emph{Journal of Computational and Graphical Statistics} \bold{15}(2),
  414--427.

  Yohai, V.J. (1987)
  High breakdown-point and high efficiency robust estimates for regression.
  \emph{The Annals of Statistics} \bold{15}, 642--656.

  Yohai, V., Stahel, W.~A. and Zamar, R. (1991)
  A procedure for robust estimation and inference in linear regression;
  in Stahel and Weisberg (eds), \emph{Directions in Robust Statistics and
  Diagnostics}, Part II, Springer, New York, 365--374;
  \doi{10.1007/978-1-4612-4444-8_20}.
}
\author{(mainly:) Matias Salibian-Barrera and Manuel Koller}
\seealso{
  \code{\link{lmrob.control}};
  for the algorithms \code{\link{lmrob.S}}, \code{\link{lmrob.M.S}} and
  \code{\link{lmrob.fit}};
  and for methods,
  \code{\link{summary.lmrob}}, for the extra \dQuote{statistics}, notably
  \eqn{R^2} (\dQuote{R squared});
  \code{\link{predict.lmrob}},
  \code{\link{print.lmrob}}, \code{\link{plot.lmrob}}, and
  \code{\link{weights.lmrob}}.
}
\examples{
data(coleman)
set.seed(0)
## Default for a very long time:
summary( m1 <- lmrob(Y ~ ., data=coleman) )

## Nowadays **strongly recommended** for routine use:
summary(m2 <- lmrob(Y ~ ., data=coleman, setting = "KS2014") )
## ------------------

plot(residuals(m2) ~ weights(m2, type="robustness")) ##-> weights.lmrob()
abline(h=0, lty=3)

data(starsCYG, package = "robustbase")
## Plot simple data and fitted lines
plot(starsCYG)
lmST <- lm(log.light ~ log.Te, data = starsCYG)
(RlmST <- lmrob(log.light ~ log.Te, data = starsCYG))
abline(lmST, col = "red")
abline(RlmST, col = "blue")
## --> Least Sq.:/ negative slope \\ robust: slope ~= 2.2 % checked in ../tests/lmrob-data.R
summary(RlmST) # -> 4 outliers; rest perfect
vcov(RlmST)
stopifnot(all.equal(fitted(RlmST),
                    predict(RlmST, newdata = starsCYG), tol = 1e-14))
## FIXME: setting = "KS2011"  or  setting = "KS2014"  **FAIL** here

##--- 'init' argument -----------------------------------
## 1) string
set.seed(0)
m3 <- lmrob(Y ~ ., data=coleman, init = "S")
stopifnot(all.equal(m1[-18], m3[-18]))
## 2) function
initFun <- function(x, y, control, ...) { # no 'mf' needed
    init.S <- lmrob.S(x, y, control)
    list(coefficients=init.S$coef, scale = init.S$scale)
}
set.seed(0)
m4 <- lmrob(Y ~ ., data=coleman, method = "M", init = initFun)
## list
m5 <- lmrob(Y ~ ., data=coleman, method = "M",
            init = list(coefficients = m3$init$coef, scale = m3$scale))
stopifnot(all.equal(m4[-17], m5[-17]))
}
\keyword{robust}
\keyword{regression}
robustbase/man/scaleTau2.Rd0000644000176200001440000001111514123440375015310 0ustar liggesusers\name{scaleTau2}
\alias{scaleTau2}
\title{Robust Tau-Estimate of Scale}
\description{
  Computes the robust \eqn{\tau}-estimate of univariate scale, as proposed by
  Maronna and Zamar (2002); improved by a consistency factor.
  %% FIXME: TODO: add a finite sample correction by Martin Maechler
  %% (currently have 'n-2' but can even be better !!!!
}
\usage{%--> ../R/OGK.R <<<
scaleTau2(x, c1 = 4.5, c2 = 3.0, na.rm = FALSE, consistency = TRUE,
          mu0 = median(x),
          sigma0 = median(x.), mu.too = FALSE, iter = 1, tol.iter = 1e-7)
}
\arguments{
  \item{x}{numeric vector}
  \item{c1,c2}{non-negative numbers, specifying cutoff values for the
    biweighting of the mean and the rho function respectively.}
  \item{na.rm}{a logical value indicating whether \code{NA} values should be
    stripped before the computation proceeds.}
  \item{consistency}{logical indicating if the consistency correction factor
    (for the scale) should be applied.}
  \item{mu0}{the initial location estimate \eqn{\mu_0}{mu0}, defaulting to the
    \code{\link{median}}.}
  \item{sigma0}{the initial scale estimate \eqn{s_0}{s0}, defaulting to the
    MAD; may be set to a positive value when the MAD is zero.}
  \item{mu.too}{logical indicating if both location and scale should be
    returned or just the scale (when \code{mu.too=FALSE} as by default).}
  \item{iter}{positive integer or logical indicating if and how many
    iterations should be done.  The default, \code{iter = 1}, computes the
    \dQuote{traditional} tau-estimate of scale.}
  \item{tol.iter}{if \code{iter} is true, or \code{iter > 1}, stop the
    iterations when \eqn{|s_n - s_o| \le \epsilon s_n}, where
    \eqn{\epsilon :=}\code{tol.iter}, and \eqn{s_o} and \eqn{s_n} are the
    previous and current estimates of \eqn{\sigma}.}
}
\details{
  First, \eqn{s_0}{s0} := MAD, i.e. the equivalent of
  \code{\link{mad}(x, constant=1)} is computed.  Robustness weights
  \eqn{w_i := w_{c1}((x_i - med(X))/ s_0)} are computed, where
  \eqn{w_c(u) = max(0, (1 - (u/c)^2)^2)}.  The robust location measure is
  defined as \eqn{\mu(X) := (\sum_i w_i x_i)/(\sum_i w_i)}, and the robust
  \eqn{\tau (tau)}{tau}-estimate is
  \eqn{s(X)^2 := s_0^2 * (1/n) \sum_i \rho_{c2}((x_i - \mu(X))/s_0)},
  where \eqn{\rho_c(u) = min(c^2, u^2)}.
  \cr
  When \code{iter=TRUE} or \code{iter > 1}, the above estimate is
  \emph{iterated} in a fixpoint iteration, setting \eqn{s_0} to the current
  estimate \eqn{s(X)} and iterating until the number of iterations is larger
  than \code{iter} or the fixpoint is found in the sense that
  \eqn{|s_n - s_o| \le \epsilon \cdot s_n}, with
  \eqn{\epsilon :=}\code{tol.iter} (see the description of \code{tol.iter}
  above).
  \cr
  \code{scaleTau2(*, consistency=FALSE)} returns \eqn{s(X)}, whereas this
  value is divided by its asymptotic limit when \code{consistency = TRUE} as
  by default.

  Note that for \code{n = length(x) == 2}, all equivariant scale estimates are
  proportional, and specifically,
  \code{scaleTau2(x, consistency=FALSE) == mad(x, constant=1)}.
  See also the reference.
}
\value{
  numeric vector of length one (if \code{mu.too} is \code{FALSE} as by
  default) or two (when \code{mu.too = TRUE}) with robust scale or
  (location,scale) estimators
  \eqn{\hat\sigma(x)}{s^(x)} or \eqn{(\hat\mu(x),\hat\sigma(x))}{(m^(x), s^(x))}.
}
\references{
  Maronna, R.A. and Zamar, R.H. (2002)
  Robust estimates of location and dispersion of high-dimensional datasets;
  \emph{Technometrics} \bold{44}(4), 307--317.
  % MM: ~/save/papers/robust-diverse/Maronna-Zamar-OGK_2002.pdf

  Yohai, V.J., and Zamar, R.H. (1988).
  High breakdown-point estimates of regression by means of the minimization of
  an efficient scale.
  \emph{Journal of the American Statistical Association} \bold{83}, 406--413.
  % MM: ~/save/papers/robust-diverse/Yohai-Zamar-tau_JASA1988.pdf
}
\author{Original by Kjell Konis with substantial modifications by Martin
  Maechler.
}
\seealso{\code{\link{Sn}}, \code{\link{Qn}}, \code{\link{mad}};
  further \code{\link{covOGK}} for which \code{scaleTau2} was designed.
} \examples{ x <- c(1:7, 1000) sd(x) # non-robust std.deviation scaleTau2(x) scaleTau2(x, mu.too = TRUE) (sI <- scaleTau2(c(x,Inf), mu.too = TRUE)) (sIN <- scaleTau2(c(x,Inf,NA), mu.too = TRUE, na.rm=TRUE)) stopifnot({ identical(sI, sIN) all.equal(scaleTau2(c(x, 999), mu.too = TRUE), sIN, tol = 1e-15) }) if(doExtras <- robustbase:::doExtras()) { set.seed(11) ## show how much faster this is, compared to Qn x <- sample(c(rnorm(1e6), rt(5e5, df=3))) (system.time(Qx <- Qn(x))) ## 2.04 [2017-09, lynne] (system.time(S2x <- scaleTau2(x))) ## 0.25 (ditto) cbind(Qn = Qx, sTau2 = S2x) }## Qn sTau2 ## 1.072556 1.071258 } \keyword{robust} \keyword{univar} robustbase/man/lmrob.control.Rd0000644000176200001440000003272013177452122016267 0ustar liggesusers\name{lmrob.control} \title{Tuning Parameters for lmrob() and Auxiliaries} \encoding{utf8} \alias{lmrob.control} \alias{lmrob.control} \alias{.Mchi.tuning.default} \alias{.Mpsi.tuning.default} \alias{.Mchi.tuning.defaults} \alias{.Mpsi.tuning.defaults} \description{ Tuning parameters for \code{\link{lmrob}}, the MM-type regression estimator and the associated S-, M- and D-estimators. Using \code{setting="KS2011"} sets the defaults as suggested by Koller and Stahel (2011) and analogously for \code{"KS2014"}. The \code{.M*.default} \code{\link{function}}s and \code{.M*.defaults} \code{\link{list}}s contain default tuning parameters for all the predefined \eqn{\psi}{psi} functions, see also \code{\link{Mpsi}}, etc. } \usage{ lmrob.control(setting, seed = NULL, nResample = 500, tuning.chi = NULL, bb = 0.5, tuning.psi = NULL, max.it = 50, groups = 5, n.group = 400, k.fast.s = 1, best.r.s = 2, k.max = 200, maxit.scale = 200, k.m_s = 20, refine.tol = 1e-7, rel.tol = 1e-7, scale.tol = 1e-10, solve.tol = 1e-7, trace.lev = 0, mts = 1000, subsampling = c("nonsingular", "simple"), compute.rd = FALSE, method = "MM", psi = "bisquare", numpoints = 10, cov = NULL, split.type = c("f", "fi", "fii"), fast.s.large.n = 2000, eps.outlier = function(nobs) 0.1 / nobs, eps.x = function(maxx) .Machine$double.eps^(.75)*maxx, compute.outlier.stats = method, warn.limit.reject = 0.5, warn.limit.meanrw = 0.5, ...) .Mchi.tuning.defaults .Mchi.tuning.default(psi) .Mpsi.tuning.defaults .Mpsi.tuning.default(psi) } \arguments{ \item{setting}{a string specifying alternative default values. Leave empty for the defaults or use \code{"KS2011"} or \code{"KS2014"} for the defaults suggested by Koller and Stahel (2011, 2017). See \emph{Details}.} \item{seed}{\code{NULL} or an integer vector compatible with \code{\link{.Random.seed}}: the seed to be used for random re-sampling used in obtaining candidates for the initial S-estimator. The current value of \code{.Random.seed} will be preserved if \code{seed} is set, i.e. non-\code{NULL}; otherwise, as by default, \code{.Random.seed} will be used and modified as usual from calls to \code{\link{runif}()} etc. } \item{nResample}{number of re-sampling candidates to be used to find the initial S-estimator. Currently defaults to 500 which works well in most situations (see references).} \item{tuning.chi}{tuning constant vector for the S-estimator. If \code{NULL}, as by default, sensible defaults are set (depending on \code{psi}) to yield a 50\% breakdown estimator. See \emph{Details}.} \item{bb}{expected value under the normal model of the \dQuote{chi} (rather \eqn{\rho (rho)}{rho}) function with tuning constant equal to \code{tuning.chi}. This is used to compute the S-estimator.} \item{tuning.psi}{tuning constant vector for the redescending M-estimator. 
If \code{NULL}, as by default, this is set (depending on \code{psi}) to yield an estimator with asymptotic efficiency of 95\% for normal errors. See \emph{Details}.} \item{max.it}{integer specifying the maximum number of IRWLS iterations.} \item{groups}{(for the fast-S algorithm): Number of random subsets to use when the data set is large.} \item{n.group}{(for the fast-S algorithm): Size of each of the \code{groups} above. Note that this must be at least \eqn{p}.} \item{k.fast.s}{(for the fast-S algorithm): Number of local improvement steps (\dQuote{\emph{I-steps}}) for each re-sampling candidate.} \item{k.m_s}{(for the M-S algorithm): specifies after how many unsucessful refinement steps the algorithm stops.} \item{best.r.s}{(for the fast-S algorithm): Number of of best candidates to be iterated further (i.e., \dQuote{\emph{\bold{r}efined}}); is denoted \eqn{t} in Salibian-Barrera & Yohai(2006).} \item{k.max}{(for the fast-S algorithm): maximal number of refinement steps for the \dQuote{fully} iterated best candidates.} \item{maxit.scale}{integer specifying the maximum number of C level \code{find_scale()} iterations.} \item{refine.tol}{(for the fast-S algorithm): relative convergence tolerance for the fully iterated best candidates.} \item{rel.tol}{(for the RWLS iterations of the MM algorithm): relative convergence tolerance for the parameter vector.} \item{scale.tol}{(for the scale estimation iterations of the S algorithm): relative convergence tolerance for the \code{scale} \eqn{\sigma(.)}.} \item{solve.tol}{(for the S algorithm): relative tolerance for inversion. Hence, this corresponds to \code{\link{solve.default}()}'s \code{tol}.} \item{trace.lev}{integer indicating if the progress of the MM-algorithm should be traced (increasingly); default \code{trace.lev = 0} does no tracing.} \item{mts}{maximum number of samples to try in subsampling algorithm.} \item{subsampling}{type of subsampling to be used, a string: \code{"simple"} for simple subsampling (default prior to version 0.9), \code{"nonsingular"} for nonsingular subsampling. See also \code{\link{lmrob.S}}.} \item{compute.rd}{logical indicating if robust distances (based on the MCD robust covariance estimator \code{\link{covMcd}}) are to be computed for the robust diagnostic plots. This may take some time to finish, particularly for large data sets, and can lead to singularity problems when there are \code{\link{factor}} explanatory variables (with many levels, or levels with \dQuote{few} observations). Hence, is \code{FALSE} by default.} \item{method}{string specifying the estimator-chain. \code{MM} is interpreted as \code{SM}. See \emph{Details} of \code{\link{lmrob}} for a description of the possible values.} \item{psi}{string specifying the type \eqn{\psi}-function used. See \emph{Details} of \code{\link{lmrob}}. Defaults to \code{"bisquare"} for S and MM-estimates, otherwise \code{"lqq"}.} \item{numpoints}{number of points used in Gauss quadrature.} \item{cov}{function or string with function name to be used to calculate covariance matrix estimate. The default is \code{if(method \%in\% c('SM', 'MM')) ".vcov.avar1" else ".vcov.w"}. See \emph{Details} of \code{\link{lmrob}}.} \item{split.type}{determines how categorical and continuous variables are split. 
See \code{\link{splitFrame}}.} \item{fast.s.large.n}{minimum number of observations required to switch from ordinary \dQuote{fast S} algorithm to an efficient \dQuote{large n} strategy.} \item{eps.outlier}{limit on the robustness weight below which an observation is considered to be an outlier. Either a numeric(1) or a function that takes the number of observations as an argument. Used in \code{\link{summary.lmrob}} and \code{\link{outlierStats}}.} \item{eps.x}{limit on the absolute value of the elements of the design matrix below which an element is considered zero. Either a numeric(1) or a function that takes the maximum absolute value in the design matrix as an argument.} \item{compute.outlier.stats}{vector of \code{\link{character}} strings, each valid to be used as \code{method} argument. Used to specify for which estimators outlier statistics (and warnings) should be produced. Set to empty string if none are required.} \item{warn.limit.reject}{limit of ratio \eqn{\#\mbox{rejected} / \#\mbox{obs in level}}{# rejected / # obs in level} above (\eqn{\geq}{>=}) which a warning is produced. Set to \code{NULL} to disable warning.} \item{warn.limit.meanrw}{limit of the mean robustness per factor level below which (\eqn{\leq}{<=}) a warning is produced. Set to \code{NULL} to disable warning.} \item{...}{further arguments to be added as \code{\link{list}} components to the result, e.g., those to be used in \code{.vcov.w()}.} } \value{ \code{.Mchi.tuning.default(psi)} and \code{.Mpsi.tuning.default(psi)} return a short \code{\link{numeric}} vector of tuning constants which are defaults for the corresponding psi-function, see the \emph{Details}. They are based on the named \code{\link{list}}s \code{.Mchi.tuning.defaults} and \code{.Mpsi.tuning.defaults}, respectively. \code{lmrob.control()} returns a named \code{\link{list}} with over twenty components, corresponding to the arguments, where \code{tuning.psi} and \code{tuning.chi} are typically computed, as \code{.Mpsi.tuning.default(psi)} or \code{.Mchi.tuning.default(psi)}, respectively. } \details{The option \code{setting="KS2011"} alters the default arguments. They are changed to \code{method = "SMDM"}, \code{psi = "lqq"}, \code{max.it = 500}, \code{k.max = 2000}, \code{cov = ".vcov.w"}. The defaults of all the remaining arguments are not changed. The option \code{setting="KS2014"} builds upon \code{setting="KS2011"}. More arguments are changed to \code{best.r.s = 20, k.fast.s = 2, nResample = 1000}. This setting should produce more stable estimates for designs with \code{\link{factor}}s. By default, and in \code{.Mpsi.tuning.default()} and \code{.Mchi.tuning.default()}, \code{tuning.chi} and \code{tuning.psi} are set to yield an MM-estimate with breakdown point \eqn{0.5} and efficiency of 95\% at the normal. If numeric \code{tuning.chi} or \code{tuning.psi} are specified, say \code{cc}, for \code{psi = "ggw"} or \code{"lqq"}, \code{\link{.psi.const}(cc, psi)} is used, see its help page. To get the defaults, e.g., \code{.Mpsi.tuning.default(psi)} is equivalent to but more efficient than the formerly widely used \code{lmrob.control(psi = psi)$tuning.psi}. 
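For example, a small illustration of this equivalence (shown for \code{psi = "bisquare"}; the numeric values are simply the defaults tabulated below):
\preformatted{
  .Mpsi.tuning.default("bisquare")           ## 4.685061
  .Mchi.tuning.default("bisquare")           ## 1.54764
  ## equivalent, but needlessly constructs the full control list:
  lmrob.control(psi = "bisquare")$tuning.psi ## 4.685061
}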
These defaults are: \tabular{rll}{ \code{psi} \tab\code{tuning.chi} \tab\code{tuning.psi} \cr \code{bisquare}\tab\code{1.54764} \tab\code{4.685061} \cr \code{welsh} \tab\code{0.5773502} \tab\code{2.11} \cr \code{ggw} \tab\code{c(-0.5, 1.5, NA, 0.5)} \tab\code{c(-0.5, 1.5, 0.95, NA)} \cr \code{lqq} \tab\code{c(-0.5, 1.5, NA, 0.5)} \tab\code{c(-0.5, 1.5, 0.95, NA)} \cr \code{optimal} \tab\code{0.4047} \tab\code{1.060158} \cr \code{hampel} \tab\code{c(1.5, 3.5, 8)*0.2119163} \tab\code{c(1.5, 3.5, 8)*0.9014} } The values for the tuning constant for the \code{ggw} and \code{lqq} psi functions are specified differently here by a vector with four elements: minimal slope, b (controlling the bend at the maximum of the curve), efficiency, breakdown point. Use \code{NA} for an unspecified value of either efficiency or breakdown point, see examples in the tables (above and below). For these table examples, the respective \dQuote{inner constants} are stored precomputed, see \code{\link{.psi.lqq.findc}} for more. The constants for the \code{"hampel"} psi function are chosen to have a redescending slope of \eqn{-1/3}. Constants for a slope of \eqn{-1/2} would be \tabular{rll}{ \code{psi} \tab\code{tuning.chi} \tab\code{tuning.psi} \cr \code{"hampel"}\tab\code{c(2, 4, 8) * 0.1981319} \tab\code{c(2, 4, 8) * 0.690794} } Alternative coefficients for an efficiency of 85\% at the normal are given in the table below. \tabular{rl}{ \code{psi} \tab\code{tuning.psi} \cr \code{bisquare} \tab\code{3.443689} \cr \code{welsh} \tab\code{1.456} \cr \code{ggw}, \code{lqq}\tab\code{c(-0.5, 1.5, 0.85, NA)} \cr \code{optimal} \tab\code{0.8684} \cr \code{hampel} (-1/3) \tab\code{c(1.5, 3.5, 8)* 0.5704545} \cr \code{hampel} (-1/2) \tab\code{c( 2, 4, 8) * 0.4769578} } } \references{ Koller, M. and Stahel, W.A. (2011) Sharpening Wald-type inference in robust regression for small samples. \emph{Computational Statistics & Data Analysis} \bold{55}(8), 2504--2515. Koller, M. and Stahel, W.A. (2017) Nonsingular subsampling for regression {S}~estimators with categorical predictors, \emph{Computational Statistics} \bold{32}(2): 631--646. \doi{10.1007/s00180-016-0679-x}. Referred as \code{"KS2014"} everywhere in \pkg{robustbase}; A shorter first version, Koller (2012) has been available from \url{https://arxiv.org/abs/1208.5595}. } \author{Matias Salibian-Barrera, Martin Maechler and Manuel Koller} \seealso{ \code{\link{Mpsi}}, etc, for the (fast!) psi function computations; \code{\link{lmrob}}, also for references and examples. } \examples{ ## Show the default settings: str(lmrob.control()) ## Artificial data for a simple "robust t test": set.seed(17) y <- y0 <- rnorm(200) y[sample(200,20)] <- 100*rnorm(20) gr <- as.factor(rbinom(200, 1, prob = 1/8)) lmrob(y0 ~ 0+gr) ## Use Koller & Stahel(2011)'s recommendation but a larger 'max.it': str(ctrl <- lmrob.control("KS2011", max.it = 1000)) str(.Mpsi.tuning.defaults) stopifnot(identical(.Mpsi.tuning.defaults, sapply(names(.Mpsi.tuning.defaults), .Mpsi.tuning.default))) ## Containing (names!) 
all our (pre-defined) redescenders: str(.Mchi.tuning.defaults) ## Difference between settings: C11 <- lmrob.control("KS2011") C14 <- lmrob.control("KS2014") str(C14) ## Apart from `setting` itself, they only differ in three places: diffC <- names(which(!mapply(identical, C11,C14, ignore.environment=TRUE))) cbind(KS11 = unlist(C11[diffC[-1]]), KS14 = unlist(C14[diffC[-1]])) ## KS11 KS14 ## nResample 500 1000 ## best.r.s 2 20 ## k.fast.s 1 2 } \keyword{robust} \keyword{regression} robustbase/man/covPlot.Rd0000644000176200001440000001265413434014060015116 0ustar liggesusers\name{plot.mcd} \alias{covPlot} \alias{plot.mcd} %NO \alias{ddplot} %NO \alias{distplot} %NO \alias{chi2qqplot} %NO \alias{ellipse} \title{Robust Distance Plots} \description{ Shows the Mahalanobis distances based on robust and classical estimates of the location and the covariance matrix in different plots. The following plots are available: \itemize{ \item index plot of the robust and mahalanobis distances \item distance-distance plot \item Chisquare QQ-plot of the robust and mahalanobis distances \item plot of the tolerance ellipses (robust and classic) \item Scree plot - Eigenvalues comparison plot } } \usage{ \method{plot}{mcd}(x, which = c("all", "dd", "distance", "qqchi2", "tolEllipsePlot", "screeplot"), classic = FALSE, ask = (which[1] == "all" && dev.interactive()), cutoff, id.n, labels.id = rownames(x$X), cex.id = 0.75, label.pos = c(4,2), tol = 1e-7, \dots) covPlot(x, which = c("all", "dd", "distance", "qqchi2", "tolEllipsePlot", "screeplot"), classic = FALSE, ask = (which[1] == "all" && dev.interactive()), m.cov = covMcd(x), cutoff = NULL, id.n, labels.id = rownames(x), cex.id = 0.75, label.pos = c(4,2), tol = 1e-07, \dots) %% ddplot(x, \dots) %% distplot(x, \dots) %% chi2qqplot(x, \dots) %% ellipse(x, \dots) } \arguments{ \item{x}{For the \code{plot()} method, a \code{mcd} object, typically result of \code{\link{covMcd}}.\cr For \code{covPlot()}, the numeric data matrix such as the \code{X} component as returned from \code{\link{covMcd}}.} \item{which}{string indicating which plot to show. See the \emph{Details} section for a description of the options. Defaults to \code{"all"}.}. \item{classic}{whether to plot the classical distances too. Defaults to \code{FALSE}.}. \item{ask}{logical indicating if the user should be \emph{ask}ed before each plot, see \code{\link{par}(ask=.)}. Defaults to \code{which == "all" && \link{dev.interactive}()}. } \item{cutoff}{the cutoff value for the distances.} \item{id.n}{number of observations to be identified by a label. If not supplied, the number of observations with distance larger than \code{cutoff} is used.} \item{labels.id}{vector of labels, from which the labels for extreme points will be chosen. \code{NULL} uses observation numbers.} \item{cex.id}{magnification of point labels.} \item{label.pos}{positioning of labels, for the left half and right half of the graph respectively (used as \code{\link{text}(.., pos=*)}).} \item{tol}{tolerance to be used for computing the inverse, see \code{\link{solve}}. Defaults to \code{tol = 1e-7}.} \item{m.cov}{an object similar to those of class \code{"mcd"}; however only its components \code{center} and \code{cov} will be used. If missing, the MCD will be computed (via \code{\link{covMcd}()}).} \item{\dots}{other parameters to be passed through to plotting functions.} } \details{ These functions produce several plots based on the robust and classical location and covariance matrix. 
Which of them to select is specified by the attribute \code{which}. The \code{plot} method for \code{"mcd"} objects is calling \code{covPlot()} directly, whereas \code{covPlot()} should also be useful for plotting other (robust) covariance estimates. The possible options are: \describe{ \item{\code{distance}}{index plot of the robust distances} \item{\code{dd}}{distance-distance plot} \item{\code{qqchi2}}{a qq-plot of the robust distances versus the quantiles of the chi-squared distribution} \item{\code{tolEllipsePlot}}{a tolerance ellipse plot, via \code{\link{tolEllipsePlot}()}} \item{\code{screeplot}}{an eigenvalues comparison plot - screeplot} } The Distance-Distance Plot, introduced by Rousseeuw and van Zomeren (1990), displays the robust distances versus the classical Mahalanobis distances. The dashed line is the set of points where the robust distance is equal to the classical distance. The horizontal and vertical lines are drawn at values equal to the cutoff which defaults to square root of the 97.5\% quantile of a chi-squared distribution with p degrees of freedom. Points beyond these lines can be considered outliers. } %\value{} \references{ P. J. Rousseeuw and van Zomeren, B. C. (1990). Unmasking Multivariate Outliers and Leverage Points. \emph{Journal of the American Statistical Association} \bold{85}, 633--639. P. J. Rousseeuw and K. van Driessen (1999) A fast algorithm for the minimum covariance determinant estimator. \emph{Technometrics} \bold{41}, 212--223. } \seealso{ \code{\link{tolEllipsePlot}} } \examples{ data(Animals, package ="MASS") brain <- Animals[c(1:24, 26:25, 27:28),] mcd <- covMcd(log(brain)) plot(mcd, which = "distance", classic = TRUE)# 2 plots plot(mcd, which = "dd") plot(mcd, which = "tolEllipsePlot", classic = TRUE) op <- par(mfrow = c(2,3)) plot(mcd) ## -> which = "all" (5 plots) par(op) ## same plots for another robust Cov estimate: data(hbk) hbk.x <- data.matrix(hbk[, 1:3]) cOGK <- covOGK(hbk.x, n.iter = 2, sigmamu = scaleTau2, weight.fn = hard.rejection) covPlot(hbk.x, m.cov = cOGK, classic = TRUE) % %% this "\dont*{} case is needed for R <= 2.3.1 : % \dontrun{ covPlot(hbk.x, m.cov = cOGK, classic = TRUE, ask= TRUE)} % \dontshow{covPlot(hbk.x, m.cov = cOGK, classic = TRUE, ask= FALSE)} } \keyword{hplot} \keyword{robust} \keyword{multivariate} robustbase/man/ambientNOxCH.Rd0000754000176200001440000001472713312375575015774 0ustar liggesusers\name{ambientNOxCH} \alias{ambientNOxCH} \docType{data} \encoding{utf8} \title{ Daily Means of NOx (mono-nitrogen oxides) in air} \description{ This dataset contains daily means (from midnight to midnight) of NOx, i.e., mono-nitrogen oxides, in [ppb] at 13 sites in central Switzerland and Aarau for the year 2004. } \usage{data(ambientNOxCH, package="robustbase")} \format{ A data frame with 366 observations on the following 14 variables. \describe{ \item{\code{date}}{date of day, of class \code{"Date"}.} \item{\code{ad}}{Site is located north of Altdorf 100 meters east of motorway A2, on an open field at the beginning of a more than 2000m deep valley (690.175, 193.55; 438; inLuft)} \item{\code{ba}}{Site is located in the centre of the little town of Baden in a residential area. Baden has 34'000 inhabitants and is situated on the swiss plateau (666.075, 257.972; 377; inLuft).} \item{\code{ef}}{Site is located 6 km south of altdorf and 800 m north of the village of Erstfeld. The motorway A2 passes 5 m west of the measuring site. 
Over 8 million vehicles have passed Erstfeld in 2004 where 13\% of the counts were attributed to trucks (691.43, 187.69; 457; MFM-U).} \item{\code{la}}{Site is located on a wooded hill in a rural area called Laegern, about 190 m above Baden, which is about 5 km away (669.8, 259; 690; NABEL).} \item{\code{lu}}{Site is located in the center of town of Lucerne, which has 57'000 inhabitants (666.19, 211.975; 460; inLuft).} \item{\code{re}}{Site is located 1 km west of Reiden on the Swiss plateau. The motorway A2 passes 5 m west of the measuring site (639.56, 232.11; 462; MFM-U).} \item{\code{ri}}{Site is located at Rigi Seebodenalp, 649 m above the lake of Lucerne on an alp with half a dozen small houses (677.9, 213.5; 1030; NABEL).} \item{\code{se}}{Site is located in Sedel next to town of Lucerne 35m above and 250m south of motorway A14 from Zug to Lucerne on a low hill with free 360° panorama (665.5, 213.41; 484; inLuft).} \item{\code{si}}{Site is located at the border of a small industrial area in Sisseln, 300 m east of a main road (640.725, 266.25; 305; inLuft).} \item{\code{st}}{Site is located at the south east border of Stans with 7'000 inhabitants (670.85, 201.025; 438; inLuft).} \item{\code{su}}{Site is located in the center of Suhr (8700 inhabitants), 10 m from the main road (648.49, 246.985; 403; inLuft).} \item{\code{sz}}{Site is located in Schwyz (14'200 inhabitants) near a shopping center (691.92, 208.03; 470; inLuft).} \item{\code{zg}}{Site is located in the centre of Zug with 22'000 inhabitants, 24 m from the main road (681.625, 224.625; 420; inLuft).} } } \details{ The 13 sites are part of one of the three air quality monitoring networks: inLuft (regional authorities of central Switzerland and canton Aargau) \cr NABEL (Swiss federal network) \cr MFM-U (Monitoring flankierende Massnahmen Umwelt), special Swiss federal network along transit motorways A2 and A13 from Germany to Italy through Switzerland \cr The information within the brackets means: Swiss coordinates km east, km north; m above sea level; network When the measuring sites are exposed to the same atmospheric condition and when there is no singular emission event at any site, \code{log(mean(NOx) of a specific day at each site)} is a linear function of \code{log(yearly.mean(NOx) at the corresponding site)}. The offset and the slope of the straight line reflects the atmospheric conditions at this specific day. During winter time, often an inversion prevents the emissions from being diluted vertically, so that there evolve two separate atmospheric compartements: One below the inversion boundary with polluted air and one above with relatively clean air. In our example below, Rigi Seebodenalp is above the inversion boundary between December 10th and 12th. } \source{ http://www.in-luft.ch/ \cr http://www.empa.ch/plugin/template/empa/*/6794 \cr http://www.bafu.admin.ch/umweltbeobachtung/02272/02280 } \seealso{another NOx dataset, \code{\link{NOxEmissions}}. 
} \examples{ data(ambientNOxCH) str (ambientNOxCH) yearly <- log(colMeans(ambientNOxCH[,-1], na.rm=TRUE)) xlim <- range(yearly) lNOx <- log(ambientNOxCH[, -1]) days <- ambientNOxCH[, "date"] ## Subset of 9 days starting at April 4: idays <- seq(which(ambientNOxCH$date=="2004-12-04"), length=9) ylim <- range(lNOx[idays,],na.rm=TRUE) op <- par(mfrow=c(3,3),mar=rep(1,4), oma = c(0,0,2,0)) for (id in idays) { daily <- unlist(lNOx[id,]) plot(NA, xlim=xlim,ylim=ylim, ann=FALSE, type = "n") abline(0:1, col="light gray") abline(lmrob(daily~yearly, na.action=na.exclude), col="red", lwd=2) text(yearly, daily, names(yearly), col="blue") mtext(days[id], side=1, line=-1.2, cex=.75, adj=.98) } mtext("Daily ~ Yearly log( NOx mean values ) at 13 Swiss locations", outer=TRUE) par(op) ## do all 366 regressions: Least Squares and Robust: LS <- lapply(1:nrow(ambientNOxCH), function(id) lm(unlist(lNOx[id,]) ~ yearly, na.action = na.exclude)) R <- lapply(1:nrow(ambientNOxCH), function(id) lmrob(unlist(lNOx[id,]) ~ yearly, na.action = na.exclude)) ## currently 4 warnings about non-convergence; ## which ones? days[notOk <- ! sapply(R, `[[`, "converged") ] ## "2004-01-10" "2004-05-12" "2004-05-16" "2004-11-16" ## first problematic case: daily <- unlist(lNOx[which(notOk)[1],]) plot(daily ~ yearly, main = paste("lmrob() non-convergent:",days[notOk[1]])) rr <- lmrob(daily ~ yearly, na.action = na.exclude, control = lmrob.control(trace=3, max.it = 100)) ##-> 53 iter. ## Look at all coefficients: R.cf <- t(sapply(R, coef)) C.cf <- t(sapply(LS, coef)) plot(C.cf, xlim=range(C.cf[,1],R.cf[,1]), ylim=range(C.cf[,2],R.cf[,2])) mD1 <- rowMeans(abs(C.cf - R.cf)) lrg <- mD1 > quantile(mD1, 0.80) arrows(C.cf[lrg,1], C.cf[lrg,2], R.cf[lrg,1], R.cf[lrg,2], length=.1, col="light gray") points(R.cf, col=2) ## All robustness weights aW <- t(sapply(R, weights, type="robustness")) colnames(aW) <- names(yearly) summary(aW) sort(colSums(aW < 0.05, na.rm = TRUE)) # how often "clear outlier": # lu st zg ba se sz su si re la ef ad ri # 0 0 0 1 1 1 2 3 4 10 14 17 48 lattice::levelplot(aW, asp=1/2, main="Robustness weights", xlab= "day", ylab= "site") } \keyword{datasets} robustbase/man/alcohol.Rd0000644000176200001440000000310313713564014015107 0ustar liggesusers\name{alcohol} \alias{alcohol} \docType{data} \title{Alcohol Solubility in Water Data} \description{ The solubility of alcohols in water is important in understanding alcohol transport in living organisms. This dataset from (Romanelli et al., 2001) contains physicochemical characteristics of 44 aliphatic alcohols. The aim of the experiment was the prediction of the solubility on the basis of molecular descriptors. } \usage{data(alcohol, package="robustbase")} \format{ A data frame with 44 observations on the following 7 numeric variables. \describe{ \item{\code{SAG}}{solvent accessible surface-bounded molecular volume.} \item{\code{V}}{volume} \item{\code{logPC}}{Log(PC); PC = octanol-water partitions coefficient} \item{\code{P}}{polarizability} \item{\code{RM}}{molar refractivity} \item{\code{Mass}}{the mass} \item{\code{logSolubility}}{ln(Solubility), the response.} } } % \details{ % } \source{ The website accompanying the MMY-book: \url{https://www.wiley.com/legacy/wileychi/robust_statistics/} } \references{ %% FIXME: I don't think this is the correct reference % Romanelli, J.R., Kelly, J.J. and Litwein, D.E.M (2001) % Hand-assisted laparoscopic surgery in the United States: An overview % \emph{Seminars in Laparoscopic Surgery} \bold{8} 96--103. Maronna, R.A., Martin, R.D. 
and Yohai, V.J. (2006) \emph{Robust Statistics, Theory and Methods}, Wiley. } \examples{ data(alcohol) ## version of data set with trivial names, as s.alcohol <- alcohol names(s.alcohol) <- paste("Col", 1:7, sep='') } \keyword{datasets} robustbase/man/delivery.Rd0000644000176200001440000000165613312375575015333 0ustar liggesusers\name{delivery} \alias{delivery} \docType{data} \title{Delivery Time Data} \description{ Delivery Time Data, from Montgomery and Peck (1982). The aim is to explain the time required to service a vending machine (Y) by means of the number of products stocked (X1) and the distance walked by the route driver (X2). } \usage{data(delivery, package="robustbase")} \format{ A data frame with 25 observations on the following 3 variables. \describe{ \item{\code{n.prod}}{Number of Products} \item{\code{distance}}{Distance} \item{\code{delTime}}{Delivery time} } } \source{ Montgomery and Peck (1982, p.116) } \references{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, page 155, table 23. } \examples{ data(delivery) summary(lm.deli <- lm(delTime ~ ., data = delivery)) delivery.x <- as.matrix(delivery[, 1:2]) c_deli <- covMcd(delivery.x) c_deli } \keyword{datasets} robustbase/man/tolEllipsePlot.Rd0000644000176200001440000000425413710054521016443 0ustar liggesusers\name{tolEllipsePlot} \alias{tolEllipsePlot} \title{Tolerance Ellipse Plot} \description{ Plots the 0.975 tolerance ellipse of the bivariate data set \code{x}. The ellipse is defined by those data points whose distance is equal to the squareroot of the 0.975 chisquare quantile with 2 degrees of freedom. } \usage{ tolEllipsePlot(x, m.cov = covMcd(x), cutoff = NULL, id.n = NULL, classic = FALSE, tol = 1e-07, xlab = "", ylab = "", main = "Tolerance ellipse (97.5\%)", txt.leg = c("robust", "classical"), col.leg = c("red", "blue"), lty.leg = c("solid","dashed")) } \arguments{ \item{x}{a two dimensional matrix or data frame. } \item{m.cov}{an object similar to those of class \code{"mcd"}; however only its components \code{center} and \code{cov} will be used. If missing, the MCD will be computed (via \code{\link{covMcd}()}).} \item{cutoff}{numeric distance needed to flag data points outside the ellipse.} \item{id.n}{number of observations to be identified by a label. If not supplied, the number of observations with distance larger than \code{cutoff} is used.} \item{classic}{whether to plot the classical distances as well, \code{FALSE} by default.} \item{tol}{tolerance to be used for computing the inverse, see \code{\link{solve}}. Defaults to \code{1e-7}.} \item{xlab, ylab, main}{passed to \code{\link{plot.default}}.} \item{txt.leg, col.leg, lty.leg}{character vectors of length 2 for the legend, only used if \code{classic = TRUE}.} } \author{Peter Filzmoser, Valentin Todorov and Martin Maechler} %\details{} %\value{} %\references{ } \seealso{ \code{\link{covPlot}} which calls \code{tolEllipsePlot()} when desired. \code{\link[cluster]{ellipsoidhull}} and \code{\link[cluster]{predict.ellipsoid}} from package \CRANpkg{cluster}. 
} \examples{ data(hbk) hbk.x <- data.matrix(hbk[, 1:3]) mcd <- covMcd(hbk.x) # compute mcd in advance ## must be a 2-dimensional data set: take the first two columns : tolEllipsePlot(hbk.x[,1:2]) ## an "impressive" example: data(telef) tolEllipsePlot(telef, classic=TRUE) } \keyword{hplot} \keyword{robust} robustbase/man/starsCYG.Rd0000644000176200001440000000304313312375575015177 0ustar liggesusers\name{starsCYG} \alias{starsCYG} \docType{data} \title{Hertzsprung-Russell Diagram Data of Star Cluster CYG OB1} \description{ Data for the Hertzsprung-Russell Diagram of the Star Cluster CYG OB1, which contains 47 stars in the direction of Cygnus, from C.Doom. The first variable is the logarithm of the effective temperature at the surface of the star (Te) and the second one is the logarithm of its light intencity (\eqn{L/L_0}). In the Hertzsprung-Russell diagram, which is the scatterplot of these data points, where the log temperature is plotted from left to right, two groups of points are seen:\cr the majority which tend to follow a steep band and four stars in the upper corner. In the astronomy the 43 stars are said to lie on the main sequence and the four remaining stars are called \dQuote{giants} (the points 11, 20, 30, 34). } \usage{data(starsCYG, package="robustbase")} \format{ A data frame with 47 observations on the following 2 variables \describe{ \item{\code{log.Te}}{Logarithm of the effective temperature at the surface of the star (Te).} \item{\code{log.light}}{Logarithm of its light intencity (\eqn{L/L_0})} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.27, table 3. } \examples{ data(starsCYG) plot(starsCYG) cst <- covMcd(starsCYG) lm.stars <- lm(log.light ~ log.Te, data = starsCYG) summary(lm.stars) plot(lm.stars) lts.stars <- ltsReg(log.light ~ log.Te, data = starsCYG) plot(lts.stars) } \keyword{datasets} robustbase/man/epilepsy.Rd0000644000176200001440000000516013312375575015334 0ustar liggesusers\name{epilepsy} \alias{epilepsy} \docType{data} \title{Epilepsy Attacks Data Set} \description{Data from a clinical trial of 59 patients with epilepsy (Breslow, 1996) in order to illustrate diagnostic techniques in Poisson regression. } \usage{data(epilepsy, package="robustbase")} \format{ A data frame with 59 observations on the following 11 variables. \describe{ \item{\code{ID}}{Patient identification number} \item{\code{Y1}}{Number of epilepsy attacks patients have during the first follow-up period} \item{\code{Y2}}{Number of epilepsy attacks patients have during the second follow-up period} \item{\code{Y3}}{Number of epilepsy attacks patients have during the third follow-up period} \item{\code{Y4}}{Number of epilepsy attacks patients have during the forth follow-up period} \item{\code{Base}}{Number of epileptic attacks recorded during 8 week period prior to randomization} \item{\code{Age}}{Age of the patients} \item{\code{Trt}}{a factor with levels \code{placebo} \code{progabide} indicating whether the anti-epilepsy drug Progabide has been applied or not} \item{\code{Ysum}}{Total number of epilepsy attacks patients have during the four follow-up periods } \item{\code{Age10}}{Age of the patients devided by 10} \item{\code{Base4}}{Variable \code{Base} devided by 4} } } \details{Thall and Vail reported data from a clinical trial of 59 patients with epilepsy, 31 of whom were randomized to receive the anti-epilepsy drug Progabide and 28 of whom received a placebo. 
Baseline data consisted of the patient's age and the number of epileptic seizures recorded during 8 week period prior to randomization. The response consisted of counts of seizures occuring during the four consecutive follow-up periods of two weeks each. } \source{ Thall, P.F. and Vail S.C. (1990) Some covariance models for longitudinal count data with overdispersion. \emph{Biometrics} \bold{46}, 657--671. } \references{ Diggle, P.J., Liang, K.Y., and Zeger, S.L. (1994) \emph{Analysis of Longitudinal Data}; Clarendon Press. Breslow N. E. (1996) Generalized linear models: Checking assumptions and strengthening conclusions. \emph{Statistica Applicata} \bold{8}, 23--41. } \examples{ data(epilepsy) str(epilepsy) pairs(epilepsy[,c("Ysum","Base4","Trt","Age10")]) Efit1 <- glm(Ysum ~ Age10 + Base4*Trt, family=poisson, data=epilepsy) summary(Efit1) ## Robust Fit : Efit2 <- glmrob(Ysum ~ Age10 + Base4*Trt, family=poisson, data=epilepsy, method = "Mqle", tcc=1.2, maxit=100) summary(Efit2) } \keyword{datasets} robustbase/man/lactic.Rd0000644000176200001440000000141313312375575014736 0ustar liggesusers\name{lactic} \alias{lactic} %% FIXME: shorter \title{Lactic Acid Concentration Measurement Data} \description{ Data on the Calibration of an Instrument that Measures Lactic Acid Concentration in Blood, from Afifi and Azen (1979) - comparing the true concentration X with the measured value Y. } \usage{data(lactic, package="robustbase")} \format{ A data frame with 20 observations on the following 2 variables. \describe{ \item{\code{X}}{True Concentration} \item{\code{Y}}{Instrument} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.62, table 10. } \examples{ data(lactic) summary(lm.lactic <- lm(Y ~., data=lactic)) %% summary(lts.lactic <- ltsReg(Y ~., data=lactic)) } \keyword{datasets} robustbase/man/wgt.himedian.Rd0000644000176200001440000000145113710054521016042 0ustar liggesusers\name{wgt.himedian} \alias{wgt.himedian} \title{Weighted Hi-Median} \description{ Compute the weighted Hi-Median of \code{x}. } \usage{ wgt.himedian(x, weights = rep(1, n)) } \arguments{ \item{x}{numeric vector} \item{weights}{numeric vector of weights; of the same length as \code{x}.} } % \value{ % a number % } %\author{Martin Maechler} \note{ this is rather a by-product of the code used in \code{\link{Sn}} and \code{\link{Qn}}. We currently plan to replace it with more general weighted quantiles. } \seealso{\code{\link{median}}; %%..... also \code{\link[Hmisc]{wtd.quantile}} from package \CRANpkg{Hmisc}. } \examples{ x <- c(1:6, 20) median(x) ## 4 stopifnot(all.equal(4, wgt.himedian(x)), all.equal(6, wgt.himedian(x, c(rep(1,6), 5)))) } \keyword{univar} \keyword{robust} robustbase/man/predict.lmrob.Rd0000644000176200001440000001207712471655756016262 0ustar liggesusers\name{predict.lmrob} \alias{predict.lmrob} \title{Predict method for Robust Linear Model ("lmrob") Fits} \description{ Predicted values based on robust linear model object. } \usage{ \method{predict}{lmrob}(object, newdata, se.fit = FALSE, scale = NULL, df = NULL, interval = c("none", "confidence", "prediction"), level = 0.95, type = c("response", "terms"), terms = NULL, na.action = na.pass, pred.var = res.var/weights, weights = 1, ...) } \arguments{ %% the following is +- copy-pasted from predict.lm.Rd: \item{object}{object of class inheriting from \code{"lmrob"}} \item{newdata}{an optional data frame in which to look for variables with which to predict. 
If omitted, the fitted values are used.} \item{se.fit}{a switch indicating if standard errors are required.} \item{scale}{scale parameter for std.err. calculation} \item{df}{degrees of freedom for scale} \item{interval}{type of interval calculation.} \item{level}{tolerance/confidence level} \item{type}{Type of prediction (response or model term).} \item{terms}{if \code{type="terms"}, which terms (default is all terms)} \item{na.action}{function determining what should be done with missing values in \code{newdata}. The default is to predict \code{NA}.} \item{pred.var}{the variance(s) for future observations to be assumed for prediction intervals. See \sQuote{Details}.} \item{weights}{variance weights for prediction. This can be a numeric vector or a one-sided model formula. In the latter case, it is interpreted as an expression evaluated in \code{newdata}} \item{\dots}{further arguments passed to or from other methods.} } \details{ Note that this \code{lmrob} method for \code{\link{predict}} is closely modeled after the method for \code{lm()}, \code{\link{predict.lm}}, maybe see there for caveats with missing value treatment. %% Also lifted from predict.lm.Rd : The prediction intervals are for a single observation at each case in \code{newdata} (or by default, the data used for the fit) with error variance(s) \code{pred.var}. This can be a multiple of \code{res.var}, the estimated value of \eqn{\sigma^2}: the default is to assume that future observations have the same error variance as those used for fitting. If \code{weights} is supplied, the inverse of this is used as a scale factor. For a weighted fit, if the prediction is for the original data frame, \code{weights} defaults to the weights used for the model fit, with a warning since it might not be the intended result. If the fit was weighted and \code{newdata} is given, the default is to assume constant prediction variance, with a warning. } \value{ %% the following is +- copy-pasted from predict.lm.Rd: \code{predict.lmrob} produces a vector of predictions or a matrix of predictions and bounds with column names \code{fit}, \code{lwr}, and \code{upr} if \code{interval} is set. If \code{se.fit} is \code{TRUE}, a list with the following components is returned: \item{fit}{vector or matrix as above} \item{se.fit}{standard error of predicted means} \item{residual.scale}{residual standard deviations} \item{df}{degrees of freedom for residual} } % \references{ % } \author{Andreas Ruckstuhl} \seealso{ \code{\link{lmrob}} and the (non-robust) traditional \code{\link{predict.lm}} method. } \examples{ ## Predictions --- artificial example -- closely following example(predict.lm) set.seed(5) n <- length(x <- sort(c(round(rnorm(25), 1), 20))) y <- x + rnorm(n) iO <- c(sample(n-1, 3), n) y[iO] <- y[iO] + 10*rcauchy(iO) p.ex <- function(...) 
{ plot(y ~ x, ...); abline(0,1, col="sky blue") points(y ~ x, subset=iO, col="red", pch=2) abline(lm (y ~ x), col = "gray40") abline(lmrob(y ~ x), col = "forest green") legend("topleft", c("true", "Least Squares", "robust"), col = c("sky blue", "gray40", "forest green"), lwd=1.5, bty="n") } p.ex() fm <- lmrob(y ~ x) predict(fm) new <- data.frame(x = seq(-3, 10, 0.25)) str(predict(fm, new, se.fit = TRUE)) pred.w.plim <- predict(fm, new, interval = "prediction") pred.w.clim <- predict(fm, new, interval = "confidence") pmat <- cbind(pred.w.clim, pred.w.plim[,-1]) matlines(new$x, pmat, lty = c(1,2,2,3,3))# add to first plot ## show zoom-in region : rect(xleft = -3, ybottom = -20, xright = 10, ytop = 40, lty = 3, border="orange4") ## now zoom in : p.ex(xlim = c(-3,10), ylim = c(-20, 40)) matlines(new$x, pmat, lty = c(1,2,2,3,3)) box(lty = 3, col="orange4", lwd=3) legend("bottom", c("fit", "lwr CI", "upr CI", "lwr Pred.I", "upr Pred.I"), col = 1:5, lty=c(1,2,2,3,3), bty="n") ## Prediction intervals, special cases ## The first three of these throw warnings w <- 1 + x^2 fit <- lmrob(y ~ x) wfit <- lmrob(y ~ x, weights = w) predict(fit, interval = "prediction") predict(wfit, interval = "prediction") predict(wfit, new, interval = "prediction") predict(wfit, new, interval = "prediction", weights = (new$x)^2) -> p.w2 p.w2 stopifnot(identical(p.w2, ## the same as using formula: predict(wfit, new, interval = "prediction", weights = ~x^2))) } \keyword{robust} \keyword{regression} robustbase/man/psi_func-class.Rd0000644000176200001440000000503113310673404016376 0ustar liggesusers\name{psi_func-class} \docType{class} \alias{psi_func-class} \title{Class of "Psi Functions" for M-Estimation} %% 'psi_func' cannot be used in title! \description{ The class \code{"psi_func"} is used to store \eqn{\psi \ (psi)}{psi} functions for M-estimation. In particular, an object of the class contains \eqn{\rho(x) \ (\code{rho})}{rho(x)}, its derivative \eqn{\psi(x) \ (psi)}{psi(x)}, the weight function \eqn{\psi(x)/x}, and first derivative of \eqn{\psi}, \code{Dpsi = } \eqn{\psi'(x)}. } \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("psi_func", ...)}, but preferably by \code{\link{psiFunc}(...)}. } \section{Slots}{ \describe{ \item{\code{rho}:}{the \eqn{\rho()}{rho()} function, an object of class \code{"functionX"}. 
This is used to formulate the objective function; \eqn{\rho()} can be regarded as generalized negative log-likelihood.} \item{\code{psi}:}{\eqn{\psi()}{psi()} is the derivative of \eqn{\rho}, \eqn{\psi(x) = \frac{d}{dx} \rho(x)}{psi(x) = d/dx rho(x)}; also of class \code{"functionX"}.} \item{\code{wgt}:}{The weight function \eqn{\psi(x)/x}, of class \code{"functionX"}.} \item{\code{Dpsi}:}{the derivative of \eqn{\psi}, \eqn{Dpsi(x) = psi'(x)}; of class \code{"functionX"}.} \item{\code{Dwgt}:}{the derivative of the weight function, of class \code{"functionX"}, is generated automatically if \code{\link{psiFunc}} constructor is used.} \item{\code{tDefs}:}{\emph{named} numeric vector of \bold{t}uning parameter \bold{Def}ault values.} %%% FIXME : Replace these by *methods* \item{\code{Erho}:}{A function of class \code{"functionXal"} for computing \eqn{E[\rho(X)]} when \eqn{X} is standard normal \eqn{\mathcal{N}(0,1)}{N(0,1)}.} \item{\code{Epsi2}:}{A function of class \code{"functionXal"} for computing \eqn{E[\psi^2(X)]} when \eqn{X} is standard normal.} \item{\code{EDpsi}:}{A function of class \code{"functionXal"} for computing \eqn{E[\psi'(X)]} when \eqn{X} is standard normal.} \item{\code{name}:}{Name of \eqn{\psi}{psi}-function used for printing.} \item{\code{xtras}:}{Potentially further information.} } } \section{Methods}{ Currently, only \code{\link{chgDefaults}()}, \code{\link[=plot-methods]{plot}()} and \code{show()}. } \author{Martin Maechler} \seealso{ \code{\link{psiFunc}}. } \examples{ str(huberPsi, give.attr = FALSE) plot(hampelPsi)# calling the plot method (nicely showing "all" !) } \keyword{classes} \keyword{robust} robustbase/man/lmrob.lar.Rd0000644000176200001440000000377213710236747015400 0ustar liggesusers\name{lmrob.lar} \alias{lmrob.lar} \title{Least Absolute Residuals / L1 Regression} \description{ To compute least absolute residuals (LAR) or \dQuote{L1} regression, \code{lmrob.lar} implements the routine L1 in Barrodale and Roberts (1974), which is based on the simplex method of linear programming. It is a copy of \code{lmRob.lar} (in early 2012) from the \CRANpkg{robust} package. } \usage{ lmrob.lar(x, y, control, \dots) } \arguments{ \item{x}{numeric matrix for the predictors.} \item{y}{numeric vector for the response.} \item{control}{\code{\link{list}} as returned by \code{\link{lmrob.control}()} .} \item{\dots}{(unused but needed when called as \code{init(x,y,ctrl, mf)} from \code{\link{lmrob}()})} } \details{ This method is used for computing the M-S estimate and typically not to be used on its own. A description of the Fortran subroutines used can be found in Marazzi (1993). In the book, the main method is named \code{RILARS}. } \value{ A list that includes the following components: \item{coef }{The L1-estimate of the coefficient vector} \item{scale }{The residual scale estimate (mad)} \item{resid }{The residuals} \item{iter }{The number of iterations required by the simplex algorithm} \item{status }{Return status (0: optimal, but non unique solution, 1: optimal unique solution)} \item{converged }{Convergence status (always \code{TRUE}), needed for \code{\link{lmrob.fit}}.} } \references{ Marazzi, A. (1993). \emph{Algorithms, routines, and S functions for robust statistics}. Wadsworth & Brooks/Cole, Pacific Grove, CA. } \author{ Manuel Koller } \seealso{ \code{\link[quantreg]{rq}} from CRAN package \CRANpkg{quantreg}. } \examples{ data(stackloss) X <- model.matrix(stack.loss ~ . 
, data = stackloss) y <- stack.loss (fm.L1 <- lmrob.lar(X, y)) with(fm.L1, stopifnot(converged , status == 1L , all.equal(scale, 1.5291576438) , sum(abs(residuals) < 1e-15) == 4 # p=4 exactly fitted obs. )) } \keyword{ L1 } \keyword{ regression } robustbase/man/carrots.Rd0000644000176200001440000000342113312375575015155 0ustar liggesusers\name{carrots} \alias{carrots} \docType{data} \title{Insect Damages on Carrots} \description{ The damage carrots data set from Phelps (1982) was used by McCullagh and Nelder (1989) in order to illustrate diagnostic techniques because of the presence of an outlier. In a soil experiment trial with three blocks, eight levels of insecticide were applied and the carrots were tested for insect damage. } \usage{data(carrots, package="robustbase")} \format{ A data frame with 24 observations on the following 4 variables. \describe{ \item{success}{ integer giving the number of carrots with insect damage.} \item{total}{ integer giving the total number of carrots per experimental unit.} \item{logdose}{a numeric vector giving log(dose) values (eight different levels only).} \item{block}{factor with levels \code{B1} to \code{B3}} } } \source{ Phelps, K. (1982). Use of the complementary log-log function to describe doseresponse relationships in insecticide evaluation field trials. \cr In R. Gilchrist (Ed.), \emph{Lecture Notes in Statistics, No. 14. GLIM.82: Proceedings of the International Conference on Generalized Linear Models}; Springer-Verlag. } \references{ McCullagh P. and Nelder, J. A. (1989) \emph{Generalized Linear Models.} London: Chapman and Hall. Eva Cantoni and Elvezio Ronchetti (2001); JASA, and \cr Eva Cantoni (2004); JSS, see \code{\link{glmrob}} } \examples{ data(carrots) str(carrots) plot(success/total ~ logdose, data = carrots, col = as.integer(block)) coplot(success/total ~ logdose | block, data = carrots) ## Classical glm Cfit0 <- glm(cbind(success, total-success) ~ logdose + block, data=carrots, family=binomial) summary(Cfit0) ## Robust Fit (see help(glmrob)) .... } \keyword{datasets} robustbase/man/psiFindc.Rd0000644000176200001440000001247213177452122015236 0ustar liggesusers\name{psi.findc} \title{Find Tuning Constant(s) for "lqq" and "ggw" Psi Functions} \alias{.psi.ggw.findc}% --> ../R/lmrob.MM.R \alias{.psi.lqq.findc} \alias{.psi.const} \description{ Find psi function tuning constant sets for \code{"LQQ"} and \code{"GGW"} psi (\eqn{\psi}) functions by specifying largest descent (minimal slope), efficiency and or breakdown point. \code{.psi.const()} is called from \code{\link{lmrob.control}()} to set the tuning constants for psi and chi for \code{"LQQ"} and \code{"GGW"} psi. Unless the specified tuning constants are from fixed small set where the computations are stored precomputed, \code{.psi.const()} calls the corresponding \code{.psi..findc()}. } \usage{ .psi.ggw.findc(ms, b, eff = NA, bp = NA, subdivisions = 100L, rel.tol = .Machine$double.eps^0.25, abs.tol = rel.tol, tol = .Machine$double.eps^0.25, ms.tol = tol/64, maxiter = 1000) .psi.lqq.findc(ms, b.c, eff = NA, bp = NA, interval = c(0.1, 4), subdivisions = 100L, rel.tol = .Machine$double.eps^0.25, abs.tol = rel.tol, tol = .Machine$double.eps^0.25, maxiter = 1000) .psi.const(cc, psi) } \arguments{ \item{ms}{number, the minimal slope, typically negative.} \item{b, b.c}{number, specifying \eqn{b} or \eqn{b/c} for \code{"ggw"} or \code{"lqq"} respectively.} \item{eff}{a number (or \code{\link{NA}}), the desired \emph{efficiency}, in \eqn{[0,1]} of the estimator. 
If \code{NA}, \code{bp} must be specified as valid number.} \item{bp}{a number (or \code{\link{NA}}), the desired \emph{\bold{b}reakdown \bold{p}oint} of the estimator, in \eqn{[0,1]}.} \item{interval}{for finding \eqn{c} via \code{\link{uniroot}()}.} \item{subdivisions}{passed to \code{\link{integrate}()}.} \item{rel.tol, abs.tol}{relative and absolute tolerance for \code{\link{integrate}()}.} \item{tol}{relative tolerance for \code{\link{uniroot}()}.} \item{ms.tol}{relative tolerance for the internal \code{.psi.ggw.finda()}, eventually passed to \code{\link{optimize}} inside (internal) \code{.psi.ggw.mxs()}.} \item{maxiter}{maximal number of iterations for \code{\link{uniroot}()}.} %% for .psi.const() : \item{cc}{(for \code{.psi.const()}:) numeric vector of length 4, containing all constants \code{c(ms, b*, eff, bp)}, where \code{b* = b} for \code{"ggw"} and \code{b* = b.c} for \code{"lqq"}, and one of \code{(eff, bp)} is \code{NA}.} \item{psi}{a string, either \code{"ggw"} or \code{"lqq"}.} } \details{ For some important special cases, the result of \code{.psi.*.findc()} are stored precomputed for efficiency reasons. These cases are (the defaults for \code{tuning.chi} and \code{tuning.psi} respectively in \code{\link{lmrob.control}()}s result, \tabular{ll}{ \code{tuning.chi} \tab\code{tuning.psi} \cr \code{c(-0.5, 1.5, NA, 0.5)} \tab\code{c(-0.5, 1.5, 0.95, NA)} } and for \code{"ggw"} additionally, these four cases: \tabular{ll}{ \code{tuning.chi} \tab\code{tuning.psi} \cr {} \tab\code{c(-0.5, 1.5, 0.85, NA)} \cr \code{c(-0.5, 1, NA, 0.5)} \tab\code{c(-0.5, 1, 0.95, NA)} \cr {} \tab\code{c(-0.5, 1, 0.85, NA)} } %tab Note that for \code{"ggw"}, exactly these \eqn{2+4 = 6} cases also allow fast \eqn{\rho} and \eqn{\chi} (aka \eqn{\tilde\rho(\cdot)}{rho~(.)}, see \code{\link{Mchi}}), function evaluations. For all other tuning constant settings, rho() evaluations are based on numerical integration via \R's own \code{Rdqags()} C function (part of R's official API). } \value{ a \code{\link{numeric}} vector of constants, for \code{"lqq"} or \code{"ggw"} psi functions, respectively: \describe{ \item{\code{"lqq"}:}{\eqn{(b, c, s) = (b/c * c, c, s = 1 - min_slope)},} \item{\code{"ggw"}:}{\eqn{(0, a, b, c, \rho(\infty))}{% (0, a, b, c, rho(Inf))}.} } \code{.psi.const(cc, psi)} returns the argument \code{cc} with the above constant vectors as attribute \code{"constants"}, in the case of \code{psi = "lqq"} in all cases (since \pkg{robustbase} version >= 0.93), for \code{psi = "ggw"} only in the non-standard cases. } \references{ See the vignette about %% ../vignettes/psi_functions.Rnw : \dQuote{\eqn{\psi}{psi}-Functions Available in Robustbase}. } \seealso{\code{\link{Mpsi}()} etc for the psi function definitions; \code{\link{.Mpsi.tuning.defaults}}, etc, for tuning constants' defaults for \code{\link{lmrob}()}. } \author{Manuel Koller (original) and Martin Maechler (arguments, export, docs). 
} \examples{ (c.ge95 <- .psi.ggw.findc(ms = -0.5, b = 1.5, eff = 0.95)) (c.ge90 <- .psi.ggw.findc(ms = -0.5, b = 1.5, eff = 0.90)) (c.gb50 <- .psi.ggw.findc(ms = -0.5, b = 1.5, bp = 0.50)) stopifnot(all.equal(c.ge95, c(0, 1.386362, 1.5, 1.0628199, 4.7773893), tol = 1e-5), all.equal(c.ge90, c(0, 1.0282811, 1.5, 0.87086259, 3.2075233), tol = 1e-5), all.equal(c.gb50, c(0, 0.20367394, 1.5, 0.29591308, 0.37033962),tol = 1e-5)) (cl.e.95 <- .psi.lqq.findc(ms = -0.5, b.c = 1.5, eff = .95)) (cl.b.50 <- .psi.lqq.findc(ms = -0.5, b.c = 1.5, bp = .50)) stopifnot(all.equal(cl.e.95, c(1.4734061, 0.98227073, 1.5), tol = 1e-5), all.equal(cl.b.50, c(0.40154568, 0.26769712, 1.5), tol = 1e-5)) } \keyword{utilities} robustbase/man/nlrob.control.Rd0000644000176200001440000000263213710054521016261 0ustar liggesusers\name{nlrob.control} \alias{nlrob.control} \title{Control Nonlinear Robust Regression Algorithms} \description{ Allow the user to specify details for the different nonlinear robust regression algorithms in \code{\link{nlrob}}. } \usage{ nlrob.control(method, psi = c("bisquare", "lqq", "welsh", "optimal", "hampel", "ggw"), init = c("S", "lts"), optimizer = "JDEoptim", optArgs = list(), ...) } \arguments{ \item{method}{\code{\link{character}} string specifying the method} \item{psi}{string specifying the psi-function which defines the estimator.} \item{init}{for some methods, currently, \code{"MM"} only, a string specifying the initial estimator. } \item{optimizer}{currently only \code{"JDEoptim"} from package \CRANpkg{DEoptimR}.} \item{optArgs}{ a \code{\link{list}} of optional arguments to the optimizer. Currently, that is \code{\link[DEoptimR]{JDEoptim}} from package \CRANpkg{DEoptimR}. } \item{\dots}{ %% ~~Describe \code{\dots} here~~ } } %% \details{ %% %% } \value{ a \code{\link{list}} with several named components. The contents depend quite a bit on the \code{method}. } %\author{Martin Maechler} %% \note{ %% } \seealso{ \code{\link{nlrob}}, \code{\link{nlrob}}, } \examples{ str(nlrob.control("MM")) str(nlrob.control("tau")) str(nlrob.control("CM")) str(nlrob.control("mtl")) } \keyword{utilities} robustbase/man/weights.lmrob.Rd0000644000176200001440000000240012343540277016254 0ustar liggesusers\name{weights.lmrob} \title{Extract Robustness and Model Weights} \alias{weights.lmrob} \alias{weights.glmrob} \description{ \code{weights()} extracts robustness weights or fitting (or prior) weights from a \code{lmrob} or \code{glmrob} object. } \usage{ \method{weights}{lmrob}(object, type = c("prior", "robustness"), ...) } \arguments{ \item{object}{ an object of class \code{"lmrob"} or \code{"glmrob"}, typically the result of a call to \code{\link{lmrob}}, or \code{\link{glmrob}}, respectively.} \item{type}{the type of weights to be returned. Either \code{"prior"} (default), or \code{"robustness"}.} \item{\dots}{not used currently.} } \details{ The \dQuote{prior weights} correspond to the weights specified using the \dQuote{weights} argument when calling \code{lmrob}. The \dQuote{robustness weights} are the weights assigned by the M-estimator of regression, \eqn{\psi(r_i/S) / (r_i/S)}. The robust coefficient estimate then numericarlly corresponds to a weighted least squares fit using the product of both types of weights as weights. } \value{ Weights extracted from the object \code{object}. 
} \author{Manuel Koller and Martin Maechler.} \seealso{ \code{\link{lmrob}}, \code{\link{glmrob}} and \code{\link{weights}} } robustbase/man/pulpfiber.Rd0000644000176200001440000000446513312375575015501 0ustar liggesusers\name{pulpfiber} \alias{pulpfiber} \encoding{utf-8} \docType{data} \title{Pulp Fiber and Paper Data} \description{ Measurements of aspects pulp fibers and the paper produced from them. Four properties of each are measured in sixty-two samples. } \usage{data(pulpfiber, package="robustbase")} \format{ A data frame with 62 observations on the following 8 variables. \describe{ \item{\code{X1}}{numeric vector of arithmetic fiber length} \item{\code{X2}}{numeric vector of long fiber fraction} \item{\code{X3}}{numeric vector of fine fiber fraction} \item{\code{X4}}{numeric vector of zero span tensile} % \item{\ }{\ }% ------------- white space -- \item{\code{Y1}}{numeric vector of breaking length} \item{\code{Y2}}{numeric vector of elastic modulus} \item{\code{Y3}}{numeric vector of stress at failure} \item{\code{Y4}}{numeric vector of burst strength} } } \details{ Cited from the reference article: \emph{The dataset contains measurements of properties of pulp fibers and the paper made from them. The aim is to investigate relations between pulp fiber properties and the resulting paper properties. The dataset contains \eqn{n = 62} measurements of the following four pulp fiber characteristics: arithmetic fiber length, long fiber fraction, fine fiber fraction, and zero span tensile. The four paper properties that have been measured are breaking length, elastic modulus, stress at failure, and burst strength.} The goal is to predict the \eqn{q = 4} paper properties from the \eqn{p = 4} fiber characteristics. } \source{ Rousseeuw, P. J., Van Aelst, S., Van Driessen, K., and Agulló, J. (2004) Robust multivariate regression; \emph{Technometrics} \bold{46}, 293--305. Till 2016 available from \code{http://users.ugent.be/~svaelst/data/pulpfiber.txt} } \author{port to \R and this help page: Martin Maechler } \references{ Lee, J. (1992) \emph{Relationships Between Properties of Pulp-Fibre and Paper}, unpublished doctoral thesis, U. Toronto, Faculty of Forestry. } \examples{ data(pulpfiber) str(pulpfiber) pairs(pulpfiber, gap=.1) ## 2 blocks of 4 .. c1 <- cov(pulpfiber) cR <- covMcd(pulpfiber) ## how different are they: The robust estimate has more clear high correlations: symnum(cov2cor(c1)) symnum(cov2cor(cR$cov)) } \keyword{datasets} robustbase/man/summarizeRobWeights.Rd0000644000176200001440000000317012137052541017500 0ustar liggesusers\name{summarizeRobWeights} \alias{summarizeRobWeights} \title{Print a Nice "summary" of Robustness Weights} \description{ Print a nice \dQuote{summary} about a numeric vector of robustness weights. Observations with weights around zero are marked as outliers. 
} \usage{ summarizeRobWeights(w, digits = getOption("digits"), header = "Robustness weights:", eps = 0.1 / length(w), eps1 = 1e-3, \dots) } \arguments{ \item{w}{numeric vector of robustness weigths.} \item{digits}{digits to be used for \code{\link{print}}ing.} \item{header}{string to be printed as header line.} \item{eps}{numeric tolerance \eqn{\epsilon}{eps}: values of \code{w} with \eqn{\left|w_i\right| < \epsilon/n}{|w[i]| < eps/n} are said to be outliers.} \item{eps1}{numeric tolerance: values of \code{w} with \eqn{\left|1 - w_i\right| < eps1}{|1 - w[i]| < eps1} are said to have weight \sQuote{\code{~= 1}}.} \item{\dots}{potential further arguments, passed to \code{\link{print}()}.} } \seealso{ The \code{\link{summary}} methods for \code{\link{lmrob}} and \code{\link{glmrob}} make use of \code{summarizeRobWeights()}. Our methods for \code{\link{weights}()}, \code{\link{weights.lmrob}(*, type="robustness")} and \code{\link{weights.glmrob}(*, type="robustness")}. } \value{ none; the function is used for its side effect of printing. } \author{Martin Maechler} \examples{ w <- c(1,1,1,1,0,1,1,1,1,0,1,1,.9999,.99999, .5,.6,1e-12) summarizeRobWeights(w) # two outside ~= {0,1} summarizeRobWeights(w, eps1 = 5e-5)# now three outside {0,1} ## See the summary() outputs } \keyword{utilities} robustbase/man/bushfire.Rd0000644000176200001440000000126613312375575015314 0ustar liggesusers\name{bushfire} \alias{bushfire} \docType{data} \title{ Campbell Bushfire Data } \description{ This data set was used by Campbell (1984) to locate bushfire scars. The dataset contains satelite measurements on five frequency bands, corresponding to each of 38 pixels. } \usage{data(bushfire, package="robustbase")} \format{ A data frame with 38 observations on 5 variables. % \describe{} } %\Note{} \source{ Maronna, R.A. and Yohai, V.J. (1995) The Behavoiur of the Stahel-Donoho Robust Multivariate Estimator. \emph{Journal of the American Statistical Association} \bold{90}, 330--341. } %\seealso{} \examples{ data(bushfire) plot(bushfire) covMcd(bushfire) } \keyword{datasets} robustbase/man/SiegelsEx.Rd0000644000176200001440000000175413312375575015377 0ustar liggesusers\name{SiegelsEx} \alias{SiegelsEx} \docType{data} \title{Siegel's Exact Fit Example Data} \description{ A small counterexample data set devised by Andrew Siegel. Six (out of nine) data points lie on the line \eqn{y = 0} such that some robust regression estimators exhibit the \dQuote{\emph{exact fit}} property. } \usage{data(SiegelsEx, package="robustbase")} \format{ A data frame with 9 observations on the following 2 variables. \describe{ \item{\code{x}}{a numeric vector} \item{\code{y}}{a numeric vector} } } \source{ Emerson and Hoaglin (1983, p.139) } \references{ Peter J. Rousseeuw and Annick M. 
Leroy (1987) \emph{Robust Regression and Outlier Detection} Wiley, p.60--61 } \examples{ data(SiegelsEx) plot(SiegelsEx, main = "Siegel's example for 'exact fit'") abline( lm(y ~ x, data = SiegelsEx)) abline(MASS::lqs(y ~ x, data = SiegelsEx, method = "lms"), col = 2) legend("topright", leg = c("lm", "LMS"), col=1:2, lwd=1, inset = 1/20) } \keyword{datasets} robustbase/man/fullRank.Rd0000644000176200001440000000432313170062205015241 0ustar liggesusers\name{fullRank} \alias{fullRank} \title{Remove Columns (or Rows) From a Matrix to Make It Full Rank} \description{ From the QR decomposition with pivoting, (\code{\link{qr}(x, tol)} if \eqn{n \ge p}), if the matrix is not of full rank, the corresponding columns (\eqn{n \ge p}{n >= p}) or rows (\eqn{n < p}) are omitted to form a full rank matrix. } \usage{% -> ../R/adjoutlyingness.R fullRank(x, tol = 1e-7, qrx = qr(x, tol=tol)) } \arguments{ \item{x}{a numeric matrix of dimension \eqn{n \times p}{n * p}, or a similar object for which \code{\link{qr}()} works.} \item{tol}{tolerance for determining rank (deficiency). Currently is simply passed to \code{\link{qr}}.} \item{qrx}{optionally may be used to pass a \code{\link{qr}(x, ..)}; only used when \code{p <= n}.} } \value{ a version of the matrix \code{x}, with less columns or rows if \code{x}'s rank was smaller than \code{min(n,p)}. If \code{x} is of full rank, it is returned unchanged. } \author{Martin Maechler} \note{ This is useful for robustness algorithms that rely on \eqn{X} matrices of full rank, e.g., \code{\link{adjOutlyingness}}. This also works for numeric data frames and whenever \code{qr()} works correctly. } \seealso{ \code{\link{qr}}; for more sophisticated rank determination, \code{\link[Matrix]{rankMatrix}} from package \CRANpkg{Matrix}. } \examples{ stopifnot(identical(fullRank(wood), wood)) ## More sophisticated and delicate dim(T <- tcrossprod(data.matrix(toxicity))) # 38 x 38 dim(T. <- fullRank(T)) # 38 x 10 if(requireNamespace("Matrix")) { rMmeths <- eval(formals(Matrix::rankMatrix)$method) rT. <- sapply(rMmeths, function(.m.) Matrix::rankMatrix(T., method = .m.)) print(rT.) # "qr" (= "qrLinpack"): 13, others rather 10 } dim(T.2 <- fullRank(T, tol = 1e-15))# 38 x 18 dim(T.3 <- fullRank(T, tol = 1e-12))# 38 x 13 dim(T.3 <- fullRank(T, tol = 1e-10))# 38 x 13 dim(T.3 <- fullRank(T, tol = 1e-8 ))# 38 x 12 dim(T.) # default from above 38 x 10 dim(T.3 <- fullRank(T, tol = 1e-5 ))# 38 x 10 -- still plot(svd(T, 0,0)$d, log="y", main = "singular values of T", yaxt="n") axis(2, at=10^(-14:5), las=1) ## pretty clearly indicates that rank 10 is "correct" here. } \keyword{algebra} \keyword{array} robustbase/man/milk.Rd0000644000176200001440000000421213312375575014433 0ustar liggesusers\name{milk} \alias{milk} \docType{data} \title{Daudin's Milk Composition Data} \description{ Daudin et al.(1988) give 8 readings on the composition of 86 containers of milk. They speak about 85 observations, but this can be explained with the fact that observations 63 and 64 are identical (as noted by Rocke (1996)). The data set was used for analysing the stability of principal component analysis by the bootstrap method. In the same context, but using high breakdown point robust PCA, these data were analysed by Todorov et al. (1994). Atkinson (1994) used these data for ilustration of the forward search algorithm for identifying of multiple outliers. 
} \usage{data(milk, package="robustbase")} \format{ A data frame with 86 observations on the following 8 variables, all but the first measure units in \emph{grams / liter}. \describe{ \item{\code{X1}}{density} \item{\code{X2}}{fat content} \item{\code{X3}}{protein content} \item{\code{X4}}{casein content} \item{\code{X5}}{cheese dry substance measured in the factory} \item{\code{X6}}{cheese dry substance measured in the laboratory} \item{\code{X7}}{milk dry substance} \item{\code{X8}}{cheese product} } } \source{ Daudin, J.J. Duby, C. and Trecourt, P. (1988) Stability of Principal Component Analysis Studied by the Bootstrap Method; \emph{Statistics} \bold{19}, 241--258. } \references{ Todorov, V., Neyko, N., Neytchev, P. (1994) Stability of High Breakdown Point Robust PCA, in \emph{Short Communications, COMPSTAT'94}; Physica Verlag, Heidelberg. Atkinson, A.C. (1994) Fast Very Robust Methods for the Detection of Multiple Outliers. \emph{J. Amer. Statist. Assoc.} \bold{89} 1329--1339. Rocke, D. M. and Woodruff, D. L. (1996) Identification of Outliers in Multivariate Data; \emph{J. Amer. Statist. Assoc.} \bold{91} (435), 1047--1061. } \examples{ data(milk) (c.milk <- covMcd(milk)) summarizeRobWeights(c.milk $ mcd.wt)# 19..20 outliers umilk <- unique(milk) # dropping obs.64 (== obs.63) summary(cumilk <- covMcd(umilk, nsamp = "deterministic")) # 20 outliers %%not yet ## the best 'crit' we've seen was } \keyword{datasets} robustbase/man/condroz.Rd0000644000176200001440000000245713312375575015166 0ustar liggesusers\name{condroz} \alias{condroz} \encoding{utf8} \docType{data} \title{ Condroz Data } \description{ Dataset with pH-value and Calcium content in soil samples, collected in different communities of the Condroz region in Belgium. The data pertain to a subset of 428 samples with a pH-value between 7.0 and 7.5. } \usage{data(condroz, package="robustbase")} \format{ A data frame with 428 observations on the following 2 variables. \describe{ \item{\code{Ca}}{Calcium content of the soil sample} \item{\code{pH}}{pH value of the soil sample} } } \details{ For more information on the dataset, cf. Goegebeur et al. (2005). } \source{ Hubert and Vandervieren (2006), p. 10. This dataset is also studied in Vandewalle et al. (2004). } \references{ See also those for \code{\link{adjbox}}. Goegebeur, Y., Planchon, V., Beirlant, J., Oger, R. (2005). Quality Assesment of Pedochemical Data Using Extreme Value Methodology, Journal of Applied Science, 5, p. 1092-1102. Vandewalle, B., Beirlant, J., Hubert, M. (2004). A robust estimator of the tail index based on an exponential regression model, in Hubert, M., Pison G., Struyf, A. and S. Van Aelst, ed., Theory and Applications of Recent Robust Methods, Birkhäuser, Basel, p. 367-376. } \examples{ adjbox(condroz$Ca) } \keyword{datasets} robustbase/man/hbk.Rd0000644000176200001440000000254013710054521014230 0ustar liggesusers\name{hbk} \alias{hbk} \docType{data} \title{Hawkins, Bradu, Kass's Artificial Data} \description{ Artificial Data Set generated by Hawkins, Bradu, and Kass (1984). The data set consists of 75 observations in four dimensions (one response and three explanatory variables). It provides a good example of the masking effect. The first 14 observations are outliers, created in two groups: 1--10 and 11--14. Only observations 12, 13 and 14 appear as outliers when using classical methods, but can be easily unmasked using robust distances computed by, e.g., MCD - covMcd(). 
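A small sketch of this masking effect, reusing only components (\code{$center}, \code{$cov}) that this package's examples already rely on:
\preformatted{  data(hbk)
  hbk.x  <- data.matrix(hbk[, 1:3])
  D2.cls <- mahalanobis(hbk.x, colMeans(hbk.x), cov(hbk.x)) # classical (squared) distances
  c.rob  <- covMcd(hbk.x)
  D2.rob <- mahalanobis(hbk.x, c.rob$center, c.rob$cov)     # robust (MCD) distances
  which(D2.cls > qchisq(0.975, df = 3)) # classically, only obs. 12--14 should stand out
  which(D2.rob > qchisq(0.975, df = 3)) # robustly, all of obs. 1--14 should be flagged
}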
} \usage{data(hbk, package="robustbase")} \format{ A data frame with 75 observations on 4 variables, where the last variable is the dependent one. \describe{ \item{X1}{x[,1]} \item{X2}{x[,2]} \item{X3}{x[,3]} \item{Y}{y} } } \note{ This data set is also available in package \CRANpkg{wle} as \code{artificial}. } \source{ Hawkins, D.M., Bradu, D., and Kass, G.V. (1984) Location of several outliers in multiple regression data using elemental sets. \emph{Technometrics} \bold{26}, 197--208. P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.94. } \examples{ data(hbk) plot(hbk) summary(lm.hbk <- lm(Y ~ ., data = hbk)) hbk.x <- data.matrix(hbk[, 1:3]) (cHBK <- covMcd(hbk.x)) } \keyword{datasets} robustbase/man/ltsPlot.Rd0000644000176200001440000001022213434014060015116 0ustar liggesusers\name{plot.lts} \alias{plot.lts} \alias{ltsPlot} \title{Robust LTS Regression Diagnostic Plots} \description{ Four plots (selectable by \code{which}) are currently provided: \enumerate{ \item a plot of the standardized residuals versus their index, \item a plot of the standardized residuals versus fitted values, \item a Normal Q-Q plot of the standardized residuals, and \item a regression diagnostic plot (standardized residuals versus robust distances of the predictor variables). } } \usage{ \method{plot}{lts}(x, which = c("all","rqq","rindex","rfit","rdiag"), classic=FALSE, ask = (which[1] == "all" && dev.interactive()), id.n, \dots) % ltsPlot(x, which = c("all","rqq","rindex","rfit","rdiag"), classic=FALSE, ask=(which=="all" && dev.interactive()), id.n, \dots) } \arguments{ \item{x}{a \code{lts} object, typically result of \code{ltsReg}.} \item{which}{string indicating which plot to show. See the \emph{Details} section for a description of the options. Defaults to \code{"all"}.}. \item{classic}{whether to plot the classical distances too. Default is \code{FALSE}.}. \item{ask}{logical indicating if the user should be \emph{ask}ed before each plot, see \code{\link{par}(ask=.)}. Defaults to \code{which == "all" && \link{dev.interactive}()}. } \item{id.n}{number of observations to be identified by a label starting with the most extreme. Default is the number of identified outliers (can be different for the different plots - see Details).} \item{\dots}{other parameters to be passed through to plotting functions.} } \details{ This function produces several plots based on the robust and classical regression estimates. Which of them to select is specified by the attribute \code{which}. The possible options are: \describe{ \item{\code{rqq}:}{Normal Q-Q plot of the standardized residuals;} \item{\code{rindex}:}{plot of the standardized residuals versus their index;} \item{\code{rfit}:}{plot of the standardized residuals versus fitted values;} \item{\code{rdiag}:}{regression diagnostic plot.} } The normal quantile plot produces a normal Q-Q plot of the standardized residuals. A line is drawn which passes through the first and third quantile. The \code{id.n} residuals with largest distances from this line are identified by labels (the observation number). The default for \code{id.n} is the number of regression outliers (lts.wt==0). In the Index plot and in the Fitted values plot the standardized residuals are displayed against the observation number or the fitted value respectively. A horizontal dashed line is drawn at 0 and two solid horizontal lines are located at +2.5 and -2.5. The id.n residuals with largest absolute values are identified by labels (the observation number). 
The default for id.n is the number regression outliers (lts.wt==0). The regression diagnostic plot, introduced by Rousseeuw and van Zomeren (1990), displays the standardized residuals versus robust distances. Following Rousseeuw and van Zomeren (1990), the horizontal dashed lines are located at +2.5 and -2.5 and the vertical line is located at the upper 0.975 percent point of the chi-squared distribution with p degrees of freedom. The id.n residuals with largest absolute values and/or largest robust Mahalanobis distances are identified by labels (the observation number). The default for id.n is the number of all outliers: regression outliers (lts.wt==0) + leverage (bad and good) points (RD > 0.975 percent point of the chi-squared distribution with p degrees of freedom). } %\value{} \references{ P. J. Rousseeuw and van Zomeren, B. C. (1990). Unmasking Multivariate Outliers and Leverage Points. \emph{Journal of the American Statistical Association} \bold{85}, 633--639. P. J. Rousseeuw and K. van Driessen (1999) A fast algorithm for the minimum covariance determinant estimator. \emph{Technometrics} \bold{41}, 212--223. } \seealso{ \code{\link{covPlot}} } \examples{ data(hbk) lts <- ltsReg(Y ~ ., data = hbk) lts plot(lts, which = "rqq") %% Improve: also show model 'call' } \keyword{hplot} \keyword{multivariate} robustbase/man/Qn.Rd0000644000176200001440000001514414123440375014053 0ustar liggesusers\name{Qn} \alias{Qn} \alias{Qn.old} \alias{s_Qn} % \title{Robust Location-Free Scale Estimate More Efficient than MAD} \description{ Compute the robust scale estimator \eqn{Q_n}{Qn}, an efficient alternative to the MAD. By default, \eqn{Q_n(x_1, \ldots, x_n)}{Qn(x1, .., xn)} is the \eqn{k}-th order statistic (a quantile) of the \code{choose(n, 2)} absolute differences \eqn{|x_i - x_j|}{abs(x[i] - x[j])}, (for \eqn{1 \le i < j \le n}{1 <= i < j <= n}), where by default (originally only possible value) \eqn{k = choose(n\%/\% 2 + 1, 2)} which is about the first quartile (25\% quantile) of these pairwise differences. See the references for more. } \usage{ Qn(x, constant = NULL, finite.corr = is.null(constant) && missing(k), na.rm = FALSE, k = choose(n \%/\% 2 + 1, 2), warn.finite.corr = TRUE) s_Qn(x, mu.too = FALSE, \dots) } \arguments{% >> ../R/qnsn.R << \item{x}{numeric vector of observations.} \item{constant}{number by which the result is multiplied; the default achieves consistency for normally distributed data. Note that until Nov. 2010, \dQuote{thanks} to a typo in the very first papers, a slightly wrong default constant, 2.2219, was used instead of the correct one which is equal to \code{1 / (sqrt(2) * qnorm(5/8))} (as mentioned already on p.1277, after (3.7) in Rousseeuw and Croux (1993)). If you need the old slightly off version for historical reproducibility, you can use \code{Qn.old()}. Note that the relative difference is only about 1 in 1000, and that the correction should not affect the finite sample corrections for \eqn{n \le 9}{n <= 9}. } \item{finite.corr}{logical indicating if the finite sample bias correction factor should be applied. Defaults to \code{TRUE} unless \code{constant} is specified. Note the for non-default \code{k}, the consistency \code{constant} already depends on \code{n} leading to \emph{some} finite sample correction, but no simulation-based small-\code{n} correction factors are available.} \item{na.rm}{logical specifying if missing values (\code{\link{NA}}) should be removed from \code{x} before further computation. 
If false as by default, and if there are \code{NA}s, i.e., \code{if(anyNA(x))}, the result will be \code{NA}.} \item{k}{integer, typically half of n, specifying the \dQuote{quantile}, i.e., rather the order statistic that \code{Qn()} should return; for the Qn() proper, this has been hard wired to \code{choose(n\%/\%2 +1, 2)}, i.e., \eqn{\lfloor\frac{n}{2}\rfloor +1}{floor(n/2) +1}. Choosing a large \code{k} is less robust but allows to get non-zero results in case the default \code{Qn()} is zero.} \item{warn.finite.corr}{logical indicating if a \code{\link{warning}} should be signalled when \code{k} is non-default, in which case specific small-\eqn{n} correction is not yet provided.} \item{mu.too}{logical indicating if the \code{\link[stats]{median}(x)} should also be returned for \code{s_Qn()}.} \item{\dots}{potentially further arguments for \code{s_Qn()} passed to \code{Qn()}.} } \value{ \code{Qn()} returns a number, the \eqn{Q_n}{Qn} robust scale estimator, scaled to be consistent for \eqn{\sigma^2} and i.i.d. Gaussian observations, optionally bias corrected for finite samples. \code{s_Qn(x, mu.too=TRUE)} returns a length-2 vector with location (\eqn{\mu}) and scale; this is typically only useful for \code{\link{covOGK}(*, sigmamu = s_Qn)}. } \details{ As the (default, consistency) constant needed to be corrected, the finite sample correction has been based on a much more extensive simulation, and on a 3rd or 4th degree polynomial model in \eqn{1/n} for odd or even n, respectively. } \references{ Rousseeuw, P.J. and Croux, C. (1993) Alternatives to the Median Absolute Deviation, \emph{Journal of the American Statistical Association} \bold{88}, 1273--1283. \doi{10.2307/2291267}% JSTOR % MM: ~/save/papers/Rousseeuw/93/R+Croux_MAD_Sn_Qn.pdf Christophe Croux and Peter J. Rousseeuw (1992) A class of high-breakdown scale estimators based on subranges , \emph{Communications in Statistics - Theory and Methods} \bold{21}, 1935--1951; \doi{10.1080/03610929208830889} Christophe Croux and Peter J. Rousseeuw (1992) Time-Efficient Algorithms for Two Highly Robust Estimators of Scale, \emph{Computational Statistics, Vol. 1}, ed. Dodge and Whittaker, Physica-Verlag Heidelberg, 411--428; available via Springer Link. % MM: ~/save/papers/robust-diverse/Croux-Rousseeuw-Timeff_Scale_1992.pdf %% no longer \url{http://win-www.uia.ac.be/u/statis/abstract/Timeff92.htm}. About the typo in the \code{constant}:\cr Christophe Croux (2010) Private e-mail, Fri Jul 16, w/ Subject \emph{Re: Slight inaccuracy of Qn implementation \dots\dots}. } \seealso{\code{\link[stats]{mad}} for the \sQuote{most robust} but much less efficient scale estimator; \code{\link{Sn}} for a similar faster but less efficient alternative. Finally, \code{\link{scaleTau2}} which some consider \dQuote{uniformly} better than Qn or competitors. } \author{Original Fortran code: Christophe Croux and Peter Rousseeuw \email{rousse@wins.uia.ac.be}. \cr Port to C and R: Martin Maechler, \email{maechler@R-project.org} } \examples{ set.seed(153) x <- sort(c(rnorm(80), rt(20, df = 1))) s_Qn(x, mu.too = TRUE) Qn(x, finite.corr = FALSE) ## A simple pure-R version of Qn() -- slow and memory-rich for large n: O(n^2) Qn0R <- function(x, k = choose(n \%/\% 2 + 1, 2)) { % n <- length(x <- sort(x)) if(n == 0) return(NA) else if(n == 1) return(0.) 
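    ## Added comments on the remaining steps: form all n(n-1)/2 pairwise
    ## differences x[i] - x[j] (i > j; all nonnegative since x is sorted) via
    ## outer(), then return their k-th smallest value using a partial sort,
    ## i.e., O(n^2) in both time and memory.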
stopifnot(is.numeric(k), k == as.integer(k), 1 <= k, k <= n*(n-1)/2) m <- outer(x,x,"-")# abs not needed as x[] is sorted sort(m[lower.tri(m)], partial = k)[k] } (Qx1 <- Qn(x, constant=1)) # 0.5498463 ## the C-algorithm "rounds" to 'float' single precision .. stopifnot(all.equal(Qx1, Qn0R(x), tol = 1e-6)) (qn <- Qn(c(1:4, 10, Inf, NA), na.rm=TRUE)) stopifnot(is.finite(qn), all.equal(4.075672524, qn, tol=1e-10)) ## -- compute for different 'k' : n <- length(x) # = 100 here (k0 <- choose(floor(n/2) + 1, 2)) # 51*50/2 == 1275 stopifnot(identical(Qx1, Qn(x, constant=1, k=k0))) nn2 <- n*(n-1)/2 all.k <- 1:nn2 system.time(Qss <- sapply(all.k, function(k) Qn(x, 1, k=k))) system.time(Qs <- Qn (x, 1, k = all.k)) system.time(Qs0 <- Qn0R(x, k = all.k) ) stopifnot(exprs = { Qs[1] == min(diff(x)) Qs[nn2] == diff(range(x)) all.equal(Qs, Qss, tol = 1e-15) # even exactly all.equal(Qs0, Qs, tol = 1e-7) # see 2.68e-8, as Qn() C-code rounds to (float) }) plot(2:nn2, Qs[-1], type="b", log="y", main = "Qn(*, k), k = 2..n(n-1)/2") } \keyword{robust} \keyword{univar} robustbase/man/Sn.Rd0000644000176200001440000000463014123440375014053 0ustar liggesusers\name{Sn} \alias{Sn} \alias{s_Sn} % \title{Robust Location-Free Scale Estimate More Efficient than MAD} \description{ Compute the robust scale estimator \eqn{S_n}{Sn}, an efficient alternative to the MAD. } \usage{ Sn(x, constant = 1.1926, finite.corr = missing(constant), na.rm = FALSE) s_Sn(x, mu.too = FALSE, \dots) } \arguments{ \item{x}{numeric vector of observations.} \item{constant}{number by which the result is multiplied; the default achieves consistency for normally distributed data.} \item{finite.corr}{logical indicating if the finite sample bias correction factor should be applied. Defaults to \code{TRUE} unless \code{constant} is specified.} \item{na.rm}{logical specifying if missing values (\code{\link{NA}}) should be removed from \code{x} before further computation. If false as by default, and if there are \code{NA}s, i.e., \code{if(anyNA(x))}, the result will be \code{NA}.} \item{mu.too}{logical indicating if the \code{\link[stats]{median}(x)} should also be returned for \code{s_Sn()}.} \item{\dots}{potentially further arguments for \code{s_Sn()} passed to \code{Sn()}.} } \value{ \code{Sn()} returns a number, the \eqn{S_n}{Sn} robust scale estimator, scaled to be consistent for \eqn{\sigma^2} and i.i.d. Gaussian observations, optionally bias corrected for finite samples. \code{s_Sn(x, mu.too=TRUE)} returns a length-2 vector with location (\eqn{\mu}) and scale; this is typically only useful for \code{\link{covOGK}(*, sigmamu = s_Sn)}. } \details{ \eqn{S_n}{Sn} is, up to the consistency \code{constant} and (optionally) a finite sample correction factor, the \dQuote{low} median over \eqn{i} of the \dQuote{high} median over \eqn{j} of \eqn{\left|x_i - x_j\right|}{|x[i] - x[j]|}; see Rousseeuw and Croux (1993) for the exact definition and its properties. } \references{ Rousseeuw, P.J. and Croux, C. (1993) Alternatives to the Median Absolute Deviation, \emph{Journal of the American Statistical Association} \bold{88}, 1273--1283. } \seealso{\code{\link[stats]{mad}} for the \sQuote{most robust} but much less efficient scale estimator; \code{\link{Qn}} for a similar more efficient but slower alternative; \code{\link{scaleTau2}}. } \author{Original Fortran code: Christophe Croux and Peter Rousseeuw \email{rousse@wins.uia.ac.be}.
\cr Port to C and R: Martin Maechler, \email{maechler@R-project.org} } \examples{ x <- c(1:10, 100+1:9)# 9 outliers out of 19 Sn(x) Sn(x, c=1)# 9 Sn(x[1:18], c=1)# 9 set.seed(153) x <- sort(c(rnorm(80), rt(20, df = 1))) s_Sn(x, mu.too=TRUE) (s <- Sn(c(1:4, 10, Inf, NA), na.rm=TRUE)) stopifnot(is.finite(s), all.equal(3.5527554, s, tol=1e-10)) } \keyword{robust} \keyword{univar} robustbase/man/toxicity.Rd0000644000176200001440000000350713713564014015352 0ustar liggesusers\name{toxicity} \encoding{utf8} \alias{toxicity} \docType{data} \title{Toxicity of Carboxylic Acids Data} \description{ The aim of the experiment was to predict the toxicity of carboxylic acids on the basis of several molecular descriptors. } \usage{data(toxicity, package="robustbase")} \format{ A data frame with 38 observations on the following 10 variables which are attributes for carboxylic acids: \describe{ \item{\code{toxicity}}{aquatic toxicity, defined as \eqn{\log(IGC_{50}^{-1})}{log(IGC50^(-1))}; typically the \dQuote{response}.} \item{\code{logKow}}{\eqn{log Kow}, the partition coefficient} \item{\code{pKa}}{pKa: the dissociation constant} \item{\code{ELUMO}}{\bold{E}nergy of the \bold{l}owest \bold{u}noccupied \bold{m}olecular \bold{o}rbital} \item{\code{Ecarb}}{Electrotopological state of the \bold{carb}oxylic group} \item{\code{Emet}}{Electrotopological state of the \bold{met}hyl group} \item{\code{RM}}{Molar refractivity} \item{\code{IR}}{Refraction index} \item{\code{Ts}}{Surface tension} \item{\code{P}}{Polarizability} } } % \details{ % } \source{ The website accompanying the MMY-book: \url{https://www.wiley.com/legacy/wileychi/robust_statistics/} } \references{ Maguna, F.P., Núñez, M.B., Okulik, N.B. and Castro, E.A. (2003) Improved QSAR analysis of the toxicity of aliphatic carboxylic acids; \emph{Russian Journal of General Chemistry} \bold{73}, 1792--1798. } \examples{ data(toxicity) summary(toxicity) plot(toxicity) plot(toxicity ~ pKa, data = toxicity) ## robustly scale the data (to scale 1) using Qn (scQ.tox <- sapply(toxicity, Qn)) scTox <- scale(toxicity, center = FALSE, scale = scQ.tox) csT <- covOGK(scTox, n.iter = 2, sigmamu = s_Qn, weight.fn = hard.rejection) as.dist(round(cov2cor(csT$cov), 2)) } \keyword{datasets} robustbase/man/wood.Rd0000644000176200001440000000210613312375575014447 0ustar liggesusers\name{wood} \alias{wood} \docType{data} \title{Modified Data on Wood Specific Gravity} \description{ The original data are from Draper and Smith (1966) and were used to determine the influence of anatomical factors on wood specific gravity, with five explanatory variables and an intercept. These data were contaminated by replacing a few observations with outliers. } \usage{data(wood, package="robustbase")} \format{ A data frame with 20 observations on the following 6 variables. \describe{ \item{x1, x2, x3, x4, x5}{explanatory \dQuote{anatomical} wood variables.} \item{y}{wood specific gravity, the target variable.} } } \source{ Draper and Smith (1966, p.227) Peter J. Rousseeuw and Annick M. Leroy (1987) \emph{Robust Regression and Outlier Detection} Wiley, p.243, table 8. 
} \examples{ data(wood) plot(wood) summary( lm.wood <- lm(y ~ ., data = wood)) summary(rlm.wood <- MASS::rlm(y ~ ., data = wood)) summary(lts.wood <- ltsReg(y ~ ., data = wood)) wood.x <- as.matrix(wood)[,1:5] c_wood <- covMcd(wood.x) c_wood } \keyword{datasets} robustbase/man/M.psi.Rd0000644000176200001440000001756113710054521014463 0ustar liggesusers\name{Mpsi} \title{Psi / Chi / Wgt / Rho Functions for *M-Estimation} \alias{Mchi} \alias{Mpsi} \alias{Mwgt} \alias{MrhoInf} \alias{.Mchi} \alias{.Mpsi} \alias{.Mwgt} \alias{.Mwgt.psi1} \alias{.MrhoInf} \alias{.psi2ipsi} \description{ Compute Psi / Chi / Wgt / Rho functions for M-estimation, i.e., including MM, etc. For definitions and details, please use the vignette \href{https://cran.r-project.org/package=robustbase/vignettes/psi_functions.pdf}{% \dQuote{\eqn{\psi}{psi}-Functions Available in Robustbase}}. \code{MrhoInf(x)} computes \eqn{\rho(\infty)}{rho(Inf)}, i.e., the normalizing or scaling constant for the transformation from \eqn{\rho(\cdot)}{rho(.)} to \eqn{\tilde\rho(\cdot)}{rho~(.)}, where the latter, aka as \eqn{\chi()}{chi()} fulfills \eqn{\tilde\rho(\infty) = 1}{rho~(Inf) = 1} which makes only sense for \dQuote{redescending} psi functions, i.e., not for \code{"huber"}. \code{Mwgt(x, *)} computes \eqn{\psi(x)/x} (fast and numerically accurately). } \usage{ Mpsi(x, cc, psi, deriv = 0) Mchi(x, cc, psi, deriv = 0) Mwgt(x, cc, psi) MrhoInf(cc, psi) .Mwgt.psi1(psi, cc = .Mpsi.tuning.default(psi)) } \arguments{ \item{x}{numeric (\dQuote{abscissa} values) vector, possibly with \code{\link{attributes}} such as \code{\link{dim}} or \code{\link{names}}, etc. These are preserved for the \code{M*()} functions (but not the \code{.M()} ones).} \item{cc}{numeric tuning constant, for some \code{psi} of length \eqn{> 1}.} \item{psi}{a string specifying the psi / chi / rho / wgt function; either \code{"huber"}, or one of the same possible specifiers as for \code{psi} in \code{\link{lmrob.control}}, i.e. currently, \code{"bisquare"}, \code{"lqq"}, \code{"welsh"}, \code{"optimal"}, \code{"hampel"}, or \code{"ggw"}.} \item{deriv}{an integer, specifying the \emph{order} of derivative to consider; particularly, \code{Mpsi(x, *, deriv = -1)} is the principal function of \eqn{\psi()}{psi()}, typically denoted \eqn{\rho()}{rho()} in the literature. For some psi functions, currently \code{"huber"}, \code{"bisquare"}, \code{"hampel"}, and \code{"lqq"}, \code{deriv = 2} is implemented, for the other psi's only \eqn{d \in \{-1,0,1\}}{d in {-1,0,1\}}.}} } \details{ Theoretically, \code{Mchi()} would not be needed explicitly as it can be computed from \code{Mpsi()} and \code{MrhoInf()}, namely, by \preformatted{Mchi(x, *, deriv = d) == Mpsi(x, *, deriv = d-1) / MrhoInf(*)} for \eqn{d = 0, 1, 2} (and \sQuote{*} containing \code{par, psi}, and equality is in the sense of \code{\link{all.equal}(x,y, tol)} with a small \code{tol}. Similarly, \code{Mwgt} would not be needed strictly, as it could be defined via \code{Mpsi}), but the explicit definition takes care of 0/0 and typically is of a more simple form. For experts, there are slightly even faster versions, \code{.Mpsi()}, \code{.Mwgt()}, etc. \code{.Mwgt.psi1()} mainly a utility for \code{\link{nlrob}()}, returns a \emph{\code{\link{function}}} with similar semantics as \code{\link[MASS]{psi.hampel}}, \code{\link[MASS]{psi.huber}}, or \code{\link[MASS]{psi.bisquare}} from package \CRANpkg{MASS}. 
Namely, a function with arguments \code{(x, deriv=0)}, which for \code{deriv=0} computes \code{Mwgt(x, cc, psi)} and otherwise computes \code{Mpsi(x, cc, psi, deriv=deriv)}. \code{.Mpsi()}, \code{.Mchi()}, \code{.Mwgt()}, and \code{.MrhoInf()} are low-level versions of \code{Mpsi()}, \code{Mchi()}, \code{Mwgt()}, and \code{MrhoInf()}, respectively, and \code{.psi2ipsi()} provides the psi-function integer codes needed for \code{ipsi} argument of the \code{.M*()} functions. For \code{psi = "ggw"}, the \eqn{\rho()}{rho()} function has no closed form and must be computed via numerical integration, apart from 6 special cases including the defaults, see the \sQuote{Details} in \code{help(\link{.psi.ggw.findc})}. } \value{ a numeric vector of the same length as \code{x}, with corresponding function (or derivative) values. } \references{ See the vignette about %% ../vignettes/psi_functions.Rnw : \dQuote{\eqn{\psi}{psi}-Functions Available in Robustbase}. } \author{ Manuel Koller, notably for the original C implementation; tweaks and speedup via \code{\link{.Call}} and \code{.M*()} etc by Martin Maechler. } \seealso{ \code{\link{psiFunc}} and the \code{\linkS4class{psi_func}} class, both of which provide considerably more on the \R side, but are less optimized for speed. \code{\link{.Mpsi.tuning.defaults}}, etc, for tuning constants' defaults for\code{lmrob()}, and \code{\link{.psi.ggw.findc}()} utilities to construct such constants' vectors. } \examples{ x <- seq(-5,7, by=1/8) matplot(x, cbind(Mpsi(x, 4, "biweight"), Mchi(x, 4, "biweight"), Mwgt(x, 4, "biweight")), type = "l") abline(h=0, v=0, lty=2, col=adjustcolor("gray", 0.6)) hampelPsi (ccHa <- hampelPsi @ xtras $ tuningP $ k) psHa <- hampelPsi@psi(x) % FIXME: interesting as long as hampelPsi does not use Mpsi(... "hampel") ! 
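## Added sketch: check the identity documented in the 'Details' above,
##   Mchi(x, *, deriv = d) == Mpsi(x, *, deriv = d-1) / MrhoInf(*),  here for d = 0:
stopifnot(all.equal(Mchi(x, ccHa, "hampel"),
                    Mpsi(x, ccHa, "hampel", deriv = -1) / MrhoInf(ccHa, "hampel")))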
## using Mpsi(): Mp.Ha <- Mpsi(x, cc = ccHa, psi = "hampel") stopifnot(all.equal(Mp.Ha, psHa, tolerance = 1e-15)) psi.huber <- .Mwgt.psi1("huber") if(getRversion() >= "3.0.0") stopifnot(identical(psi.huber, .Mwgt.psi1("huber", 1.345), ignore.env=TRUE)) curve(psi.huber(x), -3, 5, col=2, ylim = 0:1) curve(psi.huber(x, deriv=1), add=TRUE, col=3) ## and show that this is indeed the same as MASS::psi.huber() : x <- runif(256, -2,3) stopifnot(all.equal(psi.huber(x), MASS::psi.huber(x)), all.equal( psi.huber(x, deriv=1), as.numeric(MASS::psi.huber(x, deriv=1)))) ## and how to get MASS::psi.hampel(): psi.hampel <- .Mwgt.psi1("Hampel", c(2,4,8)) x <- runif(256, -4, 10) stopifnot(all.equal(psi.hampel(x), MASS::psi.hampel(x)), all.equal( psi.hampel(x, deriv=1), as.numeric(MASS::psi.hampel(x, deriv=1)))) ## "lqq" / "LQQ" and its tuning constants: ctl0 <- lmrob.control(psi = "lqq", tuning.psi=c(-0.5, 1.5, 0.95, NA)) ctl <- lmrob.control(psi = "lqq", tuning.psi=c(-0.5, 1.5, 0.90, NA)) ctl0$tuning.psi ## keeps the vector _and_ has "constants" attribute: ## [1] -0.50 1.50 0.95 NA ## attr(,"constants") ## [1] 1.4734061 0.9822707 1.5000000 ctl$tuning.psi ## ditto: ## [1] -0.5 1.5 0.9 NA \\ .."constants" 1.213726 0.809151 1.500000 stopifnot(all.equal(Mpsi(0:2, cc = ctl$tuning.psi, psi = ctl$psi), c(0, 0.977493, 1.1237), tol = 6e-6)) x <- seq(-4,8, by = 1/16) ## Show how you can use .Mpsi() equivalently to Mpsi() stopifnot(all.equal( Mpsi(x, cc = ctl$tuning.psi, psi = ctl$psi), .Mpsi(x, ccc = attr(ctl$tuning.psi, "constants"), ipsi = .psi2ipsi("lqq")))) stopifnot(all.equal( Mpsi(x, cc = ctl0$tuning.psi, psi = ctl0$psi, deriv=1), .Mpsi(x, ccc = attr(ctl0$tuning.psi, "constants"), ipsi = .psi2ipsi("lqq"), deriv=1))) ## M*() preserving attributes : x <- matrix(x, 32, 8, dimnames=list(paste0("r",1:32), col=letters[1:8])) comment(x) <- "a vector which is a matrix" px <- Mpsi(x, cc = ccHa, psi = "hampel") stopifnot(identical(attributes(x), attributes(px))) ## The "optimal" psi exists in two versions "in the litterature": --- ## Maronna et al. 2006, 5.9.1, p.144f: psi.M2006 <- function(x, c = 0.013) sign(x) * pmax(0, abs(x) - c/dnorm(abs(x))) ## and the other is the one in robustbase from 'robust': via Mpsi(.., "optimal") ## Here are both for 95\% efficiency: (c106 <- .Mpsi.tuning.default("optimal")) c1 <- curve(Mpsi(x, cc = c106, psi="optimal"), -5, 7, n=1001) c2 <- curve(psi.M2006(x), add=TRUE, n=1001, col=adjustcolor(2,0.4), lwd=2) abline(0,1, v=0, h=0, lty=3) ## the two psi's are similar, but really quite different ## a zoom into Maronna et al's: c3 <- curve(psi.M2006(x), -.5, 1, n=1001); abline(h=0,v=0, lty=3);abline(0,1, lty=2) } \keyword{robust} robustbase/man/Animals2.Rd0000644000176200001440000000376613710054521015145 0ustar liggesusers\name{Animals2} \alias{Animals2} \title{Brain and Body Weights for 65 Species of Land Animals} \description{ A data frame with average brain and body weights for 62 species of land mammals and three others. Note that this is simply the union of \code{\link[MASS]{Animals}} and \code{\link[MASS]{mammals}}. } \usage{ Animals2 } \format{ \describe{ \item{\code{body}}{body weight in kg} \item{\code{brain}}{brain weight in g} } } \source{ Weisberg, S. (1985) \emph{Applied Linear Regression.} 2nd edition. Wiley, pp. 144--5. P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection.} Wiley, p. 57. } \references{ Venables, W. N. and Ripley, B. D. (2002) \emph{Modern Applied Statistics with S.} Forth Edition. Springer. 
} \note{ After loading the \CRANpkg{MASS} package, the data set is simply constructed by \code{Animals2 <- local({D <- rbind(Animals, mammals); unique(D[order(D$body,D$brain),])})}. Rousseeuw and Leroy (1987)'s \sQuote{brain} data is the same as \CRANpkg{MASS}'s \code{Animals} (with Rat and Brachiosaurus interchanged, see the example below). } \examples{ data(Animals2) ## Sensible Plot needs doubly logarithmic scale plot(Animals2, log = "xy") ## Regression example plot: plotbb <- function(bbdat) { d.name <- deparse(substitute(bbdat)) plot(log(brain) ~ log(body), data = bbdat, main = d.name) abline( lm(log(brain) ~ log(body), data = bbdat)) abline(MASS::rlm(log(brain) ~ log(body), data = bbdat), col = 2) legend("bottomright", leg = c("lm", "rlm"), col=1:2, lwd=1, inset = 1/20) } plotbb(bbdat = Animals2) ## The `same' plot for Rousseeuw's subset: data(Animals, package = "MASS") brain <- Animals[c(1:24, 26:25, 27:28),] plotbb(bbdat = brain) lbrain <- log(brain) plot(mahalanobis(lbrain, colMeans(lbrain), var(lbrain)), main = "Classical Mahalanobis Distances") mcd <- covMcd(lbrain) plot(mahalanobis(lbrain,mcd$center,mcd$cov), main = "Robust (MCD) Mahalanobis Distances") } \keyword{datasets} robustbase/man/nlrob.Rd0000644000176200001440000003370413710054521014606 0ustar liggesusers\name{nlrob} \title{Robust Fitting of Nonlinear Regression Models} \alias{nlrob} \alias{fitted.nlrob} \alias{residuals.nlrob} \alias{predict.nlrob} \alias{vcov.nlrob} \description{ \code{nlrob} fits a nonlinear regression model by robust methods. Per default, by an M-estimator, using iterated reweighted least squares (called \dQuote{IRLS} or also \dQuote{IWLS}). } \usage{ nlrob(formula, data, start, lower, upper, weights = NULL, na.action = na.fail, method = c("M", "MM", "tau", "CM", "mtl"), psi = .Mwgt.psi1("huber", cc=1.345), scale = NULL, test.vec = c("resid", "coef", "w"), maxit = 20, tol = 1e-06, acc, algorithm = "default", doCov = FALSE, model = FALSE, control = if(method == "M") nls.control() else nlrob.control(method, optArgs = list(trace=trace), ...), trace = FALSE, ...) \method{fitted}{nlrob}(object, ...) \method{residuals}{nlrob}(object, type = , ...)% FIXME: more 'type's + DOCU \method{predict}{nlrob}(object, newdata, ...) } \arguments{ \item{formula}{a nonlinear \code{\link{formula}} including variables and parameters of the model, such as \code{y ~ f(x, theta)} (cf. \code{\link{nls}}). (For some checks: if \eqn{f(.)} is linear, then we need parentheses, e.g., \code{y ~ (a + b * x)}; (note that \code{._nlrob.w} is not allowed as variable or parameter name)) %% FIXME in code -- long overdue, as nls() is more flexible *SINCE* R 2.2.1 %% Do not use \code{w} as variable or parameter name! %% FIXME: this should really no longer be needed ==> add a check } \item{data}{an optional data frame containing the variables in the model. If not found in \code{data}, the variables are taken from \code{environment(formula)}, typically the environment from which \code{nlrob} is called.} \item{start}{a named numeric vector of starting parameters estimates, only for \code{method = "M"}.} \item{lower, upper}{numeric vectors of lower and upper bounds; if needed, will be replicated to be as long as the longest of \code{start}, \code{lower} or \code{upper}. For (the default) \code{method = "M"}, if the bounds are unspecified all parameters are assumed to be unconstrained; also, for method \code{"M"}, bounds can only be used with the \code{"port"} algorithm. They are ignored, with a warning, in cases they have no effect. 
For all other methods, currently these bounds \emph{must} be specified as finite values, and one of them must have \code{\link{names}} matching the parameter names in \code{formula}. For methods \code{"CM"} and \code{"mtl"}, the bounds must \emph{additionally} have an entry named \code{"sigma"} as that is determined simultaneously in the same optimization, and hence its \code{lower} bound must not be negative. } \item{weights}{an optional vector of weights to be used in the fitting process (for intrinsic weights, not the weights \code{w} used in the iterative (robust) fit). I.e., \code{sum(w * e^2)} is minimized with \code{e} = residuals, \eqn{e_i = y_i - f(xreg_i, \theta)}{e[i] = y[i] - f(xreg[i], theta)}, where \eqn{f(x,\theta)}{f(x, theta)} is the nonlinear function, and \code{w} are the robust weights from \code{resid * weights}.} \item{na.action}{a function which indicates what should happen when the data contain \code{NA}s. The default action is for the procedure to fail. If NAs are present, use \code{na.exclude} to have residuals with \code{length == nrow(data) == length(w)}, where \code{w} are the weights used in the iterative robust loop. This is better if the explanatory variables in \code{formula} are time series (and so the NA location is important). For this reason, \code{na.omit}, which leads to omission of cases with missing values on any required variable, is not suitable here since the residuals length is different from \code{nrow(data) == length(w)}. } \item{method}{a character string specifying which method to use. The default is \code{"M"}, for historical and back-compatibility reasons. For the other methods, primarily see \code{\link{nlrob.algorithms}}. % nlrob-algos.Rd \describe{ \item{"M"}{Computes an M-estimator, using \code{\link{nls}(*, weights=*)} iteratively (hence, IRLS) with weights equal to \eqn{\psi(r_i) / r_i}, where \eqn{r_i} is the i-the residual from the previous fit.} \item{"MM"}{Computes an MM-estimator, starting from \code{init}, either "S" or "lts".}% more: FIXME \item{"tau"}{Computes a Tau-estimator.} \item{"CM"}{Computes a \dQuote{Constrained M} (=: CM) estimator.} \item{"mtl"}{Compute as \dQuote{Maximum Trimmed Likelihood} (=: MTL) estimator.} } Note that all methods but \code{"M"} are \dQuote{random}, hence typically to be preceded by \code{\link{set.seed}()} in usage, see also \code{\link{nlrob.algorithms}}. % nlrob-algos.Rd } \item{psi}{a function (possibly by name) of the form \code{g(x, 'tuning constant(s)', deriv)} that for \code{deriv=0} returns \eqn{\psi(x)/x}{psi(x)/x} and for \code{deriv=1} returns \eqn{\psi'(x)}{psi'(x)}. Note that tuning constants can \emph{not} be passed separately, but directly via the specification of \code{psi}, typically via a simple \code{\link{.Mwgt.psi1}()} call as per default. Note that this has been a deliberately non-backcompatible change for robustbase version 0.90-0 (summer 2013 -- early 2014). } \item{scale}{when not \code{NULL} (default), a positive number specifying a scale kept \emph{fixed} during the iterations (and returned as \code{Scale} component).} \item{test.vec}{character string specifying the convergence criterion. 
The relative change is tested for residuals with a value of \code{"resid"} (the default), for coefficients with \code{"coef"}, and for weights with \code{"w"}.} \item{maxit}{maximum number of iterations in the robust loop.} \item{tol}{non-negative convergence tolerance for the robust fit.} \item{acc}{previous name for \code{tol}, now deprecated.} \item{algorithm}{character string specifying the algorithm to use for \code{\link{nls}}, see there, only when \code{method = "M"}. The default algorithm is a Gauss-Newton algorithm.} \item{doCov}{a logical specifying if \code{nlrob()} should compute the asymptotic variance-covariance matrix (see \code{\link{vcov}}) already. This used to be hard-wired to \code{TRUE}; however, the default has been set to \code{FALSE}, as \code{\link{vcov}(obj)} and \code{\link{summary}(obj)} can easily compute it when needed.} \item{model}{a \code{\link{logical}} indicating if the \code{\link{model.frame}} should be returned as well.} \item{control}{an optional list of control settings. \describe{ \item{for \code{method = "M"}:}{settings for \code{\link{nls}()}. See \code{\link{nls.control}} for the names of the settable control values and their effect.} \item{for all \code{method}s but \code{"M"}:}{a list, typically resulting from \code{\link{nlrob.control}(method, *)}.} } } \item{trace}{logical value indicating if a \dQuote{trace} of the \code{nls} iteration progress should be printed. Default is \code{FALSE}. \cr If \code{TRUE}, in each robust iteration, the residual sum-of-squares and the parameter values are printed at the conclusion of each \code{nls} iteration. When the \code{"plinear"} algorithm is used, the conditional estimates of the linear parameters are printed after the nonlinear parameters.} \item{object}{an \R object of class \code{"nlrob"}, typically resulting from \code{nlrob(..)}.} \item{\dots}{for \code{nlrob}: only when \code{method} is \emph{not} \code{"M"}, optional arguments for \code{\link{nlrob.control}}; \cr for other functions: potentially optional arguments passed to the extractor methods.} \item{type}{a string specifying the \emph{type} of residuals desired. Currently, \code{"response"} and \code{"working"} are supported. %% FIXME: 1. document these (here) 2. write and support more types } \item{newdata}{a data frame (or list) with the same names as the original \code{data}, see e.g., \code{\link{predict.nls}}.} } \details{ For \code{method = "M"}, iterated reweighted least squares (\dQuote{IRLS} or \dQuote{IWLS}) is used, calling \code{\link{nls}(*, weights= .)} where \code{weights} \eqn{w_i} are proportional to \eqn{\psi(r_i/ \hat{\sigma})}{psi(r_i/ sig.)}. All other methods minimize differently, and work \bold{without} \code{\link{nls}}. See \link{nlrob.algorithms} % -> nlrob-algos.Rd for details. } \value{ \code{nlrob()} returns an object of S3 class \code{"nlrob"}, for \code{method = "M"} also inheriting from class \code{"nls"}, (see \code{\link{nls}}). It is a list with several components; they are not documented yet, as some of them will probably change. Instead, rather use \dQuote{accessor} methods, where possible: There are methods (at least) for the generic accessor functions \code{\link{summary}()}, \code{\link{coefficients}()} (aka \code{coef()}) \code{fitted.values()}, \code{residuals()}, \code{\link{sigma}()} and \code{\link{vcov}()}, the latter for the variance-covariance matrix of the estimated parameters, as returned by \code{coef()}, i.e., not including the variance of the errors. 
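A minimal sketch of these accessors, assuming \code{fm} is any fitted \code{nlrob()} object (e.g., from the \emph{Examples} below):
\preformatted{  coef(fm)        # robust parameter estimates
  sigma(fm)       # robust residual scale estimate
  vcov(fm)        # estimated covariance matrix of coef(fm)
  head(residuals(fm))
  head(fitted(fm))
}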
For \code{nlrob()} results, \code{\link{estimethod}()} returns the \dQuote{estimation method}, which coincides with the \code{method} argument used. \code{residuals(.)}, by default \code{type = "response"}, returns the residuals \eqn{e_i}, defined above as \eqn{e_i = Y_i - f_(x_i, \hat\theta)}{e[i] = Y[i] - f(x[i], theta^)}. These differ from the standardized or weighted residuals which, e.g., are assumed to be normally distributed, and a version of which is returned in \code{working.residuals} component. %% and another is working.residuals/Scale } \author{ \describe{ \item{\code{method = "M"}:}{ Andreas Ruckstuhl (inspired by \code{\link[MASS]{rlm}}() and \code{\link{nls}}()), in July 1994 for S-plus.\cr Christian Sangiorgio did the update to \R and corrected some errors, from June 2002 to January 2005, and Andreas contributed slight changes and the first methods in August 2005.} \item{\code{method = "MM"}, etc:}{Originally all by Eduardo L. T. Conceicao, see \code{\link{nlrob.algorithms}}:} % nlrob-algos.Rd } Since then, the help page, testing, more cleanup, new methods: Martin Maechler. } \note{ This function (with the only method \code{"M"}) used to be named \code{rnls} and has been in package \CRANpkg{sfsmisc} in the past, but been dropped there. } \seealso{ \code{\link{nls}}, \code{\link[MASS]{rlm}}. } \examples{ DNase1 <- DNase[ DNase$Run == 1, ] ## note that selfstarting models don't work yet % <<< FIXME !!! ##--- without conditional linearity --- ## classical fmNase1 <- nls( density ~ Asym/(1 + exp(( xmid - log(conc) )/scal ) ), data = DNase1, start = list( Asym = 3, xmid = 0, scal = 1 ), trace = TRUE ) summary( fmNase1 ) ## robust RmN1 <- nlrob( density ~ Asym/(1 + exp(( xmid - log(conc) )/scal ) ), data = DNase1, trace = TRUE, start = list( Asym = 3, xmid = 0, scal = 1 )) summary( RmN1 ) ##--- using conditional linearity --- ## classical fm2DNase1 <- nls( density ~ 1/(1 + exp(( xmid - log(conc) )/scal ) ), data = DNase1, start = c( xmid = 0, scal = 1 ), alg = "plinear", trace = TRUE ) summary( fm2DNase1 ) ## robust frm2DNase1 <- nlrob(density ~ 1/(1 + exp(( xmid - log(conc) )/scal ) ), data = DNase1, start = c( xmid = 0, scal = 1 ), alg = "plinear", trace = TRUE ) summary( frm2DNase1 ) ## Confidence for linear parameter is quite smaller than "Asym" above c1 <- coef(summary(RmN1)) c2 <- coef(summary(frm2DNase1)) rownames(c2)[rownames(c2) == ".lin"] <- "Asym" stopifnot(all.equal(c1[,1:2], c2[rownames(c1), 1:2], tol = 0.09)) # 0.07315 ### -- new examples -- "moderate outlier": DN2 <- DNase1 DN2[10,"density"] <- 2*DN2[10,"density"] fm3DN2 <- nls(density ~ Asym/(1 + exp(( xmid - log(conc) )/scal ) ), data = DN2, trace = TRUE, start = list( Asym = 3, xmid = 0, scal = 1 )) ## robust Rm3DN2 <- nlrob(density ~ Asym/(1 + exp(( xmid - log(conc) )/scal ) ), data = DN2, trace = TRUE, start = list( Asym = 3, xmid = 0, scal = 1 )) Rm3DN2 summary(Rm3DN2) # -> robustness weight of obs. 
10 ~= 0.037 confint(Rm3DN2, method = "Wald") stopifnot(identical(Rm3DN2$dataClasses, c(density = "numeric", conc = "numeric"))) ## utility function sfsmisc::lseq() : lseq <- function (from, to, length) 2^seq(log2(from), log2(to), length.out = length) ## predict() {and plot}: h.x <- lseq(min(DN2$conc), max(DN2$conc), length = 100) nDat <- data.frame(conc = h.x) h.p <- predict(fm3DN2, newdata = nDat)# classical h.rp <- predict(Rm3DN2, newdata = nDat)# robust plot(density ~ conc, data=DN2, log="x", main = format(formula(Rm3DN2))) lines(h.x, h.p, col="blue") lines(h.x, h.rp, col="magenta") legend("topleft", c("classical nls()", "robust nlrob()"), lwd = 1, col= c("blue", "magenta"), inset = 0.05) ## See ?nlrob.algorithms for examples \donttest{ DNase1 <- DNase[DNase$Run == 1,] form <- density ~ Asym/(1 + exp(( xmid -log(conc) )/scal )) gMM <- nlrob(form, data = DNase1, method = "MM", lower = c(Asym = 0, xmid = 0, scal = 0), upper = 3, trace = TRUE) ## "CM" (and "mtl") additionally need bounds for "sigma" : gCM <- nlrob(form, data = DNase1, method = "CM", lower = c(Asym = 0, xmid = 0, scal = 0, sigma = 0), upper = c(3,3,3, sigma = 0.8)) summary(gCM)# did fail; note it has NA NA NA (std.err, t val, P val) stopifnot(identical(Rm3DN2$dataClasses, gMM$dataClasses), identical( gCM$dataClasses, gMM$dataClasses)) }%not (always) tested } \keyword{robust} \keyword{regression} \keyword{nonlinear} robustbase/man/summary.lmrob.Rd0000644000176200001440000001220213167157117016302 0ustar liggesusers\name{summary.lmrob} \title{Summary Method for "lmrob" Objects} % \alias{summary.lmrob} \alias{hatvalues.lmrob} \alias{.lmrob.hat} \alias{vcov.lmrob} \alias{print.summary.lmrob} \alias{model.matrix.lmrob} % \description{ Summary method for \R object of class \code{"lmrob"} and \code{\link{print}} method for the summary object. Further, methods \code{\link{fitted}()}, \code{\link{residuals}()} work (via the default methods), and \code{\link{predict}()} (see \code{\link{predict.lmrob}}, \code{\link{vcov}()}, \code{\link{weights}()} (see \code{\link{weights.lmrob}}), \code{\link{model.matrix}()}, \code{\link{confint}()}, \code{\link{dummy.coef}()}, \code{\link{hatvalues}()}, etc., have explicitly defined \code{lmrob} methods. \code{.lmrob.hat()} is the lower level \dQuote{work horse} of the \code{hatvalues()} method. 
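For instance (a minimal sketch, using the same stackloss fit as in the \emph{Examples} below):
\preformatted{  m1 <- lmrob(stack.loss ~ ., data = stackloss)
  h  <- hatvalues(m1)   # robustness-weighted leverages, computed via .lmrob.hat()
  summary(h)
}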
} \usage{% all source in ../R/lmrob.R <<< \method{summary}{lmrob}(object, correlation = FALSE, symbolic.cor = FALSE, \dots) \method{print}{summary.lmrob}(x, digits = max(3, getOption("digits") - 3), symbolic.cor= x$symbolic.cor, signif.stars = getOption("show.signif.stars"), showAlgo = TRUE, \dots) \method{vcov}{lmrob}(object, cov = object$control$cov, complete = TRUE, \dots) \method{model.matrix}{lmrob}(object, \dots) % not yet % .lmrob.hat(x, w = rep(1, NROW(x)), wqr = qr(sqrt(w) * x)) } \arguments{ \item{object}{an \R object of class \code{lmrob}, typically created by \code{\link{lmrob}}.} \item{correlation}{logical variable indicating whether to compute the correlation matrix of the estimated coefficients.} \item{symbolic.cor}{logical indicating whether to use symbols to display the above correlation matrix.} \item{x}{an \R object of class \code{summary.lmrob}, typically resulting from \code{summary(\link{lmrob}(..),..)}.} \item{digits}{number of digits for printing, see \code{digits} in \code{\link{options}}.} \item{signif.stars}{logical variable indicating whether to use stars to display different levels of significance in the individual t-tests.} \item{showAlgo}{optional \code{\link{logical}} indicating if the algorithmic parameters (as mostly inside the \code{control} part) should be shown.} \item{cov}{covariance estimation function to use, a \code{\link{function}} or \link{character} string naming the function; \pkg{robustbase} currently provides \code{".vcov.w"} and \code{".vcov.avar1"}, see \emph{Details} of \code{\link{lmrob}}. Particularly useful when \code{object} is the result of \code{lmrob(.., cov = "none")}, where \preformatted{ object$cov <- vcov(object, cov = ".vcov.w")} allows to \emph{update} the fitted object.} \item{complete}{(mainly for \R \code{>= 3.5.0}:)% ~/R/D/r-devel/R/src/library/stats/man/vcov.Rd \code{\link{logical}} indicating if the full variance-covariance matrix should be returned also in case of an over-determined system where some coefficients are undefined and \code{\link{coef}(.)} contains \code{NA}s correspondingly. When \code{complete = TRUE}, \code{vcov()} is compatible with \code{coef()} also in this singular case.} \item{\dots}{potentially more arguments passed to methods.} } \value{ \code{summary(object)} returns an object of S3 class \code{"summary.lmrob"}, basically a \code{\link{list}} with components "call", "terms", "residuals", "scale", "rweights", "converged", "iter", "control" all copied from \code{object}, and further components, partly for compatibility with \code{\link{summary.lm}}, \item{coefficients}{a \code{\link{matrix}} with columns \code{"Estimate"}, \code{"Std. Error"}, \code{"t value"}, and \code{"PR(>|t|)"}, where "Estimate" is identical to \code{\link{coef}(object)}. Note that \code{\link{coef}()} is slightly preferred to access this matrix.} \item{df}{degrees of freedom, in an \code{\link{lm}} compatible way.} \item{sigma}{identical to \code{\link{sigma}(object)}.} \item{aliased}{..}%FIXME \item{cov}{derived from \code{object$cov}.}% FIXME: say more \item{r.squared}{robust \dQuote{R squared} or \eqn{R^2}, a coefficient of determination: This is the consistency corrected robust coefficient of determination by Renaud and Victoria-Feser (2010).} \item{adj.r.squared}{an adjusted R squared, see \code{r.squared}.} } \references{ Renaud, O. and Victoria-Feser, M.-P. (2010). A robust coefficient of determination for regression, \emph{Journal of Statistical Planning and Inference} \bold{140}, 1852-1862. 
} \seealso{\code{\link{lmrob}}, \code{\link{predict.lmrob}}, \code{\link{weights.lmrob}}, \code{\link{summary.lm}}, \code{\link{print}}, \code{\link{summary}}. } \examples{ mod1 <- lmrob(stack.loss ~ ., data = stackloss) sa <- summary(mod1) # calls summary.lmrob(....) sa # dispatches to call print.summary.lmrob(....) ## correlation between estimated coefficients: cov2cor(vcov(mod1)) cbind(fit = fitted(mod1), resid = residuals(mod1), wgts= weights(mod1, type="robustness"), predict(mod1, interval="prediction")) data(heart) sm2 <- summary( m2 <- lmrob(clength ~ ., data = heart) ) sm2 } \keyword{robust} \keyword{regression} robustbase/man/tukeyPsi1.Rd0000644000176200001440000000611712152143522015365 0ustar liggesusers\name{tukeyPsi1} \alias{robustbase-deprecated} \alias{tukeyPsi1} \alias{tukeyChi} \title{Tukey's Bi-square Score (Psi) and "Chi" (Rho) Functions and Derivatives} \description{ These are \bold{deprecated}, replaced by \code{\link{Mchi}(*, psi="tukey")}, \code{\link{Mpsi}(*, psi="tukey")} \code{tukeyPsi1()} computes Tukey's bi-square score (psi) function, its first derivative or it's integral/\dQuote{principal function}. This is scaled such that \eqn{\psi'(0) = 1}, i.e., \eqn{\psi(x) \approx x}{psi(x) ~= x} around 0. \code{tukeyChi()} computes Tukey's bi-square loss function, \code{chi(x)} and its first two derivatives. Note that in the general context of \eqn{M}-estimators, these loss functions are called \eqn{\rho (rho)}{rho}-functions. } \usage{ tukeyPsi1(x, cc, deriv = 0) tukeyChi (x, cc, deriv = 0) } \arguments{ \item{x}{numeric vector.} \item{cc}{ tuning constant } \item{deriv}{integer in \eqn{\{-1,0,1,2\}} specifying the order of the derivative; the default, \code{deriv = 0} computes the psi-, or chi- ("rho"-)function.} } \value{ a numeric vector of the same length as \code{x}. } \note{\code{tukeyPsi1(x, d)} and \code{\link{tukeyChi}(x, d+1)} are just re-scaled versions of each other (for \code{d in -1:1}), i.e., \deqn{\chi^{(\nu)}(x, c) = (6/c^2) \psi^{(\nu-1)}(x,c),} for \eqn{\nu = 0,1,2}. We use the name \sQuote{tukeyPsi\bold{1}}, because \code{tukeyPsi} is reserved for a future \dQuote{Psi Function} class object, see \code{\link{psiFunc}}. } \seealso{ \code{\link{lmrob}} and \code{\link{Mpsi}}; further \code{\link{anova.lmrob}} which needs the \code{deriv = -1}. } \author{Matias Salibian-Barrera, Martin Maechler and Andreas Ruckstuhl} \examples{ \dontshow{oop <- options(warn = -1) # against the "deprecated" warnings} op <- par(mfrow = c(3,1), oma = c(0,0, 2, 0), mgp = c(1.5, 0.6, 0), mar= .1+c(3,4,3,2)) x <- seq(-2.5, 2.5, length = 201) cc <- 1.55 # as set by default in lmrob.control() plot. <- function(...) { plot(...); abline(h=0,v=0, col="gray", lty=3)} plot.(x, tukeyChi(x, cc), type = "l", col = 2) plot.(x, tukeyChi(x, cc, deriv = 1), type = "l", col = 2) plot.(x, tukeyChi(x, cc, deriv = 2), type = "l", col = 2) % \ is escape for Rd mtext(sprintf("tukeyChi(x, c = \%g, deriv), deriv = 0,1,2", cc), outer = TRUE, font = par("font.main"), cex = par("cex.main")) par(op) op <- par(mfrow = c(3,1), oma = c(0,0, 2, 0), mgp = c(1.5, 0.6, 0), mar= .1+c(3,4,1,1)) x <- seq(-5, 5, length = 201) cc <- 4.69 # as set by default in lmrob.control() plot. <- function(...) 
{ plot(..., asp = 1); abline(h=0,v=0, col="gray", lty=3)} plot.(x, tukeyPsi1(x, cc), type = "l", col = 2) abline(0:1, lty = 3, col = "light blue") plot.(x, tukeyPsi1(x, cc, deriv = -1), type = "l", col = 2) plot.(x, tukeyPsi1(x, cc, deriv = 1), type = "l", col = 2); abline(h=1,lty=3) % \ is escape for Rd mtext(sprintf("tukeyPsi1(x, c = \%g, deriv), deriv = 0, -1, 1", cc), outer = TRUE, font = par("font.main"), cex = par("cex.main")) par(op) \dontshow{options(oop)} } \keyword{robust} robustbase/man/chgDefaults-methods.Rd0000644000176200001440000000167212145665261017375 0ustar liggesusers\name{chgDefaults-methods} \title{Change Defaults (Parameters) of "Psi Function" Objects} \docType{methods} \alias{chgDefaults}% the generic \alias{chgDefaults-methods} \alias{chgDefaults,ANY-method} \alias{chgDefaults,psi_func-method} \description{ To modify an object of class \code{\linkS4class{psi_func}}, i.e. typically change the tuning parameters, the generic function \code{chgDefaults()} is called and works via the corresponding method. } \section{Methods}{ \describe{ \item{\code{object = "psi_func"}}{The method is used to change the default values for the tuning parameters, and returns an object of class \code{\linkS4class{psi_func}}, a copy of input \code{object} with the slot \code{tDefs} possibly changed;. } } } \seealso{ \code{\link{psiFunc}} } \examples{ ## Hampel's psi and rho: H.38 <- chgDefaults(hampelPsi, k = c(1.5, 3.5, 8)) H.38 plot(H.38) ## for more see ?psiFunc } \keyword{methods} robustbase/man/estimethod.Rd0000644000176200001440000000136512270435276015647 0ustar liggesusers\name{estimethod} \alias{estimethod} \title{Extract the Estimation Method 'Estimethod' from a Fitted Model} \description{ Extract the estimation method as a \code{\link{character}} string from a fitted model. } \usage{ estimethod(object, ...) } \arguments{ \item{object}{a fitted model.} \item{...}{additional, optional arguments. (None are used in our methods)} } \value{ a \code{\link{character}} string, the estimation method used. } \details{ This is a (S3) generic function for which we provide methods, currently for \code{\link{nlrob}} only.% TODO %% \code{\link{lmrob}}, %% \code{\link{glmrob}}, %% , and \code{\link{nls}}. } \seealso{ \code{\link{nlrob}}, and \code{\link{nlrob.MM}}, notably for examples. } \keyword{models} robustbase/man/airmay.Rd0000644000176200001440000000245113312375575014764 0ustar liggesusers\name{airmay} \alias{airmay} \docType{data} \title{Air Quality Data} \description{ Air Quality Data Set for May 1973, from Chambers et al. (1983). The whole data set consists of daily readings of air quality values from May 1, 1973 to September 30, 1973, but here are included only the values for May. This data set is an example of the special treatment of the missing values. } \usage{data(airmay, package="robustbase")} \format{ A data frame with 31 observations on the following 4 variables. \describe{ \item{\code{X1}}{Solar Radiation in Longleys in the frequency band 4000-7700 from 0800 to 1200 hours at Central Park} \item{\code{X2}}{Average windspeed (in miles per hour) between 7000 and 1000 hours at La Guardia Airport} \item{\code{X3}}{Maximum daily temperature (in degrees Fahrenheit) at La Guardia Airport} \item{\code{Y}}{Mean ozone concentration (in parts per billion) from 1300 to 1500 hours at Roosevelt Island} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.86, table 6. 
} \examples{ data(airmay) summary(lm.airmay <- lm(Y ~ ., data=airmay)) %%summary(lts.airmay <- ltsReg(Y ~ ., data=airmay)) airmay.x <- data.matrix(airmay[,1:3]) %%covMcd(airmay.x) } \keyword{datasets} robustbase/man/lmrob..M..fit.Rd0000644000176200001440000000777413710054521015725 0ustar liggesusers\name{lmrob..M..fit} \alias{lmrob..M..fit} \title{Compute M-estimators of regression} \description{ This function performs RWLS iterations to find an M-estimator of regression. When started from an S-estimated \code{beta.initial}, this results in an MM-estimator. } \usage{ lmrob..M..fit(x, y, beta.initial, scale, control, obj, mf = obj$model, method = obj$control$method) } \arguments{ \item{x}{design matrix (\eqn{n \times p}{n x p}) typically including a column of \code{1}s for the intercept.} \item{y}{numeric response vector (of length \eqn{n}).} \item{beta.initial}{numeric vector (of length \eqn{p}) of initial estimate. Usually the result of an S-regression estimator.} \item{scale}{robust residual scale estimate. Usually an S-scale estimator.} \item{control}{list of control parameters, as returned by \code{\link{lmrob.control}}. Currently, the components \code{c("max.it", "rel.tol","trace.lev", "psi", "tuning.psi", "mts", "subsampling")} are accessed.} \item{obj}{an optional \code{lmrob}-object. If specified, this is typically used to set values for the other arguments.} \item{mf}{unused and deprecated.} \item{method}{optional; the \code{method} used for \emph{obj} computation.} } \details{ This function is used by \code{\link{lmrob.fit}} (and \code{anova(, type = "Deviance")}) and typically not to be used on its own. } \value{A list with the following elements: \item{coef}{the M-estimator (or MM-estim.) of regression} \item{control}{the \code{control} list input used} \item{scale}{ The residual scale estimate} \item{seed }{ The random number generator seed} \item{converged}{ \code{TRUE} if the RWLS iterations converged, \code{FALSE} otherwise} } \references{ Yohai, 1987 } \seealso{ \code{\link{lmrob.fit}}, \code{\link{lmrob}}; \code{\link[MASS]{rlm}} from package \CRANpkg{MASS}. } \author{Matias Salibian-Barrera and Martin Maechler} \examples{ data(stackloss) X <- model.matrix(stack.loss ~ . , data = stackloss) y <- stack.loss ## Compute manual MM-estimate: ## 1) initial LTS: m0 <- ltsReg(X[,-1], y) ## 2) M-estimate started from LTS: m1 <- lmrob..M..fit(X, y, beta.initial = coef(m0), scale = m0$scale, method = "SM", control = lmrob.control(tuning.psi = 1.6, psi = 'bisquare')) ## no 'method' (nor 'obj'): m1. <- lmrob..M..fit(X, y, beta.initial = coef(m0), scale = m0$scale, control = m1$control) stopifnot(all.equal(m1, m1., tol = 1e-15)) # identical {call *not* stored!} cbind(m0$coef, m1$coef) ## the scale is kept fixed: stopifnot(identical(unname(m0$scale), m1$scale)) ## robustness weights: are r.s <- with(m1, residuals/scale) # scaled residuals m1.wts <- Mpsi(r.s, cc = 1.6, psi="tukey") / r.s summarizeRobWeights(m1.wts) ##--> outliers 1,3,4,13,21 which(m0$lts.wt == 0) # 1,3,4,21 but not 13 \dontshow{stopifnot(which(m0$lts.wt == 0) == c(1,3,4,21)) } ## Manually add M-step to SMD-estimate (=> equivalent to "SMDM"): m2 <- lmrob(stack.loss ~ ., data = stackloss, method = 'SMD') m3 <- lmrob..M..fit(obj = m2) ## Simple function that allows custom initial estimates ## (Deprecated; use init argument to lmrob() instead.) %% MM: why deprecated? 
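## Added note: the helper below chains the building blocks by hand --
## an M-step (lmrob..M..fit), a D-step (lmrob..D..fit) and a final M-step --
## so that, started from the S-estimate and scale stored in 'm2', it
## reproduces the "SMDM" fit that is checked against 'm3' below.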
lmrob.custom <- function(x, y, beta.initial, scale, terms) { ## initialize object obj <- list(control = lmrob.control("KS2011"), terms = terms) ## terms is needed for summary() ## M-step obj <- lmrob..M..fit(x, y, beta.initial, scale, obj = obj) ## D-step obj <- lmrob..D..fit(obj, x) ## Add some missing elements obj$cov <- TRUE ## enables calculation of cov matrix obj$p <- obj$qr$rank obj$degree.freedom <- length(y) - obj$p ## M-step obj <- lmrob..M..fit(x, y, obj=obj) obj$control$method <- ".MDM" obj } m4 <- lmrob.custom(X, y, m2$init$init.S$coef, m2$init$scale, m2$terms) stopifnot(all.equal(m4$coef, m3$coef)) ## Start from ltsReg: m5 <- ltsReg(stack.loss ~ ., data = stackloss) m6 <- lmrob.custom(m5$X, m5$Y, coef(m5), m5$scale, m5$terms) } \keyword{robust} \keyword{regression} robustbase/man/summary.nlrob.Rd0000644000176200001440000000307112270244535016302 0ustar liggesusers\name{summary.nlrob} \alias{summary.nlrob} \title{Summarizing Robust Fits of Nonlinear Regression Models } \description{ \code{summary} method for objects of class \code{"nlrob"}, i.e., \code{\link{nlrob}()} results. Currently it only works for \code{nlrob(*, method="M")}. } \usage{ \method{summary}{nlrob}(object, correlation = FALSE, symbolic.cor = FALSE, ...) } \arguments{ \item{object}{an object of class \code{"nlrob"}, usually, a result of a call to \code{\link{nlrob}}.} \item{correlation}{logical variable indicating whether to compute the correlation matrix of the estimated coefficients.} \item{symbolic.cor}{logical indicating whether to use symbols to display the above correlation matrix.} \item{\dots}{further arguments passed to or from other methods.} } \value{ The function \code{\link{summary.nlrob}} computes and returns an object of class \code{"summary.nlrob"} of summary statistics of the robustly fitted linear model given in \code{object}. There is a print method, \code{print.summary.lmrob()}, which nicely formats the output. The result keeps a large part of \code{object}'s components such as \code{residuals}, \code{cov} or \code{w}, and additionally contains \item{coefficients}{the matrix of coefficients, standard errors and p-values.} \item{correlation}{if the \code{correlation} argument was true, the correlation matrix of the parameters.} %% maybe add more } \author{Andreas Ruckstuhl} \seealso{\code{\link{nlrob}()}, also for examples. } \keyword{regression} \keyword{nonlinear} \keyword{robust} robustbase/man/ltsReg.Rd0000644000176200001440000002406612425013547014740 0ustar liggesusers\name{ltsReg} \alias{ltsReg} \alias{ltsReg.default} \alias{ltsReg.formula} \alias{print.lts} \title{Least Trimmed Squares Robust (High Breakdown) Regression} \concept{High breakdown point} \description{ Carries out least trimmed squares (LTS) robust (high breakdown point) regression. } \usage{ ltsReg(x, \dots) \method{ltsReg}{formula}(formula, data, subset, weights, na.action, model = TRUE, x.ret = FALSE, y.ret = FALSE, contrasts = NULL, offset, \dots) \method{ltsReg}{default}(x, y, intercept = TRUE, alpha = , nsamp = , adjust = , mcd = TRUE, qr.out = FALSE, yname = NULL, seed = , trace = , use.correction = , wgtFUN = , control = rrcov.control(), \dots) } \arguments{ \item{formula}{a \code{\link{formula}} of the form \code{y ~ x1 + x2 + ...}.} \item{data}{data frame from which variables specified in \code{formula} are to be taken.} \item{subset}{an optional vector specifying a subset of observations to be used in the fitting process.} \item{weights}{an optional vector of weights to be used in the fitting process. 
\bold{NOT USED YET}. %%% If specified, weighted least squares is used %%% with weights \code{weights} (that is, minimizing \code{sum(w*e^2)}); %%% otherwise ordinary least squares is used. } \item{na.action}{a function which indicates what should happen when the data contain \code{NA}s. The default is set by the \code{na.action} setting of \code{\link{options}}, and is \code{\link{na.fail}} if that is unset. The \dQuote{factory-fresh} default is \code{\link{na.omit}}. Another possible value is \code{NULL}, no action. Value \code{\link{na.exclude}} can be useful.} \item{model, x.ret, y.ret}{\code{\link{logical}}s indicating if the model frame, the model matrix and the response are to be returned, respectively.} \item{contrasts}{an optional list. See the \code{contrasts.arg} of \code{\link{model.matrix.default}}.} \item{offset}{this can be used to specify an \emph{a priori} known component to be included in the linear predictor during fitting. An \code{\link{offset}} term can be included in the formula instead or as well, and if both are specified their sum is used.} \item{x}{a matrix or data frame containing the explanatory variables.} \item{y}{the response: a vector of length the number of rows of \code{x}.}. \item{intercept}{if true, a model with constant term will be estimated; otherwise no constant term will be included. Default is \code{intercept = TRUE} } \item{alpha}{the percentage (roughly) of squared residuals whose sum will be minimized, by default 0.5. In general, \code{alpha} must between 0.5 and 1.} \item{nsamp}{number of subsets used for initial estimates or \code{"best"} or \code{"exact"}. Default is \code{nsamp = 500}. For \code{nsamp="best"} exhaustive enumeration is done, as long as the number of trials does not exceed 5000. For \code{"exact"}, exhaustive enumeration will be attempted however many samples are needed. In this case a warning message will be displayed saying that the computation can take a very long time. } \item{adjust}{whether to perform intercept adjustment at each step. Since this can be time consuming, the default is \code{adjust = FALSE}.} \item{mcd}{whether to compute robust distances using Fast-MCD.} \item{qr.out}{whether to return the QR decomposition (see \code{\link{qr}}); defaults to false.} \item{yname}{the name of the dependent variable. Default is \code{yname = NULL}} \item{seed}{initial seed for random generator, like \code{\link{.Random.seed}}, see \code{\link{rrcov.control}}.} \item{trace}{logical (or integer) indicating if intermediate results should be printed; defaults to \code{FALSE}; values \eqn{\ge 2}{>= 2} also produce print from the internal (Fortran) code.} \item{use.correction}{ whether to use finite sample correction factors. Default is \code{use.correction=TRUE}} \item{wgtFUN}{a character string or \code{\link{function}}, specifying how the weights for the reweighting step should be computed. Up to April 2013, the only option has been the original proposal in (1999), now specified by \code{wgtFUN = "01.original"} (or via \code{control}).} %% MM: want control also for formula !?!?!?!?!! \item{control}{a list with estimation options - same as these provided in the function specification. If the control object is supplied, the parameters from it will be used. 
If parameters are passed also in the invocation statement, they will override the corresponding elements of the control object.} \item{\dots}{arguments passed to or from other methods.} } \details{ The LTS regression method minimizes the sum of the \eqn{h} smallest squared residuals, where \eqn{h > n/2}, i.e. at least half the number of observations must be used. The default value of \eqn{h} (when \code{alpha=1/2}) is roughly \eqn{n / 2}, more precisely, \code{(n+p+1) \%/\% 2} where \eqn{n} is the total number of observations, but by setting \code{alpha}, the user may choose higher values up to n, where \eqn{h = h(\alpha,n,p) =} \code{\link{h.alpha.n}(alpha,n,p)}. The LTS estimate of the error scale is given by the minimum of the objective function multiplied by a consistency factor and a finite sample correction factor -- see Pison et al. (2002) for details. The rescaling factors for the raw and final estimates are returned also in the vectors \code{raw.cnp2} and \code{cnp2} of length 2 respectively. The finite sample corrections can be suppressed by setting \code{use.correction=FALSE}. The computations are performed using the Fast LTS algorithm proposed by Rousseeuw and Van Driessen (1999). As always, the formula interface has an implied intercept term which can be removed either by \code{y ~ x - 1} or \code{y ~ 0 + x}. See \code{\link{formula}} for more details. } \note{We strongly recommend using \code{\link{lmrob}()} instead of \code{ltsReg} (\emph{See also} below)! } \value{ The function \code{ltsReg} returns an object of class \code{"lts"}. The \code{\link{summary}} method function is used to obtain (and print) a summary table of the results, and \code{\link[=ltsPlot]{plot}()} can be used to plot them, see the the specific help pages. The generic accessor functions \code{\link{coefficients}}, \code{\link{fitted.values}} and \code{\link{residuals}} extract various useful features of the value returned by \code{ltsReg}. An object of class \code{lts} is a \code{\link{list}} containing at least the following components: \item{crit}{ the value of the objective function of the LTS regression method, i.e., the sum of the \eqn{h} smallest squared raw residuals. } \item{coefficients}{ vector of coefficient estimates (including the intercept by default when \code{intercept=TRUE}), obtained after reweighting. } \item{best}{ the best subset found and used for computing the raw estimates, with \code{\link{length}(best) == quan = \link{h.alpha.n}(alpha,n,p)}. } \item{fitted.values}{vector like \code{y} containing the fitted values of the response after reweighting.} \item{residuals}{vector like \code{y} containing the residuals from the weighted least squares regression.} \item{scale}{scale estimate of the reweighted residuals. 
} \item{alpha}{same as the input parameter \code{alpha}.} \item{quan}{the number \eqn{h} of observations which have determined the least trimmed squares estimator.} \item{intercept}{same as the input parameter \code{intercept}.} \item{cnp2}{a vector of length two containing the consistency correction factor and the finite sample correction factor of the final estimate of the error scale.} \item{raw.coefficients}{vector of raw coefficient estimates (including the intercept, when \code{intercept=TRUE}).} \item{raw.scale}{scale estimate of the raw residuals.} \item{raw.resid}{vector like \code{y} containing the raw residuals from the regression.} \item{raw.cnp2}{a vector of length two containing the consistency correction factor and the finite sample correction factor of the raw estimate of the error scale.} \item{lts.wt}{ vector like y containing weights that can be used in a weighted least squares. These weights are 1 for points with reasonably small residuals, and 0 for points with large residuals. } \item{raw.weights}{ vector containing the raw weights based on the raw residuals and raw scale. } \item{method}{character string naming the method (Least Trimmed Squares).} \item{X}{the input data as a matrix (including intercept column if applicable).} \item{Y}{the response variable as a vector.} } \author{Valentin Todorov \email{valentin.todorov@chello.at}, based on work written for S-plus by Peter Rousseeuw and Katrien van Driessen from University of Antwerp.% no E-mails for spam-protection } \references{ Peter J. Rousseeuw (1984), Least Median of Squares Regression. \emph{Journal of the American Statistical Association} \bold{79}, 871--881. P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection.} Wiley. P. J. Rousseeuw and K. van Driessen (1999) A fast algorithm for the minimum covariance determinant estimator. \emph{Technometrics} \bold{41}, 212--223. Pison, G., Van Aelst, S., and Willems, G. (2002) Small Sample Corrections for LTS and MCD. \emph{Metrika} \bold{55}, 111-123. } \seealso{ \code{\link{lmrob.S}()} provides a fast S estimator with similar breakdown point as \code{ltsReg()} but better efficiency.\cr For data analysis, rather use \code{\link{lmrob}} which is based on \code{\link{lmrob.S}}. \code{\link{covMcd}}; \code{\link{summary.lts}} for summaries. The generic functions \code{\link{coef}}, \code{\link{residuals}}, \code{\link{fitted}}. } \examples{ data(heart) ## Default method works with 'x'-matrix and y-var: heart.x <- data.matrix(heart[, 1:2]) # the X-variables heart.y <- heart[,"clength"] ltsReg(heart.x, heart.y) data(stackloss) ltsReg(stack.loss ~ ., data = stackloss) } \keyword{robust} \keyword{regression} robustbase/man/functionX-class.Rd0000644000176200001440000000201312114153713016537 0ustar liggesusers\name{functionX-class} \docType{class} \alias{functionX-class} \title{Class "functionX" of Psi-like Vectorized Functions} \description{ The class \code{"functionX"} of vectorized functions of one argument \code{x} and typically further tuning parameters. } \section{Objects from the Class}{ Objects can be created by calls of the form \code{new("functionX", ...)}. } \section{Slots}{ \describe{ \item{\code{.Data}:}{Directly extends class \code{"function"}.} } } \section{Extends}{ Class \code{"function"}, from data part. Class \code{"OptionalFunction"}, by class \code{"function"}. Class \code{"PossibleMethod"}, by class \code{"function"}. } \section{Methods}{ No methods defined with class "functionX" in the signature. 
} \author{Martin Maechler} \seealso{ \code{\link{psiFunc}()}, and class descriptions of \code{\linkS4class{functionXal}} for \emph{functionals} of \code{"functionX"}, and \code{\linkS4class{psi_func}} which has several \code{functionX} slots. } % \examples{ % } \keyword{classes} robustbase/man/kootenay.Rd0000644000176200001440000000453413312375575015337 0ustar liggesusers\name{kootenay} \encoding{utf8} \alias{kootenay} \docType{data} \title{Waterflow Measurements of Kootenay River in Libby and Newgate} \description{ The original data set is the waterflow in January of the Kootenay river, measured at two locations, namely, Libby (Montana) and Newgate (British Columbia) for 13 consecutive years, 1931--1943. The data set is of mostly interest because it has been used as example in innumerous didactical situations about robust regression. To this end, one number (in observation 4) has been modified from the original data from originally 44.9 to 15.7 (here). } \usage{data(kootenay, package="robustbase")} \format{ A data frame with 13 observations on the following 2 variables. \describe{ \item{\code{Libby}}{a numeric vector} \item{\code{Newgate}}{a numeric vector} } } \details{ The original (unmodified) version of the data is easily obtainable as \code{kootenay0} from the examples; other modified versions of the data sets are also used in different places, see the examples below. } \source{ Original Data, p.58f of Ezekiel and Fox (1959), \emph{Methods of Correlation and Regression Analysis}. Wiley, N.Y. } \references{ Hampel, F., Ronchetti, E., Rousseeuw, P. and Stahel, W. (1986) \emph{Robust Statistics: The Approach Based on Influence Functions}; Wiley, N.Y. Rousseeuw, P. J. and Leroy, A. M. (1987) \emph{Robust Regression & Outlier Detection}, Wiley, N. Y. } \examples{ data(kootenay) plot(kootenay, main = "'kootenay' data") points(kootenay[4,], col = 2, cex =2, pch = 3) abline(lm (Newgate ~ Libby, data = kootenay), col = "pink") abline(lmrob(Newgate ~ Libby, data = kootenay), col = "blue") ## The original version of Ezekiel & Fox: kootenay0 <- kootenay kootenay0[4, "Newgate"] <- 44.9 plot(kootenay0, main = "'kootenay0': the original data") abline(lm (Newgate ~ Libby, data = kootenay0), col = "pink") abline(lmrob(Newgate ~ Libby, data = kootenay0), col = "blue") ## The version with "milder" outlier -- Hampel et al., p.310 kootenay2 <- kootenay0 kootenay2[4, "Libby"] <- 20.0 # instead of 77.6 plot(kootenay2, main = "The 'kootenay2' data", xlim = range(kootenay[,"Libby"])) points(kootenay2[4,], col = 2, cex =2, pch = 3) abline(lm (Newgate ~ Libby, data = kootenay2), col = "pink") abline(lmrob(Newgate ~ Libby, data = kootenay2), col = "blue") } \keyword{datasets} robustbase/man/adjbox.Rd0000644000176200001440000001723313721410436014743 0ustar liggesusers\name{adjbox} \title{Plot an Adjusted Boxplot for Skew Distributions} \alias{adjbox} \alias{adjbox.default} \alias{adjbox.formula} \description{ Produces boxplots adjusted for skewed distributions as proposed in Hubert and Vandervieren (2008). 
} \usage{ adjbox(x, \dots) \method{adjbox}{formula}(formula, data = NULL, \dots, subset, na.action = NULL) \method{adjbox}{default}(x, \dots, range = 1.5, doReflect = FALSE, width = NULL, varwidth = FALSE, notch = FALSE, outline = TRUE, names, plot = TRUE, border = par("fg"), col = NULL, log = "", pars = list(boxwex = 0.8, staplewex = 0.5, outwex = 0.5), horizontal = FALSE, add = FALSE, at = NULL) } \arguments{ \item{formula}{a formula, such as \code{y ~ grp}, where \code{y} is a numeric vector of data values to be split into groups according to the grouping variable \code{grp} (usually a factor).} \item{data}{a data.frame (or list) from which the variables in \code{formula} should be taken.} \item{subset}{an optional vector specifying a subset of observations to be used for plotting.} \item{na.action}{a function which indicates what should happen when the data contain \code{NA}s. The default is to ignore missing values in either the response or the group.} \item{x}{for specifying data from which the boxplots are to be produced. Either a numeric vector, or a single list containing such vectors. Additional unnamed arguments specify further data as separate vectors (each corresponding to a component boxplot). \code{\link{NA}}s are allowed in the data.} \item{\dots}{For the \code{formula} method, named arguments to be passed to the default method. For the default method, unnamed arguments are additional data vectors (unless \code{x} is a list when they are ignored), and named arguments are arguments and graphical parameters to be passed to \code{\link{bxp}} in addition to the ones given by argument \code{pars} (and override those in \code{pars}). } \item{range}{this determines how far the plot whiskers extend out from the box, and is simply passed as argument \code{coef} to \code{\link{adjboxStats}()}. If \code{range} is positive, the whiskers extend to the most extreme data point which is no more than \code{range} times the interquartile range from the box. A value of zero causes the whiskers to extend to the data extremes.} \item{doReflect}{logical indicating if the MC should also be computed on the \emph{reflected} sample \code{-x}, and be averaged, see \code{\link{mc}}.} \item{width}{a vector giving the relative widths of the boxes making up the plot.} \item{varwidth}{if \code{varwidth} is \code{TRUE}, the boxes are drawn with widths proportional to the square-roots of the number of observations in the groups.} \item{notch}{if \code{notch} is \code{TRUE}, a notch is drawn in each side of the boxes. If the notches of two plots do not overlap this is \sQuote{strong evidence} that the two medians differ (Chambers \emph{et al.}, 1983, p. 62). See \code{\link{boxplot.stats}} for the calculations used.} \item{outline}{if \code{outline} is not true, the outliers are not drawn (as points whereas S+ uses lines).}% the argument name is most ugly but S+ compatible \item{names}{group labels which will be printed under each boxplot.} \item{boxwex}{a scale factor to be applied to all boxes. When there are only a few groups, the appearance of the plot can be improved by making the boxes narrower.} \item{staplewex}{staple line width expansion, proportional to box width.} \item{outwex}{outlier line width expansion, proportional to box width.} \item{plot}{if \code{TRUE} (the default) then a boxplot is produced. If not, the summaries which the boxplots are based on are returned.} \item{border}{an optional vector of colors for the outlines of the boxplots. 
The values in \code{border} are recycled if the length of \code{border} is less than the number of plots.} \item{col}{if \code{col} is non-null it is assumed to contain colors to be used to colour the bodies of the box plots. By default they are in the background colour.} \item{log}{character indicating if x or y or both coordinates should be plotted in log scale.} \item{pars}{a list of (potentially many) more graphical parameters, e.g., \code{boxwex} or \code{outpch}; these are passed to \code{\link{bxp}} (if \code{plot} is true); for details, see there.} \item{horizontal}{logical indicating if the boxplots should be horizontal; default \code{FALSE} means vertical boxes.} \item{add}{logical, if true \emph{add} boxplot to current plot.} \item{at}{numeric vector giving the locations where the boxplots should be drawn, particularly when \code{add = TRUE}; defaults to \code{1:n} where \code{n} is the number of boxes.} } \details{ The generic function \code{adjbox} currently has a default method (\code{adjbox.default}) and a formula interface (\code{adjbox.formula}). If multiple groups are supplied either as multiple arguments or via a formula, parallel boxplots will be plotted, in the order of the arguments or the order of the levels of the factor (see \code{\link{factor}}). Missing values are ignored when forming boxplots. Extremes of the upper and whiskers of the adjusted boxplots are computed using the medcouple (\code{\link{mc}()}), a robust measure of skewness. For details, cf. TODO %% << FIXME } \value{ A \code{\link{list}} with the following components: \item{stats}{a matrix, each column contains the extreme of the lower whisker, the lower hinge, the median, the upper hinge and the extreme of the upper whisker for one group/plot. If all the inputs have the same class attribute, so will this component.} \item{n}{a vector with the number of observations in each group.} \item{coef}{a matrix where each column contains the lower and upper extremes of the notch.} \item{out}{the values of any data points which lie beyond the extremes of the whiskers.} \item{group}{a vector of the same length as out whose elements indicate to which group the outlier belongs.} \item{names}{a vector of names for the groups.} } \references{ %% Hubert, M. and Vandervieren, E. (2006) %% \emph{An Adjusted Boxplot for Skewed Distributions}, %% Technical Report TR-06-11, KU Leuven, Section of Statistics, Leuven. %% \url{http://wis.kuleuven.be/stat/robust/Papers/TR0611.pdf} Hubert, M. and Vandervieren, E. (2008). An adjusted boxplot for skewed distributions, \emph{Computational Statistics and Data Analysis} \bold{52}, 5186--5201. \doi{10.1016/j.csda.2007.11.008} } \author{ R Core Development Team, slightly adapted by Tobias Verbeke } \note{ The code and documentation only slightly modifies the code of \code{\link{boxplot.default}}, \code{boxplot.formula} and \code{\link{boxplot.stats}} } \seealso{The medcouple, \code{\link{mc}}; \code{\link{boxplot}}. } \examples{ if(require("boot")) { ### Hubert and Vandervieren (2008), Fig. 5.%(2006): p. 10, Fig. 4. data(coal, package = "boot") coaldiff <- diff(coal$date) op <- par(mfrow = c(1,2)) boxplot(coaldiff, main = "Original Boxplot") adjbox(coaldiff, main = "Adjusted Boxplot") par(op) } ### Hubert and Vandervieren (2008), p. 11, Fig. 
7a -- enhanced op <- par(mfrow = c(2,2), mar = c(1,3,3,1), oma = c(0,0,3,0)) with(condroz, { boxplot(Ca, main = "Original Boxplot") adjbox (Ca, main = "Adjusted Boxplot") boxplot(Ca, main = "Original Boxplot [log]", log = "y") adjbox (Ca, main = "Adjusted Boxplot [log]", log = "y") }) mtext("'Ca' from data(condroz)", outer=TRUE, font = par("font.main"), cex = 2) par(op) } \keyword{hplot} robustbase/man/sigma.Rd0000644000176200001440000000221713710054521014565 0ustar liggesusers\name{sigma} \title{Extract 'Sigma' - Standard Deviation of Errors for Robust Models} \alias{sigma} \alias{sigma.lmrob} \description{ Extract the estimated standard deviation of the errors, the \dQuote{residual standard deviation} (misnomed also \dQuote{residual standard error}) from a fitted model. } \usage{ % Declare S3 method as the generic is no longer in lme4 for R (>= 2015-06-01): \S3method{sigma}{lmrob}(object, \dots) } \arguments{ \item{object}{a fitted model.} \item{\dots}{additional, optional arguments. (None are used in our methods)} } \value{ the residual standard error as a scalar } \details{ For \R \code{<= 3.2.x}, we provide an (S3) generic function (as e.g., package \CRANpkg{lme4}) and methods for \code{\link{lmrob}}, \code{\link{nlrob}}, and \code{\link{nls}}. From \R \code{>= 3.3.0}, we provide methods for our \code{\link{lmrob}} and \code{\link{nlrob}} models. } \examples{ m.cl <- lm (Y ~ ., data=coleman) if(getRversion() >= "3.3.0") sigma(m.cl) else summary(m.cl)$sigma sigma( m1 <- lmrob(Y ~ ., data=coleman) ) sigma( m2 <- lmrob(Y ~ ., data=coleman, setting = "KS2014") ) } \keyword{models} robustbase/man/wagnerGrowth.Rd0000644000176200001440000000405613312375575016163 0ustar liggesusers\name{wagnerGrowth} \encoding{utf8} \Rdversion{1.1} \alias{wagnerGrowth} \docType{data} \title{ Wagner's Hannover Employment Growth Data } \description{ Wagner (1994) investigates the rate of employment growth (\code{y}) as function of percentage of people engaged in \bold{p}roducation \bold{a}ctivities (\code{PA}) and \bold{h}igher \bold{s}ervices (\code{HS}) and of the \bold{g}rowth of these percentages (\code{GPA}, \code{GHS}) during three time periods in 21 geographical regions of the greater Hannover area. } \usage{data(wagnerGrowth, package="robustbase")} \format{ A data frame with \eqn{21 \times 3 = 63}{21 * 3 = 63} observations (one per \code{Region x Period}) on the following 7 variables. \describe{ \item{\code{Region}}{a \code{\link{factor}} with 21 levels, denoting the corresponding region in Hannover (conceptually a \dQuote{block factor}).} \item{\code{PA}}{numeric: percent of people involved in production activities.} \item{\code{GPA}}{\bold{g}rowth of \code{PA}.} \item{\code{HS}}{a numeric vector} \item{\code{GHS}}{a numeric vector} \item{\code{y}}{a numeric vector} \item{\code{Period}}{a \code{\link{factor}} with levels \code{1:3}, denoting the time period, 1 = 1979-1982, 2 = 1983-1988, 3 = 1989-1992.} } } % \details{ % } \source{ Hubert, M. and Rousseeuw, P. J. (1997). Robust regression with both continuous and binary regressors, \emph{Journal of Statistical Planning and Inference} \bold{57}, 153--163. } \references{ Wagner J. (1994). Regionale Beschäftigungsdynamik und höherwertige Produktionsdienste: Ergebnisse für den Grossraum Hannover (1979-1992). \emph{Raumforschung und Raumordnung} \bold{52}, 146--150. 
} \examples{ data(wagnerGrowth) ## maybe str(wagnerGrowth) require(lattice) (xyplot(y ~ Period | Region, data = wagnerGrowth, main = "wagnerGrowth: 21 regions @ Hannover")) (dotplot(y ~ reorder(Region,y,median), data = wagnerGrowth, main = "wagnerGrowth", xlab = "Region [ordered by median(y | Region) ]")) } \keyword{datasets} robustbase/man/pilot.Rd0000644000176200001440000000155113312375575014631 0ustar liggesusers\name{pilot} \alias{pilot} \docType{data} \title{Pilot-Plant Data} \description{ Pilot-Plant data from Daniel and Wood (1971). The response variable corresponds to the acid content determined by titration and the explanatory variable is the organic acid content determined by extraction and weighing. This data set was analyzed also by Yale and Forsythe (1976). } \usage{data(pilot, package="robustbase")} \format{ A data frame with 20 observations on the following 2 variables. \describe{ \item{\code{X}}{Organic acid content - extraction} \item{\code{Y}}{Acid content - titration } } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, page 21, table 1. } \examples{ data(pilot) summary(lm.pilot <- lm(Y ~.,data=pilot)) %%summary(lts.pilot <- ltsReg(Y~.,data=pilot)) } \keyword{datasets} robustbase/man/predict.glmrob.Rd0000644000176200001440000000702611651467605016420 0ustar liggesusers\name{predict.glmrob} \alias{predict.glmrob} \title{Predict Method for Robust GLM ("glmrob") Fits} \description{ Obtains predictions and optionally estimates standard errors of those predictions from a fitted \emph{robust} generalized linear model (GLM) object. } \usage{ \method{predict}{glmrob}(object, newdata = NULL, type = c("link", "response", "terms"), se.fit = FALSE, dispersion = NULL, terms = NULL, na.action = na.pass, \dots) } \arguments{ %% the following is +- copy-pasted from predict.glm.Rd: \item{object}{a fitted object of class inheriting from \code{"glmrob"}.} \item{newdata}{optionally, a data frame in which to look for variables with which to predict. If omitted, the fitted linear predictors are used.} \item{type}{the type of prediction required. The default is on the scale of the linear predictors; the alternative \code{"response"} is on the scale of the response variable. Thus for a default binomial model the default predictions are of log-odds (probabilities on logit scale) and \code{type = "response"} gives the predicted probabilities. The \code{"terms"} option returns a matrix giving the fitted values of each term in the model formula on the linear predictor scale. The value of this argument can be abbreviated. } \item{se.fit}{logical switch indicating if standard errors are required.} \item{dispersion}{the dispersion of the GLM fit to be assumed in computing the standard errors. If omitted, that returned by \code{summary} applied to the object is used.} \item{terms}{with \code{type="terms"} by default all terms are returned. A character vector specifies which terms are to be returned} \item{na.action}{function determining what should be done with missing values in \code{newdata}. The default is to predict \code{NA}.} \item{\dots}{optional further arguments, currently simply passed to \code{\link{predict.lmrob}()}.} } % \details{ % If necessary, more details than the description above ~~ % } \value{ %% the following is +- copy-pasted from predict.glm.Rd: %% also correct,here ? If \code{se = FALSE}, a vector or matrix of predictions. 
If \code{se = TRUE}, a list with components \item{fit}{Predictions} \item{se.fit}{Estimated standard errors} \item{residual.scale}{A scalar giving the square root of the dispersion used in computing the standard errors.} } \author{Andreas Ruckstuhl} \seealso{ \code{\link{glmrob}()} to fit these robust GLM models, \code{\link{residuals.glmrob}()} and other methods; \code{\link{predict.lm}()}, the method used for a non-robust fit. } \examples{ data(carrots) ## simplistic testing & training: i.tr <- sample(24, 20) fm1 <- glmrob(cbind(success, total-success) ~ logdose + block, family = binomial, data = carrots, subset = i.tr) fm1 predict(fm1, carrots[-i.tr, ]) # --> numeric vector predict(fm1, carrots[-i.tr, ], type="response", se = TRUE)# -> a list % FIXME: gives a "bad" error -- should rather say "not yet implemented" % or implement it ! % predict(fm1, carrots[-i.tr, ], interval = "confidence") % predict(fm1, carrots[-i.tr, ], interval = "prediction") data(vaso) Vfit <- glmrob(Y ~ log(Volume) + log(Rate), family=binomial, data=vaso) newd <- expand.grid(Volume = (V. <- seq(.5, 4, by = 0.5)), Rate = (R. <- seq(.25,4, by = 0.25))) p <- predict(Vfit, newd) filled.contour(V., R., matrix(p, length(V.), length(R.)), main = "predict(glmrob(., data=vaso))", xlab="Volume", ylab="Rate") } \keyword{models} \keyword{regression} robustbase/man/covComed.Rd0000644000176200001440000000635613772570740015252 0ustar liggesusers\name{covComed} \title{Co-Median Location and Scatter "Covariance" Estimator} \alias{covComed} %%TODO: these two are not yet imported and hence not yet documented below: \alias{comedian} \alias{COM} \alias{.wgtFUN.covComed} % \concept{Co-median} \concept{Comedian} \description{ Compute (versions of) the (multivariate) \dQuote{Comedian} covariance, i.e., multivariate location and scatter estimator } \usage{ covComed(X, n.iter = 2, reweight = FALSE, tolSolve = control$tolSolve, trace = control$trace, wgtFUN = control$wgtFUN, control = rrcov.control()) % comedian(...) % COM(...) } \arguments{ \item{X}{data matrix of dimension, say \eqn{n \times p}{n x p}.} \item{n.iter}{number of comedian() iterations. Can be as low as zero.} \item{reweight}{logical indicating if the final distances and weights should be recomputed from the final \code{cov} and \code{center}. The default is currently \code{FALSE} because that was implicit in the first version of the \R code.} \item{tolSolve}{a numerical tolerance passed to \code{\link{solve}}.} \item{trace}{logical (or integer) indicating if intermediate results should be printed; defaults to \code{FALSE}; values \eqn{\ge 2}{>= 2} also produce print from the internal (Fortran) code.} \item{wgtFUN}{a character string or \code{\link{function}}, specifying how the weights for the reweighting step should be computed. The default, \code{wgtFUN = "01.original"} corresponds to 0-1 weights as proposed originally. Other predefined string options are available, though experimental, see the experimental \code{.wgtFUN.covComed} object.} \item{control}{a list with estimation options - this includes those above provided in the function specification, see \code{\link{rrcov.control}} for the defaults. If \code{control} is supplied, the parameters from it will be used. If parameters are passed also in the invocation statement, they will override the corresponding elements of the control object.} } \details{ .. not yet .. 
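A rough sketch of a single \dQuote{comedian} step, following the definition
in Falk (1997) (an illustration of the idea only, not necessarily the exact
algorithm used here): each scatter entry is the median of the product of the
two median-centered columns.
\preformatted{
  comedianStep <- function(X) {    # hypothetical helper, illustration only
    med <- apply(X, 2, median)
    Xc  <- sweep(X, 2, med)        # center each column by its median
    p   <- ncol(X)
    C   <- matrix(0, p, p)
    for (j in 1:p) for (k in 1:p)
      C[j, k] <- median(Xc[, j] * Xc[, k])   # comedian of columns j and k
    list(center = med, cov = C)    # covComed() iterates such steps (n.iter)
  }
}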
} \value{ an object of class \code{"covComed"} which is basically a list with components \item{comp1 }{Description of 'comp1'} \item{comp2 }{Description of 'comp2'} ... FIXME ... } \references{ Falk, M. (1997) On mad and comedians. \emph{Annals of the Institute of Statistical Mathematics} \bold{49}, 615--644. Falk, M. (1998). A note on the comedian for elliptical distributions. \emph{Journal of Multivariate Analysis} \bold{67}, 306--317. } \author{ Maria Anna di Palma (initial), Valentin Todorov and Martin Maechler } \seealso{ \code{\link{covMcd}}, etc } \examples{ data(hbk) hbk.x <- data.matrix(hbk[, 1:3]) (cc1 <- covComed(hbk.x)) (ccW <- covComed(hbk.x, reweight=TRUE)) cc0 <- covComed(hbk.x, n.iter=0) cc0W <- covComed(hbk.x, n.iter=0, reweight=TRUE) stopifnot(all.equal(unclass(cc0), # here, the 0-1 weights don't change: cc0W[names(cc0)], tol=1e-12, check.environment = FALSE), which(cc1$weights == 0) == 1:14, which(ccW$weights == 0) == 1:14, which(cc0$weights == 0) == 1:14) %% TODO : more ## Martin's smooth reweighting: ## List of experimental pre-specified wgtFUN() creators: ## Cutoffs may depend on (n, p, control$beta) : str(.wgtFUN.covComed) } robustbase/man/residuals.glmrob.Rd0000644000176200001440000000523611651611170016746 0ustar liggesusers% Origin: src/library/stats/man/glm.summaries.Rd (as of 2011-10-23) \name{residuals.glmrob} \alias{residuals.glmrob} \title{Residuals of Robust Generalized Linear Model Fits} \usage{ \method{residuals}{glmrob}(object, type = c("deviance", "pearson", "working", "response", "partial"), \dots) } \arguments{ \item{object}{an object of class \code{glmrob}, typically the result of a call to \code{\link{glmrob}}.} \item{type}{the type of residuals which should be returned. The alternatives are: \code{"deviance"} (default), \code{"pearson"}, \code{"working"}, \code{"response"}, and \code{"partial"}.} \item{\dots}{further arguments passed to or from other methods.} } \description{ Compute residuals of a fitted \code{\link{glmrob}} model, i.e., robust generalized linear model fit. } \details{ The references in \code{\link{glm}} define the types of residuals: Davison & Snell is a good reference for the usages of each. The partial residuals are a matrix of working residuals, with each column formed by omitting a term from the model. The \code{residuals} (S3) method (see \code{\link{methods}}) for \code{\link{glmrob}} models has been modeled to follow closely the method for classical (non-robust) \code{\link{glm}} fitted models. Possibly, see its documentation, i.e., \link{residuals.glm}, for further details. } \seealso{ \code{\link{glmrob}} for computing \code{object}, \code{\link{anova.glmrob}}; the corresponding \emph{generic} functions, \code{\link{summary.glmrob}}, \code{\link{coef}}, % \code{\link{deviance}}, \code{\link{effects}}, \code{\link{fitted}}, \code{\link{residuals}}. } \references{ See those for the classical GLM's, \code{\link{glm}}. 
} \examples{ ### -------- Gamma family -- data from example(glm) --- clotting <- data.frame( u = c(5,10,15,20,30,40,60,80,100), lot1 = c(118,58,42,35,27,25,21,19,18), lot2 = c(69,35,26,21,18,16,13,12,12)) summary(cl <- glm (lot1 ~ log(u), data=clotting, family=Gamma)) summary(ro <- glmrob(lot1 ~ log(u), data=clotting, family=Gamma)) clotM5.high <- within(clotting, { lot1[5] <- 60 }) cl5.high <- glm (lot1 ~ log(u), data=clotM5.high, family=Gamma) ro5.high <- glmrob(lot1 ~ log(u), data=clotM5.high, family=Gamma) rr <- range(residuals(ro), residuals(cl), residuals(ro5.high)) plot(residuals(ro5.high) ~ residuals(cl5.high), xlim = rr, ylim = rr, asp = 1) abline(0,1, col=2, lty=3) points(residuals(ro) ~ residuals(cl), col = "gray", pch=3) ## Show all kinds of residuals: r.types <- c("deviance", "pearson", "working", "response") sapply(r.types, residuals, object = ro5.high) } \keyword{models} \keyword{regression} robustbase/man/adjOutlyingness.Rd0000644000176200001440000002200214000141110016621 0ustar liggesusers\name{adjOutlyingness} \alias{adjOutlyingness} \title{Compute (Skewness-adjusted) Multivariate Outlyingness} \description{ For an \eqn{n \times p}{n * p} data matrix (or data frame) \code{x}, compute the \dQuote{\emph{outlyingness}} of all \eqn{n} observations. Outlyingness here is a generalization of the Donoho-Stahel outlyingness measure, where skewness is taken into account via the medcouple, \code{\link{mc}()}. } \usage{ adjOutlyingness(x, ndir = 250, p.samp = p, clower = 4, cupper = 3, IQRtype = 7, alpha.cutoff = 0.75, coef = 1.5, qr.tol = 1e-12, keep.tol = 1e-12, only.outlyingness = FALSE, maxit.mult = max(100, p), trace.lev = 0, mcReflect = n <= 100, mcScale = TRUE, mcMaxit = 2*maxit.mult, mcEps1 = 1e-12, mcEps2 = 1e-15, mcTrace = max(0, trace.lev-1)) } \arguments{ \item{x}{a numeric \code{\link{matrix}} or \code{\link{data.frame}}, which must be of full rank \eqn{p}.} \item{ndir}{positive integer specifying the number of directions that should be searched.} \item{p.samp}{the sample size to use for finding good random directions, must be at least \code{p}. The default, \code{p} had been hard coded previously.} \item{clower, cupper}{the constant to be used for the lower and upper tails, in order to transform the data towards symmetry. You can set \code{clower = 0, cupper = 0} to get the \emph{non}-adjusted, i.e., classical (\dQuote{central} or \dQuote{symmetric}) outlyingness. In that case, \code{\link{mc}()} is not used.} \item{IQRtype}{a number from \code{1:9}, denoting type of empirical quantile computation for the \code{\link{IQR}()}. The default 7 corresponds to \code{\link{quantile}}'s and \code{\link{IQR}}'s default. MM has evidence that \code{type=6} would be a bit more stable for small sample size.} \item{alpha.cutoff}{number in (0,1) specifying the quantiles \eqn{(\alpha, 1-\alpha)} which determine the \dQuote{outlier} cutoff. The default, using quartiles, corresponds to the definition of the medcouple (\code{\link{mc}}), but there is no stringent reason for using the same alpha for the outlier cutoff.} \item{coef}{positive number specifying the factor with which the interquartile range (\code{\link{IQR}}) is multiplied to determine \sQuote{boxplot hinges}-like upper and lower bounds.} \item{qr.tol}{positive tolerance to be used for \code{\link{qr}} and \code{\link{solve.qr}} for determining the \code{ndir} directions, each determined by a random sample of \eqn{p} (out of \eqn{n}) observations. 
Note that the default \eqn{10^{-12}} is rather small, and \code{\link{qr}}'s default \code{= 1e-7} may be more appropriate.} \item{keep.tol}{positive tolerance to determine which of the sample direction should be kept, namely only those for which \eqn{\|x\| \cdot \|B\|}{||x|| * ||B||} is larger than \code{keep.tol}.} \item{only.outlyingness}{logical indicating if the final outlier determination should be skipped. In that case, a vector is returned, see \sQuote{Value:} below.} \item{maxit.mult}{integer factor; \code{maxit <- maxit.mult * ndir} will determine the maximal number of direction searching iterations. May need to be increased for higher dimensional data, though increasing \code{ndir} may be more important.} \item{trace.lev}{an integer, if positive allows to monitor the direction search.} %% new (Aug-Dec 2020), related to mc(): see >>> ./mc.Rd <<< \item{mcReflect}{passed as \code{doReflect} to \code{\link{mc}()}.} \item{mcScale}{passed as \code{doScale} to \code{\link{mc}()}.} \item{mcMaxit}{passed as \code{maxit} to \code{\link{mc}()}.} \item{mcEps1}{passed as \code{eps1} to \code{\link{mc}()}; the default is slightly looser (100 larger) than the default for \code{mc}().} \item{mcEps2}{passed as \code{eps2} to \code{\link{mc}()}.} \item{mcTrace}{passed as \code{trace.lev} to \code{\link{mc}()}.} } \note{ The result is \emph{random} as it depends on the sample of \code{ndir} directions chosen; specifically, to get sub samples the algorithm uses \code{\link{sample.int}(n, p.samp)} which from \R version 3.6.0 depends on \code{\link{RNGkind}(*, sample.kind)}. Exact reproducibility of results from \R versions 3.5.3 and earlier, requires setting \code{\link{RNGversion}("3.5.0")}.% same text in ./glmrob.Rd ("MT") In any case, do use \code{\link{set.seed}()} yourself for reproducibility! Till Aug/Oct. 2014, the default values for \code{clower} and \code{cupper} were accidentally reversed, and the signs inside \code{exp(.)} where swapped in the (now corrected) two expressions \preformatted{ tup <- Q3 + coef * IQR * exp(.... + clower * tmc * (tmc < 0)) tlo <- Q1 - coef * IQR * exp(.... - cupper * tmc * (tmc < 0)) } already in the code from Antwerpen (\file{mcrsoft/adjoutlingness.R}), contrary to the published reference. Further, the original algorithm had not been scale-equivariant in the direction construction, which has been amended in 2014-10 as well. The results, including diagnosed outliers, therefore have changed, typically slightly, since \pkg{robustbase} version 0.92-0. } \details{ \bold{FIXME}: Details in the comment of the Matlab code; also in the reference(s). %% SEE /u/maechler/R/MM/STATISTICS/robust/MC/mcmatl/adjoutlyingness.m %% ---- which has notes about input/output etc of the corresponding %% Matlab code The method as described can be useful as preprocessing in FASTICA (\url{http://research.ics.aalto.fi/ica/fastica/} see also the \R package \CRANpkg{fastICA}. } \value{ If \code{only.outlyingness} is true, a vector \code{adjout}, otherwise, as by default, a list with components \item{adjout}{numeric of \code{length(n)} giving the adjusted outlyingness of each observation.} \item{cutoff}{cutoff for \dQuote{outlier} with respect to the adjusted outlyingnesses, and depending on \code{alpha.cutoff}.} \item{nonOut}{logical of \code{length(n)}, \code{TRUE} when the corresponding observation is \bold{non}-outlying with respect to the cutoff and the adjusted outlyingnesses.} } \references{ Brys, G., Hubert, M., and Rousseeuw, P.J. 
(2005) A Robustification of Independent Component Analysis; \emph{Journal of Chemometrics}, \bold{19}, 1--12. Hubert, M., Van der Veeken, S. (2008) Outlier detection for skewed data; \emph{Journal of Chemometrics} \bold{22}, 235--246; \doi{10.1002/cem.1123}. %% preprint \url{http://wis.kuleuven.be/stat/robust/papers/2008/outlierdetectionskeweddata-revision.pdf} %%MM: Journal-pdf ~/save/papers/robust-diverse/Hubert_VdV_skewed-Chemom_2008.pdf %%MM: Compstat 2010: Slides (of talk) and paper of Mia H: %% ~/save/papers/robust-diverse/Hubert_skewed-CS2010-slides.pdf and %% ~/save/papers/robust-diverse/Hubert_skewed-CS2010-paper.pdf (slides are better !!) For the up-to-date reference, please consult %\url{https://wis.kuleuven.be/stat/robust}% to sound modern: \url{https://wis.kuleuven.be/statdatascience/robust} } \author{Guy Brys; help page and improvements by Martin Maechler} \seealso{the adjusted boxplot, \code{\link{adjbox}} and the medcouple, \code{\link{mc}}. } \examples{ ## An Example with bad condition number and "border case" outliers dim(longley) # 16 x 7 // set seed, as result is random : set.seed(31) ao1 <- adjOutlyingness(longley, mcScale=FALSE) ## which are outlying ? which(!ao1$nonOut) ## for this seed, two: "1956", "1957"; (often: none) ## For seeds 1:100, we observe (Linux 64b) if(FALSE) { adjO <- sapply(1:100, function(iSeed) { set.seed(iSeed); adjOutlyingness(longley)$nonOut }) table(nrow(longley) - colSums(adjO)) } ## #{outl.}: 0 1 2 3 ## #{cases}: 74 17 6 3 ## An Example with outliers : dim(hbk) set.seed(1) ao.hbk <- adjOutlyingness(hbk) str(ao.hbk) hist(ao.hbk $adjout)## really two groups table(ao.hbk$nonOut)## 14 outliers, 61 non-outliers: ## outliers are : which(! ao.hbk$nonOut) # 1 .. 14 --- but not for all random seeds! ## here, they are the same as found by (much faster) MCD: cc <- covMcd(hbk) stopifnot(all(cc$mcd.wt == ao.hbk$nonOut)) ## This is revealing: About 1--2 cases, where outliers are *not* == 1:14 ## but needs almost 1 [sec] per call: if(interactive()) { for(i in 1:30) { print(system.time(ao.hbk <- adjOutlyingness(hbk))) if(!identical(iout <- which(!ao.hbk$nonOut), 1:14)) { cat("Outliers:\n"); print(iout) } } } ## "Central" outlyingness: *not* calling mc() anymore, since 2014-12-11: trace(mc) out <- capture.output( oo <- adjOutlyingness(hbk, clower=0, cupper=0) ) untrace(mc) stopifnot(length(out) == 0) ## A rank-deficient case T <- tcrossprod(data.matrix(toxicity)) try(adjOutlyingness(T, maxit. = 20, trace.lev = 2)) # fails and recommends: T. <- fullRank(T) aT <- adjOutlyingness(T.) plot(sort(aT$adjout, decreasing=TRUE), log="y") plot(T.[,9:10], col = (1:2)[1 + (aT$adjout > 10000)]) ## .. (not conclusive; directions are random, more 'ndir' makes a difference!) } \keyword{robust} \keyword{multivariate} robustbase/man/biomassTill.Rd0000644000176200001440000000644213312375575015770 0ustar liggesusers\name{biomassTill} \alias{biomassTill} \docType{data} \title{Biomass Tillage Data} \description{ An agricultural experiment in which different tillage methods were implemented. The effects of tillage on plant (maize) biomass were subsequently determined by modeling biomass accumulation for each tillage treatment using a 3 parameter Weibull function. A datset where the total biomass is modeled conditional on a three value factor, and hence \emph{vector} parameters are used. } \usage{data("biomassTill", package="robustbase")} \format{ A data frame with 58 observations on the following 3 variables. 
\describe{ \item{\code{Tillage}}{Tillage treatments, a \code{\link{factor}} with levels \describe{ \item{\code{CA-}:}{a no-tillage system with plant residues removed} \item{\code{CA+}:}{a no-tillage system with plant residues retained} \item{\code{CT}:}{a conventionally tilled system with residues incorporated} } } \item{\code{DVS}}{the development stage of the maize crop. A DVS of \code{1} represents maize anthesis (flowering), and a DVS of \code{2} represents physiological maturity. For the data, numeric vector with 5 different values between 0.5 and 2.} \item{\code{Biomass}}{accumulated biomass of maize plants from each tillage treatment.} \item{\code{Biom.2}}{the same as \code{Biomass}, but with three values replaced by \dQuote{gross errors}.} } } \source{ From Strahinja Stepanovic and John Laborde, Department of Agronomy & Horticulture, University of Nebraska-Lincoln, USA } %% \references{ %% %% ~~ possibly secondary sources and usages ~~ %% } \examples{ data(biomassTill) str(biomassTill) require(lattice) ## With long tailed errors xyplot(Biomass ~ DVS | Tillage, data = biomassTill, type=c("p","smooth")) ## With additional 2 outliers: xyplot(Biom.2 ~ DVS | Tillage, data = biomassTill, type=c("p","smooth")) ### Fit nonlinear Regression models: ----------------------------------- ## simple starting values, needed: m00st <- list(Wm = rep(300, 3), a = rep( 1.5, 3), b = rep( 2.2, 3)) robm <- nlrob(Biomass ~ Wm[Tillage] * (-expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, maxit = 200) ## ----------- summary(robm) ## ... 103 IRWLS iterations plot(sort(robm$rweights), log = "y", main = "ordered robustness weights (log scale)") mtext(getCall(robm)) ## the classical (only works for the mild outliers): cl.m <- nls(Biomass ~ Wm[Tillage] * (-expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st) ## now for the extra-outlier data: -- fails with singular gradient !! try( rob2 <- nlrob(Biom.2 ~ Wm[Tillage] * (-expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st) ) ## use better starting values: m1st <- setNames(as.list(as.data.frame(matrix( coef(robm), 3))), c("Wm", "a","b")) try(# just breaks a bit later! rob2 <- nlrob(Biom.2 ~ Wm[Tillage] * (-expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m1st, maxit= 200, trace=TRUE) ) ## Comparison {more to come} % once we have "MM" working... rbind(start = unlist(m00st), class = coef(cl.m), rob = coef(robm)) } \keyword{datasets} robustbase/man/classPC.Rd0000644000176200001440000000705112661565243015032 0ustar liggesusers\name{classPC} \title{Compute Classical Principal Components via SVD or Eigen} \alias{classPC} \concept{PCA} \description{ Compute classical principal components (PC) via SVD (\code{\link{svd}} or eigenvalue decomposition (\code{\link{eigen}}) with non-trivial rank determination. } \usage{ classPC(x, scale = FALSE, center = TRUE, signflip = TRUE, via.svd = n > p, scores = FALSE) } \arguments{ \item{x}{a numeric \code{\link{matrix}}.} \item{scale}{logical indicating if the matrix should be scaled; it is mean centered in any case (via \code{\link{scale}(*, scale=scale)}c} \item{center}{logical or numeric vector for \dQuote{centering} the matrix.} \item{signflip}{logical indicating if the sign(.) 
of the loadings should be determined should flipped such that the absolutely largest value is always positive.} \item{via.svd}{logical indicating if the computation is via SVD or Eigen decomposition; the latter makes sense typically only for n <= p.} \item{scores}{logical indicating} } \author{ Valentin Todorov; efficiency tweaks by Martin Maechler } \value{ a \code{\link{list}} with components \item{rank}{the (numerical) matrix rank of \code{x}; an integer number, say \eqn{k}, from \code{0:min(dim(x))}. In the \eqn{n > p} case, it is \code{\link{rankMM}(x)}.} \item{eigenvalues}{the \eqn{k} eigenvalues, in the \eqn{n > p} case, proportional to the variances.} \item{loadings}{the loadings, a \eqn{p \times k}{p * k} matrix.} \item{scores}{if the \code{scores} argument was true, the \eqn{n \times k}{n * k} matrix of scores, where \eqn{k} is the \code{rank} above.} \item{center}{a numeric \eqn{p}-vector of means, unless the \code{center} argument was false.} \item{scale}{if the \code{scale} argument was not false, the \code{scale} used, a \eqn{p}-vector.} } %% \details{ %% } %% \references{ %% } \seealso{ In spirit very similar to \R's standard \code{\link{prcomp}} and \code{\link{princomp}}, one of the main differences being how the \emph{rank} is determined via a non-trivial tolerance. } \examples{ set.seed(17) x <- matrix(rnorm(120), 10, 12) # n < p {the unusual case} pcx <- classPC(x) (k <- pcx$rank) # = 9 [after centering!] pc2 <- classPC(x, scores=TRUE) pcS <- classPC(x, via.svd=TRUE) all.equal(pcx, pcS, tol = 1e-8) ## TRUE: eigen() & svd() based PC are close here pc0 <- classPC(x, center=FALSE, scale=TRUE) pc0$rank # = 10 here *no* centering (as E[.] = 0) ## Loadings are orthnormal: zapsmall( crossprod( pcx$loadings ) ) ## PC Scores are roughly orthogonal: S.S <- crossprod(pc2$scores) print.table(signif(zapsmall(S.S), 3), zero.print=".") stopifnot(all.equal(pcx$eigenvalues, diag(S.S)/k)) ## the usual n > p case : pc.x <- classPC(t(x)) pc.x$rank # = 10, full rank in the n > p case cpc1 <- classPC(cbind(1:3)) # 1-D matrix stopifnot(cpc1$rank == 1, all.equal(cpc1$eigenvalues, 1), all.equal(cpc1$loadings, 1)) \dontshow{ stopifnot(classPC(x, center=FALSE)$rank == min(dim(x))) ii <- names(pcx); ii <- ii[ii != "scores"] stopifnot(all.equal(pcx[ii], pc2[ii], tol=0), all.equal(pcx, pcS, tol=1e-8), length(pc.x$center) == 10, identical(pc0$center, FALSE), all.equal(crossprod(pcx $loadings), diag(9)), all.equal(crossprod(pc.x$loadings), diag(10)), all.equal(colSums(abs(pcx$loadings)), c(2.69035673, 2.78449399, 3.00148438, 2.9016688, 2.49400759, 2.90477204, 3.01639807, 2.4217181, 2.64665957)), length(pc0$scale) == 12) }% dont.. } \keyword{multivariate} robustbase/man/anova.lmrob.Rd0000644000176200001440000001010612221620231015670 0ustar liggesusers\name{anova.lmrob} \alias{anova.lmrob} \title{Analysis of Robust Deviances ('anova') for "lmrob" Objects} \description{ Compute an analysis of robust Wald-type or deviance-type test tables for one or more linear regression models fitted by \code{\link{lmrob}}. } \usage{ \method{anova}{lmrob}(object, \dots, test = c("Wald", "Deviance"), verbose = getOption("verbose")) } \arguments{ \item{object, \dots}{objects of class \code{"lmrob"}, typically the result of a call to \code{\link{lmrob}}. \code{\dots} arguments may also be symbolic descriptions of the reduced models (cf. argument \code{formula} in \code{\link{lm}}). } \item{test}{a character string specifying the test statistic to be used. 
Can be one of \code{"Wald"} or \code{"Deviance"}, with partial matching allowed, for specifying a \code{"Wald"}-type test or \code{"Deviance"}-type test.} \item{verbose}{logical; if true some informative messages are printed.} } \details{ Specifying a single object gives a sequential analysis of a robust quasi-deviance table for that fit. That is, the reductions in the robust residual deviance as each term of the formula is added in turn are given in as the rows of a table. (Currently not yet implemented.) If more than one object is specified, the table has a row for the residual quasi-degrees of freedom (however, this information is never used in the asymptotic tests). For all but the first model, the change in degrees of freedom and robust deviance is also given. (This only makes statistical sense if the models are nested.) As opposed to the convention, the models are forced to be listed from largest to smallest due to computational reasons. In addition, the table will contain test statistics and P values comparing the reduction in robust deviances for the model on the row to that on top of it. There are two different robust tests available: The "Wald"-type test (\code{test = "Wald"}) and the Deviance-type test (\code{test = "Deviance"}). When using formula description of the nested models in the dot arguments and \code{test = "Deviance"}, you may be urged to supply a \code{\link{lmrob}} fit for these models by an error message. This happens when the coefficients of the largest model reduced to the nested models result in invalid initial estimates for the nested models (indicated by robustness weights which are all 0). The comparison between two or more models by \code{\link{anova.lmrob}} will only be valid if they are fitted to the same dataset. } \value{ Basically, an object of class \code{\link{anova}} inheriting from class \code{\link{data.frame}}. } %%\references{ ~put references to the literature/web site here ~ } \author{Andreas Ruckstuhl} \seealso{\code{\link{lmrob}}, \code{\link{anova}}. } \examples{ data(salinity) summary(m0.sali <- lmrob(Y ~ . , data = salinity)) anova(m0.sali, Y ~ X1 + X3) ## -> X2 is not needed (m1.sali <- lmrob(Y ~ X1 + X3, data = salinity)) anova(m0.sali, m1.sali) # the same as before anova(m0.sali, m1.sali, test = "Deviance") ## whereas 'X3' is highly significant: m2 <- update(m0.sali, ~ . -X3) anova(m0.sali, m2) anova(m0.sali, m2, test = "Deviance") ## Global test [often not interesting]: anova(m0.sali, update(m0.sali, . ~ 1), test = "Wald") anova(m0.sali, update(m0.sali, . ~ 1), test = "Deviance") if(require("MPV")) { ## Montgomery, Peck & Vining datasets Jet <- table.b13 Jet.rflm1 <- lmrob(y ~ ., data=Jet, control = lmrob.control(max.it = 500)) summary(Jet.rflm1) anova(Jet.rflm1, y ~ x1 + x5 + x6, test="Wald") try( anova(Jet.rflm1, y ~ x1 + x5 + x6, test="Deviance") ) ## -> Error in anovaLm.... Please fit the nested models by lmrob ## {{ since all robustness weights become 0 in the nested model ! 
}} ## Ok: Do as the error message told us: ## test by comparing the two *fitted* models: Jet.rflm2 <- lmrob(y ~ x1 + x5 + x6, data=Jet, control=lmrob.control(max.it=100)) anova(Jet.rflm1, Jet.rflm2, test="Deviance") } # end{"MPV" data} } \keyword{robust} \keyword{models} \keyword{regression} robustbase/man/plot.lmrob.Rd0000644000176200001440000000565412513246457015600 0ustar liggesusers\name{plot.lmrob} \alias{plot.lmrob} \title{Plot Method for "lmrob" Objects} \description{ Diagnostic plots for elements of class lmrob } \usage{ \method{plot}{lmrob}(x, which = 1:5, caption = c("Standardized residuals vs. Robust Distances", "Normal Q-Q vs. Residuals", "Response vs. Fitted Values", "Residuals vs. Fitted Values" , "Sqrt of abs(Residuals) vs. Fitted Values"), panel = if(add.smooth) panel.smooth else points, sub.caption = deparse(x$call), main = "", compute.MD = TRUE, ask = prod(par("mfcol")) < length(which) && dev.interactive(), id.n = 3, labels.id = names(residuals(x)), cex.id = 0.75, label.pos = c(4,2), qqline = TRUE, add.smooth = getOption("add.smooth"), \dots, p=0.025) } \arguments{ \item{x}{ an object as created by \code{lmrob} } \item{which}{ integer number between 1 and 5 to specify which plot is desired } \item{caption}{Caption for the different plots} \item{panel}{panel function. The useful alternative to \code{\link{points}}, \code{\link{panel.smooth}} can be chosen by \code{add.smooth = TRUE}.} \item{main}{main title} \item{sub.caption}{sub titles} \item{compute.MD}{logical indicating if the robust Mahalanobis distances should be recomputed, using \code{\link{covMcd}()} when needed, i.e., if \code{which} contains \code{1}.} \item{ask}{waits for user input before displaying each plot } \item{id.n}{number of points to be labelled in each plot, starting with the most extreme.} \item{labels.id}{vector of labels, from which the labels for extreme points will be chosen. \code{NULL} uses observation numbers.} \item{cex.id}{magnification of point labels.} \item{label.pos}{positioning of labels, for the left half and right half of the graph respectively.}% for plots ... \item{qqline}{logical indicating if a \code{\link{qqline}()} should be added to the normal Q-Q plot.} \item{add.smooth}{logical indicating if a smoother should be added to most plots; see also \code{panel} above.} \item{\dots}{optional arguments for \code{\link{par}}, \code{\link{title}}, etc.} \item{p}{threshold for distance-distance plot} } \references{ Robust diagnostic plots as in Rousseeuw and van Zomeren (1990), see \sQuote{References} in \code{\link{ltsPlot}}. } \details{ if \code{compute.MD = TRUE} and the robust Mahalanobis distances need to be computed, they are stored (\dQuote{cached}) with the object \code{x} when this function has been called from top-level. } \seealso{ \code{\link{lmrob}}, also for examples, \code{\link{plot.lm}}. } \examples{ data(starsCYG) ## Plot simple data and fitted lines plot(starsCYG) lmST <- lm(log.light ~ log.Te, data = starsCYG) RlmST <- lmrob(log.light ~ log.Te, data = starsCYG) RlmST abline(lmST, col = "red") abline(RlmST, col = "blue") op <- par(mfrow = c(2,2), mgp = c(1.5, 0.6, 0), mar= .1+c(3,3,3,1)) plot(RlmST, which = c(1:2, 4:5)) par(op) } \keyword{robust} \keyword{regression} robustbase/man/pension.Rd0000644000176200001440000000223313312375575015153 0ustar liggesusers\name{pension} \alias{pension} \title{Pension Funds Data} \description{ The total 1981 premium income of pension funds of Dutch firms, for 18 Professional Branches, from de Wit (1982). 
} \usage{data(pension, package="robustbase")} \format{ A data frame with 18 observations on the following 2 variables. \describe{ \item{\code{Income}}{Premium Income (in millions of guilders)} \item{\code{Reserves}}{Premium Reserves (in millions of guilders)} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.76, table 13. } \examples{ data(pension) plot(pension) summary(lm.p <- lm(Reserves ~., data=pension)) summary(lmR.p <- lmrob(Reserves ~., data=pension)) summary(lts.p <- ltsReg(Reserves ~., data=pension)) abline( lm.p) abline(lmR.p, col=2) abline(lts.p, col=2, lty=2) ## MM: "the" solution is much simpler: plot(pension, log = "xy") lm.lp <- lm(log(Reserves) ~ log(Income), data=pension) lmR.lp <- lmrob(log(Reserves) ~ log(Income), data=pension) plot(log(Reserves) ~ log(Income), data=pension) ## no difference between LS and robust: abline( lm.lp) abline(lmR.lp, col=2) } \keyword{datasets} robustbase/man/possumDiv.Rd0000644000176200001440000001163113713564014015464 0ustar liggesusers\name{possumDiv} \alias{possumDiv} \alias{possum.mat} \docType{data} \title{Possum Diversity Data} \description{ Possum diversity data: As issued from a study of the diversity of possum (arboreal marsupials) in the Montane ash forest (Australia), this dataset was collected in view of the management of hardwood forest to take conservation and recreation values, as well as wood production, into account. The study is fully described in the two references. The number of different species of arboreal marsupials (possum) was observed on 151 different 3ha sites with uniform vegetation. For each site the nine variable measures (see below) were recorded. The problem is to model the relationship between \code{diversity} and these other variables. } \usage{data(possumDiv, package="robustbase")} \format{ Two different representations of the same data are available: \code{possumDiv} is a data frame of 151 observations of 9 variables, where the last two are factors, \code{eucalyptus} with 3 levels and \code{aspect} with 4 levels. \code{possum.mat} is a numeric (integer) matrix of 151 rows (observations) and 14 columns (variables) where the last seven ones are 0-1 dummy variables, three (\code{E.*}) are coding for the kind of \code{eucalyptus} and the last four are 0-1 coding for the \code{aspect} factor. The variables have the following meaning: \describe{ \item{Diversity}{main variable of interest is the number of different species of arboreal marsupial (possum) observed, with values in 0:5.} \item{Shrubs}{the number of shrubs.} \item{Stumps}{the number of cut stumps from past logging operations.} \item{Stags}{the number of stags (hollow-bearing trees).} \item{Bark}{bark index (integer) vector reflecting the quantity of decorticating bark.} \item{Habitat}{an integer score indicating the suitability of nesting and foraging habitat for Leadbeater's possum.} \item{BAcacia}{a numeric vector giving the basal area of acacia species.} \cr \item{eucalyptus}{a 3-level \code{\link{factor}} specifying the species of eucalypt with the greatest stand basal area. This has the same information as the following three variables} \item{E.regnans}{0-1 indicator for Eucalyptus regnans} \item{E.delegatensis}{0-1 indicator for Eucalyptus deleg.} \item{E.nitens}{0-1 indicator for Eucalyptus nitens} \cr \item{aspect}{a 4-level \code{\link{factor}} specifying the aspect of the site. 
It is the same information as the following four variables.} \item{NW-NE}{0-1 indicator} \item{NW-SE}{0-1 indicator} \item{SE-SW}{0-1 indicator} \item{SW-NW}{0-1 indicator} } } \source{ Eva Cantoni (2004) Analysis of Robust Quasi-deviances for Generalized Linear Models. \emph{Journal of Statistical Software} \bold{10}, 04, \url{https://www.jstatsoft.org/article/view/v010i04} } \references{ Lindenmayer, D. B., Cunningham, R. B., Tanton, M. T., Nix, H. A. and Smith, A. P. (1991) The conservation of arboreal marsupials in the montane ash forests of the central highlands of victoria, south-east australia: III. The habitat requirements of leadbeater's possum \emph{gymnobelideus leadbeateri} and models of the diversity and abundance of arboreal marsupials. \emph{Biological Conservation} \bold{56}, 295--315. Lindenmayer, D. B., Cunningham, R. B., Tanton, M. T., Smith, A. P. and Nix, H. A. (1990) The conservation of arboreal marsupials in the montane ash forests of the victoria, south-east australia, I. Factors influencing the occupancy of trees with hollows, \emph{Biological Conservation} \bold{54}, 111--131. See also the references in \code{\link{glmrob}}. } \examples{ data(possumDiv) head(possum.mat) str(possumDiv) ## summarize all variables as multilevel factors: summary(as.data.frame(lapply(possumDiv, function(v) if(is.integer(v)) factor(v) else v))) ## Following Cantoni & Ronchetti (2001), JASA, p.1026 f.:% cf. ../tests/poisson-ex.R pdFit <- glmrob(Diversity ~ . , data = possumDiv, family=poisson, tcc = 1.6, weights.on.x = "hat", acc = 1e-15) summary(pdFit) summary(pdF2 <- update(pdFit, ~ . -Shrubs)) summary(pdF3 <- update(pdF2, ~ . -eucalyptus)) summary(pdF4 <- update(pdF3, ~ . -Stumps)) summary(pdF5 <- update(pdF4, ~ . -BAcacia)) summary(pdF6 <- update(pdF5, ~ . -aspect))# too much .. anova(pdFit, pdF3, pdF4, pdF5, pdF6, test = "QD") # indeed, ## indeed, the last simplification is too much possumD.2 <- within(possumDiv, levels(aspect)[1:3] <- rep("other", 3)) ## and use this binary 'aspect' instead of the 4-level one: summary(pdF5.1 <- update(pdF5, data = possumD.2)) if(FALSE) # not ok, as formually not nested. anova(pdF5, pdF5.1) summarizeRobWeights(weights(pdF5.1, type="rob"), eps = 0.73) ##-> "outliers" (1, 59, 110) wrob <- setNames(weights(pdF5.1, type="rob"), rownames(possumDiv)) head(sort(wrob)) } \keyword{datasets} robustbase/man/psiFunc.Rd0000644000176200001440000000521213310673404015075 0ustar liggesusers\name{psiFunc} \alias{psiFunc} \alias{huberPsi} \alias{hampelPsi} \title{Constructor for Objects "Psi Function" Class} %% Rd Problem: 'psi_func' cannot be used in title! \description{ \code{psiFunc(..)} is a convenience interface to \code{new("psi_func",..)}, i.e. for constructing objects of class \code{"psi_func"}. } \usage{ psiFunc(rho, psi, wgt, Dpsi,Dwgt, Erho = NULL, Epsi2 = NULL, EDpsi = NULL, name, ...) huberPsi hampelPsi } \arguments{ \item{rho, psi, wgt, Dpsi, Dwgt}{each a \code{\link{function}} of \code{x} and tuning parameters typically. 
Specification of Dwgt is optional.} \item{Erho, Epsi2, EDpsi}{see \code{\linkS4class{psi_func}}, and note that these may change in the future.} \item{name}{Name of \eqn{\psi}{psi}-function used for printing.} \item{\dots}{potential further arguments for specifying tuning parameter names and defaults.} %% FIXME; give more details } % \details{ % ~~ If necessary, more details than the description above ~~ % } % \value{ % ~Describe the value returned % If it is a LIST, use % \item{comp1 }{Description of 'comp1'} % \item{comp2 }{Description of 'comp2'} % ... % } %%\references{ ~put references to the literature/web site here ~ } \author{Martin Maechler} \seealso{The description of class \code{\linkS4class{psi_func}}. } \examples{ plot(huberPsi) # => shows "all" {as an object with a smart plot() method} ## classical (Gaussian / "least-squares") psi {trivial}: F1 <- function(x, .) rep.int(1, length(x)) FF <- function(.) rep.int(1, length(.)) cPsi <- psiFunc(rho = function(x,.) x^2 / 2, psi = function(x, .) x, wgt = F1, Dpsi = F1, Erho = function(.) rep.int(1/2, length(.)), Epsi2 = FF, EDpsi = FF, name = "classic", . = Inf) show(cPsi) plot(cPsi) ## is the same as the limit of Huber's: plot(chgDefaults(huberPsi, k = Inf)) ## Hampel's psi and rho: H.38 <- chgDefaults(hampelPsi, k = c(1.5, 3.5, 8)) k. <- H.38@xtras$tuningP$k ; k.. <- as.vector(outer(c(-1,1), k.)) c.t <- adjustcolor("skyblue3", .8) .ax.k <- function(side) { abline(h=0, v=0, lty=2) axis(side, at = k.., labels=formatC(k..), pos=0, col=c.t, col.axis=c.t) } op <- par(mfrow=c(2,1), mgp = c(1.5, .6, 0), mar = .6+c(2,2,1,.5)) curve(H.38@psi(x), -10, 10, col=2, lwd=2, n=512) lines(k.., H.38@psi(k..), type = "h", lty=3, col=c.t); .ax.k(1) curve(H.38@rho(x), -10, 10, col=2, lwd=2, n=512); abline(h=0, v=0, lty=2) lines(k.., H.38@rho(k..), type = "h", lty=3, col=c.t); .ax.k(1) title(expression("Hampel's " ~~~ psi(x) ~~ "and" ~~ rho(x) ~~~ " functions")) par(op) ## Not the same, but similar, directly using the plot() method: plot(H.38) } \keyword{classes} \keyword{robust} robustbase/man/cushny.Rd0000644000176200001440000000432013312375575015010 0ustar liggesusers\name{cushny} \encoding{utf8} \alias{cushny} \docType{data} \title{Cushny and Peebles Prolongation of Sleep Data} \description{ The original data set was bivariate and recorded for ten subjects the prolongation of sleep caused by two different drugs. These data were used by Student as the first illustration of the paired t-test which only needs the \emph{differences} of the two measurements. These differences are the values of \code{cushny}. } \usage{data(cushny, package="robustbase")} \format{ numeric vector, sorted increasingly:\cr 0 0.8 1 1.2 1.3 1.3 1.4 1.8 2.4 4.6 } \source{ Cushny, A.R. and Peebles, A.R. (1905) The action of optical isomers. II. Hyoscines. \emph{J. Physiol.} \bold{32}, 501--510. These data were used by Student(1908) as the first illustration of the paired t-test, see also \code{\link[datasets]{sleep}}; then cited by Fisher (1925) and thereforth copied in numerous books as an example of a normally distributed sample, see, e.g., Anderson (1958). } \references{ Student (1908) The probable error of a mean. \emph{Biometrika} \bold{6}, 1--25. Fisher, R.A. (1925) \emph{Statistical Methods for Research Workers}; Oliver & Boyd, Edinburgh. Anderson, T.W. (1958) \emph{An Introduction to Multivariate Statistical Analysis}; Wiley, N.Y. Hampel, F., Ronchetti, E., Rousseeuw, P. and Stahel, W. (1986) \emph{Robust Statistics: The Approach Based on Influence Functions}; Wiley, N.Y. 
} \examples{
data(cushny)
plot(cushny, rep(0, 10), pch = 3, cex = 3, ylab = "", yaxt = "n")
plot(jitter(cushny), rep(0, 10), pch = 3, cex = 2,
     main = "'cushny' data (n= 10)", ylab = "", yaxt = "n")
abline(h=0, col="gray", lty=3)
myPt <- function(m, lwd = 2, ..., e = 1.5*par("cxy")[2])
  segments(m, +e, m, -e, lwd = lwd, ...)
myPt( mean(cushny), col = "pink3")
myPt(median(cushny), col = "light blue")
legend("topright", c("mean", "median"), lwd = 2, col = c("pink3", "light blue"), inset = .01)
## The 'sleep' data from the standard 'datasets' package:
d.sleep <- local({ gr <- with(datasets::sleep, split(extra, group))
                   gr[[2]] - gr[[1]] })
stopifnot(all.equal(cushny, sort(d.sleep), tolerance=1e-15))
} \keyword{datasets} robustbase/man/anova.glmrob.Rd0000644000176200001440000001032113713564014016053 0ustar liggesusers\name{anova.glmrob}
\alias{anova.glmrob}
\title{Analysis of Robust Quasi-Deviance for "glmrob" Objects}
\description{ Compute an analysis of robust quasi-deviance table for one or more generalized linear models fitted by \code{\link{glmrob}}. }
\usage{ \method{anova}{glmrob}(object, ..., test = c("Wald", "QD", "QDapprox")) }
\arguments{ \item{object, \dots}{objects of class \code{glmrob}, typically the result of a call to \code{\link{glmrob}}.} \item{test}{a character string specifying the test statistic to be used. (Partially) matching one of \code{"Wald"}, \code{"QD"} or \code{"QDapprox"}. See Details.} }
\details{ Specifying a single object gives a sequential analysis of the robust quasi-deviance table for that fit. That is, the reductions in the robust residual quasi-deviance as each term of the formula is added in turn are given as the rows of a table. \emph{(Currently not yet implemented.)}
If more than one object is specified, the table has a row for the residual quasi-degrees of freedom (however, this information is never used in the asymptotic tests). For all but the first model, the change in degrees of freedom and robust quasi-deviance is also given. (This only makes statistical sense if the models are nested.) It is conventional to list the models from smallest to largest, but this is up to the user. In addition, the table will contain test statistics and P values comparing the reduction in robust quasi-deviance for the model on each row to the model above it.
For all robust fitting methods, the \dQuote{Wald}-type test between two models can be applied (\code{test = "Wald"}).
When using Mallows or Huber type robust estimators (\code{method="Mqle"} in \code{\link{glmrob}}), there are additional test methods. One is the robust quasi-deviance test (\code{test = "QD"}), as described by Cantoni and Ronchetti (2001). The asymptotic distribution is approximated by a chi-square distribution. Another test (\code{test = "QDapprox"}) is based on a quadratic approximation of the robust quasi-deviance test statistic. Its asymptotic distribution is chi-square (see the reference).
The comparison between two or more models by \code{anova.glmrob} will only be valid if they are fitted to the same dataset and by the same robust fitting method using the same tuning constant \eqn{c} (\code{tcc} in \code{\link{glmrob}}). }
\value{ Basically, an object of class \code{\link{anova}} inheriting from class \code{\link{data.frame}}. }
\references{ E. Cantoni and E. Ronchetti (2001) Robust Inference for Generalized Linear Models. \emph{JASA} \bold{96} (455), 1022--1030.
E. Cantoni (2004) Analysis of Robust Quasi-deviances for Generalized Linear Models.
\emph{Journal of Statistical Software} \bold{10}, \url{https://www.jstatsoft.org/article/view/v010i04} } \author{ Andreas Ruckstuhl } \seealso{ \code{\link{glmrob}}, \code{\link{anova}}. %% %% \code{\link{drop1}} for %% so-called \sQuote{type II} anova where each term is dropped one at a %% time respecting their hierarchy. } \examples{ ## Binomial response ----------- data(carrots) Cfit2 <- glmrob(cbind(success, total-success) ~ logdose + block, family=binomial, data=carrots, method="Mqle", control=glmrobMqle.control(tcc=1.2)) summary(Cfit2) Cfit4 <- glmrob(cbind(success, total-success) ~ logdose * block, family=binomial, data=carrots, method="Mqle", control=glmrobMqle.control(tcc=1.2)) anova(Cfit2, Cfit4, test="Wald") anova(Cfit2, Cfit4, test="QD") anova(Cfit2, Cfit4, test="QDapprox") ## Poisson response ------------ data(epilepsy) Efit2 <- glmrob(Ysum ~ Age10 + Base4*Trt, family=poisson, data=epilepsy, method="Mqle", control=glmrobMqle.control(tcc=1.2,maxit=100)) summary(Efit2) Efit3 <- glmrob(Ysum ~ Age10 + Base4 + Trt, family=poisson, data=epilepsy, method="Mqle", control=glmrobMqle.control(tcc=1.2,maxit=100)) anova(Efit3, Efit2, test = "Wald") anova(Efit3, Efit2, test = "QD") ## trivial intercept-only-model: E0 <- update(Efit3, . ~ 1) anova(E0, Efit3, Efit2, test = "QDapprox") %% failed in robustbase <= 2013-11-27 } \keyword{robust} \keyword{models} \keyword{regression} robustbase/man/glmrob.control.Rd0000644000176200001440000000312612150222661016425 0ustar liggesusers\name{glmrob..control} \title{Controlling Robust GLM Fitting by Different Methods} \alias{glmrobMqle.control} \alias{glmrobMT.control} \alias{glmrobBY.control} \description{ These are auxiliary functions as user interface for \code{\link{glmrob}} fitting when the different methods, \code{"Mqle"}, \code{"BY"}, or \code{"MT"} are used. Typically only used when calling \code{\link{glmrob}}. } \usage{ glmrobMqle.control(acc = 1e-04, test.acc = "coef", maxit = 50, tcc = 1.345) glmrobBY.control (maxit = 1000, const = 0.5, maxhalf = 10) glmrobMT.control (cw = 2.1, nsubm = 500, acc = 1e-06, maxit = 200) } \arguments{ \item{acc}{positive convergence tolerance; the iterations converge when ???} \item{test.acc}{Only "coef" is currently implemented} \item{maxit}{integer giving the maximum number of iterations. } \item{tcc}{tuning constant c for Huber's psi-function} \item{const}{for "BY", the normalizing constant ..}% FIXME \item{maxhalf}{for "BY"; the number of halving steps when the gradient itself no longer improves. We have seen examples when increasing \code{maxhalf} was of relevance.} \item{cw}{tuning constant c for Tukey's biweight psi-function} \item{nsubm}{the number of subsamples to take for finding an initial estimate for \code{method = "MT"}.} } %% \details{ %% } \value{ A \code{\link{list}} with the arguments as components. } \author{Andreas Ruckstuhl and Martin Maechler} \seealso{\code{\link{glmrob}}} \examples{ str(glmrobMqle.control()) str(glmrobBY.control()) str(glmrobMT.control()) } \keyword{robust} \keyword{regression} \keyword{nonlinear} robustbase/man/foodstamp.Rd0000644000176200001440000000455113710054521015464 0ustar liggesusers\name{foodstamp} \title{Food Stamp Program Participation} \alias{foodstamp} \docType{data} \encoding{utf8} \description{ This data consists of 150 randomly selected persons from a survey with information on over 2000 elderly US citizens, where the response, indicates participation in the U.S. Food Stamp Program. 
} \usage{data(foodstamp, package="robustbase")} \format{ A data frame with 150 observations on the following 4 variables. \describe{ \item{\code{participation}}{participation in U.S. Food Stamp Program; yes = 1, no = 0} \item{\code{tenancy}}{tenancy, indicating home ownership; yes = 1, no = 0} \item{\code{suppl.income}}{supplemental income, indicating whether some form of supplemental security income is received; yes = 1, no = 0} \item{\code{income}}{monthly income (in US dollars)} } } \source{ Data description and first analysis: Stefanski et al.(1986) who indicate Rizek(1978) as original source of the larger study. Electronic version from CRAN package \CRANpkg{catdata}. % which wrongly labeled 'income' (='INC') as "log(1 + income)" } \references{ Rizek, R. L. (1978) The 1977-78 Nationwide Food Consumption Survey. \emph{Family Econ. Rev.}, Fall, 3--7. %% MM ~/save/papers/robust-GLM/Stefanski_etal-Biometrika-1986.pdf : Stefanski, L. A., Carroll, R. J. and Ruppert, D. (1986) Optimally bounded score functions for generalized linear models with applications to logistic regression. \emph{Biometrika} \bold{73}, 413--424. Künsch, H. R., Stefanski, L. A., Carroll, R. J. (1989) Conditionally unbiased bounded-influence estimation in general regression models, with applications to generalized linear models. \emph{J. American Statistical Association} \bold{84}, 460--466. } \examples{ data(foodstamp) (T123 <- xtabs(~ participation+ tenancy+ suppl.income, data=foodstamp)) summary(T123) ## ==> the binary var's are clearly not independent foodSt <- within(foodstamp, { logInc <- log(1 + income) rm(income) }) m1 <- glm(participation ~ ., family=binomial, data=foodSt) summary(m1) rm1 <- glmrob(participation ~ ., family=binomial, data=foodSt) summary(rm1) ## Now use robust weights.on.x : rm2 <- glmrob(participation ~ ., family=binomial, data=foodSt, weights.on.x = "robCov") summary(rm2)## aha, now the weights are different: which( weights(rm2, type="robust") < 0.5) } \keyword{datasets} robustbase/man/print.lmrob.Rd0000644000176200001440000000135411721663343015744 0ustar liggesusers\name{print.lmrob} \alias{print.lmrob} \title{Print Method for Objects of Class "lmrob"} \description{ Print method for elements of class \code{"lmrob"}. } \usage{ \method{print}{lmrob}(x, digits = max(3, getOption("digits") - 3), \dots) } \arguments{ \item{x}{an \R object of class \code{lmrob}, typically created by \code{\link{lmrob}}.} \item{digits}{number of digits for printing, see \code{digits} in \code{\link{options}}.} \item{\dots}{potentially more arguments passed to methods.} } \seealso{\code{\link{lmrob}}, \code{\link{summary.lmrob}}, \code{\link{print}} and \code{\link{summary}}. } \examples{ data(coleman) ( m1 <- lmrob(Y ~ ., data=coleman) ) # -> print.lmrob() method } \keyword{robust} \keyword{regression} robustbase/man/lmrob..D..fit.Rd0000644000176200001440000000362513212743621015706 0ustar liggesusers\name{lmrob..D..fit} \alias{lmrob..D..fit} \title{Compute Design Adaptive Scale estimate} \description{This function calculates a Design Adaptive Scale estimate for a given MM-estimate. This is supposed to be a part of a chain of estimates like \code{SMD} or \code{SMDM}. 
} \usage{
lmrob..D..fit(obj, x=obj$x, control = obj$control,
              mf = obj$model, method = obj$control$method)
}
\arguments{ \item{obj}{\code{lmrob}-object based on which the estimate is to be calculated.} \item{x}{the design matrix; if \code{\link{missing}}, the method tries to get it from \code{obj$x} and if this fails from \code{obj$model}.} \item{control}{list of control parameters, as returned by \code{\link{lmrob.control}}.} \item{mf}{unused and deprecated.} \item{method}{optional; the \code{method} used for \emph{obj} computation.} }
\details{ This function is used by \code{\link{lmrob.fit}} and is typically not to be used on its own. Note that \code{lmrob.fit()} specifies \code{control} potentially differently than the default, but does use the default for \code{method}. }
\value{The given \code{lmrob}-object with the following elements updated: \item{scale}{The Design Adaptive Scale estimate} \item{converged}{ \code{TRUE} if the scale calculation converged, \code{FALSE} otherwise.} }
\references{ Koller, M. and Stahel, W.A. (2011), Sharpening Wald-type inference in robust regression for small samples, \emph{Computational Statistics & Data Analysis} \bold{55}(8), 2504--2515. }
\seealso{ \code{\link{lmrob.fit}}, \code{\link{lmrob}} }
\examples{
data(stackloss)
## Compute manual SMD-estimate:
## 1) MM-estimate
m1 <- lmrob(stack.loss ~ ., data = stackloss)
## 2) Add Design Adaptive Scale estimate
m2 <- lmrob..D..fit(m1)
print(c(m1$scale, m2$scale))
summary(m1)
summary(m2) ## the covariance matrix estimate is also updated
}
\author{Manuel Koller}
\keyword{robust}
\keyword{regression}
robustbase/man/education.Rd0000644000176200001440000000255413312375575015451 0ustar liggesusers\name{education}
\alias{education}
\docType{data}
\title{Education Expenditure Data}
\description{ Education Expenditure Data, from Chatterjee and Price (1977, p.108). This data set, representing the education expenditure variables in the 50 US states, provides an interesting example of heteroscedasticity. }
\usage{data(education, package="robustbase")}
\format{ A data frame with 50 observations on the following 6 variables. \describe{ \item{\code{State}}{State} \item{\code{Region}}{Region (1=Northeastern, 2=North central, 3=Southern, 4=Western)} \item{\code{X1}}{Number of residents per thousand residing in urban areas in 1970} \item{\code{X2}}{Per capita personal income in 1973} \item{\code{X3}}{Number of residents per thousand under 18 years of age in 1974} \item{\code{Y}}{Per capita expenditure on public education in a state, projected for 1975} } }
\source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.110, table 16. }
\examples{
data(education)
education.x <- data.matrix(education[, 3:5])
%%covMcd(education.x)
%%______ FIXME ___ X1:X3 is not what you mean !! ________
summary(lm.education <- lm(Y ~ Region + X1+X2+X3, data=education))
%%summary(lts.education <- ltsReg(Y .....)
## See example(lmrob.M.S) # for how robust regression is used
}
\keyword{datasets}
robustbase/man/BYlogreg.Rd0000644000176200001440000001102413713564014015201 0ustar liggesusers\name{BYlogreg}
\alias{BYlogreg}
\title{Bianco-Yohai Estimator for Robust Logistic Regression}
\encoding{utf8}
\description{ Computation of the estimator of Bianco and Yohai (1996) in logistic regression. Now provides both the \emph{weighted} and regular (unweighted) BY-estimator.
By default, an intercept term is included and p parameters are estimated. For more details, see the reference.
Note: This function is for \dQuote{back-compatibility} with the \code{BYlogreg()} code web-published at KU Leuven, Belgium,
% moved to ? -- but NA (404) on 2017-09-26: % --> ../R/BYlogreg.R
% \url{http://feb.kuleuven.be/public/NDBAE06/programs/roblog/};
% now, findable at
% http://feb.kuleuven.be/public/u0017833/software_donotuse/logreg/BYlogreg.txt
% but rather use Wiley's book resources
and also available as file \file{FunctionsRob/BYlogreg.ssc} from \url{https://www.wiley.com/legacy/wileychi/robust_statistics/robust.html}.
However, instead of using this function, the recommended interface is \code{\link{glmrob}(*, method = "BY")} or \code{... method = "WBY" ..}, see \code{\link{glmrob}}. }
\usage{
BYlogreg(x0, y, initwml = TRUE, addIntercept = TRUE,
         const = 0.5, kmax = 1000, maxhalf = 10, sigma.min = 1e-4,
         trace.lev = 0)
}
\arguments{ \item{x0}{a numeric \eqn{n \times (p-1)}{n * (p-1)} matrix containing the explanatory variables.} \item{y}{numeric \eqn{n}-vector of binomial (0 - 1) responses.} \item{initwml}{logical for selecting one of the two possible methods for computing the initial value of the optimization process. If \code{initwml} is true (default), a weighted ML estimator is computed with weights derived from the MCD estimator computed on the explanatory variables. If \code{initwml} is false, a classical ML fit is performed.
When the explanatory variables contain binary observations, it is recommended to set \code{initwml} to \code{FALSE} or to modify the code of the algorithm to compute the weights only on the continuous variables. }
\item{addIntercept}{logical indicating that a column of \code{1}s must be added to the \eqn{x} matrix.}
\item{const}{tuning constant used in the computation of the estimator (default=0.5).}
\item{kmax}{maximum number of iterations before convergence (default=1000).}
\item{maxhalf}{maximal number of step-halving steps (default=10).}
\item{sigma.min}{smallest value of the scale parameter before implosion (and hence non-convergence) is assumed.}
\item{trace.lev}{logical (or integer) indicating if intermediate results should be printed; defaults to \code{0} (the same as \code{FALSE}).} }
%% \details{
%% If necessary, more details than the description above
%% }
\value{ a list with components \item{convergence}{logical indicating if convergence was achieved} \item{objective}{the value of the objective function at the minimum} \item{coefficients}{vector of parameter estimates} \item{vcov}{variance-covariance matrix of the coefficients (if convergence is TRUE).} \item{sterror}{standard errors, i.e., simply \code{sqrt(diag(.$vcov))}, if convergence.} }
\references{ Croux, C., and Haesbroeck, G. (2003) Implementing the Bianco and Yohai estimator for Logistic Regression, \emph{Computational Statistics and Data Analysis} \bold{44}, 273--295.
Ana M. Bianco and Víctor J. Yohai (1996) Robust estimation in the logistic regression model. In Helmut Rieder, \emph{Robust Statistics, Data Analysis, and Computer Intensive Methods}, Lecture Notes in Statistics \bold{109}, pages 17--34. }
\author{ Originally, Christophe Croux and Gentiane Haesbroeck, with thanks to Kristel Joossens and Valentin Todorov for improvements. Speedup, tweaks, more \dQuote{control} arguments: Martin Maechler. }
\seealso{ The more typical way to compute BY-estimates (via \code{\link{formula}} and methods): \code{\link{glmrob}(*, method = "WBY")} and \code{.. method = "BY"}.
} \examples{
set.seed(17)
x0 <- matrix(rnorm(100,1))
y <- rbinom(100, size=1, prob= 0.5) # ~= as.numeric(runif(100) > 0.5)
BY <- BYlogreg(x0,y)
BY <- BYlogreg(x0,y, trace.lev=TRUE)
## The "Vaso Constriction" aka "skin" data:
data(vaso)
vX <- model.matrix( ~ log(Volume) + log(Rate), data=vaso)
vY <- vaso[,"Y"]
head(cbind(vX, vY))# 'X' does include the intercept
vWBY <- BYlogreg(x0 = vX, y = vY, addIntercept=FALSE) # as 'vX' has it already
v.BY <- BYlogreg(x0 = vX, y = vY, addIntercept=FALSE, initwml=FALSE)
## they are relatively close:
stopifnot( all.equal(vWBY, v.BY, tolerance = 2e-4) )
}
\keyword{robust}
\keyword{regression}
\keyword{nonlinear}
robustbase/man/plot-methods.Rd0000644000176200001440000000360012140441054016102 0ustar liggesusers\name{plot-methods}
\docType{methods}
\alias{plot-methods}
\alias{plot,psi_func-method}
\title{Plot an Object of the "Psi Function" Class}
\description{ The \code{\link{plot}} method for objects of class \code{\linkS4class{psi_func}} simply visualizes the \eqn{\rho()}, \eqn{\psi()}, and weight functions and their derivatives. }
\usage{
\S4method{plot}{psi_func}(x, y, which = c("rho", "psi", "Dpsi", "wgt", "Dwgt"),
     main = "full", % shortMain = FALSE,
     col = c("black", "red3", "blue3", "dark green", "light green"),
     leg.loc = "right", ...)
}
\arguments{ \item{x}{object of class \code{\linkS4class{psi_func}} to be plotted} \item{y}{(optional) vector of abscissa values (to plot object at).} \item{which}{\code{\link{character}} vector of slots to be included in plot; by default, all of the slots are included} \item{main}{string or logical indicating the kind of plot title; either \code{"full"}, \code{"short"} or \code{FALSE} which chooses a full, a short or no main title at all.}
%% \item{shortMain}{use short or long plot title}%-------- deprecated!
\item{col}{colors to be used for the different slots} \item{leg.loc}{legend placement, see also \code{x} argument of \code{\link{legend}}} \item{...}{passed to \code{\link{matplot}}} }
\note{ An earlier version had argument \code{shortMain} which is deprecated now. Use \code{main = "short"} instead of \code{shortMain = TRUE}. If you want to specify your own title, use \code{main=FALSE}, and a subsequent \code{\link{title}(...)} call. }
\seealso{ \code{\link{psiFunc}()} and the \code{\link{class}} \code{\linkS4class{psi_func}}. }
\examples{
plot(huberPsi)
plot(huberPsi, which=c("psi", "Dpsi", "wgt"), main="short", leg = "topleft")
plot(hampelPsi)
## Plotting aspect ratio = 1:1 :
plot(hampelPsi, asp=1, main="short", which = c("psi", "Dpsi", "wgt", "Dwgt"))
}
\keyword{methods}
robustbase/man/h.alpha.n.Rd0000644000176200001440000000251510614156473015246 0ustar liggesusers\name{h.alpha.n}
\alias{h.alpha.n}
\title{Compute h, the subsample size for MCD and LTS}
\description{ Compute h(alpha) which is the size of the subsamples to be used for MCD and LTS. Given \eqn{\alpha = alpha}{alpha}, \eqn{n} and \eqn{p}, \eqn{h} is an \emph{integer}, \eqn{h \approx \alpha n}{h ~= alpha*n}, where the exact formula also depends on \eqn{p}.
For \eqn{\alpha = 1/2}, \code{h == floor((n+p+1)/2)}; for the general case, it's simply \code{n2 <- (n+p+1) \%/\% 2; floor(2*n2 - n + 2*(n-n2)*alpha)}.
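For example, as a quick worked check of this formula: with \eqn{\alpha = 3/4}, \eqn{n = 100} and \eqn{p = 5}, one gets \code{n2 = 53} and hence \code{h = floor(2*53 - 100 + 2*47*0.75) = floor(76.5) = 76}, i.e., \code{h.alpha.n(3/4, 100, 5)} gives \code{76}.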
} \usage{ h.alpha.n(alpha, n, p) } \arguments{ \item{alpha}{fraction, numeric (vector) in [0.5, 1], see, e.g., \code{\link{covMcd}}.} \item{n}{integer (valued vector), the sample size.} \item{p}{integer (valued vector), the dimension.} } \value{ numeric vector of \eqn{h(\alpha, n,p)}; when any of the arguments of length greater than one, the usual \R arithmetic (recycling) rules are used. } \seealso{\code{\link{covMcd}} and \code{\link{ltsReg}} which are \emph{defined} by \eqn{h = h(\alpha,n,p)} and hence both use \code{h.alpha.n}. } \examples{ n <- c(10:20,50,100) p <- 5 ## show the simple "alpha = 1/2" case: cbind(n=n, h= h.alpha.n(1/2, n, p), n2p = floor((n+p+1)/2)) ## alpha = 3/4 is recommended by some authors : n <- c(15, 20, 25, 30, 50, 100) cbind(n=n, h= h.alpha.n(3/4, n, p = 6)) } \keyword{arith} robustbase/man/lmrob.S.Rd0000644000176200001440000000663013212743621015007 0ustar liggesusers\name{lmrob.S} \alias{lmrob.S} \title{ S-regression estimators } \description{ Computes an S-estimator for linear regression, using the \dQuote{fast S} algorithm.% of Matias Salibian & Victor Yohai ... } \usage{ lmrob.S(x, y, control, trace.lev = control$trace.lev, only.scale = FALSE, mf = NULL) } \arguments{ \item{x}{design matrix (\eqn{n \times p}{n * p})} \item{y}{numeric vector of responses (or residuals for \code{only.scale=TRUE}).} \item{control}{ list as returned by \code{\link{lmrob.control}} } \item{trace.lev}{integer indicating if the progress of the algorithm should be traced (increasingly); default \code{trace.lev = 0} does no tracing.} \item{only.scale}{\code{\link{logical}} indicating if only the scale of \code{y} should be computed. In this case, \code{y} will typically contain \emph{residuals}.}%% FIXME: explain %% namely, s, fulfilling the \sum_i chi(i) = 1/2 equation. \item{mf}{unused and deprecated.} } \details{ This function is used by \code{\link{lmrob.fit}} and typically not to be used on its own (because an S-estimator has too low efficiency \sQuote{on its own}). By default, the subsampling algorithm uses a customized LU decomposition which ensures a non singular subsample (if this is at all possible). This makes the Fast-S algorithm also feasible for categorical and mixed continuous-categorical data. One can revert to the old subsampling scheme by setting the parameter \code{subsampling} in \code{control} to \code{"simple"}. } \value{ By default (when \code{only.scale} is false), a list with components \item{coefficients}{numeric vector (length \eqn{p}) of S-regression coefficient estimates.} \item{scale}{the S-scale residual estimate}% 'residual estimate' ?? % resid. VAR !? % \item{cov}{covariance matrix (\eqn{p \times p}{p x p}) of the % coefficient estimates.} \item{fitted.values}{numeric vector (length \eqn{n}) of the fitted values.} \item{residuals}{numeric vector (length \eqn{n}) of the residuals.} \item{rweights}{numeric vector (length \eqn{n}) of the robustness weights.} \item{k.iter}{(maximal) number of refinement iterations used.} \item{converged}{logical indicating if \bold{all} refinement iterations had converged.} \item{control}{the same list as the \code{control} argument.} If \code{only.scale} is true, the computed scale (a number) is returned. } \seealso{\code{\link{lmrob}}, also for references. } \author{ Matias Salibian-Barrera and Manuel Koller; Martin Maechler for minor new options and more documentation. } \examples{ set.seed(33) x1 <- sort(rnorm(30)); x2 <- sort(rnorm(30)); x3 <- sort(rnorm(30)) X. <- cbind(x1, x2, x3) y <- 10 + X. 
\%*\% (10*(2:4)) + rnorm(30)/10 y[1] <- 500 # a moderate outlier X.[2,1] <- 20 # an X outlier X1 <- cbind(1, X.) (m.lm <- lm(y ~ X.)) set.seed(12) m.lmS <- lmrob.S(x=X1, y=y, control = lmrob.control(nRes = 20), trace.lev=1) m.lmS[c("coefficients","scale")] all.equal(unname(m.lmS$coef), 10 * (1:4), tolerance = 0.005) stopifnot(all.equal(unname(m.lmS$coef), 10 * (1:4), tolerance = 0.005), all.equal(m.lmS$scale, 1/10, tolerance = 0.09)) ## only.scale = TRUE: Compute the S scale, given residuals; s.lmS <- lmrob.S(X1, y=residuals(m.lmS), only.scale = TRUE, control = lmrob.control(trace.lev = 3)) all.equal(s.lmS, m.lmS$scale) # close: 1.89e-6 [64b Lnx] } \keyword{robust} \keyword{regression} robustbase/man/los.Rd0000644000176200001440000000302513312375575014275 0ustar liggesusers\name{los} \alias{los} \docType{data} \title{ Length of Stay Data } \description{ Length of stay for 201 patients that stayed at the University Hospital of Lausanne during the year 2000. } \usage{data(los, package="robustbase")} \format{ Vector of integer values giving the length of stay (days): int [1:201] 16 13 17 4 15 24 59 18 33 8 ... } \details{ These data may be used to estimate and predict the total resource consumption of this group of patients. Cf. Ruffieux, Paccaud and Marazzi (2000). } \source{ The data were kindly provided by A. Marazzi. Cf. Hubert, M. and Vandervieren, E. (2006), p. 13--15. } \references{ Ruffieux, C., Paccaud, F. and A. Marazzi (2000) Comparing rules for truncating hospital length of stay; \emph{Casemix Quarterly} \bold{2}, n. 1. See also those for \code{\link{adjbox}}. } \examples{ summary(los) # quite skewed, with median(.) = 8 plot(table(los)) boxplot(los, horizontal=TRUE, add=TRUE, col = "red", axes=FALSE) ##-> "outliers" instead of "just skewed" hist(log(los)) boxplot(log(los), add=TRUE, col=2, border=2, horizontal = TRUE, at = -1) ## Hubert and Vandervieren (2006), p. 15, Fig. 11. adjbox(los, col = "gray", staplecol="red", outcol = "red", main = "(Skewness-)Adjusted and original boxplot for 'los' data") boxplot(los, add = TRUE, staplewex= 0.2, outcex= 0.5, outpch= 4, staplecol = "blue", outcol = "blue", staplelwd=2) legend("topright", c("adjbox(los)", "boxplot(los)"), col=c("red","blue"), lwd = 1:2, bty="n") } \keyword{datasets} robustbase/man/telef.Rd0000644000176200001440000000135513312375575014603 0ustar liggesusers\name{telef} \alias{telef} \docType{data} \title{Number of International Calls from Belgium} \description{ Number of international calls from Belgium, taken from the Belgian Statistical Survey, published by the Ministry of Economy. } \usage{data(telef, package="robustbase")} \format{ A data frame with 24 observations on the following 2 variables. \describe{ \item{\code{Calls}}{Number of Calls (in tens of millions) } \item{\code{Year}}{Year (1950 - 1973)} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, page 26, table 2. } \examples{ data(telef) summary(lm.telef <- lm(Year~., data=telef)) %%summary(lts.telef <- ltsReg(Year~., data=telef)) } \keyword{datasets} robustbase/man/cloud.Rd0000644000176200001440000000142513312375575014610 0ustar liggesusers\name{cloud} \alias{cloud} \docType{data} \title{Cloud point of a Liquid} \description{ This data set contains the measurements concerning the cloud point of a Liquid, from Draper and Smith (1969). The cloud point is a measure of the degree of crystallization in a stock. 
} \usage{data(cloud, package="robustbase")} \format{ A data frame with 19 observations on the following 2 variables. \describe{ \item{\code{Percentage}}{Percentage of I-8} \item{\code{CloudPoint}}{Cloud point} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, p.96, table 10. } \examples{ data(cloud) summary(lm.cloud <- lm(CloudPoint ~., data=cloud)) %% summary(lts.cloud <- ltsReg(CloudPoint ~., data=cloud)) } \keyword{datasets} robustbase/man/aircraft.Rd0000644000176200001440000000202713312375575015274 0ustar liggesusers\name{aircraft} \alias{aircraft} \docType{data} \title{Aircraft Data} \description{ Aircraft Data, deals with 23 single-engine aircraft built over the years 1947-1979, from Office of Naval Research. The dependent variable is cost (in units of \$100,000) and the explanatory variables are aspect ratio, lift-to-drag ratio, weight of plane (in pounds) and maximal thrust. } \usage{data(aircraft, package="robustbase")} \format{ A data frame with 23 observations on the following 5 variables. \describe{ \item{\code{X1}}{Aspect Ratio} \item{\code{X2}}{Lift-to-Drag Ratio} \item{\code{X3}}{Weight} \item{\code{X4}}{Thrust} \item{\code{Y}}{Cost} } } \source{ P. J. Rousseeuw and A. M. Leroy (1987) \emph{Robust Regression and Outlier Detection}; Wiley, page 154, table 22. } \examples{ data(aircraft) summary( lm.airc <- lm(Y ~ ., data = aircraft)) summary(rlm.airc <- MASS::rlm(Y ~ ., data = aircraft)) aircraft.x <- data.matrix(aircraft[,1:4]) c_air <- covMcd(aircraft.x) c_air } \keyword{datasets} robustbase/man/mc.Rd0000644000176200001440000001103613762451710014073 0ustar liggesusers\name{mc} \alias{mc} \title{Medcouple, a Robust Measure of Skewness} \description{ Compute the \sQuote{medcouple}, a \emph{robust} concept and estimator of skewness. The medcouple is defined as a scaled median difference of the left and right half of distribution, and hence \emph{not} based on the third moment as the classical skewness. } \usage{ mc(x, na.rm = FALSE, doReflect = (length(x) <= 100), doScale = TRUE, # <- chg default to 'FALSE' ? eps1 = 1e-14, eps2 = 1e-15, # << new in 0.93-2 (2018-07..) maxit = 100, trace.lev = 0, full.result = FALSE) } \arguments{ \item{x}{a numeric vector} \item{na.rm}{logical indicating how missing values (\code{\link{NA}}s) should be dealt with.} \item{doReflect}{logical indicating if the internal MC should also be computed on the \emph{reflected} sample \code{-x}, with final result \code{(mc.(x) - mc.(-x))/2}. This makes sense since the internal MC, \code{mc.()} computes the himedian() which can differ slightly from the median.}%% only whenever sum(x <= med) * sum(x >= med) is even \item{doScale}{logical indicating if the internal algorithm should also \emph{scale} the data (using the most distant value from the median which is unrobust and numerically dangerous); scaling has been the hardwired default in the original algorithm and in \R's \code{mc()} till summer 2018.} \item{eps1, eps2}{tolerance in the algorithm; \code{eps1} is used as a for convergence tolerance, where \code{eps2} is only used in the internal \code{h_kern()} function to prevent underflow to zero, so could be considerably smaller. The original code implicitly \emph{hard coded} in C \code{eps1 := eps2 := 1e-13}; only change with care!} \item{maxit}{maximal number of iterations; typically a few should be sufficient.} \item{trace.lev}{integer specifying how much diagnostic output the algorithm (in C) should produce. 
No output by default, most output for \code{trace.lev = 5}.}
\item{full.result}{logical indicating if the full return values (from C) should be returned as a list via \code{attr(*, "mcComp")}.} }
% \details{
% ~~ If necessary, more details than the description above ~~
% }
\value{ a number between -1 and 1, which is the medcouple, \eqn{MC(x)}.
For \code{r <- mc(x, full.result = TRUE, ....)}, \code{attr(r, "mcComp")} is a list with components \item{medc}{the medcouple \eqn{mc.(x)}.} \item{medc2}{the medcouple \eqn{mc.(-x)} if \code{doReflect=TRUE}.} \item{eps}{tolerances used.} \item{iter,iter2}{number of iterations used.} \item{converged,converged2}{logical specifying \dQuote{convergence}.} }
\section{Convergence Problems}{ For extreme cases there \emph{are} convergence problems. Some of them can be alleviated by \dQuote{loosening} the tolerances \code{eps1} and \code{eps2}. \cr
For others, with peculiar values, notably many almost-ties with the median, it can help greatly to replace \code{mc(x, *)} by \code{mc(jitter(x), *)},
% MM: see also ~/R/MM/Pkg-ex/robustbase/Robnik-mc.R
or also just \code{mc(signif(x), *)}. %
Also, the algorithm not only centers the data around the median but also scales them by the extremes, which may have a negative effect, e.g., when changing an extreme outlier to an even more extreme one, the result changes wrongly; see the 'mc10x' example. }
\references{ Guy Brys, Mia Hubert and Anja Struyf (2004) A Robust Measure of Skewness; \emph{JCGS} \bold{13} (4), 996--1017.
Hubert, M. and Vandervieren, E. (2008). An adjusted boxplot for skewed distributions, \emph{Computational Statistics and Data Analysis} \bold{52}, 5186--5201. }
\author{Guy Brys; modifications by Tobias Verbeke and bug fixes and extensions by Manuel Koller and Martin Maechler. }
\seealso{\code{\link{Qn}} for a robust measure of scale (aka \dQuote{dispersion}), .... }
\examples{
mc(1:5) # 0 for a symmetric sample
x1 <- c(1, 2, 7, 9, 10)
mc(x1) # = -1/3
data(cushny)
mc(cushny) # 0.125
stopifnot(mc(c(-20, -5, -2:2, 5, 20)) == 0,
          mc(x1, doReflect=FALSE) == -mc(-x1, doReflect=FALSE),
          all.equal(mc(x1, doReflect=FALSE), -1/3, tolerance = 1e-12))
## Susceptibility of the current algorithm to large outliers :
dX10 <- function(X) c(1:5,7,10,15,25, X) # generate skewed size-10 with 'X'
x <- c(10,20,30, 100^(1:20))
(mc10x <- vapply(x, function(X) mc(dX10(X)), 1))
## limit X -> Inf should be 7/12 = 0.58333... but that "breaks down a bit" :
plot(x, mc10x, type="b", main = "mc( c(1:5,7,10,15,25, X) )", xlab="X", log="x")
}
\keyword{robust}
\keyword{univar}
robustbase/man/vaso.Rd0000644000176200001440000000420213312375575014446 0ustar liggesusers\name{vaso}
\alias{vaso}
\docType{data}
\title{Vaso Constriction Skin Data Set}
\description{ Finney's data on vaso constriction in the skin of the digits. }
\usage{data(vaso, package="robustbase")}
\format{ A data frame with 39 observations on the following 3 variables. \describe{ \item{\code{Volume}}{Inhaled volume of air} \item{\code{Rate}}{Rate of inhalation} \item{\code{Y}}{vector of 0 or 1 values.} } }
\details{The data taken from Finney (1947) were obtained in a carefully controlled study in human physiology where a reflex \dQuote{vaso constriction} may occur in the skin of the digits after taking a single deep breath. The response y is the occurrence (y = 1) or non-occurrence (y = 0) of vaso constriction in the skin of the digits of a subject after he or she inhaled a certain volume of air at a certain rate. The responses of three subjects are available.
The first contributed 9 responses, the second contributed 8 responses, and the third contributed 22 responses. Although the data represent repeated measurements, an analysis that assumes independent observations may be applied, as claimed by Pregibon (1981). } \source{ Finney, D.J. (1947) The estimation from individual records of the relationship between dose and quantal response. \emph{Biometrika} \bold{34}, 320--334 } \references{ Atkinson, A.C. and Riani, M. (2000) \emph{Robust Diagnostic Regression Analysis}, First Edition. New York: Springer, Table A.23. Fahrmeir, L. and Tutz, G. (2001) \emph{Multivariate Statistical Modelling Based on Generalized Linear Models}, Springer, Table 4.2. Kuensch, H.R., Stefanski, A. and Carrol, R.J. (1989) Conditionally unbiased bounded influence estimation in general regression models, with applications to generalized linear models, \emph{JASA} \bold{84}, 460--466. Pregibon, D. (1981) Logistic regression diagnostics, \emph{Annals of Statistics} \bold{9}, 705--724. } \examples{ data(vaso) str(vaso) pairs(vaso) glmV <- glm(Y ~ log(Volume) + log(Rate), family=binomial, data=vaso) summary(glmV) ## --> example(glmrob) showing classical & robust GLM } \keyword{datasets} robustbase/man/glmrob.Rd0000644000176200001440000003276513713564014014770 0ustar liggesusers\name{glmrob} \alias{glmrob} \title{Robust Fitting of Generalized Linear Models} \encoding{utf8} \description{ \code{glmrob} is used to fit generalized linear models by robust methods. The models are specified by giving a symbolic description of the linear predictor and a description of the error distribution. Currently, robust methods are implemented for \code{\link{family} = binomial}, \code{ = poisson}, \code{ = Gamma} and \code{ = gaussian}. } \usage{ glmrob(formula, family, data, weights, subset, na.action, start = NULL, offset, method = c("Mqle", "BY", "WBY", "MT"), weights.on.x = c("none", "hat", "robCov", "covMcd"), control = NULL, model = TRUE, x = FALSE, y = TRUE, contrasts = NULL, trace.lev = 0, ...) } \arguments{ \item{formula}{a \code{\link{formula}}, i.e., a symbolic description of the model to be fit (cf. \code{\link{glm}} or \code{\link{lm}}).} \item{family}{a description of the error distribution and link function to be used in the model. This can be a character string naming a family function, a family \code{\link{function}} or the result of a call to a family function. (See \code{\link{family}} for details of family functions.)} \item{data}{an optional data frame containing the variables in the model. If not found in \code{data}, the variables are taken from \code{environment(formula)}, typically the environment from which \code{glmrob} is called.} \item{weights}{an optional vector of weights to be used in the fitting process.} \item{subset}{an optional vector specifying a subset of observations to be used in the fitting process.} \item{na.action}{a function which indicates what should happen when the data contain \code{NA}s. The default is set by the \code{na.action} setting in \code{\link{options}}. The \dQuote{factory-fresh} default is \code{\link{na.omit}}.} \item{start}{starting values for the parameters in the linear predictor. Note that specifying \code{start} has somewhat different meaning for the different \code{method}s. 
Notably, for \code{"MT"}, this skips the expensive computation of initial estimates via sub samples, but needs to be \emph{robust} itself.} \item{offset}{this can be used to specify an \emph{a priori} known component to be included in the linear predictor during fitting.} \item{method}{a character string specifying the robust fitting method. The details of method specification are given below.} \item{weights.on.x}{ a character string (can be abbreviated), a \code{\link{function}} or \code{\link{list}} (see below), or a numeric vector of length \code{n}, specifying how points (potential outliers) in x-space are downweighted. If \code{"hat"}, weights on the design of the form \eqn{\sqrt{1-h_{ii}}} are used, where \eqn{h_{ii}} are the diagonal elements of the hat matrix. If \code{"robCov"}, weights based on the robust Mahalanobis distance of the design matrix (intercept excluded) are used where the covariance matrix and the centre is estimated by \code{\link[MASS]{cov.rob}} from the package \CRANpkg{MASS}.\cr Similarly, if \code{"covMcd"}, robust weights are computed using \code{\link{covMcd}}. The default is \code{"none"}. If \code{weights.on.x} is a \code{\link{function}}, it is called with arguments \code{(X, intercept)} and must return an n-vector of non-negative weights. If it is a \code{\link{list}}, it must be of length one, and as element contain a function much like \code{\link{covMcd}()} or \code{\link[MASS]{cov.rob}()} (package \CRANpkg{MASS}), which computes multivariate location and \dQuote{scatter} of a data matrix \code{X}. } \item{control}{a list of parameters for controlling the fitting process. See the documentation for \code{\link{glmrobMqle.control}} for details.} \item{model}{a logical value indicating whether \emph{model frame} should be included as a component of the returned value.} \item{x, y}{logical values indicating whether the response vector and model matrix used in the fitting process should be returned as components of the returned value.} \item{contrasts}{an optional list. See the \code{contrasts.arg} of \code{model.matrix.default}.} \item{trace.lev}{logical (or integer) indicating if intermediate results should be printed; defaults to \code{0} (the same as \code{FALSE}).} \item{\dots}{arguments passed to \code{\link{glmrobMqle.control}} when \code{control} is \code{NULL} (as per default).} } \details{ \code{method="model.frame"} returns the \code{\link{model.frame}()}, the same as \code{\link{glm}()}. \cr \code{method="Mqle"} fits a generalized linear model using Mallows or Huber type robust estimators, as described in Cantoni and Ronchetti (2001) and Cantoni and Ronchetti (2006). In contrast to the implementation described in Cantoni (2004), the pure influence algorithm is implemented. \cr \code{method="WBY"} and \code{method="BY"}, available for logistic regression (\code{family = binomial}) only, call \code{\link{BYlogreg}(*, initwml= . )} for the (weighted) Bianco-Yohai estimator, where \code{initwml} is true for \code{"WBY"}, and false for \code{"BY"}. \cr \code{method="MT"}, currently only implemented for \code{family = poisson}, computes an \dQuote{[M]-Estimator based on [T]ransformation}, by Valdora and Yohai (2013), via (hidden internal) \code{glmrobMT()}; as that uses \code{\link{sample}()}, from \R version 3.6.0 it depends on \code{\link{RNGkind}(*, sample.kind)}. 
Exact reproducibility of results from \R versions 3.5.3 and earlier requires setting \code{\link{RNGversion}("3.5.0")}.% same text in ./adjOutlyingness.Rd
\code{weights.on.x= "robCov"} makes sense if all explanatory variables are continuous. In the cases where \code{weights.on.x} is \code{"covMcd"} or \code{"robCov"}, or a list with a \dQuote{robCov} function, the Mahalanobis distances \code{D^2} are computed with respect to the covariance (location and scatter) estimate, and the weights are \code{1/sqrt(1+ pmax.int(0, 8*(D2 - p)/sqrt(2*p)))}, where \code{D2 = D^2} and \code{p = ncol(X)}. }
\value{ \code{glmrob} returns an object of class \code{"glmrob"} which also inherits from \code{\link{glm}}. \cr
The \code{\link{summary}} method, see \code{\link{summary.glmrob}}, can be used to obtain or print a summary of the results. \cr
The generic accessor functions \code{\link{coefficients}}, \code{effects}, \code{fitted.values} and \code{residuals} (see \code{\link{residuals.glmrob}}) can be used to extract various useful features of the value returned by \code{glmrob()}.
An object of class \code{"glmrob"} is a list with at least the following components:
\item{coefficients}{a named vector of coefficients}
\item{residuals}{the \emph{working} residuals, that is the (robustly \dQuote{huberized}) residuals in the final iteration of the IWLS fit.}
\item{fitted.values}{the fitted mean values, obtained by transforming the linear predictors by the inverse of the link function.}
\item{w.r}{robustness weights for each observation; i.e., \code{residuals} \eqn{\times}{*} \code{w.r} equals the psi-function of the Pearson residuals.}
\item{w.x}{weights used to down-weight observations based on the position of the observation in the design space.}
\item{dispersion}{robust estimate of the dispersion parameter, if appropriate}
\item{cov}{the estimated asymptotic covariance matrix of the estimated coefficients.}
\item{tcc}{the tuning constant c in Huber's psi-function.}
\item{family}{the \code{\link{family}} object used.}
\item{linear.predictors}{the linear fit on link scale.}
\item{deviance}{NULL; exists for compatibility reasons.}
\item{iter}{the number of iterations used by the influence algorithm.}
\item{converged}{logical. Was the IWLS algorithm judged to have converged?}
\item{call}{the matched call.}
\item{formula}{the formula supplied.}
\item{terms}{the \code{\link{terms}} object used.}
\item{data}{the \code{data} argument.}
\item{offset}{the offset vector used.}
\item{control}{the value of the \code{control} argument used.}
\item{method}{the name of the robust fitter function used.}
\item{contrasts}{(where relevant) the contrasts used.}
\item{xlevels}{(where relevant) a record of the levels of the factors used in fitting.}
%% FIXME: This is for glm() -- but *not* (yet ??) for glmrob()
%% ----- should we change?
% If a \code{\link{binomial}} \code{glm} model was specified by giving a
% two-column response, the weights returned by \code{prior.weights} are
% the total numbers of cases (multiplied by the supplied case weights) and
% the component \code{y} of the result is the proportion of successes.
}
\references{ Eva Cantoni and Elvezio Ronchetti (2001) Robust Inference for Generalized Linear Models. \emph{JASA} \bold{96} (455), 1022--1030.
Eva Cantoni (2004) Analysis of Robust Quasi-deviances for Generalized Linear Models.
\emph{Journal of Statistical Software}, \bold{10}, \url{https://www.jstatsoft.org/article/view/v010i04} Eva Cantoni and Elvezio Ronchetti (2006) A robust approach for skewed and heavy-tailed outcomes in the analysis of health care expenditures. \emph{Journal of Health Economics} \bold{25}, 198--213. S. Heritier, E. Cantoni, S. Copt, M.-P. Victoria-Feser (2009) \emph{Robust Methods in Biostatistics}. Wiley Series in Probability and Statistics. Marina Valdora and Víctor J. Yohai (2013) Robust estimators for Generalized Linear Models. In progress. } \author{Andreas Ruckstuhl ("Mqle") and Martin Maechler} %%\note{ } \seealso{ \code{\link{predict.glmrob}} for prediction; \code{\link{glmrobMqle.control}} } \examples{ ## Binomial response -------------- data(carrots) Cfit1 <- glm(cbind(success, total-success) ~ logdose + block, data = carrots, family = binomial) summary(Cfit1) Rfit1 <- glmrob(cbind(success, total-success) ~ logdose + block, family = binomial, data = carrots, method= "Mqle", control= glmrobMqle.control(tcc=1.2)) summary(Rfit1) Rfit2 <- glmrob(success/total ~ logdose + block, weights = total, family = binomial, data = carrots, method= "Mqle", control= glmrobMqle.control(tcc=1.2)) coef(Rfit2) ## The same as Rfit1 ## Binary response -------------- data(vaso) Vfit1 <- glm(Y ~ log(Volume) + log(Rate), family=binomial, data=vaso) coef(Vfit1) Vfit2 <- glmrob(Y ~ log(Volume) + log(Rate), family=binomial, data=vaso, method="Mqle", control = glmrobMqle.control(tcc=3.5)) coef(Vfit2) # c = 3.5 ==> not much different from classical ## Note the problems with tcc <= 3 %% FIXME algorithm ??? Vfit3 <- glmrob(Y ~ log(Volume) + log(Rate), family=binomial, data=vaso, method= "BY") coef(Vfit3)## note that results differ much. ## That's not unreasonable however, see Kuensch et al.(1989), p.465 ## Poisson response -------------- data(epilepsy) Efit1 <- glm(Ysum ~ Age10 + Base4*Trt, family=poisson, data=epilepsy) summary(Efit1) Efit2 <- glmrob(Ysum ~ Age10 + Base4*Trt, family = poisson, data = epilepsy, method= "Mqle", control = glmrobMqle.control(tcc= 1.2)) summary(Efit2) ## 'x' weighting: (Efit3 <- glmrob(Ysum ~ Age10 + Base4*Trt, family = poisson, data = epilepsy, method= "Mqle", weights.on.x = "hat", control = glmrobMqle.control(tcc= 1.2))) try( # gives singular cov matrix: 'Trt' is binary factor --> # affine equivariance and subsampling are problematic Efit4 <- glmrob(Ysum ~ Age10 + Base4*Trt, family = poisson, data = epilepsy, method= "Mqle", weights.on.x = "covMcd", control = glmrobMqle.control(tcc=1.2, maxit=100)) ) ##--> See example(possumDiv) for another Poisson-regression ### -------- Gamma family -- data from example(glm) --- clotting <- data.frame( u = c(5,10,15,20,30,40,60,80,100), lot1 = c(118,58,42,35,27,25,21,19,18), lot2 = c(69,35,26,21,18,16,13,12,12)) summary(cl <- glm (lot1 ~ log(u), data=clotting, family=Gamma)) summary(ro <- glmrob(lot1 ~ log(u), data=clotting, family=Gamma)) clotM5.high <- within(clotting, { lot1[5] <- 60 }) op <- par(mfrow=2:1, mgp = c(1.6, 0.8, 0), mar = c(3,3:1)) plot( lot1 ~ log(u), data=clotM5.high) plot(1/lot1 ~ log(u), data=clotM5.high) par(op) ## Obviously, there the first observation is an outlier with respect to both ## representations! cl5.high <- glm (lot1 ~ log(u), data=clotM5.high, family=Gamma) ro5.high <- glmrob(lot1 ~ log(u), data=clotM5.high, family=Gamma) with(ro5.high, cbind(w.x, w.r))## the 5th obs. is downweighted heavily! 
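## A small additional check (not part of the original example), using the
## 'w.r' component documented under Value above: the smallest robustness
## weight should indeed belong to the modified observation 5.
which.min(ro5.high$w.r)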
%% FIXME: Need work -- option of *starting* from
%% -----  see Andreas' ~/R/MM/Pkg-ex/robustbase/glmrob-gamma-ARu.R
% ## a "regular outlier" in the middle :
% clotM4.3 <- within(clotting, { lot1[4] <- 1000 })
% ## .. not even this one works : ... need *robust* start ?!
% try(cl4.3 <- glm   (lot1 ~ log(u), data=clotM4.3, family=Gamma))
% try(ro4.3 <- glmrob(lot1 ~ log(u), data=clotM4.3, family=Gamma))
% ## The new option to start from "lmrobMM" --- not yet ok either
% try(
% ro4.3 <- glmrob(lot1 ~ log(u), data=clotM4.3, family=Gamma,
%                 start = "lmrobMM")
% )
% ## summary(ro4.3)
%% TODO the "same" with lot2 :
%% summary(glm(lot2 ~ log(u), data=clotting, family=Gamma))
}
\keyword{robust}
\keyword{regression}
\keyword{nonlinear}
robustbase/man/exAM.Rd0000644000176200001440000000202213312375575014326 0ustar liggesusers\name{exAM}
\alias{exAM}
\docType{data}
\title{Example Data of Antille and May - for Simple Regression}
\description{
  This is an artificial data set, cleverly constructed and used by
  Antille and May to demonstrate \sQuote{problems} with LMS and LTS.
}
\usage{data(exAM, package="robustbase")}
\format{
  A data frame with 12 observations on 2 variables, \code{x} and \code{y}.
}
\details{
  Because the points are not in general position, both LMS and LTS
  typically \emph{fail}; however, e.g.,
  \code{\link[MASS]{rlm}(*, method="MM")} \dQuote{works}.
}
\source{
  Antille, G. and El May, H. (1992)
  The use of slices in the LMS and the method of density slices:
  Foundation and comparison.\cr
  In Yadolah Dodge and Joe Whittaker, editors,
  \emph{COMPSTAT: Proc. 10th Symp. Computat. Statist., Neuchatel},
  \bold{1}, 441--445; Physica-Verlag.
}
% \references{
% ~~ possibly secondary sources and usages ~~
% }
\examples{
data(exAM)
plot(exAM)
summary(ls <- lm(y ~ x, data=exAM))
abline(ls)
}
\keyword{robust}
\keyword{datasets}
robustbase/TODO0000644000176200001440000003167014123440375013125 0ustar liggesusers -*- org -*- C-c C-o follows link [MM: grep-r -e '\(FIXME\|TODO\)']

* Before next release
** DONE print() and print(summary()): 'method' almost twice; show iBest for "deterministic"
** DONE covMcd(): allow 'hsets.ini' argument to covMcd(), and *return* them (optionally) as 'Hsubsets'
** DONE export rankMM, classSVD, .signflip [MM: repmat should not be needed]
** DONE 'scalefn' argument for covMcd() to be used for "detMCD"
** DONE adjOutlyingness():
*** DONE change defaults for clower & cupper (E-mail from P.Segaert).
*** DONE But there is more: +/- swap ==> results not back compatible
** DONE colMedians() -> ask Henrik/ about "License: Artistic-2.0"
** DONE splitFrame() [important for lmrob(.. method = "M-S") --> lmrob.M.S()]:
   *character* should be treated *as* factors
** TODO nlrob()
*** TODO summary(nlrob(*)) fails for new methods; better error message or *work*
**** TODO for "MM" we are close to done; ideally want '$ rweights' (robustness weights) for all meth
*** TODO residuals( nlrob(), type = "...") should provide types "as in the literature"
*** DONE nlrob(*, method = "...") should call methods "tau", "CM", "MTL", "MM" by Eduardo Conceicao
**** DONE shouldn't we rename jde() to jdeopt() or even jdeoptim(), jDEoptim(), or JDEoptim()?
     R users already know optim() etc., so the name seems more logical for them.
* Short Term ** DONE sc.1) Qn(), Sn() should work with 'NA' (via 'na.rm=FALSE' argument, as mad()) ** DONE sc.2) Qn(), Sn() should work with Inf and "large x", see example(mc) ** TODO sc.2) mc() should work better with "large x", see example(mc) ** TODO Try scaleTau2(*, iterate=TRUE) or 'iter=TRUE' (which can have 'iter = 10' etc) ** TODO [Peter Filzmoser, Geneva 2016-07-07 talk]: covMcd() warns when n < 2*p .. should not *warn* but give message() ** TODO [Peter Filzmoser, Geneva 2016-07-07 talk]: solve.default(getCov(mcd)) error with CovControlOgk() init *** INSTEAD it should report the (theoretical) breakdown point (p ...) / (n - h ... ) [from the MCD theory] ** TODO mc(x) can *fail* to converge: workaround: jitter() -> ~/R/MM/Pkg-ex/robust/Robnik-mc.R ** TODO estimethod(): also for lmrob() and glmrob() models ** TODO VT implement .detMcd() in C ** TODO r6pack(milk, ..): *return* singularity message but do not signal an error ** TODO nlrob(*): *** TODO for the "vector parameter" biomass example in tests/nlrob-tst.R: method = "MM" As we do want the formula to work ==> we *must* allow 'lower' & 'upper' as list()s in R/nlregrob.R, have 14 matches for "eval *( *formula\[\[3L?" ((and *org* shows the `[[3L].]` (no ".") as underscored 3L)) : 123: y.hat <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) 127: y.hat <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) 141: y.hat <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) 175: res <- y - eval( formula[[3L]], c(data, initial$par) ) 193: fit <- eval( formula[[3L]], c(data, coef) ) 254: fit <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) 300: fit <- eval( formula[[3L]], c(data, coef) ) 355: fit <- eval( formula[[3L]], c(data, par) ) 361: fit <- eval( formula[[3L]], c(data, par) ) 366: fit <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) 390: fit <- eval( formula[[3L]], c(data, coef) ) 434: fit <- eval( formula[[3L]], c(data, par) ) 442: fit <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) 468: fit <- eval( formula[[3L]], c(data, coef) ) the same as in R/nlrob.R where we had eval(.., c(data, coef)) but now eval(.., c(data, start)) *** TODO nlrob(*, method=.) -- try at least *one* other optimizer than JDEoptim(), since we provide already most of the needed "hooks". *** DONE nlrob(*, method="M"): allow a "fixed initial sigma" (--> inference for "MM") *** TODO confint.nlrob():, "Wald" works; naming follows confint.lmer() from lme4 **** TODO method = "profile" based on (TODO) profile.nlrob() **** DONE method = "Wald" works ** TODO simulate() : implement for "nlrob" {and lmrob() ? and ..} ** glmrob *** BYlogreg() [ R/BYlogreg.R ] --> more tests in ./tests/glmrob-1.R --> glm.fit() instead of glm() --> vcov() instead of just std.err. {is already there} *** glmrob(*, weights.on.x = "robCov") uses MASS::cov.rob(), i.e. "MVE" and Andreas had a comment that "mcd" is worse. "covMcd" has been available for a while; now via robXweights() in ./R/glmrobMqle.R HOWEVER: Need something better when 'X' has (binary!) factors! "hat" +- works, but needs more work *** We now allow weights.on.x to be an arbitrary general wts(X, intercept) function _or_ a list containing a robMcd()-like function. Definitely need *testing* this situation! *** glmrob(): anova() has three variants: "V1", "Eva1", "Andreas1" --> ./R/glmrobMqle-DQD.R - gives warning every time {-> easy to fix} - Default is "V1" is that a good idea? 
*** glmrob() needs a bit more tests in ./tests/ [also consider those from man/glmrob.Rd] take those from Martin's old 'robGLM1' package (need more!) *** --> first test already shows that Martin's tests for "huberC == Inf" were *not* yet moved from robGLM1 to glmrob()... (in other words: glmrob() should work *** also, ni = 0 does not work quite as it should ( ./tests/binom-ni-small.R ) *** obj $ df ... maybe should be defined -- for "glm" methods to be applicable --> e.g. for predict(, interval="..") ! *** summary.glmrob() should be better documented; we should decide if the current return value is fine. *** Eva's code (and MM's) also computed & returned the "asymptotic efficiency"! *** anova.glmrob(): More modularization, allowing to provide own 'test' function. Test if Huber's C are different. Need theory to compare different C's and same model (which includes classical vs robust). *** add1() and/or drop1() would be nice ** TODO scaleTau2(): Also do a cheap finite-sample correction [MM] ! [DONE partly; but undocumented, since bound to change --> file:~/R/MM/STATISTICS/robust/1d-scale.R , 1d-scale-sim.R, etc --- unfinished!! ** TODO Psi/Rho/Chi/Wgt Functions We have quite a few "partial" collections of rho/psi functions; some are "sync"ed now, some not yet:: *** TODO 1) have the nice S4 class psi_func + psiFunc() and .defDwgt() functions in file:R/psi-rho-funs.R with further explorations, ideas in file:misc/experi-psi-rho-funs.R **** TODO print/show of such psi_func should show more; at least the psi function **** TODO str.psi_func() should be a bit nicer than the current default str() **** TODO nlrob(): also allow psi to be a 'psiFunc': --> ./R/nlrob.R ; consider even more *real* checks; now in tests/nlrob-tst.R *** DONE 2) deprecated: "old" tukeyChi() & tukeyPsi1() originally called from lmrob() , in ./R/biweight-funs.R *** DONE 3) psi.*(...., rho = FALSE/TRUE) functions from Andreas ([[file:.R/psi-funs-AR.R]]) replaced by using the new psi_func objects **** DONE nlrob() changed: uses psi = .Mwgt.psi1("huber", cc=1.345) as default *** TODO 4) have (C-based) functions Mpsi(), Mchi(), Mwgt(), etc, used from lmrob(), in ./R/lmrob.MM.R **** TODO provide Mpsi(psi = "t") etc; tuning parameter: 'nu' => MLE-t_\nu psi and psi' **** TODO provide '1)'-i.e. psi_func versions of the Mpsi() etc **** TODO Mpsi(*, "GGW") etc : have no (??) easy way to directly specify (a,b,c) tuning pars **** TODO *New* Mwgt(*, deriv=1) would correspond to Dwgt in psiFunc() which Manuel needs **** DONE now exported and documented in man/M.psi.Rd Further files, illustrating features, differences, etc: ./vignettes/psi_functions.Rnw -- with quite a few FIXME ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ./inst/xtraR/plot-psiFun.R chkPsiDeriv() {and plot utils} ./tests/psi-rho-etc.R compute asymp.efficiency and breakdown point ! ./tests/lmrob-psifns.R plot and lmrob()-test them **** DONE Deprecate* the "2)", tukeyChi() etc, making them call the M.*fun(): * Mid Term ** TODO R/lmrob.MM.R: Using lmrob.E( * ) *** TODO: numerically clearly better than these -- e.g. in summary.lmrob(): use integrate(), or *** ---- allow to specify 'use.integrate=TRUE, tol = 1e-10' etc for *accurate* cor.factors *** "hampel", "bisquare", "lqq" (polyn!): derive *exact* formula; others maybe, too <==> psi_func objects above? ** TODO New lmrob() features ("M-S" as option?): *** Function names with "." (which are exported) are frowned upon e.g. 
lmrob.split() *** checking .vcov.avar1() and its "posdefify" options [but "KS201x" uses .vcov.w() anyway] *** TODO lmrob.mar() [file:inst/doc/estimating.functions.R]: Maronna & Yohai (2010) should ~~~~~~~~~~ become part of robustbase, maybe under a better name, e.g. via lmrob( ... control ..) or directly. It is much used in the simulations of Koller & Stahel (2011) *** TODO Provide "simple M" estimator [so MASS :: rlm() is entirely superseeded] Consider lmrob(*, method = "M") --> default init = "ls" (Least Sq; as MASS:::rlm.default) which calls lmrob..M..fit() which is already documented as "simple" M-estimator (though the *scale* is kept fixed; i.e., no 'proposal 2'). ** TODO glmrob(), glmrobMqle(), etc : expression()s and eval() no longer "satisfactory", e.g., see FIXME in ./R/glmrobMqle.R ** TODO covMcd(): pass k3 as argument; default=current ==> allow "formula" k3 = k3(n,p) !! ** covOGK(): The argument name 'weight.fn' is pretty ugly and the default function name 'hard.rejection()' is just awful (we need a globally available function as 'role model'. - Could allow 'n.iter = 0' to simply compute Cov()_{ij} = rcov(X_i, X_j) ** rrcov etc *** rrcov.control() __ NEEDS name change ! ______ probably use mcd.control() and lts.control() or forget about *control() completely? since there are only a few in each ??????/ *** TODO tolellipse() --> renamed to tolEllipsePlot() **** maybe use cluster::ellipsoidPoints() **** allow other percentiles than just 97.5% **** maybe *return* something *** plot(mcd. ) [ R/covPlot.R ] : should show the call Default for 'ask' should be smarter: depend on prod(par("mfrow")) < #{plots} (which depends on 'classic' and p=2) *** ltsReg(): has undocumented '$resid' in addition to '$residuals' and '$raw.residuals'; drop it or document it ! ** More lmrob() considerations *** DONE more tests in tests/ *** fully implement and test the multivariate case (y = matrix with > 1 col.) *** src/lmrob.c : does median() , MAD() instead of using R's sort() routines * Long Term / Maybe ** inst/doc/lmrob_simulation.Rnw : *** use hyperlinks {e.g. using jss docu.class!} *** consider making parts available in (new) ./demo/lmrob...R *** tau_i (p.5) is not clear for Joe Average. .......................................... ** Generalizing 'wgt.himedian': We'd want a C API on which R builds. There are pure R implementations: - 'weighted.median()' in limma and I have generalized it ---> file:inst/xtraR/ex-funs.R - more general code (different 'tie' strategies; weighted *quantile*s) in file:/u/maechler/R/MM/STATISTICS/robust/weighted-median.R - The 'Hmisc' package has wtd.quantile() ** Miscellaneous *** Alternative version of covOGK() for correlation-only using's Huber's correlation formula which ensures [-1,1] range --> ~/R/MM/Pkg-ex/robustbase/robcorgroesser1.R and ~/R/MM/STATISTICS/robust/pairwise-new.R *** package 'riv' (author @ epfl.ch!) has 'slc()' ~= cov.S(.) -- in pure R code doesn't Valentin have a version too? otherwise: test this, ask author for "donation" to robustbase *** adjOutlyingness() : **** typo-bug is corrected; and I have made it more pretty. Still a bit problematic when denominator = 0 Currently leave away all the c/0 = Inf and 0/0 = NaN values. MM: Maybe, it's the fact that the coef = 1.5 should really depend on the sample size n and will be too large for small n (??) 
--> should ask Mia and maybe Guy Brys **** For really small (n,p): Taking 250 random samples of size p; is non-sense when choose(n,p) <= 250 Rather then, take *all* sub-samples of size p ==> getting a non-random result. *** Add data sets from the MMY-book -- mostly done {do we have *all* ?} *** Data Sets --- Valentin Todorov has several of Rousseeuw's in the 'rrov' package (and promised me "the rest" when needed) Don't like the *.x, *.y sub datasets: They shouldn't be needed when use a *formula* In his lts tests, he uses these "data sets from the literature": (Note that 'stackloss' is already in "datasets") : heart.x,heart.y, data(heart) stars.x,stars.y, data(stars) phosphor.x,phosphor.y, data(phosphor) stack.x,stack.loss, data(stackloss) coleman.x,coleman.y, data(coleman) salinity.x,salinity.y, data(salinity) aircraft.x,aircraft.y, data(aircraft) delivery.x,delivery.y, data(delivery) wood.x,wood.y, data(wood) hbk.x,hbk.y, data(hbk) robustbase/DESCRIPTION0000644000176200001440000000565314124314172014141 0ustar liggesusersPackage: robustbase Version: 0.93-9 VersionNote: Released 0.93-8 on 2021-06-02 to CRAN Date: 2021-09-27 Title: Basic Robust Statistics Authors@R: c(person("Martin","Maechler", role=c("aut","cre"), email="maechler@stat.math.ethz.ch", comment = c(ORCID = "0000-0002-8685-9910")) , person("Peter", "Rousseeuw", role="ctb", comment = "Qn and Sn") , person("Christophe", "Croux", role="ctb", comment = "Qn and Sn") , person("Valentin", "Todorov", role = "aut", email = "valentin.todorov@chello.at", comment = "most robust Cov") , person("Andreas", "Ruckstuhl", role = "aut", email = "andreas.ruckstuhl@zhaw.ch", comment = "nlrob, anova, glmrob") , person("Matias", "Salibian-Barrera", role = "aut", email = "matias@stat.ubc.ca", comment = "lmrob orig.") , person("Tobias", "Verbeke", role = c("ctb","fnd"), email = "tobias.verbeke@openanalytics.eu", comment = "mc, adjbox") , person("Manuel", "Koller", role = "aut", email = "koller.manuel@gmail.com", comment = "mc, lmrob, psi-func.") , person(c("Eduardo", "L. T."), "Conceicao", role = "aut", email = "mail@eduardoconceicao.org", comment = "MM-, tau-, CM-, and MTL- nlrob") , person("Maria", "Anna di Palma", role = "ctb", comment = "initial version of Comedian") ) URL: https://robustbase.R-forge.R-project.org/ BugReports: https://R-forge.R-project.org/tracker/?atid=302&group_id=59 Description: "Essential" Robust Statistics. Tools allowing to analyze data with robust methods. This includes regression methodology including model selections and multivariate statistics where we strive to cover the book "Robust Statistics, Theory and Methods" by 'Maronna, Martin and Yohai'; Wiley 2006. Depends: R (>= 3.5.0) Imports: stats, graphics, utils, methods, DEoptimR Suggests: grid, MASS, lattice, boot, cluster, Matrix, robust, fit.models, MPV, xtable, ggplot2, GGally, RColorBrewer, reshape2, sfsmisc, catdata, doParallel, foreach, skewt SuggestsNote: mostly only because of vignette graphics and simulation Enhances: robustX, rrcov, matrixStats, quantreg, Hmisc EnhancesNote: linked to in man/*.Rd LazyData: yes NeedsCompilation: yes License: GPL (>= 2) Packaged: 2021-09-27 07:40:12 UTC; maechler Author: Martin Maechler [aut, cre] (), Peter Rousseeuw [ctb] (Qn and Sn), Christophe Croux [ctb] (Qn and Sn), Valentin Todorov [aut] (most robust Cov), Andreas Ruckstuhl [aut] (nlrob, anova, glmrob), Matias Salibian-Barrera [aut] (lmrob orig.), Tobias Verbeke [ctb, fnd] (mc, adjbox), Manuel Koller [aut] (mc, lmrob, psi-func.), Eduardo L. T. 
Conceicao [aut] (MM-, tau-, CM-, and MTL- nlrob),
  Maria Anna di Palma [ctb] (initial version of Comedian)
Maintainer: Martin Maechler <maechler@stat.math.ethz.ch>
Repository: CRAN
Date/Publication: 2021-09-27 10:10:02 UTC
robustbase/build/0000755000176200001440000000000014124272532013524 5ustar liggesusers
robustbase/build/vignette.rds0000644000176200001440000000062214124272532016063 0ustar liggesusers
robustbase/build/partial.rdb0000644000176200001440000021345214124272451015660 0ustar liggesusers
robustbase/tests/0000755000176200001440000000000014124272534013571 5ustar liggesusers
robustbase/tests/huber-etc.Rout.save0000644000176200001440000000555710377104055017261 0ustar liggesusers
R : Copyright 2006, The R Foundation for Statistical Computing
Version 2.2.1 Patched (2006-02-18 r37407) ISBN 3-900051-07-0

R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.

R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.

Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.

> library(robustbase)
Loading required package: MASS
>
> ### Test sets (all kinds odd/even, constant/regular/outlier)
>
> ## n = 0,1,2,3 :
> x0 <- numeric(0)
> x1 <- 3
> x2 <- 1:2
> x3 <- c(1:2,10)
> ## constant (0 mad) + 0--2 outliers
> xC <- rep(1, 12)
> xC. <- rep(1, 11)
> xC1 <- c(xC, 10)
> xC1. <- c(xC., 10)
> xC2 <- c(xC1, 100)
> xC2.
<- c(xC1., 100) > ## "uniform" + 0--2 outliers > y <- 1:10 > y. <- 1:11 > y1 <- c(y, 100) > y1. <- c(y., 100) > y2 <- c(y1, 1000) > y2. <- c(y1., 1000) > > nms <- ls(pat="^[xy]"); nms; names(nms) <- nms [1] "x0" "x1" "x2" "x3" "xC" "xC." "xC1" "xC1." "xC2" "xC2." [11] "y" "y." "y1" "y1." "y2" "y2." > lx <- lapply(nms, + function(n) { + x <- get(n) + m <- mad(x) + hx <- + if(!is.na(m) && m > 0) MASS::huber(x) + else list(m=NA, s=NA) + hMx <- huberM(x) + list(loc = + c(median = median(x), + huber = hx$m, + huberM = hMx$m), + scale= + c(mad = m, + huber = hx$s, + huberM = hMx$s)) + }) > r <- list(mu = sapply(lx, function(x) x$loc), + s = sapply(lx, function(x) x$scale)) > r $mu x0 x1 x2 x3 xC xC. xC1 xC1. xC2 xC2. y y. y1 y1. median NA 3 1.5 2.000000 1 1 1 1 1 1 5.5 6 6.000000 6.500000 huber NA NA 1.5 2.611949 NA NA NA NA NA NA 5.5 6 6.167169 6.606518 huberM NA 3 1.5 2.611949 1 1 1 1 1 1 5.5 6 6.167169 6.606518 y2 y2. median 6.500000 7.000000 huber 6.834339 7.213034 huberM 6.834339 7.213034 $s x0 x1 x2 x3 xC xC. xC1 xC1. xC2 xC2. y y. y1 y1. mad NA 0 0.7413 1.4826 0 0 0 0 0 0 3.7065 4.4478 4.4478 4.4478 huber NA NA 0.7413 1.4826 NA NA NA NA NA NA 3.7065 4.4478 4.4478 4.4478 huberM NA 0 0.7413 1.4826 0 0 0 0 0 0 3.7065 4.4478 4.4478 4.4478 y2 y2. mad 4.4478 4.4478 huber 4.4478 4.4478 huberM 4.4478 4.4478 > > cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' Time elapsed: 1.09 0.1 1.68 0 0 > robustbase/tests/tmcd.R0000644000176200001440000001557713774561564014700 0ustar liggesuserslibrary(robustbase) source(system.file("xtraR/test_MCD.R", package = "robustbase"))#-> doMCDdata ## ../inst/xtraR/test_MCD.R source(system.file("test-tools-1.R", package="Matrix", mustWork=TRUE)) ## -> assertError(), relErr(), and: showProc.time() ## -- now do it: options(digits = 5) set.seed(101) # <<-- sub-sampling algorithm now based on R's RNG and seed doMCDdata() doMCDdata(method="DetMCD"); warnings() ## vvvv no timing for 'R CMD Rdiff' outputs doMCDdata(nrep = 12, time=FALSE) doMCDdata(nrep = 12, time=FALSE, method="DetMCD"); warnings() doMCDdata(nrep = 12, time=FALSE, method = "MASS") ###--- now the "close to singular" mahalanobis case: set.seed(6) (c3 <- covMcd(mort3)) (c3. <- covMcd(mort3, nsamp="deterministic")) stopifnot(log(c3$crit) <= log(c3.$crit), print(log(c3.$crit / c3$crit)) <= 0.8) ## see 0.516 / 0.291 {with seed 7} ## ## rescale variables: scaleV <- c(0.1, 0.1, 1, 1, .001, 0.1, 0.1, 100) mm <- data.matrix(mort3) * rep(scaleV, each = nrow(mort3)) C3 <- covMcd(mm) C3. <- covMcd(mm, nsamp="deterministic") stopifnot(C3$mcd.wt == c3$mcd.wt)# here, not for all seeds! ## error ("computationally singular") with old (too high) default tolerance: try( covMcd(mm, control= rrcov.control(tol = 1e-10)) ) try( covMcd(mm, control= rrcov.control(tol = 1e-10), nsamp="deterministic") ) showProc.time() ## "large" examples using different algo branches {seg.fault in version 0.4-4}: n <- 600 ## - partitioning will be triggered set.seed(1) X <- matrix(round(100*rnorm(n * 3)), n, 3) (cX <- covMcd(X)) cX. <- covMcd(X, nsamp="deterministic", scalefn = scaleTau2) i <- names(cX); i <- i[!(i %in% c("call", "nsamp", "method", "raw.weights"))] stopifnot(sum(cX.$raw.weights != cX$raw.weights) <= 2, all.equal(cX[i], cX.[i], tol= 1/9)) n <- 2000 ## - nesting will be triggered set.seed(4) X <- matrix(round(100*rnorm(n * 3)), n, 3) set.seed(1) summary(cX <- covMcd(X)) # <- show newly activated print.summary.mcd(.) cX. 
<- covMcd(X, nsamp="deterministic", scalefn = scaleTau2) i2 <- i[i != "mcd.wt"] stopifnot(print(sum(cX.$raw.weights != cX$raw.weights)) <= 3, # 2 all.equal(cX[i2], cX.[i2], tol= 1/10))# 1/16 set.seed(1) ## testing of 'raw.only' : cXo <- covMcd(X, raw.only=TRUE) i <- paste0("raw.", c("cov", "center", "cnp2")) stopifnot(cXo$raw.only, all.equal(cX[i], cXo[i], tol = 1e-15), c("best", "mah") %in% setdiff(names(cX), names(cXo))) showProc.time() ## Now, some small sample cases: ## maximal values: n. <- 10 p. <- 8 set.seed(44) (X. <- cbind(1:n., round(10*rt(n.,3)), round(10*rt(n.,2)), matrix(round(10*rnorm(n. * (p.-3)), 1), nrow = n., ncol = p.-3))) ## 2 x 1 ---> Error r <- tryCatch(covMcd(X.[1:2, 2, drop=FALSE]), error=function(e)e) stopifnot(inherits(r, "error"), grepl("too small sample size", r$message)) ## 3 x 2 --- ditto r <- tryCatch(covMcd(X.[1:3, 2:3]), error=function(e)e) stopifnot(inherits(r, "error"), grepl("too small sample size", r$message)) ## 5 x 3 [ n < 2 p ! ] --- also works for MASS X <- X.[1:5, 1:3] set.seed(101) ## the finite-sample correction is definitely doubtful: summary(cc <- covMcd(X, use.correction = FALSE)) str(cc) ## best = 2 3 4 5 if(hasMASS <- requireNamespace("MASS", quietly=TRUE)) { mcc <- MASS::cov.mcd(X) stopifnot(cc$best == mcc$best, all.equal(cc$center, mcc$center, tolerance = 1e-10), all.equal(c(mcc$cov / cc$raw.cov), rep(0.673549282206, 3*3))) } ## p = 4 -- 6 x 4 & 7 x 4 [ n < 2 p ! ] p <- 4 n <- 7 X <- X.[1:n, 1+(1:p)] stopifnot(dim(X) == c(n,p)) (cc <- covMcd(X, use.correction = FALSE)) str(cc) ## best = 1 2 4 5 6 7 if(hasMASS) { mcc <- MASS::cov.mcd(X) stopifnot(cc$best == mcc$best, all.equal(cc$center, mcc$center, tolerance = 1e-10), all.equal(c(mcc$cov / cc$raw.cov), rep(0.7782486992881, p*p))) } n <- 6 X <- X[1:n,] (cc <- covMcd(X, use.correction = FALSE)) if(hasMASS) { mcc <- MASS::cov.mcd(X) stopifnot(cc$best == mcc$best, all.equal(cc$center, mcc$center, tolerance = 1e-10), all.equal(c(mcc$cov / cc$raw.cov), rep(0.7528695976179, p*p))) } showProc.time() ## nsamp = "exact" -- here for p=7 coleman.x <- data.matrix(coleman[, 1:6]) showSys.time(CcX <- covMcd(coleman.x, nsamp= "exact")) showSys.time(Ccd <- covMcd(coleman.x, nsamp= "deterministic")) stopifnot(all.equal(CcX$best, c(2, 5:9, 11,13, 14:16, 19:20), tolerance=0), intersect(CcX$best, Ccd$best) == c(2,5,7,8,13,14,16,19,20), relErr(CcX$crit, Ccd$crit) < 0.35 # see ~ 0.34 ) summary(Ccd) demo(determinMCD)## ../demo/determinMCD.R ## ----------- including simple "exactfit" (code = 3) warnings() showProc.time() if(!robustbase:::doExtras()) quit() ## if ( doExtras ) ----------------------------------------------------------------- ## ============== ## (nmini, kmini) examples: set.seed(7) ; X1 <- gendata(10000, p=13, eps = 0.30) showSys.time(c1 <- covMcd(X1$X)) # 0.87 sec chk.covMcd <- function(ans, ind) { stopifnot(inherits(ans, "mcd")) ## check that all outliers were detected: mod.outl <- which(ans$mcd.wt == 0) outl.detected <- (ind %in% mod.outl) if(!all(outl.detected)) { cat("The following outliers are *not* detected:\n") print(which(!outl.detected)) } fp <- !(mod.outl %in% ind) if(any(fp)) { cat(sprintf("False positive \"outliers\" (a few expected) %d of %d (= %.2f%%):\n", sum(fp), nobs(ans), 100*sum(fp)/nobs(ans))) print(which(fp)) } else cat("** No ** false positive outliers -- exceptional!\n") } ## chk.covMcd(c1, X1$xind) cat("\ncovMcd(*, kmini=12, trace=2) ...\n------\n") showSys.time(c2 <- covMcd(X1$X, kmini=12, trace=2))# slower.. 
chk.covMcd(c2, X1$xind) ## Comparing: ii <- !(names(c1) %in% c("call", "method")) cat("\ncovMcd(*, nsamp=\"deterministic\")\n") showSys.time(cD <- covMcd(X1$X, nsamp="deterministic"))# quite slower than FASTMCD chk.covMcd(cD, X1$xind) cat("<.>$crit = log(det(.)) [minimal = best] :\n") print(cbind(sort(c(default = c1$crit, kmini.12 = c2$crit, determin = cD$crit)))) i2 <- names(c1)[ii]; i2 <- i2[i2 != "nsamp"] ## closer coincidence if "raw.*" are dropped: i3 <- i2; i3 <- i3[ - grep("^raw", i3) ] stopifnot(all.equal(c1[ii], c2[ii], tol= 0.02), all.equal(cD[i2], c1[i2], tol= 0.02), all.equal(cD[i3], c1[i3], tol= 6e-4), # 4.60e-4 ## the 0/1 weights coincide : cD$mcd.wt == c1$mcd.wt, c2$mcd.wt == c1$mcd.wt) showProc.time() ## Radarexample --- already some in ../man/radarImage.Rd <<<------------- data(radarImage) print(d <- dim(radarImage)); n.rI <- d[1] ## The 8 "clear" outliers (see also below) ii8 <- c(1548:1549, 1553:1554, 1565:1566, 1570:1571) set.seed(7) showSys.time( L1 <- lapply(0:200, function(n) n+ which(0 == covMcd(unname(radarImage[(n+1L):n.rI,]), trace=2)$mcd.wt))) ## check for covMcd() consistency: print(tablen <- table(vapply(L1, length, 1))) plot(tablen) print(iCommon <- Reduce(intersect, L1)) stopifnot(ii8 %in% iCommon) ## robustbase/tests/poisson-ex.R0000644000176200001440000002455613500426047016031 0ustar liggesusers library(robustbase) source(system.file("test-tools-1.R", package="Matrix", mustWork=TRUE)) ## -> assertError(), assert.EQ(), showSys.time(), ... #### Poisson examples from Eva Cantoni's paper ### Using Possum Data ### ================ data(possumDiv) ## Try to follow closely Cantoni & Ronchetti(2001), JASA dim(X <- possum.mat[, -1]) # 151 13 str(y <- possum.mat[, "Diversity"]) ##--- reduce the matrix from singularity ourselves: X. <- possum.mat[, -c(1, match(c("E.nitens", "NW-NE"), colnames(possum.mat)))] dim(X.)# 151 11 ## "classical via robust: c = Inf : Inf. <- 1e5 ## --- FIXME ## The following used to fail because glm.fit() returns NA coefficients ## now fine .. keep this as test! glm.cr <- glmrob(y ~ X, family = "poisson", tcc = Inf.) (scr <- summary(glm.cr)) scl <- summary(glm.cl <- glm (Diversity ~ . , data=possumDiv, family=poisson)) sc2 <- summary(glm.c2 <- glmrob(Diversity ~ . , data=possumDiv, family=poisson, tcc = Inf.)) MMg <- model.matrix(glm.cl) assert.EQ(coef(scl), coef(sc2), tol = 6e-6, giveRE=TRUE) # 1.37e-6 dnms <- list(colnames(MMg), c("Estimate", "Std. Error", "z value", "Pr(>|z|)")) cf.sc <- array(c(-0.9469439, 0.01192096, -0.2724059, 0.04022862, 0.03988606, 0.07173483, 0.01763833, -0.01534376, 0.1149216, 0.06675529, 0.1169463, -0.4889071, ## SE 0.2655031, 0.02194661, 0.2859216, 0.01120463, 0.01438884, 0.03814053, 0.01059779, 0.1916126, 0.2724202, 0.1901612, 0.1902903, 0.2474653, ## z val -3.566603, 0.5431798, -0.9527294, 3.590356, 2.772014, 1.880803, 1.664341, -0.08007701, 0.421854, 0.3510457, 0.6145675, -1.975659, ## P val 0.0003616393, 0.587006, 0.3407272, 0.0003302263, 0.00557107, 0.05999869, 0.09604432, 0.936176, 0.6731316, 0.7255541, 0.5388404, 0.04819339), dim = c(12L, 4L), dimnames = dnms) assert.EQ(cf.sc, coef(sc2), tol = 4e-7, giveRE=TRUE) # 8.48e-8 ## c = 2.0 summary(g2 <- glmrob(Diversity ~ . , data=possumDiv, family=poisson, tcc = 2.0, trace=TRUE)) ## c = 1.6 glm.r <- glmrob(Diversity ~ . , data=possumDiv, family=poisson, tcc = 1.6, trace=TRUE) (s.16 <- summary(glm.r)) str(glm.r) ## Now with *smaller* X (two variables less): glm.c2 <- glmrob(y ~ X., family = "poisson", tcc = Inf.) 
summary(glm.c2) ## c = 1.6, x-weights, as in Cantoni-Ronchetti glm.r2 <- glmrob(y ~ X., family = "poisson", tcc = 1.6, weights.on.x = "hat") ## Now the same, for the direct possum data (no matrix), ## This indeed gives the same coefficients as in ## Table 3 of Cantoni+Ronchetti(2001): .. (tech.rep.): glm.r2. <- glmrob(Diversity ~ ., family = "poisson", data=possumDiv, tcc = 1.6, weights.on.x = "hat", acc = 1e-15) ## here iterate till convergence (acc = 10^(-15)) (sglm.r2 <- summary(glm.r2.)) ## This is too accurate for S.E. (but we have converged to end) cf2 <- matrix(c(-0.898213938628341, 0.269306882951903, 0.00717220104127189, 0.0224349606070713, -0.25335520175528, 0.288588183720387, 0.0403970350911325, 0.0113429514237665, 0.0411096703375411, 0.0145996036305452, 0.0730250489306713, 0.0386771060643486, 0.0176994176433365, 0.0107414247342375, -0.0289935051669504,0.194215229266707, 0.149521144883774, 0.271648514202971, 0.0503262879663932, 0.191675979065398, 0.0909870068741749, 0.192192515800464, -0.512247626309172, 0.250763990619973), 12,2, byrow=TRUE) assert.EQ(cf2, unname(coef(sglm.r2)[, 1:2]), tol = 1e-9, giveRE=TRUE)#-> show : ~ 1.46e-11 stopifnot(abs(glm.r2.$iter - 18) <= 1) # 18 iterations on 32-bit (2008) ## MT estimator -- "quick" examples if(!robustbase:::doExtras()) { cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' quit() } ## if ( doExtras ) ----------------------------------------------------- X1 <- cbind(1, X.) if(FALSE) ## for debugging ... options(warn = 1, error=recover) options(nwarnings = 1000) # def. 50 RNGversion("3.5.0") ## [TODO: adapt to "current" RNG settings] set.seed(57) showSys.time( ## m1 <- glmrobMT(x=X1, y=y) m1 <- glmrob(Diversity ~ ., data=possumDiv, family=poisson, method="MT") ) summary(warnings()) stopifnot(m1$converged) assert.EQ(m1$initial, c(-0.851594294907422, -0.0107066895370536, -0.226958540075445, 0.0355906625338308, 0.048010654640958, 0.0847493155436896, 0.0133604488401352, -0.024115201062159, 0.0270535337324518, 0.146022135657894, -0.00751380783260833, -0.417638086169033) , tol = 1e-13, check.attributes=FALSE, giveRE=TRUE) ## MM: I'm shocked how much this changes after every tweak ... (arch <- Sys.info()[["machine"]]) .M <- .Machine; str(.M[grep("^sizeof", names(.M))]) ## differentiate long-double.. if(arch == "x86_64") { if(.M$sizeof.longdouble != 16) arch <- paste0(arch, "--no-long-double") else if(osVersion == "Fedora 30 (Thirty)") arch <- paste0(arch, "_F30") } dput(signif(unname(coef(m1)), 11)) ## --> ## Something strange going on: R CMD check is different from interactive R, here. ## ???? [I see that the byte compiler is not listed in sessionInfo] ## In any case, take the dput(.) output from the *.Rout[.fail] file ## 2015-07-21: on 32-bit, the results *change* when re-run ??? 
beta1 <- list(i686 = ## old florence: ## c(-0.83715700394, 0.0085488694315, -0.16734609346, 0.040965601691, ## 0.042387113444, 0.063146240793, 0.018632137866, -0.0062886781262, ## 0.11466679192, 0.091457894347, -0.025009954018, -0.66867971209) ## for a "moment": f32sfs-2; 2015-07-20 ## c(-0.83818366695, 0.0085885492587, -0.1680548609, 0.040969491636, ## 0.042401438906, 0.063170238296, 0.018647880253, -0.0058039548495, ## 0.11500468542, 0.091940159895, -0.024804291737, -0.66861710581) ## f32sfs-2; 2015-07-21; in "R CMD check"/BATCH, *not* interactive c(-0.83701057367, 0.0085408263511, -0.16692955779, 0.040980220489, 0.042389760873, 0.063145608346, 0.018632314682, -0.0062819674369, 0.11513144785, 0.091268054568, -0.025531439815, -0.66981350787) ## f32sfs-2; 2015-07-21, in R-devel, several times in a row: ## c(-0.83734949811, 0.008554484224, -0.16727333284, 0.040980350692, ## 0.042391751765, 0.06315585848, 0.018633222478, -0.0062978140762, ## 0.11509071086, 0.091463771235, -0.025113314023, -0.66955433495) , "x86_64" = c(-0.83723213945, 0.0085385261915, -0.16697112315, 0.040985126003, 0.042400738973, 0.063168847366, 0.01863253681, -0.0064477807228, 0.11488937188, 0.091283185006, -0.025627390293, -0.66995658693) , "x86_64--no-long-double" = c(-0.83710423989, 0.0085428949874, -0.16713845989, 0.040973904414, 0.042391910971, 0.063159426394, 0.018629240073, -0.006362108938, 0.1145563969, 0.091490891317, -0.025378427464, -0.66943593439) , "x86_64_F30" = c(-0.83703991366, 0.008536691385, -0.16707196217, 0.040980171987, 0.042388781206, 0.063132162167, 0.018634264818, -0.0064298708197, 0.11486525895, 0.091433901799, -0.025384338265, -0.66920847831) ) ## just FYI: difference 32-bit vs 64-bit: assert.EQ(beta1[[1]], beta1[[2]], tol = 0.004, check.attributes=FALSE, giveRE=TRUE) ## Mean relative difference: 0.00142 [~ 2013-12]; 0.00273 [f32sfs-2; 2015-08]; then (R-devel 2015-07-21): 0.000916 assert.EQ(beta1[[2]], beta1[[3]], tol = 0.002, check.attributes=FALSE, giveRE=TRUE) ## Mean relative difference: 0.00082849 [2014-11] ## when bypassing BLAS in matprod() vvvvv seen 0.001385 [Lx 64b]: assert.EQ(coef(m1), beta1[[arch]], tol = 0.002, # typically 1e-10 is ok !! check.attributes=FALSE, giveRE=TRUE) ## The same, with another seed: set.seed(64) showSys.time( ## m2 <- glmrobMT(x=X1, y=y) m2 <- glmrob(Diversity ~ ., data=possumDiv, family=poisson, method="MT") ) summary(warnings()) stopifnot(m2$converged) if(FALSE) dput(signif(unname(m2$initial), 13)) ## --> assert.EQ(m2$initial, ## so this is *not* platform (32bit/64bit) dependent: c(-1.204304813829, 0.02776038445201, -0.3680174045842, 0.04325746912892, 0.03895315289169, 0.04537145479989, 0.02847987541025, 0.07073207523212, 0.355491639539, 0.1822955449528, 0.1323720331562, -0.3419939094877) , tol = 1e-12, check.attributes=FALSE, giveRE=TRUE) dput(signif(unname(coef(m2)), 11)) ## --> beta2 <- list(i686 = ## florence?, Nov. 2014 (or even Dec 2013) ## c(-0.83698669149, 0.0085587296184, -0.16778044558, 0.040960021262, ## 0.042402954975, 0.063188868629, 0.018630275088, -0.0061015509403, ## 0.11385896307, 0.090966386294, -0.02572887737, -0.66945784056) ## f32sfs-2, July 2015, "R CMD .." 
(non-interactive!): c(-0.83644647378, 0.0085365454367, -0.16770422458, 0.040958113098, 0.04238796628, 0.063174324485, 0.018618360015, -0.0062357940483, 0.11380146782, 0.090988141307, -0.025500338638, -0.66949122367) ## f32sfs-2, July 2015, interactive ## c(-0.83675287265, 0.0085383816807, -0.16763418359, 0.040968861778, ## 0.042399340988, 0.063148815999, 0.018624181637, -0.0061320761338, ## 0.11423331389, 0.0912474233, -0.025508101291, -0.66971416165) , "x86_64" = c(-0.83687097624, 0.0085341676033, -0.1674299545, 0.040968820903, 0.042397459287, 0.063159075944, 0.018625582804, -0.0063140636571, 0.11426134017, 0.091317308575, -0.025373078819, -0.66957444238) , "x86_64--no-long-double" = c(-0.8370370234, 0.008538975248, -0.1671607287, 0.040976013861, 0.042393702043, 0.06314300867, 0.018631172062, -0.0063382132536, 0.11445827857, 0.091409918881, -0.025308999173, -0.66935766843) , "x86_64_F30" = ## Fedora 30, R-devel (2019-06-13): c(-0.83651130836, 0.0085272636623, -0.16777225909, 0.040958046751, 0.042398611622, 0.063169934556, 0.018622060538, -0.0067041556052, 0.11358762483, 0.090950270043, -0.025393966426, -0.66916946118) ) ## just FYI: difference 32-bit vs 64-bit: assert.EQ(beta2[[1]], beta2[[2]], tol = 0.001, check.attributes=FALSE, giveRE=TRUE) ## Mean relative difference: 0.0009487 [2013-12 approx.] assert.EQ(beta2[[2]], beta2[[3]], tol = 0.001, check.attributes=FALSE, giveRE=TRUE) ## Mean relative difference: 0.0005119 [2014-11] ## when bypassing BLAS in matprod() vvvvv seen 0.0002766 [Lx 64b]: assert.EQ(coef(m2), beta2[[arch]], tol = 0.001, # typically 1e-10 is ok !! check.attributes=FALSE, giveRE=TRUE) ## slight changes of algorithm often change the above by ~ 4e-4 !!! summary(warnings()) ###---- Model Selection ----- ## (not yet) [ MM had this in ../../robGLM1/tests/quasi-possum.R ] cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/comedian-tst.R0000644000176200001440000000442112437665430016312 0ustar liggesusersrequire(robustbase) source(system.file("test-tools-1.R", package="Matrix", mustWork=TRUE)) showProc.time() data(hbk); hbk.x <- data.matrix(hbk[, 1:3]) covComed(hbk.x) covComed(hbk.x, n.iter=4) showProc.time() data(radarImage) covComed(radarImage) covComed(radarImage[,3:5], n.iter = 5) showProc.time() data(bushfire) ; covComed(bushfire) data(heart); covComed(heart[, 1:2]) data(starsCYG); covComed(starsCYG) data(stackloss); covComed(stack.x) showProc.time() if(!robustbase:::doExtras()) quit() ## if ( doExtras ) ----------------------------------------------------------------- ## ============== i.rr <- c("raw.cov", "raw.center", "cov", "center") n <- 1024 ; p <- 7 set.seed(47) showSys.time( rX <- replicate(100, covComed(matrix(rnorm(n*p), n,p))[i.rr], simplify=FALSE)) ## Computing simulation-average (cov / center) <==> looking at Bias ## _FIXME_ Really look at "MSE = Var + Bias^2" -- or something like ## "simulation-average Squared Error or other Loss" C0 <- Reduce("+", lapply(rX, `[[`, "raw.cov")) / length(rX) C. <- Reduce("+", lapply(rX, `[[`, "cov")) / length(rX) round(1000 * C0) round(1000 * C.) assert.EQ(C0, diag(p), tol= 0.04, giveRE=TRUE) #-> 0.02805 assert.EQ(C., diag(p), tol= 0.09, giveRE=TRUE) #-> 0.06475 ## Hmm.. raw.cov is better than cov ?? 
c00 <- Reduce("+", lapply(rX, `[[`, "raw.center")) / length(rX) c0 <- Reduce("+", lapply(rX, `[[`, "center")) / length(rX) stopifnot(print(sqrt(mean( (c00 - rep(0, p))^2 ))) < 0.005)# 0.004188 stopifnot(print(sqrt(mean( (c0 - rep(0, p))^2 ))) < 0.005)# 0.003434 n <- 4096 ; p <- 11 set.seed(17) showSys.time( r4 <- replicate(64, covComed(matrix(10+rnorm(n*p), n,p))[i.rr], simplify=FALSE)) C0 <- Reduce("+", lapply(r4, `[[`, "raw.cov")) / length(r4) C. <- Reduce("+", lapply(r4, `[[`, "cov")) / length(r4) round(1000 * C0) round(1000 * C.) assert.EQ(C0, diag(p), tol = 0.025, giveRE=TRUE) # 0.0162 assert.EQ(C., diag(p), tol = 0.06 , giveRE=TRUE) # 0.0486 ## Again... raw.cov better than cov ?? c00 <- Reduce("+", lapply(r4, `[[`, "raw.center")) / length(r4) c0 <- Reduce("+", lapply(r4, `[[`, "center")) / length(r4) assert.EQ(c00, rep(10, p), tol = 2e-4, giveRE=TRUE)# 7.97267e-05 = "raw" is better ? assert.EQ(c0 , rep(10, p), tol = 2e-4, giveRE=TRUE)# 0.0001036 robustbase/tests/binom-no-x.R0000644000176200001440000000102012271657124015673 0ustar liggesusers library(robustbase) ### "intercept only" : "no x" set.seed(101) k <- rbinom(100, size=3, pr = 0.2) y <- cbind(k, n.k = 3 - k) gg <- glm(y ~ 1, family = "binomial") (cfK <- coef(summary(gg))) Inf. <- 1e5 # FIXME (note that much larger values *deteriorate* slightly!) rg.Inf <- glmrob(y ~ 1, family = "binomial", tcc= Inf.) stopifnot(all.equal(unname(cfK[1:2]), unname(unlist(coef(summary(rg.Inf))[1:2])), tolerance = 1e-7))# 4.09e-8 rg.0 <- glmrob(y ~ 1, family = "binomial") summary(rg.0) str(rg.0, digits= 6) robustbase/tests/lmrob-psifns.Rout.save0000644000176200001440000007370513313011777020027 0ustar liggesusers R Under development (unstable) (2018-06-19 r74919) -- "Unsuffered Consequences" Copyright (C) 2018 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > #### Tests psi(), chi(),... etc and tuning.psi, tuning.chi : > > library(robustbase) > source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) > source(system.file("test-tools-1.R", package = "Matrix", mustWork=TRUE))# assert.EQ Loading required package: tools > > ### (1) Test the functions themselves -------------------------------- > if(!dev.interactive(orNone=TRUE)) pdf("rob-psifns.pdf") > > ## Simple version, no error checking, no derivative, nothing: > psiGGW <- function(x, a,b,c) { + ifelse((ax <- abs(x)) < c, + x, + ifelse((ea <- -((ax-c)^b)/(2*a)) < -708.4, 0, x * exp(ea))) + } > assert.EQ(Mpsi (5:9, cc=c(0, a=1/8,b=2,c=1/8, NA), "GGW"), + psiGGW(5:9, a=1/8,b=2,c=1/8), tol = 1e-13) > > > ## Check that psi() |-> works; ditto for +-Inf, NA,.. 
> cG <- c(-.5, 1, .95, NA) # one of the 6 "builtin"s > d0 <- numeric() > IoI <- c(-Inf, 0, Inf) > NN <- c(NaN, NA) > > cGs <- list( c(-.4, 1.5, 0.85, NA) + , c(-.4, 1.5 , 0.90, NA) + , c(-.4, 1.5 , 0.95, NA) + , c(-.4, 1.5, 0.975, NA) + , c(-.4, 1.5, 0.99 , NA) + , c(-.4, 1.5, 0.995, NA) + ## + , c(-.4, 1.25, 0.975, NA) + , c(-.4, 1.1, 0.975, NA) + , c(-.4, 1.025, 0.975, NA) + , c(-.4, 1.0125, 0.975, NA) + ## + ## FIXME , c(-.1, 1.25, 0.95, NA) + ## FIXME , c(-.1, 1.25, 0.99, NA) + ) > st <- system.time( + cG.cnst <- lapply(cGs, function(cc) + lmrob.control(psi = "ggw", tuning.psi = cc)$tuning.psi) + ) > cat('Time for constants computation of tuning.psi: ', st,'\n') Time for constants computation of tuning.psi: 0.16 0.001 0.161 0 0 > cGct <- t(sapply(cG.cnst, attr, "constants"))[,-1] > colnames(cGct) <- c("a","b","c", "rhoInf") > signif(cGct, 4) a b c rhoInf [1,] 1.0170 1.500 0.4996 2.384 [2,] 1.2810 1.500 0.5826 3.242 [3,] 1.8100 1.500 0.7335 5.139 [4,] 2.4430 1.500 0.8959 7.666 [5,] 3.4380 1.500 1.1250 12.090 [6,] 4.2970 1.500 1.3050 16.280 [7,] 1.3780 1.250 1.4350 7.654 [8,] 1.0140 1.100 1.7000 7.643 [9,] 0.8873 1.025 1.8130 7.712 [10,] 0.8693 1.012 1.8300 7.733 > assert.EQ(sapply(cG.cnst, function(cc) MrhoInf(cc, "ggw")), + cGct[,"rhoInf"], tol = 1e-8) > > > ## Do these checks for a *list* of (c.par, psi) combinations: > c.psi.list <- list( + list(1.345, "Huber"), + list(1.8, "Huber"), + list(cG, "GGW"), + list(c(2,4,8), "Hampel"), + list(c(1.5,3.5,8)*0.90, "Hampel"), + list(par=c(-.5,1.5,.95,NA), "lqq"), + list(bcs=c(1, 1, 1.25), "lqq"), + list(1.1, "optimal"), + list(0.1, "optimal"), + list(2.3, "Welsh") + ) > > for(c.psi in c.psi.list) { + tPar <- c.psi[[1]]; psi <- c.psi[[2]] + stopifnot(is.numeric(tPar), is.character(psi)) + cat("Psi function ", psi,"; tuning par. c[]= (", + paste(formatC(tPar, width=1), collapse=", "),")\n") + for(FUN in list(Mpsi, Mchi, Mwgt)) + stopifnot(identical(d0, FUN(d0, tPar, psi=psi)), + identical(NN, FUN(NN, tPar, psi=psi))) + stopifnot(identical(c(0,1,0), Mwgt(IoI, tPar,psi=psi))) + if(isPsi.redesc(psi)) + stopifnot(identical(c(0,0,0), Mpsi(IoI, tPar,psi=psi)), + identical(c(1,0,1), Mchi(IoI, tPar,psi=psi))) + else if(psi == "Huber") { + stopifnot(identical(c(-tPar,0,tPar), Mpsi(IoI, tPar,psi=psi)), + identical(c( Inf,0, Inf), Mchi(IoI, tPar,psi=psi))) + } + cat("chkPsi..(): ") + isHH <- psi %in% c("Huber", "Hampel") # not differentiable + tol <- switch(tolower(psi), + "huber"=, "hampel"= c(.001, 1.0), + "optimal" = .008, + "ggw" = c(5e-5, 5e-3, 1e-12), + "lqq" = c(1e-5, 5e-5, 1e-5, .08)) # .08 needed for bcs=c(1, 1, 1.25) + if(is.null(tol)) tol <- 1e-4 # default otherwise + cc <- chkPsi..(c(-5, 10), psi=psi, par=tPar, doD2 = !isHH, tol=tol) + ## -------- + cc. <- cc[!is.na(cc)] + if(is.logical(cc) && all(cc.)) + cat(" [Ok]\n") + else { + cat(" not all Ok:\n") + print(cc.[cc. != "TRUE"]) + } + cat("------------------------\n\n") + } Psi function Huber ; tuning par. c[]= ( 1.345 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function Huber ; tuning par. c[]= ( 1.8 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function GGW ; tuning par. c[]= ( -0.5, 1, 0.95, NA ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function Hampel ; tuning par. c[]= ( 2, 4, 8 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function Hampel ; tuning par. 
c[]= ( 1.35, 3.15, 7.2 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function lqq ; tuning par. c[]= ( -0.5, 1.5, 0.95, NA ) chkPsi..(): [Ok] ------------------------ Psi function lqq ; tuning par. c[]= ( 1, 1, 1.25 ) chkPsi..(): [Ok] ------------------------ Psi function optimal ; tuning par. c[]= ( 1.1 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function optimal ; tuning par. c[]= ( 0.1 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ Psi function Welsh ; tuning par. c[]= ( 2.3 ) chkPsi..(): Not checking psi''() := Mpsi(*, deriv=2) [Ok] ------------------------ > > > ## Nice plots -- and check derivatives ---- > > head(x. <- seq(-5, 10, length=1501)) [1] -5.00 -4.99 -4.98 -4.97 -4.96 -4.95 > ## [separate lines, for interactive "play": ] > stopifnot(chkPsiDeriv(p.psiFun(x., "LQQ", par=c(-.5,1.5,.95,NA)))) > stopifnot(chkPsiDeriv(p.psiFun(x., "GGW", par= cG))) > stopifnot(chkPsiDeriv(p.psiFun(x., "optimal", par=2))) > stopifnot(chkPsiDeriv(p.psiFun(x., "Hampel", + par = ## Default, but rounded: + round(c(1.5, 3.5, 8) * 0.9016085, 1)), + tol = 1e-3)) > > stopifnot(chkPsiDeriv(p.psiFun(x., "biweight", par = 4))) > stopifnot(chkPsiDeriv(p.psiFun(x., "Welsh", par = 1.5))) > stopifnot(chkPsiDeriv(p.psiFun(x., "huber", par = 1.5), + tol = c(1e-10, 5e-3))) > ## "huber"-rho via Mpsi(*, deriv=-1) was badly wrong till 2018-06 > > ## The same 6, all in one plot: > op <- par(mfrow=c(3,2), mgp = c(1.5, .6, 0), mar = .1+c(3,3,2,.5)) > p.psiFun2(x., "LQQ", par=c(-.5,1.5,.95,NA)) > p.psiFun2(x., "GGW", par= cG) > p.psiFun2(x., "optimal", par=1.3) > p.psiFun2(x., "Hampel", par = round(c(1.5, 3.5, 8) * 0.9016085, 1)) > p.psiFun2(x., "biweight", par = 4) > p.psiFun2(x., "Welsh", par = 1.5) > par(op) > > > ### (2) Test them as arguments of lmrob() or lmrob.control(): ----- > > data(aircraft) > > set.seed(1) > summary(mp0 <- lmrob(Y ~ ., data = aircraft, psi = 'bisquare', method = 'SMDM')) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "bisquare") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -8.5552 -1.8395 -0.2113 2.8205 46.6311 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 6.8785690 6.5321336 1.053 0.306256 X1 -3.2192206 1.0907887 -2.951 0.008543 ** X2 1.5876658 0.7442079 2.133 0.046912 * X3 0.0018266 0.0004293 4.255 0.000477 *** X4 -0.0008677 0.0003685 -2.355 0.030083 * --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.335 Multiple R-squared: 0.7958, Adjusted R-squared: 0.7504 Convergence in 22 IRWLS iterations Robustness weights: observation 22 is an outlier with |weight| = 0 ( < 0.0043); 3 weights are ~= 1. The remaining 19 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 
0.3958 0.8772 0.9738 0.9139 0.9892 0.9972 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 4.348e-03 eps.x warn.limit.reject warn.limit.meanrw 8.399e-08 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(2) > summary(mp1 <- update(mp0, psi = 'optimal')) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "optimal") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -6.6691 -2.4291 0.2249 3.8876 54.2841 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 9.5007403 5.5576768 1.709 0.10455 X1 -3.0487969 0.9158751 -3.329 0.00374 ** X2 1.2100330 0.6469186 1.870 0.07777 . X3 0.0013810 0.0003910 3.532 0.00238 ** X4 -0.0005549 0.0003269 -1.697 0.10687 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 4.545 Multiple R-squared: 0.8159, Adjusted R-squared: 0.775 Convergence in 1 IRWLS iterations Robustness weights: 2 observations c(16,22) are outliers with |weight| = 0 ( < 0.0043); 21 weights are ~= 1. Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 4.047e-01 5.000e-01 1.060e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 4.348e-03 eps.x warn.limit.reject warn.limit.meanrw 8.399e-08 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "optimal" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(3) > summary(mp2 <- update(mp0, psi = 'ggw')) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "ggw") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -8.4418 -1.7993 -0.1711 2.8466 47.0906 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 6.8192703 6.5041383 1.048 0.30831 X1 -3.1718079 1.0869559 -2.918 0.00918 ** X2 1.5705706 0.7510236 2.091 0.05096 . X3 0.0017983 0.0004300 4.182 0.00056 *** X4 -0.0008434 0.0003691 -2.285 0.03466 * --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.367 Multiple R-squared: 0.7942, Adjusted R-squared: 0.7484 Convergence in 20 IRWLS iterations Robustness weights: observation 22 is an outlier with |weight| <= 0.00044 ( < 0.0043); 16 weights are ~= 1. The remaining 6 ones are 3 4 12 16 17 19 0.9892 0.9891 0.8770 0.4139 0.9796 0.9839 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 tuning.chi4 -5.000e-01 1.500e+00 NA 5.000e-01 bb tuning.psi1 tuning.psi2 tuning.psi3 5.000e-01 -5.000e-01 1.500e+00 9.500e-01 tuning.psi4 refine.tol rel.tol scale.tol NA 1.000e-07 1.000e-07 1.000e-10 solve.tol eps.outlier eps.x warn.limit.reject 1.000e-07 4.348e-03 8.399e-08 5.000e-01 warn.limit.meanrw 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "ggw" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(4) > summary(mp3 <- update(mp0, psi = 'welsh')) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "welsh") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -8.7243 -1.9199 -0.2471 2.8060 45.9435 Coefficients: Estimate Std. 
Error t value Pr(>|t|) (Intercept) 6.6404575 6.5552085 1.013 0.324482 X1 -3.2329194 1.0954988 -2.951 0.008546 ** X2 1.6174887 0.7443222 2.173 0.043367 * X3 0.0018656 0.0004279 4.360 0.000378 *** X4 -0.0008941 0.0003680 -2.430 0.025803 * --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.408 Multiple R-squared: 0.7958, Adjusted R-squared: 0.7504 Convergence in 18 IRWLS iterations Robustness weights: observation 22 is an outlier with |weight| <= 0.0003 ( < 0.0043); 2 weights are ~= 1. The remaining 20 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.4284 0.8583 0.9701 0.9112 0.9874 0.9985 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 5.774e-01 5.000e-01 2.110e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 4.348e-03 eps.x warn.limit.reject warn.limit.meanrw 8.399e-08 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "welsh" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(5) > summary(mp4 <- update(mp0, psi = 'ggw', tuning.psi = c(-.5, 1.5, 0.85, NA), + tuning.chi = c(-0.5, 1.5, NA, 0.5))) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "ggw", tuning.psi = c(-0.5, 1.5, 0.85, NA), tuning.chi = c(-0.5, 1.5, NA, 0.5)) \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -7.2207 -2.2226 0.3446 3.5745 52.2885 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 9.6540916 6.6414237 1.454 0.16327 X1 -3.2353135 1.0947329 -2.955 0.00847 ** X2 1.3343505 0.7636515 1.747 0.09762 . X3 0.0015256 0.0004619 3.303 0.00395 ** X4 -0.0006913 0.0003903 -1.771 0.09343 . --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.117 Multiple R-squared: 0.7832, Adjusted R-squared: 0.7351 Convergence in 15 IRWLS iterations Robustness weights: observation 22 is an outlier with |weight| <= 2.8e-08 ( < 0.0043); 15 weights are ~= 1. The remaining 7 ones are 3 4 12 16 17 19 23 0.87262 0.79602 0.73029 0.06024 0.96761 0.73117 0.97769 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 tuning.chi4 -5.000e-01 1.500e+00 NA 5.000e-01 bb tuning.psi1 tuning.psi2 tuning.psi3 5.000e-01 -5.000e-01 1.500e+00 8.500e-01 tuning.psi4 refine.tol rel.tol scale.tol NA 1.000e-07 1.000e-07 1.000e-10 solve.tol eps.outlier eps.x warn.limit.reject 1.000e-07 4.348e-03 8.399e-08 5.000e-01 warn.limit.meanrw 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "ggw" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(6) > summary(mp5 <- update(mp0, psi = 'ggw', + tuning.psi = c(-0.5, 1.0, 0.95, NA), + tuning.chi = c(-0.5, 1.0, NA, 0.5))) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "ggw", tuning.psi = c(-0.5, 1, 0.95, NA), tuning.chi = c(-0.5, 1, NA, 0.5)) \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -8.4182 -1.7447 -0.1322 2.8735 47.0376 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 6.7557666 6.0919101 1.109 0.282039 X1 -3.1767976 1.0196958 -3.115 0.005974 ** X2 1.5756461 0.7050185 2.235 0.038339 * X3 0.0018004 0.0004003 4.497 0.000279 *** X4 -0.0008432 0.0003446 -2.447 0.024897 * --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 Robust residual standard error: 5.502 Multiple R-squared: 0.7941, Adjusted R-squared: 0.7484 Convergence in 19 IRWLS iterations Robustness weights: 21 weights are ~= 1. The remaining 2 ones are 16 22 0.423706 0.005042 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 tuning.chi4 -5.000e-01 1.000e+00 NA 5.000e-01 bb tuning.psi1 tuning.psi2 tuning.psi3 5.000e-01 -5.000e-01 1.000e+00 9.500e-01 tuning.psi4 refine.tol rel.tol scale.tol NA 1.000e-07 1.000e-07 1.000e-10 solve.tol eps.outlier eps.x warn.limit.reject 1.000e-07 4.348e-03 8.399e-08 5.000e-01 warn.limit.meanrw 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "ggw" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(7) > summary(mp6 <- update(mp0, psi = 'hampel')) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "hampel") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -8.706 -1.937 -0.234 2.825 46.037 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 6.4297956 6.7818616 0.948 0.35564 X1 -3.1885813 1.1366401 -2.805 0.01170 * X2 1.6224243 0.7839018 2.070 0.05315 . X3 0.0018590 0.0004445 4.182 0.00056 *** X4 -0.0008851 0.0003832 -2.310 0.03295 * --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.475 Multiple R-squared: 0.7946, Adjusted R-squared: 0.7489 Convergence in 11 IRWLS iterations Robustness weights: observation 22 is an outlier with |weight| = 0 ( < 0.0043); 20 weights are ~= 1. The remaining 2 ones are 12 16 0.8504 0.4975 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 bb 3.179e-01 7.417e-01 1.695e+00 5.000e-01 tuning.psi1 tuning.psi2 tuning.psi3 refine.tol 1.352e+00 3.156e+00 7.213e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 4.348e-03 eps.x warn.limit.reject warn.limit.meanrw 8.399e-08 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "hampel" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(8) > ctr7 <- lmrob.control(psi = 'ggw', + tuning.psi = c(-0.3, 1.4, 0.95, NA), + tuning.chi = c(-0.3, 1.4, NA, 0.5)) > ctr7$tuning.psi ## -> "constants" [1] -0.30 1.40 0.95 NA attr(,"constants") [1] 0.0000000 2.0011562 1.4000000 0.4125717 5.6874488 > ctr7$tuning.chi [1] -0.3 1.4 NA 0.5 attr(,"constants") [1] 0.00000000 0.24044569 1.40000000 0.09081713 0.27558437 > summary(mp7 <-lmrob(Y ~ ., data = aircraft, control = ctr7)) # *not* converging in k.max=200 Call: lmrob(formula = Y ~ ., data = aircraft, control = ctr7) \--> method = "S" Residuals: Min 1Q Median 3Q Max -7.6919 -1.9269 0.1767 3.7081 48.5801 Algorithm did not converge Coefficients of the *initial* S-estimator: Estimate Std. Error t value Pr(>|t|) (Intercept) 13.155499 NA NA NA X1 -4.349383 NA NA NA X2 1.647243 NA NA NA X3 0.001817 NA NA NA X4 -0.001035 NA NA NA Robustness weights: 2 observations c(16,22) are outliers with |weight| <= 0.0003 ( < 0.0043); 4 weights are ~= 1. The remaining 17 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 
0.03668 0.20120 0.58420 0.52290 0.71930 0.99110 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 tuning.chi4 -3.000e-01 1.400e+00 NA 5.000e-01 bb tuning.psi1 tuning.psi2 tuning.psi3 5.000e-01 -3.000e-01 1.400e+00 9.500e-01 tuning.psi4 refine.tol rel.tol scale.tol NA 1.000e-07 1.000e-07 1.000e-10 solve.tol eps.outlier eps.x warn.limit.reject 1.000e-07 4.348e-03 8.399e-08 5.000e-01 warn.limit.meanrw 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "ggw" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) Warning message: In lmrob.S(x, y, control = control) : S refinements did not converge (to refine.tol=1e-07) in 200 (= k.max) steps > > set.seed(9) > summary(mp8 <- update(mp0, psi = 'lqq')) Call: lmrob(formula = Y ~ ., data = aircraft, method = "SMDM", psi = "lqq") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -8.280 -1.717 -0.138 2.857 47.743 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 7.0858566 6.3506262 1.116 0.279194 X1 -3.1657682 1.0600204 -2.987 0.007914 ** X2 1.5402736 0.7336570 2.099 0.050145 . X3 0.0017612 0.0004222 4.171 0.000574 *** X4 -0.0008188 0.0003616 -2.265 0.036118 * --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.319 Multiple R-squared: 0.7944, Adjusted R-squared: 0.7487 Convergence in 19 IRWLS iterations Robustness weights: observation 22 is an outlier with |weight| = 0 ( < 0.0043); 16 weights are ~= 1. The remaining 6 ones are 3 4 12 16 17 19 0.9861 0.9842 0.8921 0.3720 0.9820 0.9782 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 tuning.chi4 -5.000e-01 1.500e+00 NA 5.000e-01 bb tuning.psi1 tuning.psi2 tuning.psi3 5.000e-01 -5.000e-01 1.500e+00 9.500e-01 tuning.psi4 refine.tol rel.tol scale.tol NA 1.000e-07 1.000e-07 1.000e-10 solve.tol eps.outlier eps.x warn.limit.reject 1.000e-07 4.348e-03 8.399e-08 5.000e-01 warn.limit.meanrw 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "lqq" "nonsingular" ".vcov.w" compute.outlier.stats "SMDM" seed : int(0) > > set.seed(10) ## c(.) drops attributes : > ctr9 <- lmrob.control(psi = 'lqq', tuning.psi = c(ctr7$tuning.psi), tuning.chi = c(ctr7$tuning.chi)) > ctr9$tuning.psi [1] -0.30 1.40 0.95 NA attr(,"constants") [1] 1.3007171 0.9290836 1.3000000 > ctr9$tuning.chi [1] -0.3 1.4 NA 0.5 attr(,"constants") [1] 0.2763568 0.1973977 1.3000000 > ## Confirm these constants above (against the ones we got earlier) > ## by recomputing them using higher accuracy : > (tpsi. <- do.call(.psi.lqq.findc, c(ctr9$tuning.psi, list(rel.tol=1e-11, tol=1e-8)))) [1] 1.3007495 0.9291068 1.3000000 > (tchi. 
<- do.call(.psi.lqq.findc, c(ctr9$tuning.chi, list(rel.tol=1e-11, tol=1e-8)))) [1] 0.2763425 0.1973875 1.3000000 > (tol4 <- .Machine$double.eps^.25) [1] 0.0001220703 > > Rver <- getRversion() > integr.bug <- "2.12.0" <= Rver && Rver <= "3.0.1" > integr.bug [1] FALSE > if(integr.bug) tol4 <- 8*tol4 > > assert.EQ(attr(ctr9$tuning.psi, "constants"), tpsi., tol=tol4, giveRE=TRUE) Mean relative difference: 2.495013e-05 > assert.EQ(attr(ctr9$tuning.chi, "constants"), tchi., tol=tol4, giveRE=TRUE) Mean relative difference: 5.155651e-05 > > summary(mp9 <- lmrob(Y ~ ., data = aircraft, control = ctr9)) Call: lmrob(formula = Y ~ ., data = aircraft, control = ctr9) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -10.4061 -2.6517 -0.4156 3.7945 38.6444 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 3.5003005 12.9625202 0.270 0.79021 X1 -3.2953770 0.9467913 -3.481 0.00267 ** X2 1.8957842 0.9928099 1.910 0.07227 . X3 0.0022793 0.0014340 1.589 0.12936 X4 -0.0011563 0.0008966 -1.290 0.21347 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 6.127 Multiple R-squared: 0.7973, Adjusted R-squared: 0.7523 Convergence in 33 IRWLS iterations Robustness weights: 17 weights are ~= 1. The remaining 6 ones are 3 4 12 16 17 22 0.97698 0.99840 0.82584 0.78662 0.91318 0.06838 Algorithmic parameters: tuning.chi1 tuning.chi2 tuning.chi3 tuning.chi4 -3.000e-01 1.400e+00 NA 5.000e-01 bb tuning.psi1 tuning.psi2 tuning.psi3 5.000e-01 -3.000e-01 1.400e+00 9.500e-01 tuning.psi4 refine.tol rel.tol scale.tol NA 1.000e-07 1.000e-07 1.000e-10 solve.tol eps.outlier eps.x warn.limit.reject 1.000e-07 4.348e-03 8.399e-08 5.000e-01 warn.limit.meanrw 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "lqq" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) > > > cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' Time elapsed: 1.616 0.136 1.756 0.001 0.001 > > proc.time() user system elapsed 1.617 0.137 1.756 robustbase/tests/mc-etc.R0000644000176200001440000000670613325654361015101 0ustar liggesusers #### Testing medcouple and related functions ### Strict (and timing) tests are in ./mc-strict.R ### ~~~~~~~~~~~~ ### Here, we produce output which is *compared* with ./mc-etc.Rout.save library(robustbase) source(system.file("xtraR/mcnaive.R", package = "robustbase"))# mcNaive() ## This is somewhat interesting {diff the output !} ## particularly since *most* give the 'not found' diagnostic set.seed(17) for(n in 1:100) { cat(sprintf("n =%3d:\n------\n", n)) mcval <- mc(rlnorm(n), trace=TRUE, doRefl=FALSE) cat(sprintf(" --> mc(rlnorm(%d)) = %.6f\n", n, mcval)) } allEQ <- function(x,y) all.equal(x,y, tolerance = 1e-12) x3 <- c(-2, rep(-1,4), rep(0,6), 2, 2, 2:4) mcNaive(x3,"h.use") # 1/3 mcNaive(x3,"simple")# 0 mcComp <- robustbase:::mcComp mcComp. <- function (x, doScale, doReflect = FALSE, maxit = 15, eps1 = 1e-13, eps2 = eps1, trace.lev = 1, ...) { mcComp(x, doReflect=doReflect, doScale=doScale, maxit=maxit, eps1=eps1, eps2=eps2, trace.lev=trace.lev, ...) 
} try( mc(x3, doRefl = FALSE, maxit = 15, trace = 3)) ## "non-convergence" (32-bit) str(mcComp.(-x3, doScale=TRUE, trace = 4)) ### And here is the "real" problem of the whole 'eps' idea: x4 <- c(1:5,7,10,15,25, 1e15) ## this is also in mc-strict.R (but differently) mcNaive(x4,"h.use") # 0.5833333 mcNaive(x4,"simple")# == " == 7/12 mc(x4) # now ok, == 7/12 str(mcComp.( x4, doScale=TRUE, trace= 3))## = 0: conv.quickly str(mcComp.(-x4, doScale=TRUE, trace= 3)) # *not* conv! if(FALSE) { ## a much more extreme eps seems the cure: str(mcComp.( x4, doScale=TRUE, eps1=.Machine$double.xmin)) str(mcComp.(-x4, doScale=TRUE, eps1=.Machine$double.xmin)) } ### Examples "like x3" (non-convergence on 32-bit) xClist <- list(## length 5 : c(0,0, 1, 3,3), c(0,0, 1, 3:4), ## ## length 6 : c(0,0, 2, 4:6), c(0,0, 2, 3, 4, 6), c(0,0, 4, 5, 7, 8), c(0, 1,1, 2, 6,6), c(0, 3,3, 4, 6,6), c(0,0, 1, 3, 5,5), c(0,0, 1, 4,4, 6), c(0,0, 1, 4,4, 7), c(0,0, 1, 5,5, 6), ## n = 9 : c(-2,-2,-2, -1,-1, 1,1,1, 3), c(-3,-1,-1, 0, 1, 2,2,2,2) ) rlis <- lapply(xClist, function(x) try(mc(x, maxit=9), silent=TRUE)) table(sapply(rlis, class)) ## if(R.version$arch == "x86_64") { print(unlist(rlis)) rl2 <- lapply(xClist, mc, maxit=9) ##, eps1= 1e-10) stopifnot(allEQ(rlis, rl2), allEQ(unlist(rlis), sapply(xClist, mcNaive))) ##} set.seed(47) for(n in 3:60) { cat(" ") x <- round(2 * rnorm(n)) # many ties, often at median -- not converging ## if(R.version$arch == "x86_64") { ## non-convergence BUG rarely and only on 32-bit (solved, MK) mc1 <- mc(x) mc2 <- mcNaive(x, method = "simple") mc3 <- mcNaive(x, method = "h.use") stopifnot(allEQ(mc1, mc3)) if(mc2 != mc3) { cat("d"); if(!isTRUE(allEQ(mc2, mc3))) cat("!!") } ## } cat(".") }; cat("\n") cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' quit('no') ## ------ ## Find short example of non-convergence (32-bit) --> for above xClist n <- 9 for(ii in 1:100) { x <- round(2 * rnorm(n)) # many ties, often at median -- not converging mc1 <- mc(x) } ## x5 <- c(-3, -3, -2, -1, -1, 0, 0, 1, 2, 2, 3, 4) x6 <- c(-5, -2, -1, -1, -1, 0, 0, 0, 2, 2, 2, 4) robustbase/tests/m-s-estimator.R0000644000176200001440000001400113244325717016415 0ustar liggesusers## Test implementation of M-S estimator require(robustbase) source(system.file("xtraR/m-s_fns.R", package = "robustbase", mustWork=TRUE)) source(system.file("xtraR/ex-funs.R", package = "robustbase", mustWork=TRUE)) source(system.file("test-tools-1.R", package = "Matrix", mustWork=TRUE))# assert.EQ ## dataset with factors and continuous variables: data(education) education <- within(education, Region <- factor(Region)) ## for testing purposes: education2 <- within(education, Group <- factor(rep(1:3, length.out=length(Region)))) ## Test splitFrame (type fii is the only problematic type) testFun <- function(formula, x1.idx) { obj <- lm(formula, education2) mf <- obj$model ret <- splitFrame(mf, type="fii") if (missing(x1.idx)) { print(ret$x1.idx) return(which(unname(ret$x1.idx))) } stopifnot(identical(x1.idx, which(unname(ret$x1.idx)))) } testFun(Y ~ 1, integer(0)) testFun(Y ~ X1*X2*X3, integer(0)) testFun(Y ~ Region + X1 + X2 + X3, 1:4) testFun(Y ~ 0 + Region + X1 + X2 + X3, 1:4) testFun(Y ~ Region*X1 + X2 + X3, c(1:5, 8:10)) testFun(Y ~ Region*X1 + X2 + X3 + Region*Group, c(1:5, 8:18)) testFun(Y ~ Region*X1 + X2 + X3 + Region*Group*X2, c(1:6, 8:29)) testFun(Y ~ Region*X1 + X2 + Region*Group*X2, 1:28) testFun(Y ~ Region*X1 + X2 + Region:Group:X2, 1:21) testFun(Y ~ Region*X1 + X2*X3 + Region:Group:X2, c(1:6, 8:10, 12:23)) testFun(Y ~ 
(X1+X2+X3+Region)^2, c(1:7,10:12,14:19)) testFun(Y ~ (X1+X2+X3+Region)^3, c(1:19, 21:29)) testFun(Y ~ (X1+X2+X3+Region)^4, 1:32) testFun(Y ~ Region:X1:X2 + X1*X2, c(1:1, 4:7)) control <- lmrob.control() f.lm <- lm(Y ~ Region + X1 + X2 + X3, education) splt <- splitFrame(f.lm$model) y <- education$Y ## test orthogonalizing x1 <- splt$x1 x2 <- splt$x2 tmp <- lmrob.lar(x1, y, control) y.tilde <- tmp$resid t1 <- tmp$coef x2.tilde <- x2 T2 <- matrix(0, nrow=ncol(x1), ncol=ncol(x2)) for (i in 1:ncol(x2)) { tmp <- lmrob.lar(x1, x2[,i], control) x2.tilde[,i] <- tmp$resid T2[,i] <- tmp$coef } set.seed(10) mss1 <- m_s_subsample(x1, x2.tilde, y.tilde, control, orth = FALSE) mss1 <- within(mss1, b1 <- drop(t1 + b1 - T2 %*% b2)) set.seed(10) mss2 <- m_s_subsample(x1, x2, y, control, orth = TRUE) stopifnot(all.equal(mss1, mss2)) res <- vector("list", 100) set.seed(0) time <- system.time(for (i in seq_along(res)) { tmp <- m_s_subsample(x1, x2.tilde, y.tilde, control, FALSE) res[[i]] <- unlist(within(tmp, b1 <- drop(t1 + b1 - T2 %*% b2))) }) cat('Time elapsed in subsampling: ', time,'\n') ## show a summary of the results {"FIXME": output is platform dependent} summary(res1 <- do.call(rbind, res)) ## compare with fast S solution fmS <- lmrob(Y ~ Region + X1 + X2 + X3, education, init="S") coef(fmS) fmS$scale ### Comparing m-s_descent implementations() {our C and R} : ------------------- ctrl <- control #ctrl$trace.lev <- 5 ctrl$k.max <- 1 mC <- m_s_descent (x1, x2, y, ctrl, mss2$b1, mss2$b2, mss2$scale+10) mR <- m_s_descent_Ronly(x1, x2, y, ctrl, mss2$b1, mss2$b2, mss2$scale+10) nm <- c("b1","b2", "scale", "res") stopifnot(all.equal(mC[nm], mR[nm], check.attributes = FALSE, tolerance = 4e-14)) # seen 5.567e-15 in OpenBLAS ^^^^^ ## control$k.m_s <- 100 res3 <- vector("list", 100) time <- system.time(for (i in seq_along(res3)) { ri <- res[[i]] res3[[i]] <- unlist(m_s_descent(x1, x2, y, control, ri[1:4], ri[5:7], ri[8])) }) cat('Time elapsed in descent proc: ', time,'\n') ## show a summary of the results {"FIXME": output is platform dependent} res4 <- do.call(rbind, res3) summary(res4[,1:8]) stopifnot(all.equal( # 'test', not only plot: res1[, "scale"], res4[,"scale"], tol = 0.03), res1[, "scale"] >= res4[,"scale"] - 1e-7 ) # 1e-7 just in case plot(res1[, "scale"], res4[,"scale"]) abline(0,1, col=adjustcolor("gray", 0.5)) ## Test lmrob.M.S x <- model.matrix(fmS) control$trace.lev <- 3 ## --------- -- set.seed(1003) fMS <- lmrob.M.S(x, y, control, fmS$model) resid <- drop(y - x %*% fMS$coef) assert.EQ(resid, fMS$resid, check.attributes=FALSE, tol = 1e-12) ## Test direct call to lmrob ## 1. 
trace_lev output: set.seed(17) fMS <- lmrob(Y ~ Region + X1 + X2 + X3, education, init = "M-S", trace.lev=2) set.seed(13) fiMS <- lmrob(Y ~ Region + X1 + X2 + X3, education, init = "M-S") out2 <- capture.output(summary(fiMS)) writeLines(out2) set.seed(13) fiM.S <- lmrob(Y ~ Region + X1 + X2 + X3, education, init=lmrob.M.S) out3 <- capture.output(summary(fiM.S)) ## must be the same {apart from the "init=" in the call}: i <- 3 stopifnot(identical(out2[-i], out3[-i])) ## the difference: c(rbind(out2[i], out3[i])) ### "Skipping design matrix equilibration" warning can arise for reasonable designs ----- set.seed(1) x2 <- matrix(rnorm(2*30), 30, 2) data <- data.frame(y = rnorm(30), group = rep(letters[1:3], each=10), x2) obj <- lmrob(y ~ ., data, init="M-S", trace.lev=1) ## illustration: the zero row is introduced during the orthogonalization of x2 wrt x1 ## l1 regression always produces p zero residuals ## by chance, the zero residuals of multiple columns happen to be on the same row sf <- splitFrame(obj$model) x1 <- sf$x1 x2 <- sf$x2 control <- obj$control ## orthogonalize x2.tilde <- x2 for(i in 1:ncol(x2)) { tmp <- lmrob.lar(x1, x2[,i], control) x2.tilde[,i] <- tmp$resid } x2.tilde == 0 ## Specifying init="M-S" for a model without categorical variables ## used to cause a segfault; now uses "S" lmrob(LNOx ~ LNOxEm, NOxEmissions[1:10,], init="M-S") ## Now an ANOVA model with *only* categorical variables n <- 64 # multiple of 16 stopifnot(n %% 16 == 0) d.AOV <- data.frame(y = round(100*rnorm(64)), A=gl(4,n/4), B=gl(2,8, n), C=gl(2,4,n)) fm <- lmrob(y ~ A*B*C, data = d.AOV, init = "M-S", trace.lev=2) ## lmrob_M_S(n = 64, nRes = 500, (p1,p2)=(16,0), (orth,subs,desc)=(1,1,1)) ## Starting subsampling procedure.. Error in lmrob.M.S(x, y, control, mf) : ## 'Calloc' could not allocate memory (18446744073709551616 of 4 bytes) ## BTW: Can we compute an M-estimate (instead of MM-*) as we ## --- cannot have any x-outliers in such an ANOVA! robustbase/tests/wgt-himed.Rout.save0000644000176200001440000001072414004270422017261 0ustar liggesusers R version 4.0.3 Patched (2021-01-08 r79818) -- "Bunny-Wunnies Freak Out" Copyright (C) 2021 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > himed <- function(x) { n2 <- 1 + length(x) %/% 2; sort(x, partial = n2)[n2] } > > ## Tolerance 2e-7 {13 * higher than default 1.49e-8 } > is.all.equal <- function(x,y, tol = 2e-7) { + is.logical(r <- all.equal(x,y, tolerance = tol)) && r } > > > library(robustbase) > > stopifnot(is.na(wgt.himedian(numeric()))) > ## hi-median() seg.faulted or inf.looped till Jan.3, 2021 > > options(digits = 7)# single precision! 
> set.seed(15) > > cat(" n | range(x) | wgt.Himed\n", + "------------------------------\n",sep="") n | range(x) | wgt.Himed ------------------------------ > for(i in 1:100) { + n <- rpois(1, lam = 10) + cat(formatC(n,wid=3)," ") + x <- round(rnorm(n),3) + iw <- 1 + rpois(n, lam = 2) + him <- himed(rep(x, iw)) ## == naive R solution + whim <- wgt.himedian (x, iw) + if(!is.all.equal(whim, him)) + cat("whim != him: ", whim, "!=", him,"\n") + cat(formatC(range(x), wid = 6, flag="-"), "", + formatC(whim, wid = 6, flag="+"), "\n") + } 10 -1.255 1.831 +0.488 9 -1.167 1.412 +0.032 18 -2.253 1.611 +0.471 10 -1.589 0.754 +0.004 11 0.2 1.575 +0.712 16 -2.587 1.552 -0.04 8 -1.499 1.055 +0.041 15 -1.069 1.966 +0.621 15 -1.829 1.193 -0.177 13 -2.088 1.33 +0.022 15 -1.905 2.1 +0.104 18 -1.827 1.43 -0.338 15 -1.494 1.409 +0.378 6 -1.266 1.263 -0.162 11 -1.243 1.458 +0.227 9 -1.609 1.146 +0.609 6 -0.898 1.625 -0.659 14 -1.379 1.898 -0.123 6 -1.928 0.656 -0.44 9 -1.286 2.61 +0.442 8 -1.196 1.169 -0.376 5 -1.871 0.875 +0.004 10 -1.618 1.87 -0.023 11 -0.979 1.337 +0.156 6 -0.357 1.723 +0.519 8 -2.566 2.334 -0.416 14 -1.909 2.04 -0.051 15 -2.229 2.505 +0.377 9 -0.409 1.553 +0.452 7 -0.924 1.009 -0.645 13 -2.14 0.711 -0.348 9 -1.697 1.601 +0.369 12 -2.227 1.793 +0.508 14 -2.308 1.808 +0.444 11 -1.85 3.437 +0.285 9 -1.312 0.601 +0.017 7 -1.792 0.005 -0.247 10 -2.178 1.51 -0.905 10 -1.121 1.464 -0.133 8 -0.246 1.299 +0.885 14 -1.666 2.306 +0.234 9 -0.752 2.056 +0.151 13 -0.472 1.625 -0.055 6 -1.795 0.449 +0.122 10 -2.023 2.992 +0.141 8 -1.265 1.476 +0.083 11 -1.715 0.966 +0.137 8 -1.943 0.374 -0.215 8 -2.377 1.483 +0.029 8 -0.659 2.699 +1.052 8 -0.671 1.426 +0.033 12 -1.462 2.075 -0.13 14 -1.865 1.406 -0.478 6 -0.324 2.014 +1.453 12 -1.519 1.072 -0.106 12 -1.511 1.232 +0.055 14 -0.516 1.865 +0.31 8 -2.402 0.218 -0.319 10 -2.724 0.983 -0.47 5 -1.566 1.034 +0.676 7 -1.98 1.7 -0.002 11 -2.203 1.736 -0.209 8 -1.782 0.435 +0.035 15 -0.835 1.668 +0.166 12 -1.938 0.838 -0.228 8 -1.257 1.542 -0.527 11 -2.394 2.062 -0.007 14 -2.574 2.356 +0.095 10 -1.691 1.387 -0.217 14 -0.601 2.453 +0.951 9 -0.631 0.953 +0.601 9 -1.501 1.146 -0.337 7 -1.826 1.32 +0.621 6 -0.859 0.343 +0.009 4 -1.038 1.396 +1.375 9 -1.325 0.892 +0.399 10 -0.632 1.347 +0.026 4 -0.926 0.666 -0.555 11 -1.67 2.158 -0.355 17 -0.818 1.842 +0.054 6 -1.296 1.066 +0.384 11 -1.494 1.224 -0.222 11 -2.397 2.254 +0.165 11 -1.76 2.115 +0.008 9 -0.542 1.41 +0.412 7 -1.078 1.596 -0.43 9 -1.161 1.527 -0.12 7 0.112 1.656 +0.929 13 -1.175 2.39 +0.627 11 -1.177 1.838 +0.359 8 -1.601 0.792 +0.313 12 -2.57 1.827 +0.019 5 -0.463 0.505 +0.49 11 -2.077 1.694 +0.032 7 -1.139 1.232 -0.141 9 -0.814 1.482 +0.331 5 -0.542 0.405 +0.244 13 -2.127 1.754 -0.424 7 -1.082 1.015 -0.1 11 -0.773 2.253 -0.011 > > proc.time() user system elapsed 0.163 0.034 0.188 robustbase/tests/glmrob-specials.R0000644000176200001440000000151112271657124017000 0ustar liggesuserslibrary(robustbase) ## Model without coefficients [ print.glmrob() tests for this ..] 
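## FYI, a small illustration of what "without coefficients" means here:
## a '~ 0' formula gives a 0-column model matrix, so coef() has length 0
## (uses a throw-away data frame; illustrative only):
stopifnot(ncol(model.matrix(~ 0, data.frame(z = 1:5))) == 0)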
### very simple model [with outliers] set.seed(1) y <- rpois(1000, lambda = 4) ## without outliers m0o <- glm(y ~ 0, family = poisson, epsilon = 1e-12) m1o <- glm(y ~ 1, family = poisson, epsilon = 1e-12) y[1:3] <- 99:101 # outliers m0 <- glm(y ~ 0, family = poisson, epsilon = 1e-12) m1 <- glm(y ~ 1, family = poisson, epsilon = 1e-12) ## these both failed in version 0.1-2: rm0 <- glmrob(y ~ 0, family = poisson, acc = 1e-12) rm1 <- glmrob(y ~ 1, family = poisson, acc = 1e-12) rm0 rm1 (s0 <- summary(rm0)) (s1 <- summary(rm1)) str(s1) stopifnot(all.equal(c(coef(s1)), c(1.390672035557, 0.016213613600955, 85.77187478275, 0), tolerance = 1e-13))# 32-b: 4.7e-15 cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/lmrob-methods.Rout.save0000644000176200001440000003632413772570740020175 0ustar liggesusers R version 4.0.3 Patched (2020-12-26 r79698) -- "Bunny-Wunnies Freak Out" Copyright (C) 2020 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > ### tests methods argument of lmrob.control > > library(robustbase) > > data(stackloss) > > ## S > set.seed(0) > summary(m0 <- lmrob(stack.loss ~ ., data = stackloss, method = "S", + compute.outlier.stats = "S")) Call: lmrob(formula = stack.loss ~ ., data = stackloss, method = "S", compute.outlier.stats = "S") \--> method = "S" Residuals: Min 1Q Median 3Q Max -9.46226 -0.82076 0.02249 0.80806 8.31829 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -36.92542 5.41708 -6.816 3.0e-06 *** Air.Flow 0.84957 0.07892 10.765 5.2e-09 *** Water.Temp 0.43047 0.19507 2.207 0.0414 * Acid.Conc. -0.07354 0.07216 -1.019 0.3224 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 1.912 Multiple R-squared: 0.9863, Adjusted R-squared: 0.9839 Convergence in IRWLS iterations Robustness weights: 5 observations c(1,3,4,13,21) are outliers with |weight| = 0 ( < 0.0048); one weight is ~= 1. The remaining 15 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.4126 0.7595 0.8726 0.8270 0.9718 0.9986 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 4.762e-03 eps.x warn.limit.reject warn.limit.meanrw 1.692e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.w" compute.outlier.stats "S" seed : int(0) > set.seed(0) > m0a <- lmrob.S(m0$x, stack.loss, lmrob.control()) > > all.equal(m0[c('coefficients', 'scale', 'rweights')], + m0a[c('coefficients', 'scale', 'rweights')]) [1] TRUE > > ## MM > set.seed(0) > summary(m1 <- lmrob(stack.loss ~ ., data = stackloss, method = "MM", + compute.outlier.stats = "S")) Call: lmrob(formula = stack.loss ~ ., data = stackloss, method = "MM", compute.outlier.stats = "S") \--> method = "MM" Residuals: Min 1Q Median 3Q Max -10.50974 -1.43819 -0.09134 1.02503 7.23113 Coefficients: Estimate Std. 
Error t value Pr(>|t|) (Intercept) -41.52462 5.29780 -7.838 4.82e-07 *** Air.Flow 0.93885 0.11743 7.995 3.68e-07 *** Water.Temp 0.57955 0.26296 2.204 0.0416 * Acid.Conc. -0.11292 0.06989 -1.616 0.1246 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 1.912 Multiple R-squared: 0.9593, Adjusted R-squared: 0.9521 Convergence in 17 IRWLS iterations Robustness weights: observation 21 is an outlier with |weight| = 0 ( < 0.0048); 2 weights are ~= 1. The remaining 18 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.1215 0.8757 0.9428 0.8721 0.9797 0.9978 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 4.762e-03 eps.x warn.limit.reject warn.limit.meanrw 1.692e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.avar1" compute.outlier.stats "S" seed : int(0) > > set.seed(0) > m2 <- update(m1, method = "SM") > > all.equal(m1[c('coefficients', 'scale', 'cov')], + m2[c('coefficients', 'scale', 'cov')]) [1] TRUE > > set.seed(0) > m3 <- update(m0, method = "SM", cov = '.vcov.w') > > ## SMD > set.seed(0) > summary(m4 <- lmrob(stack.loss ~ ., data = stackloss, method = "SMD", psi = 'bisquare', + compute.outlier.stats = "S")) Call: lmrob(formula = stack.loss ~ ., data = stackloss, method = "SMD", psi = "bisquare", compute.outlier.stats = "S") \--> method = "SMD" Residuals: Min 1Q Median 3Q Max -10.50974 -1.43819 -0.09134 1.02503 7.23113 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -41.5246 8.9525 -4.638 0.000235 *** Air.Flow 0.9388 0.1175 7.990 3.71e-07 *** Water.Temp 0.5796 0.3199 1.812 0.087756 . Acid.Conc. -0.1129 0.1176 -0.960 0.350512 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 2.651 Multiple R-squared: 0.9593, Adjusted R-squared: 0.9521 Convergence in 17 IRWLS iterations Robustness weights: observation 21 is an outlier with |weight| = 0 ( < 0.0048); 2 weights are ~= 1. The remaining 18 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.1215 0.8757 0.9428 0.8721 0.9797 0.9978 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 4.762e-03 eps.x warn.limit.reject warn.limit.meanrw 1.692e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.w" compute.outlier.stats "S" seed : int(0) > summary(m4a <- lmrob..D..fit(m3)) Call: lmrob(formula = stack.loss ~ ., data = stackloss, method = "SMD", compute.outlier.stats = "S", cov = ".vcov.w") \--> method = "MM" Residuals: Min 1Q Median 3Q Max -10.50974 -1.43819 -0.09134 1.02503 7.23113 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -41.5246 9.3676 -4.433 0.000365 *** Air.Flow 0.9388 0.1230 7.636 6.84e-07 *** Water.Temp 0.5796 0.3348 1.731 0.101505 Acid.Conc. -0.1129 0.1231 -0.917 0.371736 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 Robust residual standard error: 2.651 Multiple R-squared: 0.9593, Adjusted R-squared: 0.9521 Convergence in 17 IRWLS iterations Robustness weights: observation 21 is an outlier with |weight| = 0 ( < 0.0048); 2 weights are ~= 1. The remaining 18 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.1215 0.8757 0.9428 0.8721 0.9797 0.9978 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 4.762e-03 eps.x warn.limit.reject warn.limit.meanrw 1.692e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.w" compute.outlier.stats "S" seed : int(0) > > ## rearrange m4a and update call > m4a <- m4a[names(m4)] > class(m4a) <- class(m4) > m4a$call <- m4$call > > all.equal(m4, m4a, check.environment = FALSE) [1] "Component \"control\": Component \"method\": 1 string mismatch" [2] "Component \"init\": Component \"control\": Component \"method\": 1 string mismatch" [3] "Component \"cov\": Attributes: < Component \"corrfact\": Mean relative difference: 0.1167673 >" [4] "Component \"cov\": Attributes: < Component \"scorr\": Mean relative difference: 0.01959345 >" [5] "Component \"cov\": Mean relative difference: 0.09488594" > > ## SMDM > set.seed(0) > summary(m5 <- lmrob(stack.loss ~ ., data = stackloss, method = "SMDM", psi = 'bisquare', + compute.outlier.stats = "S")) Call: lmrob(formula = stack.loss ~ ., data = stackloss, method = "SMDM", psi = "bisquare", compute.outlier.stats = "S") \--> method = "SMDM" Residuals: Min 1Q Median 3Q Max -9.6746 -1.7721 0.1346 1.2041 6.6080 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -41.9398 9.7719 -4.292 0.000494 *** Air.Flow 0.8747 0.1231 7.107 1.76e-06 *** Water.Temp 0.8099 0.3363 2.408 0.027656 * Acid.Conc. -0.1188 0.1284 -0.926 0.367655 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 2.651 Multiple R-squared: 0.9384, Adjusted R-squared: 0.9275 Convergence in 17 IRWLS iterations Robustness weights: 2 weights are ~= 1. The remaining 19 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.1546 0.9139 0.9597 0.8874 0.9866 0.9966 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 4.762e-03 eps.x warn.limit.reject warn.limit.meanrw 1.692e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.w" compute.outlier.stats "S" seed : int(0) > summary(m5a <- lmrob..M..fit(obj=m4)) Call: lmrob(formula = stack.loss ~ ., data = stackloss, method = "SMDM", psi = "bisquare", compute.outlier.stats = "S") \--> method = "SMD" Residuals: Min 1Q Median 3Q Max -9.6746 -1.7721 0.1346 1.2041 6.6080 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) -41.9398 9.7719 -4.292 0.000494 *** Air.Flow 0.8747 0.1231 7.107 1.76e-06 *** Water.Temp 0.8099 0.3363 2.408 0.027656 * Acid.Conc. -0.1188 0.1284 -0.926 0.367655 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 2.651 Multiple R-squared: 0.9384, Adjusted R-squared: 0.9275 Convergence in 17 IRWLS iterations Robustness weights: 2 weights are ~= 1. 
The remaining 19 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.1546 0.9139 0.9597 0.8874 0.9866 0.9966 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 4.762e-03 eps.x warn.limit.reject warn.limit.meanrw 1.692e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd numpoints 200 0 1000 0 10 fast.s.large.n 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.w" compute.outlier.stats "S" seed : int(0) > > ## rearrange m5a > m5a <- m5a[names(m5)] > class(m5a) <- class(m5) > > all.equal(m5, m5a, check.environment = FALSE) #-> 3 string mismatch [1] "Component \"control\": Component \"method\": 1 string mismatch" [2] "Component \"init\": Component \"control\": Component \"method\": 1 string mismatch" [3] "Component \"init\": Component \"init\": Component \"control\": Component \"method\": 1 string mismatch" > > ## Fast S large n strategy (sped up) > model <- model.frame(LNOx ~ . ,data = NOxEmissions) > control <- lmrob.control(fast.s.large.n = 10, n.group = 341, groups = 2) > set.seed(0) > try(ret <- lmrob.S(model.matrix(model, NOxEmissions)[1:682,], NOxEmissions$LNOx[1:682], control)) Error in lmrob.S(model.matrix(model, NOxEmissions)[1:682, ], NOxEmissions$LNOx[1:682], : Fast S large n strategy failed. Use control parameter 'fast.s.large.n = Inf'. In addition: Warning message: In lmrob.S(model.matrix(model, NOxEmissions)[1:682, ], NOxEmissions$LNOx[1:682], : 'control$n.group' is not much larger than 'p', probably too small > ## do what the error says > control <- lmrob.control(fast.s.large.n = Inf) > try(ret <- lmrob.S(model.matrix(model, NOxEmissions)[1:682,], NOxEmissions$LNOx[1:682], control)) Error in lmrob.S(model.matrix(model, NOxEmissions)[1:682, ], NOxEmissions$LNOx[1:682], : DGEEQU: column 30 of the design matrix is exactly zero. > ##-> Error ...... DGEEQU: column 30 of the design matrix is exactly zero. > ## > ## still fails, but this error is to be expected since only a part > ## of the design matrix is given > > proc.time() user system elapsed 0.249 0.047 0.376 robustbase/tests/lmrob-psifns.R0000644000176200001440000001515513310763444016337 0ustar liggesusers#### Tests psi(), chi(),... etc and tuning.psi, tuning.chi : library(robustbase) source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) source(system.file("test-tools-1.R", package = "Matrix", mustWork=TRUE))# assert.EQ ### (1) Test the functions themselves -------------------------------- if(!dev.interactive(orNone=TRUE)) pdf("rob-psifns.pdf") ## Simple version, no error checking, no derivative, nothing: psiGGW <- function(x, a,b,c) { ifelse((ax <- abs(x)) < c, x, ifelse((ea <- -((ax-c)^b)/(2*a)) < -708.4, 0, x * exp(ea))) } assert.EQ(Mpsi (5:9, cc=c(0, a=1/8,b=2,c=1/8, NA), "GGW"), psiGGW(5:9, a=1/8,b=2,c=1/8), tol = 1e-13) ## Check that psi() |-> works; ditto for +-Inf, NA,.. 
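## FYI, before the checks below: a minimal numeric sketch of the identities
## relating Mpsi(), Mchi() and Mwgt(), namely wgt(x) = psi(x)/x and
## chi(x) = rho(x)/rho(Inf), shown for 'bisquare' with its usual default
## constant 4.685 (that constant is an assumption made only for this illustration):
xx <- c(-3, 0.5, 2)
stopifnot(all.equal(Mwgt(xx, 4.685, "bisquare"),
                    Mpsi(xx, 4.685, "bisquare") / xx),
          all.equal(Mchi(xx, 4.685, "bisquare"),
                    Mpsi(xx, 4.685, "bisquare", deriv = -1) / MrhoInf(4.685, "bisquare")))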
cG <- c(-.5, 1, .95, NA) # one of the 6 "builtin"s d0 <- numeric() IoI <- c(-Inf, 0, Inf) NN <- c(NaN, NA) cGs <- list( c(-.4, 1.5, 0.85, NA) , c(-.4, 1.5 , 0.90, NA) , c(-.4, 1.5 , 0.95, NA) , c(-.4, 1.5, 0.975, NA) , c(-.4, 1.5, 0.99 , NA) , c(-.4, 1.5, 0.995, NA) ## , c(-.4, 1.25, 0.975, NA) , c(-.4, 1.1, 0.975, NA) , c(-.4, 1.025, 0.975, NA) , c(-.4, 1.0125, 0.975, NA) ## ## FIXME , c(-.1, 1.25, 0.95, NA) ## FIXME , c(-.1, 1.25, 0.99, NA) ) st <- system.time( cG.cnst <- lapply(cGs, function(cc) lmrob.control(psi = "ggw", tuning.psi = cc)$tuning.psi) ) cat('Time for constants computation of tuning.psi: ', st,'\n') cGct <- t(sapply(cG.cnst, attr, "constants"))[,-1] colnames(cGct) <- c("a","b","c", "rhoInf") signif(cGct, 4) assert.EQ(sapply(cG.cnst, function(cc) MrhoInf(cc, "ggw")), cGct[,"rhoInf"], tol = 1e-8) ## Do these checks for a *list* of (c.par, psi) combinations: c.psi.list <- list( list(1.345, "Huber"), list(1.8, "Huber"), list(cG, "GGW"), list(c(2,4,8), "Hampel"), list(c(1.5,3.5,8)*0.90, "Hampel"), list(par=c(-.5,1.5,.95,NA), "lqq"), list(bcs=c(1, 1, 1.25), "lqq"), list(1.1, "optimal"), list(0.1, "optimal"), list(2.3, "Welsh") ) for(c.psi in c.psi.list) { tPar <- c.psi[[1]]; psi <- c.psi[[2]] stopifnot(is.numeric(tPar), is.character(psi)) cat("Psi function ", psi,"; tuning par. c[]= (", paste(formatC(tPar, width=1), collapse=", "),")\n") for(FUN in list(Mpsi, Mchi, Mwgt)) stopifnot(identical(d0, FUN(d0, tPar, psi=psi)), identical(NN, FUN(NN, tPar, psi=psi))) stopifnot(identical(c(0,1,0), Mwgt(IoI, tPar,psi=psi))) if(isPsi.redesc(psi)) stopifnot(identical(c(0,0,0), Mpsi(IoI, tPar,psi=psi)), identical(c(1,0,1), Mchi(IoI, tPar,psi=psi))) else if(psi == "Huber") { stopifnot(identical(c(-tPar,0,tPar), Mpsi(IoI, tPar,psi=psi)), identical(c( Inf,0, Inf), Mchi(IoI, tPar,psi=psi))) } cat("chkPsi..(): ") isHH <- psi %in% c("Huber", "Hampel") # not differentiable tol <- switch(tolower(psi), "huber"=, "hampel"= c(.001, 1.0), "optimal" = .008, "ggw" = c(5e-5, 5e-3, 1e-12), "lqq" = c(1e-5, 5e-5, 1e-5, .08)) # .08 needed for bcs=c(1, 1, 1.25) if(is.null(tol)) tol <- 1e-4 # default otherwise cc <- chkPsi..(c(-5, 10), psi=psi, par=tPar, doD2 = !isHH, tol=tol) ## -------- cc. <- cc[!is.na(cc)] if(is.logical(cc) && all(cc.)) cat(" [Ok]\n") else { cat(" not all Ok:\n") print(cc.[cc. != "TRUE"]) } cat("------------------------\n\n") } ## Nice plots -- and check derivatives ---- head(x. 
<- seq(-5, 10, length=1501)) ## [separate lines, for interactive "play": ] stopifnot(chkPsiDeriv(p.psiFun(x., "LQQ", par=c(-.5,1.5,.95,NA)))) stopifnot(chkPsiDeriv(p.psiFun(x., "GGW", par= cG))) stopifnot(chkPsiDeriv(p.psiFun(x., "optimal", par=2))) stopifnot(chkPsiDeriv(p.psiFun(x., "Hampel", par = ## Default, but rounded: round(c(1.5, 3.5, 8) * 0.9016085, 1)), tol = 1e-3)) stopifnot(chkPsiDeriv(p.psiFun(x., "biweight", par = 4))) stopifnot(chkPsiDeriv(p.psiFun(x., "Welsh", par = 1.5))) stopifnot(chkPsiDeriv(p.psiFun(x., "huber", par = 1.5), tol = c(1e-10, 5e-3))) ## "huber"-rho via Mpsi(*, deriv=-1) was badly wrong till 2018-06 ## The same 6, all in one plot: op <- par(mfrow=c(3,2), mgp = c(1.5, .6, 0), mar = .1+c(3,3,2,.5)) p.psiFun2(x., "LQQ", par=c(-.5,1.5,.95,NA)) p.psiFun2(x., "GGW", par= cG) p.psiFun2(x., "optimal", par=1.3) p.psiFun2(x., "Hampel", par = round(c(1.5, 3.5, 8) * 0.9016085, 1)) p.psiFun2(x., "biweight", par = 4) p.psiFun2(x., "Welsh", par = 1.5) par(op) ### (2) Test them as arguments of lmrob() or lmrob.control(): ----- data(aircraft) set.seed(1) summary(mp0 <- lmrob(Y ~ ., data = aircraft, psi = 'bisquare', method = 'SMDM')) set.seed(2) summary(mp1 <- update(mp0, psi = 'optimal')) set.seed(3) summary(mp2 <- update(mp0, psi = 'ggw')) set.seed(4) summary(mp3 <- update(mp0, psi = 'welsh')) set.seed(5) summary(mp4 <- update(mp0, psi = 'ggw', tuning.psi = c(-.5, 1.5, 0.85, NA), tuning.chi = c(-0.5, 1.5, NA, 0.5))) set.seed(6) summary(mp5 <- update(mp0, psi = 'ggw', tuning.psi = c(-0.5, 1.0, 0.95, NA), tuning.chi = c(-0.5, 1.0, NA, 0.5))) set.seed(7) summary(mp6 <- update(mp0, psi = 'hampel')) set.seed(8) ctr7 <- lmrob.control(psi = 'ggw', tuning.psi = c(-0.3, 1.4, 0.95, NA), tuning.chi = c(-0.3, 1.4, NA, 0.5)) ctr7$tuning.psi ## -> "constants" ctr7$tuning.chi summary(mp7 <-lmrob(Y ~ ., data = aircraft, control = ctr7)) # *not* converging in k.max=200 set.seed(9) summary(mp8 <- update(mp0, psi = 'lqq')) set.seed(10) ## c(.) drops attributes : ctr9 <- lmrob.control(psi = 'lqq', tuning.psi = c(ctr7$tuning.psi), tuning.chi = c(ctr7$tuning.chi)) ctr9$tuning.psi ctr9$tuning.chi ## Confirm these constants above (against the ones we got earlier) ## by recomputing them using higher accuracy : (tpsi. <- do.call(.psi.lqq.findc, c(ctr9$tuning.psi, list(rel.tol=1e-11, tol=1e-8)))) (tchi. <- do.call(.psi.lqq.findc, c(ctr9$tuning.chi, list(rel.tol=1e-11, tol=1e-8)))) (tol4 <- .Machine$double.eps^.25) Rver <- getRversion() integr.bug <- "2.12.0" <= Rver && Rver <= "3.0.1" integr.bug if(integr.bug) tol4 <- 8*tol4 assert.EQ(attr(ctr9$tuning.psi, "constants"), tpsi., tol=tol4, giveRE=TRUE) assert.EQ(attr(ctr9$tuning.chi, "constants"), tchi., tol=tol4, giveRE=TRUE) summary(mp9 <- lmrob(Y ~ ., data = aircraft, control = ctr9)) cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/small-sample.Rout.save0000644000176200001440000001415012271657124017774 0ustar liggesusers R version 3.0.2 Patched (2014-01-26 r64896) -- "Frisbee Sailing" Copyright (C) 2014 The R Foundation for Statistical Computing Platform: x86_64-unknown-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. 
Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > library(robustbase) > ## testing functions: > source(system.file("xtraR/ex-funs.R", package = "robustbase")) > > set.seed(152) > > Nmax <- 12 > nn <- length(nset <- c(2:Nmax, 20, 50))## NOTA BENE: n == 1 etc are NOT YET TREATED! > Sim <- 2^9 # = 512 > > sn <- qn <- numeric(Sim) > cpu <- numeric(nn) > names(cpu) <- as.character(nset) > > for(n in nset) { + nS <- Sim ## if(n < 20) Sim else round(10*Sim/n) + cat("\nn = ",n,"\n------\nno.Sim. = ",nS,"\n") + cpu[as.character(n)] <- system.time(for(i in 1:nS) { + x <- rnorm(n) + sn[i] <- Sn0R(x) + qn[i] <- Qn0R(x) + Sn.x <- Sn(x, const = 1) + Qn.x <- Qn(x, const = 1) + if(!is.all.equal(Sn.x, sn[i], tol = 1e-5)) + cat("i=",i," Sn() != Sn0R(): ", Sn.x, "!=", sn[i],"\n") + if(!is.all.equal(Qn.x, qn[i], tol = 1e-5)) + cat("i=",i," Qn() != Qn0R(): ", Qn.x, "!=", qn[i],"\n") + })[1] + cat("Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...):\n") + print(c(mean(sn), sd(sn)/sqrt(nS), quantile(sn, p = (1:3)/4))) + print(c(mean(qn), sd(qn)/sqrt(nS), quantile(qn, p = (1:3)/4))) + } n = 2 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 1.17177829 0.03695327 0.52951343 1.05306039 1.62748640 25% 50% 75% 1.17177829 0.03695327 0.52951343 1.05306039 1.62748640 n = 3 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.46255834 0.01739738 0.15475882 0.36551414 0.65869811 25% 50% 75% 0.46255834 0.01739738 0.15475882 0.36551414 0.65869811 n = 4 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.84170614 0.02015695 0.51981874 0.77630479 1.12005284 25% 50% 75% 0.84170614 0.02015695 0.51981874 0.77630479 1.12005284 n = 5 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.64309841 0.01561957 0.35812751 0.60223019 0.87200727 25% 50% 75% 0.5591093 0.0133655 0.3273456 0.5337290 0.7412843 n = 6 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.81292713 0.01603854 0.54635157 0.78437781 1.03135301 25% 50% 75% 0.70787958 0.01251911 0.47540581 0.70464628 0.88664634 n = 7 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.71007470 0.01209913 0.52623935 0.69253904 0.88255673 25% 50% 75% 0.533802144 0.009128141 0.376988927 0.520001355 0.663751417 n = 8 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.82650425 0.01419433 0.58883395 0.79562656 1.00574823 25% 50% 75% 0.6645775 0.0103031 0.4938057 0.6323886 0.8027008 n = 9 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.75759768 0.01136142 0.57868634 0.74677447 0.91852270 25% 50% 75% 0.52259364 0.00736039 0.39618164 0.51826059 0.63137960 n = 10 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.83182112 0.01239199 0.62462885 0.80885273 1.01920116 25% 50% 75% 0.623637627 0.008403906 0.474993989 0.613887533 0.750270402 n = 11 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.76832479 0.01118554 0.59020462 0.76690177 0.93425844 25% 50% 75% 0.512979423 0.007139196 0.388824484 0.506239908 0.621525112 n = 12 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. 
x_n) and Qn(...): 25% 50% 75% 0.85538366 0.01139366 0.66084900 0.84512561 1.01552717 25% 50% 75% 0.607337007 0.007056424 0.496338090 0.596697510 0.703213097 n = 20 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.84113700 0.00812507 0.71508462 0.82864901 0.94684680 25% 50% 75% 0.542464499 0.004534141 0.473622699 0.541929347 0.612841843 n = 50 ------ no.Sim. = 512 Mean and its. std.err; Quartiles of Sn(x_1 .. x_n) and Qn(...): 25% 50% 75% 0.846902031 0.004784579 0.780334229 0.846165240 0.917926909 25% 50% 75% 0.4860477 0.0023965 0.4492734 0.4853898 0.5220596 > > rbind("Time (CPU) used:" = summary(cpu)) Min. 1st Qu. Median Mean 3rd Qu. Max. Time (CPU) used: 0.284 0.321 0.385 0.4803 0.462 1.515 > > proc.time() user system elapsed 7.506 0.077 7.725 robustbase/tests/weights.R0000644000176200001440000002171713266635627015412 0ustar liggesusers## test handing of weights and offset argument require(robustbase) ## generate simple example data (extension of the one in ./NAcoef.R ) data <- expand.grid(x1=letters[1:3], x2=LETTERS[1:4], rep=1:3) ## generate offset column data$os <- 1:nrow(data) set.seed(1) data$y <- data$os + rnorm(nrow(data)) ## add collinear variables data$x3 <- rnorm(nrow(data)) data$x4 <- rnorm(nrow(data)) data$x5 <- data$x3 + data$x4 ## lm() will have 'x5' "aliased" (and give coef = NA) ## add some NA terms data$y[1] <- NA data$x4[2:3] <- NA ## to test anova ## generate weights ## some obs with weight 0 data$weights <- as.numeric(with(data, x1 != 'c' | (x2 != 'B' & x2 != 'C'))) ## some obs with weight 2 data$weights[data$x1 == 'b'] <- 2 ## data2 := {data + weights}, encoded in "data2" (-> "ok" for coef(), not for SE) data2 <- rbind(subset(data, weights > 0), subset(data, weights == 2)) ## using these parameters we're essentially forcing lmrob() to ## fit a classic model --> easier to compare to lm() ctrl <- lmrob.control(psi="optimal", tuning.chi = 20, bb = 0.0003846154, tuning.psi=20, method="SM", cov=".vcov.w") ## SM = MM == the case where .vcov.avar1 was also defined for ## Classical models start with 'cm', robust just with 'rm' (or just 'm'): (cm0 <- lm (y ~ x1*x2 + x3 + x4 + x5 + offset(os), data)) (cm1 <- lm (y ~ x1*x2 + x3 + x4 + x5 + offset(os), data, weights=weights)) (cm2 <- lm (y ~ x1*x2 + x3 + x4 + x5, data2, offset=os)) (rm0 <- lmrob(y ~ x1*x2 + x3 + x4 + x5 + offset(os), data, control=ctrl)) set.seed(2) (rm1 <- lmrob(y ~ x1*x2 + x3 + x4 + x5 + offset(os), data, weights=weights, control=ctrl)) set.seed(2) (rm2 <- lmrob(y ~ x1*x2 + x3 + x4 + x5, data2, offset=os, control=ctrl)) sc0 <- summary(cm0) sc1 <- summary(cm1) sc2 <- summary(cm2) (sr0 <- summary(rm0)) (sr1 <- summary(rm1)) (sr2 <- summary(rm2)) ## test Estimates, Std. Errors, ... 
stopifnot(all.equal(coef(cm1), coef(cm2)), all.equal(coef(rm1), coef(rm2)), all.equal(coef(sc0), coef(sr0)), all.equal(coef(sc1), coef(sr1)), all.equal(coef(sc2), coef(sr2))) ## test class "lm" methods that do not depend on weights meths1 <- c("family", "formula", "labels", "model.matrix", "na.action", "terms") for (meth in meths1) stopifnot(all.equal(do.call(meth, list(rm0)), do.call(meth, list(rm1)))) ## class "lm" methods that depend on weights ## FIXME: meths2 <- c(#"AIC", "alias", #"BIC", "case.names", "coef", "confint", #"cooks.distance", #"deviance", "df.residual", #"dfbeta", #"dfbetas", #"drop1", "dummy.coef", #"effects", #"extractAIC", #"hatvalues", #"influence", "kappa", #"logLik", #"model.frame", ## disable because of zero.weights attribute "nobs", "predict", #"proj", #"rstandard", #"rstudent", #"simulate", ##"summary", ## see above "variable.names", ##"vcov", ## see below "weights") op <- options(warn = 1)# print immediately for (meth in meths2) { cat(meth,":") .SW. <- if(meth == "weights") suppressWarnings else identity # for suppressing ## No weights defined for this object. Use type="robustness" .... stopifnot(all.equal(do.call(meth, list(cm1)), do.call(meth, list(rm1))), all.equal(do.call(meth, list(cm2)), .SW.(do.call(meth, list(rm2))))) cat("\n") } options(op)# reverting ## further tests: anova(rm1, update(rm1, ~ . - x4 - x5)) anova(rm2, update(rm2, ~ . - x4 - x5)) stopifnot(all.equal(fitted(cm0), fitted(rm0)), all.equal(fitted(cm1), fitted(rm1)), all.equal(fitted(cm2), fitted(rm2))) nd <- expand.grid(x1=letters[1:3], x2=LETTERS[1:4]) set.seed(3) nd$x3 <- rnorm(nrow(nd)) nd$x4 <- rnorm(nrow(nd)) nd$x5 <- rnorm(nrow(nd)) nd$os <- nrow(nd):1 wts <- runif(nrow(nd)) stopifnot(all.equal(predict(cm0, nd, interval="prediction"), predict(rm0, nd, interval="prediction")), all.equal(predict(cm1, nd, interval="prediction"), predict(rm1, nd, interval="prediction")), all.equal(predict(cm2, nd, interval="prediction"), predict(rm2, nd, interval="prediction")), all.equal(predict(cm0, nd, interval="prediction", weights=wts), predict(rm0, nd, interval="prediction", weights=wts)), all.equal(predict(cm1, nd, interval="prediction", weights=wts), predict(rm1, nd, interval="prediction", weights=wts)), all.equal(predict(cm2, nd, interval="prediction", weights=wts), predict(rm2, nd, interval="prediction", weights=wts), tolerance=1e-7)) ## Padding can lead to differing values here ## so test only full rank part qrEQ <- function(m1, m2) { q1 <- qr(m1) q2 <- qr(m2) r <- 1:q1$rank stopifnot(q1$rank == q2$rank, all.equal(q1$pivot, q2$pivot), all.equal(q1$qraux[r],q2$qraux[r]), all.equal(q1$qr[r,r], q2$qr[r,r])) } qrEQ(cm0, rm0) qrEQ(cm1, rm1) qrEQ(cm2, rm2) stopifnot(all.equal(residuals(cm0), residuals(rm0)), all.equal(residuals(cm1), residuals(rm1)), all.equal(residuals(cm2), residuals(rm2)), all.equal(resid(cm0, type="pearson"), resid(rm0, type="pearson")), all.equal(resid(cm1, type="pearson"), resid(rm1, type="pearson")), all.equal(resid(cm2, type="pearson"), resid(rm2, type="pearson"))) ## R 3.5.0: vcov(*, complete=TRUE) new default ==> same NA's as coef() if(interactive()) withAutoprint({ op <- options(width = 130, digits = 2) # --> vcov() rows fit on 1 line vcov(cm0) # 'x5' is NA vcov(cm2) # 'x5', 'x1c:2B', 'x1c:2C' rows & columns are NA options(op) }) (no.C <- is.na(match("complete", names(formals(stats:::vcov.lm))))) ## temporary _FIXME_ vcovC <- if(no.C) function(M, ...) vcov(M, complete=FALSE, ...) 
else vcov # (complete=TRUE) stopifnot(all.equal(vcov(cm0), vcovC(rm0), check.attributes=FALSE), all.equal(vcov(cm1), vcovC(rm1), check.attributes=FALSE), all.equal(vcov(cm2), vcovC(rm2), check.attributes=FALSE)) ## "clean": cln <- function(vc) structure(vc, weights=NULL, eigen=NULL) ## .vcov.avar1() is not recommended here, but also should work with singular / NA coef case: ok0 <- !is.na(coef(rm0)) vr0.NA<- vcov(rm0, cov=".vcov.avar1", complete=NA) # "almost singular" warning vr0.T <- vcov(rm0, cov=".vcov.avar1", complete=TRUE) vr0.F <- vcov(rm0, cov=".vcov.avar1", complete=FALSE) stopifnot(identical(dim(vr0.NA), dim(vr0.T)), identical(dim(vr0.F), dim(vr0.T) - 1L), dim(vr0.F) == 14, all.equal(cln(vr0.F), vr0.T[ok0,ok0], tol = 1e-15)) if(!no.C) { vc0.T <- vcov(cm0, complete=TRUE) vc0.F <- vcov(cm0, complete=FALSE) } ok1 <- !is.na(coef(rm1)) ## cannot work because init/fit residuals are not of full length tools::assertError(vr1.NA<- vcov(rm1, cov=".vcov.avar1", complete=NA)) tools::assertError(vr1.T <- vcov(rm1, cov=".vcov.avar1", complete=TRUE )) tools::assertError(vr1.F <- vcov(rm1, cov=".vcov.avar1", complete=FALSE)) ## instead, must refit rm1. <- update(rm1, control = within(ctrl, cov <- ".vcov.avar1")) vr1.NA<- vcov(rm1., complete=NA) vr1.T <- vcov(rm1., complete=TRUE) vr1.F <- vcov(rm1., complete=FALSE) stopifnot(identical(vr1.F, vr1.NA), # in this case identical(dim(vr1.F), dim(vr1.T) - 3L), dim(vr1.F) == 12, isSymmetric(vr1.T), identical(rownames(vr1.F), rownames(vr1.T)[ok1]), all.equal(cln(vr1.F), vr1.T[ok1,ok1], tol=1e-15)) if(FALSE) ## ERROR "exact singular" (probably *NOT* to fix, as TRUE/FALSE do work !) vr2.NA<- vcov(rm2, cov=".vcov.avar1", complete=NA) # "almost singular" warning vr2.T <- vcov(rm2, cov=".vcov.avar1", complete=TRUE) vr2.F <- vcov(rm2, cov=".vcov.avar1", complete=FALSE) stopifnot(TRUE, # identical(dim(vr2.NA), dim(vr2.T)), identical(dim(vr2.F), dim(vr2.T) - 3L), dim(vr2.F) == 12, identical(rownames(vr2.F), rownames(vr1.F)), identical(rownames(vr2.T), rownames(vr1.T)), all.equal(cln(vr2.F), vr2.T[ok1,ok1], tol=1e-15)) ## Hmm, the supposedly heteroscedastic-robust estimates *are* very different: all.equal(vcov(cm0), vcovC(rm0, cov = ".vcov.avar1"), check.attributes=FALSE) # rel.diff. 0.5367564 if(FALSE) # does not make sense all.equal(vcov(cm1), vcovC(rm1, cov = ".vcov.avar1"), check.attributes=FALSE) all.equal(vcov(cm2), vcovC(rm2, cov = ".vcov.avar1"), check.attributes=FALSE) # rel.diff. 0.5757642 ## Null fits (rank(X)==0) are tested in NAcoef.R ## testing weight=0 bug lmrob(y ~ x3, data, weights=weights) robustbase/tests/wgt-himed-xtra.R0000644000176200001440000001034714055507253016564 0ustar liggesusers library(robustbase) ## testing functions: source(system.file("xtraR/ex-funs.R", package = "robustbase")) x <- c(0.26, 0.161, 1.33, -0.925, 0.199, -1.476, 0.489) iw <- c(5, 4, 4, 1, 5, 1, 5) stopifnot(0.26 == (himR <- weighted.median(rep(x,iw))), himR == wgt.himedian(x, iw), ## (once gave infinite loop) himR == wgt.himedian(x, as.integer(iw))) ## same result, but *different weighted.median() debug output! ##-- even when having EXACT data (& exact differences!)
all.equal(Qn(c(2:1,4:3)), 1.1376128) ###--- another infinite loop {solved}: (z4 <- round(qnorm(ppoints(4)), 2)) # -1.05 -0.30 0.30 1.05 ## both the same (also weighted.median debug output) (all.equal(weighted.median(z4, 4:1), print(wgt.himedian (z4, 4:1))))# 3.97e-8 (all.equal(weighted.median(z4, c(4,2,3,17)), print(wgt.himedian (z4, c(4,2,3,17)))))# 4.54e-8 Sn (z4)## = 0.8533053 Sn (z4, const = 1)# = 0.75 ##-- now Qn (z4)# --> gave (another) infinite loop ##--> now "works" after (float) rounding of differences! ##--- DIFFERENT whimed() output! stopifnot(all.equal(Qn(z4, const = 1), print(Qn0R(z4)))) set.seed(1) n <- length(x <- round(2048*runif(16))) n*(n-1)/2 # 120 xDiff <- local({y <- sort(x); m <- outer(y,y,"-"); m[lower.tri(m)] }) xDsrt <- unique(sort(xDiff)) stopifnot(exprs = { ## so all pairwise differences differ : length(xDsrt) == n*(n-1)/2 # 120 Qn0R(x, k=1:120) == xDsrt ## *all* the "quantiles" = order stats do differ indeed Qn(x, constant=1) == 374 }) ## Bugs in Qn(x, k = ) sapply(1:120, function(k) Qn(x, 1, k=k)) ## ends with 0 0 0 ... for the last ones if(FALSE) ## e.g. Qn(x, k=1:10) # enters infinite loop after k[4] = 5 ## yet another problem: Sn0R(c(1.1, -0.93, -0.11, -0.74))# 0.82 Sn (c(1.1, -0.93, -0.11, -0.74))# 0.9329471 ## gave segmentation fault at Sat Mar 16 23:54:30 2002 ## not anymore but 0.9329471 ### Check validity of basic algorithm a few times set.seed(471) for(sim in 1:100) { # had '500' cat(".") x <- rnorm(rpois(1, lam=80))# not too large -- the *n0R() versions use too much time otherwise! ##--> Sn0R() "fails" for odd n stopifnot(all.equal(Sn(x, const = 1), Sn0R(x)), all.equal(Qn(x, const = 1), Qn0R(x), tolerance = 7e-8)) x <- round(x,2) stopifnot(all.equal(Sn(x, const = 1), Sn0R(x)), all.equal(Qn(x, const = 1), Qn0R(x), tolerance = 7e-8)) if(sim %% 50 == 0) cat(sim, "\n") } ###---- Last series of problems: when n^2 > max.integer: ## Large x with 1% outliers N <- 1e5 n.o <- round(0.01 * N) nSim <- 24## interesting nSim <- 4 ## for package testing estim.lst <- c("mad", "Sn", "Qn") Res <- array(NA, dim = c(nSim, length(estim.lst), 1 + 2), dimnames= list(NULL,estim.lst, c("Tx","cpu1", "cpu3"))) set.seed(101) for(i in 1:nSim) { x <- sample(c(rnorm(N), 10*min(1, abs(rt(1, df=2))) + rnorm(n.o))) cat(i) for(S in estim.lst) { cpu <- system.time(Tx <- get(S)(x))[1:3] Res[i, S,] <- c(Tx, cpu[c(1,3)]) } cat(" ") }; cat("\n") options(digits = 5) (Tx <- Res[,, "Tx"]) stopifnot(abs(range(Tx - 1)) < 0.03) q()##============================================================================ ### -- Rest: rather for demo -- keep here for reference apply(Res, c(2,3), mean) ## Variation: robust or not: 1000* apply(Tx, 2, sd)#-> Qn < Sn < mad 1000* apply(Tx, 2, Qn)#-> Qn > Sn > mad if(dev.interactive(orNone=TRUE)) { boxplot(Tx, main = sprintf("n=%d x N(0,1) + %d (1%%) outliers to the right", N,n.o)) abline(h = 1, lty = 3, lwd = 2, col = "gray") } if(interactive()) { ## i.e. not when package testing .. N <- 500 set.seed(101) str(iw <- 1L+ as.integer(rpois(N, 1))); str(w <- as.double(iw)) cr <- ci <- numeric(50) for(nn in seq_along(ci)) { x <- round(rnorm(N),1) cat(".") cr[nn] <- system.time(for(i in 1:1000) rr <- wgt.himedian(x, w))[1] ci[nn] <- system.time(for(i in 1:1000) ri <- wgt.himedian(x, iw))[1] stopifnot(rr == ri) };cat("\n") ## Or rather (more correctly) a "paired" comparison: boxplot(cr - ci, notch=TRUE) ## rather t.test( cr, ci, paired = TRUE) ##-> P-value of 0.0219 t.test(log(cr), log(ci), paired = TRUE) ##-> P-value of 0.0088 wilcox.test(cr, ci, paired = TRUE) ##-> P-value of 2.23e-5 (!!)
} robustbase/tests/MCD-specials.Rout.save0000644000176200001440000003003513162424403017601 0ustar liggesusers R Under development (unstable) (2017-05-19 r72698) -- "Unsuffered Consequences" Copyright (C) 2017 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > #### Test special cases for covMcd() > > library(robustbase) > > ### 1) p = 1 ---------------------------------------------------- > set.seed(1) > x <- c(rnorm(50),100, 1e10) > (r1 <- covMcd(x)) Minimum Covariance Determinant (MCD) estimator approximation. Method: Univariate Fast MCD(alpha=0.5 ==> h=27); nsamp = 500; (n,k)mini = (300,5) Call: covMcd(x = x) Log(Det.): -2.13 Robust Estimate of Location: x 0.1922 Robust Estimate of Covariance: x x 0.7483 > str(r1) List of 15 $ call : language covMcd(x = x) $ nsamp : num 500 $ method : chr "Univariate Fast MCD(alpha=0.5 ==> h=27); nsamp = 500; (n,k)mini = (300,5)" $ cov : num [1, 1] 0.748 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "x" .. ..$ : chr "x" $ center : Named num 0.192 ..- attr(*, "names")= chr "x" $ n.obs : int 52 $ alpha : num 0.5 $ quan : num 27 $ raw.cov : num [1, 1] 0.839 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "x" .. ..$ : chr "x" $ raw.center: Named num 0.325 ..- attr(*, "names")= chr "x" $ crit : num -2.13 $ mcd.wt : num [1:52] 1 1 1 1 1 1 1 1 1 1 ... $ X : num [1:52, 1] -0.626 0.184 -0.836 1.595 0.33 ... ..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:52] "1" "2" "3" "4" ... .. ..$ : NULL $ raw.cnp2 : num [1:2] 6.45 1.14 $ cnp2 : num [1:2] 1.47 1.01 - attr(*, "class")= chr "mcd" > summary(r1) Minimum Covariance Determinant (MCD) estimator approximation. Method: Univariate Fast MCD(alpha=0.5 ==> h=27); nsamp = 500; (n,k)mini = (300,5) Call: covMcd(x = x) Log(Det.): -2.13 Robust Estimate of Location: x 0.1922 Robust Estimate of Covariance: x x 0.7483 Eigenvalues: [1] 0.7483 Robustness weights: 4 observations c(14,24,51,52) are outliers with |weight| = 0 ( < 0.0019); 48 weights are ~= 1. > ## with alpha = 1 > (r1.1 <- covMcd(x, alpha = 1)) Minimum Covariance Determinant (MCD) estimator approximation. Method: MCD(alpha=1 ==> h=52) alpha = 1: The minimum covariance determinant estimates based on 52 observations are equal to the classical estimates. Call: covMcd(x = x, alpha = 1) Log(Det.): 42.1 Robust Estimate of Location: x 2.059 Robust Estimate of Covariance: x x 223.9 > str(r1.1) List of 15 $ call : language covMcd(x = x, alpha = 1) $ nsamp : num 500 $ method : chr "MCD(alpha=1 ==> h=52) \nalpha = 1: The minimum covariance determinant estimates based on 52 observations \nare "| __truncated__ $ cov : num [1, 1] 224 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "x" .. ..$ : chr "x" $ center : Named num 2.06 ..- attr(*, "names")= chr "x" $ n.obs : int 52 $ alpha : num 1 $ quan : num 52 $ raw.cov : num [1, 1] 1.92e+18 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "x" .. ..$ : chr "x" $ raw.center: Named num 1.92e+08 ..- attr(*, "names")= chr "x" $ crit : num 42.1 $ mcd.wt : num [1:52] 1 1 1 1 1 1 1 1 1 1 ... $ X : num [1:52, 1] -0.626 0.184 -0.836 1.595 0.33 ... 
..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:52] "1" "2" "3" "4" ... .. ..$ : NULL $ raw.cnp2 : num [1:2] 1 1 $ cnp2 : num [1:2] 1.14 1 - attr(*, "class")= chr "mcd" > summary(r1.1) Minimum Covariance Determinant (MCD) estimator approximation. Method: MCD(alpha=1 ==> h=52) alpha = 1: The minimum covariance determinant estimates based on 52 observations are equal to the classical estimates. Call: covMcd(x = x, alpha = 1) Log(Det.): 42.1 Robust Estimate of Location: x 2.059 Robust Estimate of Covariance: x x 223.9 Eigenvalues: [1] 223.9 Robustness weights: 2 observations c(51,52) are outliers with |weight| = 0 ( < 0.0019); 50 weights are ~= 1. > > ### 1b) p = 1, constant scale > (rc <- covMcd(rep(1,12))) Minimum Covariance Determinant (MCD) estimator approximation. Method: Univariate Fast MCD(alpha=0.5 ==> h=7); nsamp = 500; (n,k)mini = (300,5) Call: covMcd(x = rep(1, 12)) Initial scale 0 because more than 'h' (=7) observations are identical. Log(Det.): -Inf Robust Estimate of Location: rep(1, 12) 1 Robust Estimate of Covariance: rep(1, 12) rep(1, 12) 0 Warning message: In covMcd(rep(1, 12)) : Initial scale 0 because more than 'h' (=7) observations are identical. > str(rc) List of 16 $ call : language covMcd(x = rep(1, 12)) $ nsamp : num 500 $ method : chr "Univariate Fast MCD(alpha=0.5 ==> h=7); nsamp = 500; (n,k)mini = (300,5)" $ singularity:List of 2 ..$ kind: chr "identicalObs" ..$ q : num 7 $ cov : num [1, 1] 0 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "rep(1, 12)" .. ..$ : chr "rep(1, 12)" $ raw.cov : num [1, 1] 0 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "rep(1, 12)" .. ..$ : chr "rep(1, 12)" $ center : Named num 1 ..- attr(*, "names")= chr "rep(1, 12)" $ raw.center : Named num 1 ..- attr(*, "names")= chr "rep(1, 12)" $ n.obs : int 12 $ alpha : num 0.5 $ quan : num 7 $ crit : num -Inf $ mcd.wt : num [1:12] 1 1 1 1 1 1 1 1 1 1 ... $ X : num [1:12, 1] 1 1 1 1 1 1 1 1 1 1 ... ..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:12] "1" "2" "3" "4" ... .. ..$ : NULL $ raw.cnp2 : num [1:2] 4.97 1.41 $ cnp2 : num [1:2] 1 1 - attr(*, "class")= chr "mcd" > summary(rc) Minimum Covariance Determinant (MCD) estimator approximation. Method: Univariate Fast MCD(alpha=0.5 ==> h=7); nsamp = 500; (n,k)mini = (300,5) Call: covMcd(x = rep(1, 12)) Initial scale 0 because more than 'h' (=7) observations are identical. Log(Det.): -Inf Robust Estimate of Location: rep(1, 12) 1 Robust Estimate of Covariance: rep(1, 12) rep(1, 12) 0 Eigenvalues: [1] 0 Robustness weights: All 12 weights are ~= 1. > ## with alpha = 1 > (rc1 <- covMcd(rep(1,12), alpha = 1)) Minimum Covariance Determinant (MCD) estimator approximation. Method: MCD(alpha=1 ==> h=12) alpha = 1: The minimum covariance determinant estimates based on 12 observations are equal to the classical estimates. Call: covMcd(x = rep(1, 12), alpha = 1) The classical covariance matrix is singular. Log(Det.): -Inf Robust Estimate of Location: rep(1, 12) 1 Robust Estimate of Covariance: rep(1, 12) rep(1, 12) 0 > str(rc1) List of 16 $ call : language covMcd(x = rep(1, 12), alpha = 1) $ nsamp : num 500 $ method : chr "MCD(alpha=1 ==> h=12) \nalpha = 1: The minimum covariance determinant estimates based on 12 observations \nare "| __truncated__ $ cov : num [1, 1] 0 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "rep(1, 12)" .. 
..$ : chr "rep(1, 12)" $ center : Named num 1 ..- attr(*, "names")= chr "rep(1, 12)" $ n.obs : int 12 $ singularity:List of 1 ..$ kind: chr "classical" $ alpha : num 1 $ quan : num 12 $ raw.cov : num [1, 1] 0 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr "rep(1, 12)" .. ..$ : chr "rep(1, 12)" $ raw.center : Named num 1 ..- attr(*, "names")= chr "rep(1, 12)" $ crit : num -Inf $ mcd.wt : num [1:12] 1 1 1 1 1 1 1 1 1 1 ... $ X : num [1:12, 1] 1 1 1 1 1 1 1 1 1 1 ... ..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:12] "1" "2" "3" "4" ... .. ..$ : NULL $ raw.cnp2 : num [1:2] 1 1 $ cnp2 : num [1:2] 1 1 - attr(*, "class")= chr "mcd" > summary(rc1) Minimum Covariance Determinant (MCD) estimator approximation. Method: MCD(alpha=1 ==> h=12) alpha = 1: The minimum covariance determinant estimates based on 12 observations are equal to the classical estimates. Call: covMcd(x = rep(1, 12), alpha = 1) The classical covariance matrix is singular. Log(Det.): -Inf Robust Estimate of Location: rep(1, 12) 1 Robust Estimate of Covariance: rep(1, 12) rep(1, 12) 0 Eigenvalues: [1] 0 Robustness weights: All 12 weights are ~= 1. > > ### 2) constant observations { multivariate scale == 0 } ----------- > (X <- matrix(rep(2*(1:4), 12), nrow = 12, byrow = TRUE)) [,1] [,2] [,3] [,4] [1,] 2 4 6 8 [2,] 2 4 6 8 [3,] 2 4 6 8 [4,] 2 4 6 8 [5,] 2 4 6 8 [6,] 2 4 6 8 [7,] 2 4 6 8 [8,] 2 4 6 8 [9,] 2 4 6 8 [10,] 2 4 6 8 [11,] 2 4 6 8 [12,] 2 4 6 8 > (rC <- covMcd(X)) Minimum Covariance Determinant (MCD) estimator approximation. Method: Fast MCD(alpha=0.5 ==> h=8); nsamp = 500; (n,k)mini = (300,5) Call: covMcd(x = X) The covariance matrix of the data is singular. There are 12 observations (in the entire dataset of 12 obs.) lying on the hyperplane with equation a_1*(x_i1 - m_1) + ... + a_p*(x_ip - m_p) = 0 with (m_1, ..., m_p) the mean of these observations and coefficients a_i from the vector a <- c(1, 0, 0, 0) Log(Det.): -Inf Robust Estimate of Location: [1] 2 4 6 8 Robust Estimate of Covariance: [,1] [,2] [,3] [,4] [1,] 0 0 0 0 [2,] 0 0 0 0 [3,] 0 0 0 0 [4,] 0 0 0 0 Warning message: In covMcd(X) : The covariance matrix of the data is singular. There are 12 observations (in the entire dataset of 12 obs.) lying on the hyperplane with equation a_1*(x_i1 - m_1) + ... + a_p*(x_ip - m_p) = 0 with (m_1, ..., m_p) the mean of these observations and coefficients a_i from the vector a <- c(1, 0, 0, 0) > summary(rC) Minimum Covariance Determinant (MCD) estimator approximation. Method: Fast MCD(alpha=0.5 ==> h=8); nsamp = 500; (n,k)mini = (300,5) Call: covMcd(x = X) The covariance matrix of the data is singular. There are 12 observations (in the entire dataset of 12 obs.) lying on the hyperplane with equation a_1*(x_i1 - m_1) + ... + a_p*(x_ip - m_p) = 0 with (m_1, ..., m_p) the mean of these observations and coefficients a_i from the vector a <- c(1, 0, 0, 0) Log(Det.): -Inf Robust Estimate of Location: [1] 2 4 6 8 Robust Estimate of Covariance: [,1] [,2] [,3] [,4] [1,] 0 0 0 0 [2,] 0 0 0 0 [3,] 0 0 0 0 [4,] 0 0 0 0 Eigenvalues: [1] 0 0 0 0 Robustness weights: All 12 weights are ~= 1. > (rC1 <- covMcd(X, alpha = 1)) Minimum Covariance Determinant (MCD) estimator approximation. Method: MCD(alpha=1 ==> h=12) alpha = 1: The minimum covariance determinant estimates based on 12 observations are equal to the classical estimates. Call: covMcd(x = X, alpha = 1) The classical covariance matrix is singular. 
Log(Det.): -Inf Robust Estimate of Location: [1] 2 4 6 8 Robust Estimate of Covariance: [,1] [,2] [,3] [,4] [1,] 0 0 0 0 [2,] 0 0 0 0 [3,] 0 0 0 0 [4,] 0 0 0 0 > summary(rC1) Minimum Covariance Determinant (MCD) estimator approximation. Method: MCD(alpha=1 ==> h=12) alpha = 1: The minimum covariance determinant estimates based on 12 observations are equal to the classical estimates. Call: covMcd(x = X, alpha = 1) The classical covariance matrix is singular. Log(Det.): -Inf Robust Estimate of Location: [1] 2 4 6 8 Robust Estimate of Covariance: [,1] [,2] [,3] [,4] [1,] 0 0 0 0 [2,] 0 0 0 0 [3,] 0 0 0 0 [4,] 0 0 0 0 Eigenvalues: [1] 0 0 0 0 Robustness weights: All 12 weights are ~= 1. > > ### 3) alpha = 1 : classical estimates --- for general cases -------- > > > cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' Time elapsed: 0.211 0.038 0.31 0.002 0 > > proc.time() user system elapsed 0.515 0.072 0.664 robustbase/tests/binom-ni-small.Rout.save0000644000176200001440000002756212553432042020227 0ustar liggesusers R Under development (unstable) (2015-07-18 r68693) -- "Unsuffered Consequences" Copyright (C) 2015 The R Foundation for Statistical Computing Platform: x86_64-unknown-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > library(robustbase) > > ### Binomial example with *small* ni > > N <- 51 > set.seed(123) > table(ni <- rpois(N, lam=4))# has 4 '1's, (no '0') 1 2 3 4 5 6 7 8 10 4 8 10 9 7 4 5 3 1 > n0 <- ni; n0[print(which(ni == 1)[1:2])] <- 0 # has two '0's [1] 6 18 > x <- seq(0,1, length=N) > pr.x <- plogis(5*(x - 1/2)) > k <- rbinom(N, size = ni, prob = pr.x) > k0 <- rbinom(N, size = n0, prob = pr.x) > cbind(k,ni, k0,n0) k ni k0 n0 [1,] 0 3 0 3 [2,] 1 6 2 6 [3,] 0 3 0 3 [4,] 1 6 2 6 [5,] 0 7 2 7 [6,] 0 1 0 0 [7,] 1 4 1 4 [8,] 2 7 1 7 [9,] 0 4 0 4 [10,] 1 4 2 4 [11,] 0 8 1 8 [12,] 0 4 0 4 [13,] 0 5 3 5 [14,] 2 4 1 4 [15,] 0 2 0 2 [16,] 3 7 2 7 [17,] 2 3 2 3 [18,] 1 1 0 0 [19,] 1 3 1 3 [20,] 4 8 2 8 [21,] 3 7 3 7 [22,] 3 5 1 5 [23,] 0 5 2 5 [24,] 4 10 3 10 [25,] 1 5 2 5 [26,] 2 5 5 5 [27,] 2 4 3 4 [28,] 3 4 4 4 [29,] 3 3 3 3 [30,] 2 2 1 2 [31,] 4 8 5 8 [32,] 5 7 3 7 [33,] 3 5 3 5 [34,] 6 6 3 6 [35,] 1 1 1 1 [36,] 1 4 3 4 [37,] 3 5 3 5 [38,] 1 2 1 2 [39,] 3 3 1 3 [40,] 2 2 2 2 [41,] 2 2 2 2 [42,] 3 3 3 3 [43,] 2 3 3 3 [44,] 3 3 3 3 [45,] 2 2 1 2 [46,] 1 2 2 2 [47,] 2 2 2 2 [48,] 4 4 4 4 [49,] 3 3 3 3 [50,] 5 6 5 6 [51,] 1 1 1 1 > g1 <- glm(cbind(k , ni-k ) ~ x, family = binomial) > coef(summary(g1))[,1:2] Estimate Std. Error (Intercept) -2.515884 0.3784211 x 5.123650 0.7344629 > g0 <- glm(cbind(k0, n0-k0) ~ x, family = binomial)# works too > g0. <- glm(cbind(k0, n0-k0) ~ x, family = binomial, subset = n0 > 0) > ## all.equal(g0, g0.) > stopifnot(all.equal(print(coef(summary(g0))), coef(summary(g0.)))) Estimate Std. Error z value Pr(>|z|) (Intercept) -1.913157 0.3346560 -5.716786 1.085574e-08 x 4.061024 0.6512647 6.235596 4.500620e-10 > > > rg1 <- glmrob(cbind(k , ni-k ) ~ x, family = binomial) > rg1. 
<- glmrob(cbind(k , ni-k ) ~ x, family = binomial, + acc = 1e-10) # default is just 1e-4 > > stopifnot(all.equal(unname(coef(rg1.)), c(-2.37585864, 4.902389143), tolerance=1e-9), + all.equal(coef(rg1), coef(rg1.), tolerance=1e-4), + all.equal(vcov(rg1.), vcov(rg1), tolerance = 1e-4)) > rg1$iter [1] 5 > which(rg1.$w.r != 1) ## 7 of them : [1] 11 18 23 29 34 36 46 > str(rg1.["family" != names(rg1.)]) List of 27 $ coefficients : Named num [1:2] -2.38 4.9 ..- attr(*, "names")= chr [1:2] "(Intercept)" "x" $ residuals : Named num [1:51] -0.528 0.622 -0.582 0.435 -0.981 ... ..- attr(*, "names")= chr [1:51] "1" "2" "3" "4" ... $ fitted.values : Named num [1:51] 0.085 0.093 0.102 0.111 0.121 ... ..- attr(*, "names")= chr [1:51] "1" "2" "3" "4" ... $ w.r : num [1:51] 1 1 1 1 1 1 1 1 1 1 ... $ w.x : num [1:51] 1 1 1 1 1 1 1 1 1 1 ... $ ni : num [1:51] 3 6 3 6 7 1 4 7 4 4 ... $ dispersion : num 1 $ cov : num [1:2, 1:2] 0.144 -0.253 -0.253 0.55 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:2] "(Intercept)" "x" .. ..$ : chr [1:2] "(Intercept)" "x" $ matM : num [1:2, 1:2] 0.625 0.287 0.287 0.163 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:2] "(Intercept)" "x" .. ..$ : chr [1:2] "(Intercept)" "x" $ matQ : num [1:2, 1:2] 0.551 0.252 0.252 0.143 ..- attr(*, "dimnames")=List of 2 .. ..$ : chr [1:2] "(Intercept)" "x" .. ..$ : chr [1:2] "(Intercept)" "x" $ tcc : num 1.34 $ linear.predictors: Named num [1:51] -2.38 -2.28 -2.18 -2.08 -1.98 ... ..- attr(*, "names")= chr [1:51] "1" "2" "3" "4" ... $ deviance : NULL $ iter : int 11 $ y : Named num [1:51] 0 0.167 0 0.167 0 ... ..- attr(*, "names")= chr [1:51] "1" "2" "3" "4" ... $ converged : logi TRUE $ model :'data.frame': 51 obs. of 2 variables: ..$ cbind(k, ni - k): int [1:51, 1:2] 0 1 0 1 0 0 1 2 0 1 ... .. ..- attr(*, "dimnames")=List of 2 .. .. ..$ : NULL .. .. ..$ : chr [1:2] "k" "" ..$ x : num [1:51] 0 0.02 0.04 0.06 0.08 0.1 0.12 0.14 0.16 0.18 ... ..- attr(*, "terms")=Classes 'terms', 'formula' language cbind(k, ni - k) ~ x .. .. ..- attr(*, "variables")= language list(cbind(k, ni - k), x) .. .. ..- attr(*, "factors")= int [1:2, 1] 0 1 .. .. .. ..- attr(*, "dimnames")=List of 2 .. .. .. .. ..$ : chr [1:2] "cbind(k, ni - k)" "x" .. .. .. .. ..$ : chr "x" .. .. ..- attr(*, "term.labels")= chr "x" .. .. ..- attr(*, "order")= int 1 .. .. ..- attr(*, "intercept")= int 1 .. .. ..- attr(*, "response")= int 1 .. .. ..- attr(*, ".Environment")= .. .. ..- attr(*, "predvars")= language list(cbind(k, ni - k), x) .. .. ..- attr(*, "dataClasses")= Named chr [1:2] "nmatrix.2" "numeric" .. .. .. ..- attr(*, "names")= chr [1:2] "cbind(k, ni - k)" "x" $ call : language glmrob(formula = cbind(k, ni - k) ~ x, family = binomial, acc = 1e-10) $ formula :Class 'formula' language cbind(k, ni - k) ~ x .. ..- attr(*, ".Environment")= $ terms :Classes 'terms', 'formula' language cbind(k, ni - k) ~ x .. ..- attr(*, "variables")= language list(cbind(k, ni - k), x) .. ..- attr(*, "factors")= int [1:2, 1] 0 1 .. .. ..- attr(*, "dimnames")=List of 2 .. .. .. ..$ : chr [1:2] "cbind(k, ni - k)" "x" .. .. .. ..$ : chr "x" .. ..- attr(*, "term.labels")= chr "x" .. ..- attr(*, "order")= int 1 .. ..- attr(*, "intercept")= int 1 .. ..- attr(*, "response")= int 1 .. ..- attr(*, ".Environment")= .. ..- attr(*, "predvars")= language list(cbind(k, ni - k), x) .. ..- attr(*, "dataClasses")= Named chr [1:2] "nmatrix.2" "numeric" .. .. 
..- attr(*, "names")= chr [1:2] "cbind(k, ni - k)" "x" $ data : $ offset : NULL $ control :List of 4 ..$ acc : num 1e-10 ..$ test.acc: chr "coef" ..$ maxit : num 50 ..$ tcc : num 1.34 $ method : chr "Mqle" $ prior.weights : num [1:51] 1 1 1 1 1 1 1 1 1 1 ... $ contrasts : NULL $ xlevels : Named list() > > rg2 <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, + acc = 1e-10, tcc = 3) # large cutoff: almost classical > vcov(rg2) # << already close to limit (Intercept) x (Intercept) 0.1430407 -0.2501886 x -0.2501886 0.5388665 > rg10 <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, tcc = 10) > rgL <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, tcc = 100) > > no.comp <- - match(c("call", "data", "family", "control", "tcc"), names(rg10)) > stopifnot(all.equal(rg10[no.comp], rgL[no.comp], tolerance= 1e-14)) > > vcov(rgL) # is now the same as the following: (Intercept) x (Intercept) 0.1432102 -0.2504843 x -0.2504843 0.5394659 > if(FALSE) { ## tcc=Inf fails: non-convergence / singular matrix from GOTO/Atlas3 + rgI <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, tcc = Inf) + ## tcc = Inf still *FAILS* (!) + stopifnot(all.equal(rgL[no.comp], rgI[no.comp], tolerance= 0)) + ## and is quite close to the classic one: + (all.equal(vcov(rgI), vcov(g1))) + } > > rg0 <- glmrob(cbind(k0, n0-k0) ~ x, family = binomial) Warning message: In glmrobMqle(X = X, y = Y, weights = weights, start = start, offset = offset, : fitted probabilities numerically 0 or 1 occurred > ## --> warning.. > rg0. <- glmrob(cbind(k0, n0-k0) ~ x, family = binomial, subset = n0 > 0) > > coef(summary(rg0)) # not yet good (cf. 'g0' above!) -- but the one of rg0. is Estimate Std. Error z value Pr(>|z|) (Intercept) -1.852918 NaN NaN NaN x 3.847520 NaN NaN NaN > stopifnot(all.equal(coef(rg0), coef(rg0.))) > > > ### Example where all ni >= 3 -- works better, now also correct as.var. !! > ### ----------------- ======= > > min(n3 <- ni + 2)# = 3 [1] 3 > k3 <- rbinom(N, size = n3, prob = pr.x) > g3 <- glm(cbind(k3 , n3-k3) ~ x, family = binomial) > (cfg <- coef(summary(g3))[,1:2]) Estimate Std. Error (Intercept) -2.945565 0.3420351 x 5.546417 0.6259260 > stopifnot(all.equal(sqrt(diag(vcov(g3))), cfg[,2])) > > rg3 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial) > (s3 <- summary(rg3)) Call: glmrob(formula = cbind(k3, n3 - k3) ~ x, family = binomial) Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) -2.8901 0.3499 -8.260 <2e-16 *** x 5.5039 0.6447 8.537 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robustness weights w.r * w.x: 48 weights are ~= 1. The remaining 3 ones are 42 44 51 0.5127 0.7846 0.7388 Number of observations: 51 Fitted by method 'Mqle' (in 5 iterations) (Dispersion parameter for binomial family taken to be 1) No deviance values available Algorithmic parameters: acc tcc 0.0001 1.3450 maxit 50 test.acc "coef" > summary(rg3$w.r) Min. 1st Qu. Median Mean 3rd Qu. Max. 0.5127 1.0000 1.0000 0.9811 1.0000 1.0000 > rg3.5 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial, tcc = 5) > (s3.5 <- summary(rg3.5)) Call: glmrob(formula = cbind(k3, n3 - k3) ~ x, family = binomial, tcc = 5) Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) -2.9454 0.3420 -8.611 <2e-16 *** x 5.5461 0.6259 8.861 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robustness weights w.r * w.x: All 51 weights are ~= 1. 
Number of observations: 51 Fitted by method 'Mqle' (in 1 iterations) (Dispersion parameter for binomial family taken to be 1) No deviance values available Algorithmic parameters: acc 1e-04 maxit tcc 50 5 test.acc "coef" > summary(rg3.5$w.r)# all 1 Min. 1st Qu. Median Mean 3rd Qu. Max. 1 1 1 1 1 1 > stopifnot(all.equal(coef(s3)[,1:2], coef(s3.5)[,1:2], tolerance = 0.02)) > > rg3.15 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial, tcc = 15, acc=1e-10) > (s3.15 <- summary(rg3.15)) Call: glmrob(formula = cbind(k3, n3 - k3) ~ x, family = binomial, tcc = 15, acc = 1e-10) Coefficients: Estimate Std. Error z value Pr(>|z|) (Intercept) -2.9456 0.3420 -8.612 <2e-16 *** x 5.5464 0.6259 8.861 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robustness weights w.r * w.x: All 51 weights are ~= 1. Number of observations: 51 Fitted by method 'Mqle' (in 1 iterations) (Dispersion parameter for binomial family taken to be 1) No deviance values available Algorithmic parameters: acc 1e-10 maxit tcc 50 15 test.acc "coef" > > stopifnot(all.equal(coef(s3.15)[,1:2], cfg, tolerance = 1e-5),# 2e-6 + all.equal(cfg[,"Estimate"], rg3.15$coeff, tolerance= 1e-8) # 6.05e-10 + ) > ##rg3.15$eff # == 1 > > ## doesn't change any more: > rg3.1000 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial, tcc = 1000, + acc=1e-10) > stopifnot(all.equal(rg3.1000[no.comp], + rg3.15 [no.comp], tol = 1e-13)) > > cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' Time elapsed: 0.389 0.082 0.561 0 0.008 > > proc.time() user system elapsed 0.389 0.090 0.561 robustbase/tests/binom-ni-small.R0000644000176200001440000000644312271657124016544 0ustar liggesuserslibrary(robustbase) ### Binomial example with *small* ni N <- 51 set.seed(123) table(ni <- rpois(N, lam=4))# has 4 '1's, (no '0') n0 <- ni; n0[print(which(ni == 1)[1:2])] <- 0 # has two '0's x <- seq(0,1, length=N) pr.x <- plogis(5*(x - 1/2)) k <- rbinom(N, size = ni, prob = pr.x) k0 <- rbinom(N, size = n0, prob = pr.x) cbind(k,ni, k0,n0) g1 <- glm(cbind(k , ni-k ) ~ x, family = binomial) coef(summary(g1))[,1:2] g0 <- glm(cbind(k0, n0-k0) ~ x, family = binomial)# works too g0. <- glm(cbind(k0, n0-k0) ~ x, family = binomial, subset = n0 > 0) ## all.equal(g0, g0.) stopifnot(all.equal(print(coef(summary(g0))), coef(summary(g0.)))) rg1 <- glmrob(cbind(k , ni-k ) ~ x, family = binomial) rg1. <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, acc = 1e-10) # default is just 1e-4 stopifnot(all.equal(unname(coef(rg1.)), c(-2.37585864, 4.902389143), tolerance=1e-9), all.equal(coef(rg1), coef(rg1.), tolerance=1e-4), all.equal(vcov(rg1.), vcov(rg1), tolerance = 1e-4)) rg1$iter which(rg1.$w.r != 1) ## 7 of them : str(rg1.["family" != names(rg1.)]) rg2 <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, acc = 1e-10, tcc = 3) # large cutoff: almost classical vcov(rg2) # << already close to limit rg10 <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, tcc = 10) rgL <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, tcc = 100) no.comp <- - match(c("call", "data", "family", "control", "tcc"), names(rg10)) stopifnot(all.equal(rg10[no.comp], rgL[no.comp], tolerance= 1e-14)) vcov(rgL) # is now the same as the following: if(FALSE) { ## tcc=Inf fails: non-convergence / singular matrix from GOTO/Atlas3 rgI <- glmrob(cbind(k , ni-k ) ~ x, family = binomial, tcc = Inf) ## tcc = Inf still *FAILS* (!) 
stopifnot(all.equal(rgL[no.comp], rgI[no.comp], tolerance= 0)) ## and is quite close to the classic one: (all.equal(vcov(rgI), vcov(g1))) } rg0 <- glmrob(cbind(k0, n0-k0) ~ x, family = binomial) ## --> warning.. rg0. <- glmrob(cbind(k0, n0-k0) ~ x, family = binomial, subset = n0 > 0) coef(summary(rg0)) # not yet good (cf. 'g0' above!) -- but the one of rg0. is stopifnot(all.equal(coef(rg0), coef(rg0.))) ### Example where all ni >= 3 -- works better, now also correct as.var. !! ### ----------------- ======= min(n3 <- ni + 2)# = 3 k3 <- rbinom(N, size = n3, prob = pr.x) g3 <- glm(cbind(k3 , n3-k3) ~ x, family = binomial) (cfg <- coef(summary(g3))[,1:2]) stopifnot(all.equal(sqrt(diag(vcov(g3))), cfg[,2])) rg3 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial) (s3 <- summary(rg3)) summary(rg3$w.r) rg3.5 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial, tcc = 5) (s3.5 <- summary(rg3.5)) summary(rg3.5$w.r)# all 1 stopifnot(all.equal(coef(s3)[,1:2], coef(s3.5)[,1:2], tolerance = 0.02)) rg3.15 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial, tcc = 15, acc=1e-10) (s3.15 <- summary(rg3.15)) stopifnot(all.equal(coef(s3.15)[,1:2], cfg, tolerance = 1e-5),# 2e-6 all.equal(cfg[,"Estimate"], rg3.15$coeff, tolerance= 1e-8) # 6.05e-10 ) ##rg3.15$eff # == 1 ## doesn't change any more: rg3.1000 <- glmrob(cbind(k3 , n3-k3) ~ x, family = binomial, tcc = 1000, acc=1e-10) stopifnot(all.equal(rg3.1000[no.comp], rg3.15 [no.comp], tol = 1e-13)) cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/NAcoef.R0000644000176200001440000001412214053426061015044 0ustar liggesusers## test handing of NA coefficients / singular fits ## also check: ## -- what would have to be done if class "lm" was added. ## -- general compatibility to class lm. require(robustbase) source(system.file("test-tools-1.R", package="Matrix", mustWork=TRUE)) ##-> assertError(), etc options(digits = 5)# -> higher chance of platform independence ## generate simple example data (almost as in ./weights.R ) data <- expand.grid(x1=letters[1:3], x2=LETTERS[1:3], rep=1:3) set.seed(1) data$y <- rnorm(nrow(data)) ## drop all combinations of one interaction: data <- subset(data, x1 != 'c' | (x2 != 'B' & x2 != 'C')) ## add collinear variables data$x3 <- rnorm(nrow(data)) data$x4 <- rnorm(nrow(data)) data$x5 <- data$x3 + data$x4 ## add some NA terms data$y[1] <- NA data$x4[2:3] <- NA ## to test anova ## Classical models start with 'cm', robust just with 'rm' (or just 'm'): cm0 <- lm (y ~ x1*x2 + x3, data) cm1 <- lm (y ~ x1*x2 + x3 + x4 + x5, data) set.seed(2) rm1 <- lmrob(y ~ x1*x2 + x3 + x4 + x5, data) m3 <- lmrob(y ~ x1*x2 + x3 + x4, data) # same column space as rm1 rm0 <- lmrob(y ~ x1*x2 + x3, data) ## clean version of rm1 (to check predict) data2 <- data.frame(y=data$y[-(1:3)], rm1$x[,!is.na(rm1$coef)]) set.seed(2) rm1c <- lmrob(y ~ x1b + x1c + x2B + x2C + x3 + x4 + x1b:x2B + x1b:x2C, data2) ## add class lm to rm1 (for now) class(rm1) <- c(class(rm1), "lm") class(rm0) <- c(class(rm0), "lm") ## the full matrix (data) should be returned by model matrix (frame) stopifnot(all.equal(model.matrix(cm1), model.matrix(rm1)), all.equal(model.frame (cm1), model.frame (rm1))) ## qr decomposition should be for the full data and pivots identical lm result qr.cm1 <- qr(cm1)$qr qr.rm1 <- rm1$qr$qr stopifnot(NCOL(qr.rm1) == NCOL(qr.cm1), NROW(qr.rm1) == NROW(qr.cm1), length(rm1$qr$qraux) == length(qr(cm1)$qraux), all.equal(rm1$qr$pivot, qr(cm1)$pivot), all.equal(dimnames(qr.rm1),dimnames(qr.cm1))) ## the alias function should return the 
same result stopifnot(all.equal(alias(cm1), alias(rm1))) #### ## these helper functions should print NAs for the dropped coefficients print(rm1) summary(rm1) -> s1 confint(rm1) -> ci1 stopifnot(identical(is.na(coef(cm1)), apply(ci1, 1L, anyNA)), identical(sigma(rm1), s1$ sigma), identical(vcov(rm1, complete=FALSE), s1$ cov ), TRUE) print(s1, showAlgo=FALSE) ci1 ## drop1 should return df = 0 #drop1(rm1) ## drop.lm does not return valid results (yet)! #### ## methods that should just drop the NA coefficients ## m3 is actually the same as rm1, so anova should raise an error assertError(anova(rm1, m3, test="Wald")) assertError(anova(rm1, m3, test="Deviance")) ## but comparing rm1 and rm0 should be ok anova(rm1, rm0, test="Wald") anova(rm1, rm0, test="Deviance") ## commands with single #: ## they do (or might) not return sensible results for robust fits ## and need to be checked again #cooks.distance(rm1) #deviance(rm1) #dfbeta(rm1) #dfbetas(rm1) #effects(rm1) ## fails #extractAIC(rm1) #influence(rm1) stopifnot(all.equal(hv1 <- hatvalues(rm1), .lmrob.hat(wqr=rm1$qr), tol=1e-15), all.equal(hv1, stats:::hatvalues.lm(rm1), tol=1e-15), all.equal(hat(cm1$qr), unname(hatvalues(cm1)), tol=1e-15), all.equal(unname(hv1), hat(rm1$qr), tol=1e-15), ## ditto : all.equal(hv1c <- hatvalues(rm1c), stats:::hatvalues.lm(rm1c), tol=1e-15)) ## kappa() & labels() : stopifnot(is.infinite(kr1 <- kappa(rm1)), kr1 == kappa(cm1), # = +Inf both identical(labels(rm1), labels(cm1))) logLik(rm1)# well, and what does it mean? ## plot(rm1, which=1) ## plot.lmrob() fails "singular covariance" .. FIXME! par(mfrow=c(2,2)) plot(rm1, which=2:4) stopifnot(all.equal(predict(rm1), predict(rm1c), tol=1e-15), all.equal(predict(rm1, se.fit=TRUE, interval="confidence"), predict(rm1c, se.fit=TRUE, interval="confidence"), tol=4e-15)) # seen 1.3e-15 (ATLAS) predict(rm1, type="terms", se.fit=TRUE, interval="confidence") #proj(rm1) ## fails "FIXME" residuals(rm1) #rstandard(rm1) #rstudent(rm1) #simulate(rm1) ## just $weights needs to be changed to prior weights V1 <- vcov(rm1, complete=FALSE) ## but don't show the "eigen" part {vectors may flip sign}: attributes(V1) <- attributes(V1)[c("dim","dimnames", "weights")]; V1 set.seed(12); sc <- simulate(cm1, 64) set.seed(12); rc <- simulate(rm1, 64) stopifnot(all.equal(sqrt(diag(V1)), coef(summary(rm1))[,"Std. Error"], tol=1e-15), all.equal(sc, rc, tolerance = 0.08),# dimension *and* approx. values (no NA) identical(variable.names(rm1), variable.names(cm1)), all.equal(residuals(rm1), residuals(cm1), tolerance = 0.05),# incl. names all.equal(rstudent (rm1), rstudent (cm1), tolerance = 0.06), identical(dimnames(rm1), dimnames(cm1)), all.equal(dummy.coef(rm1), dummy.coef(cm1), tolerance= .5)) ## check mostly structure ## other helper functions stopifnot(identical(case.names(rm1), case.names(cm1)), all.equal(family(rm1), family(cm1)),# identical() upto environment identical(formula(rm1), formula(cm1)), nobs(rm1) == nobs(cm1)) #add1(rm0, ~ . + x3 + x4 + x5) ## does not return valid results (yet)! 
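## Extra illustrative consistency check (a sketch added here, not one of the original
## tests): for this singular fit, the number of NA coefficients should equal the rank
## deficiency of the full model matrix, and lmrob() should flag the same aliased terms
## as lm() does (cf. the pivot comparison of the two QR decompositions above).
stopifnot(sum(is.na(coef(rm1))) ==
              ncol(model.matrix(rm1)) - qr(model.matrix(rm1))$rank,
          identical(is.na(coef(rm1)), is.na(coef(cm1))))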
## test other initial estimators lmrob(y ~ x1*x2 + x3 + x4 + x5, data, init="M-S") lmrob(y ~ x1*x2 + x3 + x4 + x5, data, init=lmrob.lar) ## test all zero design matrix data <- data.frame(y=1:10,x1=0,x2=0,os=2,w=c(0.5, 1)) (m5 <- lmrob(y ~ 1+x1+x2+offset(os), data, weights=w)) (sm5 <- summary(m5)) (m6 <- lmrob(y ~ 0+x1+x2+offset(os), data, weights=w)) (sm6 <- summary(m6)) sc5 <- summary(cm5 <- lm(y ~ 1+x1+x2+offset(os), data, weights=w)) sc6 <- summary(cm6 <- lm(y ~ 0+x1+x2+offset(os), data, weights=w)) if(getRversion() <= "3.5.1" && as.numeric(R.version$`svn rev`) < 74993) ## in the past, lm() returned logical empty matrix storage.mode(sc6$coefficients) <- "double" stopifnot(all.equal(coef(m5), coef(cm5), tolerance = 0.01), all.equal(coef(m6), coef(cm6), tolerance = 1e-14), all.equal(coef(sm5), coef(sc5), tolerance = 0.05), all.equal(coef(sm6), coef(sc6), tolerance = 1e-14), identical(sm5$df, sc5$df), identical(sm6$df, sc6$df)) robustbase/tests/NAcoef.Rout.save0000644000176200001440000004670114053474212016542 0ustar liggesusers R Under development (unstable) (2021-05-25 r80378) -- "Unsuffered Consequences" Copyright (C) 2021 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > ## test handing of NA coefficients / singular fits > ## also check: > ## -- what would have to be done if class "lm" was added. > ## -- general compatibility to class lm. 
> require(robustbase) Loading required package: robustbase > source(system.file("test-tools-1.R", package="Matrix", mustWork=TRUE)) Loading required package: tools > ##-> assertError(), etc > options(digits = 5)# -> higher chance of platform independence > > ## generate simple example data (almost as in ./weights.R ) > data <- expand.grid(x1=letters[1:3], x2=LETTERS[1:3], rep=1:3) > set.seed(1) > data$y <- rnorm(nrow(data)) > ## drop all combinations of one interaction: > data <- subset(data, x1 != 'c' | (x2 != 'B' & x2 != 'C')) > ## add collinear variables > data$x3 <- rnorm(nrow(data)) > data$x4 <- rnorm(nrow(data)) > data$x5 <- data$x3 + data$x4 > ## add some NA terms > data$y[1] <- NA > data$x4[2:3] <- NA ## to test anova > > ## Classical models start with 'cm', robust just with 'rm' (or just 'm'): > cm0 <- lm (y ~ x1*x2 + x3, data) > cm1 <- lm (y ~ x1*x2 + x3 + x4 + x5, data) > set.seed(2) > rm1 <- lmrob(y ~ x1*x2 + x3 + x4 + x5, data) > m3 <- lmrob(y ~ x1*x2 + x3 + x4, data) # same column space as rm1 > rm0 <- lmrob(y ~ x1*x2 + x3, data) > > ## clean version of rm1 (to check predict) > data2 <- data.frame(y=data$y[-(1:3)], rm1$x[,!is.na(rm1$coef)]) > set.seed(2) > rm1c <- lmrob(y ~ x1b + x1c + x2B + x2C + x3 + x4 + x1b:x2B + x1b:x2C, data2) > > ## add class lm to rm1 (for now) > class(rm1) <- c(class(rm1), "lm") > class(rm0) <- c(class(rm0), "lm") > > ## the full matrix (data) should be returned by model matrix (frame) > stopifnot(all.equal(model.matrix(cm1), model.matrix(rm1)), + all.equal(model.frame (cm1), model.frame (rm1))) > ## qr decomposition should be for the full data and pivots identical lm result > qr.cm1 <- qr(cm1)$qr > qr.rm1 <- rm1$qr$qr > stopifnot(NCOL(qr.rm1) == NCOL(qr.cm1), + NROW(qr.rm1) == NROW(qr.cm1), + length(rm1$qr$qraux) == length(qr(cm1)$qraux), + all.equal(rm1$qr$pivot, qr(cm1)$pivot), + all.equal(dimnames(qr.rm1),dimnames(qr.cm1))) > ## the alias function should return the same result > stopifnot(all.equal(alias(cm1), alias(rm1))) > > #### > ## these helper functions should print NAs for the dropped coefficients > print(rm1) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data) \--> method = "MM" Coefficients: (Intercept) x1b x1c x2B x2C x3 0.4381 0.5968 0.0344 0.2012 0.1789 -0.1320 x4 x5 x1b:x2B x1c:x2B x1b:x2C x1c:x2C -0.2155 NA -1.8763 NA -0.8651 NA > summary(rm1) -> s1 > confint(rm1) -> ci1 > stopifnot(identical(is.na(coef(cm1)), apply(ci1, 1L, anyNA)), + identical(sigma(rm1), s1$ sigma), + identical(vcov(rm1, complete=FALSE), s1$ cov ), + TRUE) > > print(s1, showAlgo=FALSE) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -1.4584 -0.3556 0.0246 0.3651 1.0296 Coefficients: (3 not defined because of singularities) Estimate Std. Error t value Pr(>|t|) (Intercept) 0.4381 0.5443 0.80 0.44 x1b 0.5968 0.6423 0.93 0.38 x1c 0.0344 0.6880 0.05 0.96 x2B 0.2012 0.7164 0.28 0.79 x2C 0.1789 0.6871 0.26 0.80 x3 -0.1320 0.4155 -0.32 0.76 x4 -0.2155 0.1694 -1.27 0.24 x5 NA NA NA NA x1b:x2B -1.8763 1.2153 -1.54 0.16 x1c:x2B NA NA NA NA x1b:x2C -0.8651 0.7466 -1.16 0.28 x1c:x2C NA NA NA NA Robust residual standard error: 0.927 (3 observations deleted due to missingness) Multiple R-squared: 0.338, Adjusted R-squared: -0.251 Convergence in 15 IRWLS iterations Robustness weights: 2 weights are ~= 1. The remaining 16 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 
0.787 0.937 0.985 0.952 0.988 0.994 > ci1 2.5 % 97.5 % (Intercept) -0.79333 1.66946 x1b -0.85607 2.04973 x1c -1.52188 1.59076 x2B -1.41948 1.82189 x2C -1.37549 1.73320 x3 -1.07182 0.80783 x4 -0.59863 0.16756 x5 NA NA x1b:x2B -4.62539 0.87283 x1c:x2B NA NA x1b:x2C -2.55391 0.82381 x1c:x2C NA NA > ## drop1 should return df = 0 > #drop1(rm1) ## drop.lm does not return valid results (yet)! > > #### > ## methods that should just drop the NA coefficients > ## m3 is actually the same as rm1, so anova should raise an error > assertError(anova(rm1, m3, test="Wald")) > assertError(anova(rm1, m3, test="Deviance")) > ## but comparing rm1 and rm0 should be ok > anova(rm1, rm0, test="Wald") Robust Wald Test Table Model 1: y ~ x1 * x2 + x3 + x4 + x5 Model 2: y ~ x1 * x2 + x3 Largest model fitted by lmrob(), i.e. SM pseudoDf Test.Stat Df Pr(>chisq) 1 6 2 10 1.62 1 0.2 > anova(rm1, rm0, test="Deviance") Robust Deviance Table Model 1: y ~ x1 * x2 + x3 + x4 + x5 Model 2: y ~ x1 * x2 + x3 Largest model fitted by lmrob(), i.e. SM pseudoDf Test.Stat Df Pr(>chisq) 1 6 2 10 1.4 1 0.24 > ## commands with single #: > ## they do (or might) not return sensible results for robust fits > ## and need to be checked again > #cooks.distance(rm1) > #deviance(rm1) > #dfbeta(rm1) > #dfbetas(rm1) > #effects(rm1) ## fails > #extractAIC(rm1) > #influence(rm1) > stopifnot(all.equal(hv1 <- hatvalues(rm1), .lmrob.hat(wqr=rm1$qr), tol=1e-15), + all.equal(hv1, stats:::hatvalues.lm(rm1), tol=1e-15), + all.equal(hat(cm1$qr), unname(hatvalues(cm1)), tol=1e-15), + all.equal(unname(hv1), hat(rm1$qr), tol=1e-15), + ## ditto : + all.equal(hv1c <- hatvalues(rm1c), stats:::hatvalues.lm(rm1c), tol=1e-15)) > > ## kappa() & labels() : > stopifnot(is.infinite(kr1 <- kappa(rm1)), kr1 == kappa(cm1), # = +Inf both + identical(labels(rm1), labels(cm1))) > logLik(rm1)# well, and what does it mean? 'log Lik.' -17.67 (df=10) > ## plot(rm1, which=1) ## plot.lmrob() fails "singular covariance" .. FIXME! 
> par(mfrow=c(2,2)) > plot(rm1, which=2:4) > stopifnot(all.equal(predict(rm1), predict(rm1c), tol=1e-15), + all.equal(predict(rm1, se.fit=TRUE, interval="confidence"), + predict(rm1c, se.fit=TRUE, interval="confidence"), tol=4e-15)) # seen 1.3e-15 (ATLAS) > predict(rm1, type="terms", se.fit=TRUE, interval="confidence") $fit x1 x2 x3 x4 x5 x1:x2 4 -0.26908 0.074520 -0.166290 0.17233795 0 0.45689 5 0.32774 0.074520 0.026620 -0.03309916 0 -1.41939 7 -0.26908 0.052168 -0.038119 0.28384254 0 0.45689 8 0.32774 0.052168 0.020155 -0.26844357 0 -0.40816 10 -0.26908 -0.126688 0.194821 -0.38642275 0 0.45689 11 0.32774 -0.126688 0.067831 0.11957373 0 0.45689 12 -0.23465 -0.126688 0.065098 0.26547275 0 0.45689 13 -0.26908 0.074520 0.020882 -0.08237063 0 0.45689 14 0.32774 0.074520 -0.132148 0.06953345 0 -1.41939 16 -0.26908 0.052168 -0.087685 -0.47721028 0 0.45689 17 0.32774 0.052168 0.034769 0.04888197 0 -0.40816 19 -0.26908 -0.126688 0.046496 -0.10823918 0 0.45689 20 0.32774 -0.126688 -0.078945 0.03438888 0 0.45689 21 -0.23465 -0.126688 -0.060426 0.20062634 0 0.45689 22 -0.26908 0.074520 0.103967 -0.00026715 0 0.45689 23 0.32774 0.074520 0.106440 0.42945756 0 -1.41939 25 -0.26908 0.052168 -0.035072 -0.27545520 0 0.45689 26 0.32774 0.052168 -0.088392 0.00739277 0 -0.40816 attr(,"constant") [1] 0.32347 $se.fit x1 x2 x3 x4 x5 x1:x2 4 0.35192 0.42010 0.523390 0.13540939 0 0.29013 5 0.29582 0.42010 0.083786 0.02600668 0 0.95012 7 0.35192 0.40345 0.119979 0.22302078 0 0.29013 8 0.29582 0.40345 0.063436 0.21092151 0 0.53827 10 0.35192 0.40191 0.613190 0.30362011 0 0.29013 11 0.29582 0.40191 0.213494 0.09395148 0 0.29013 12 0.40411 0.40191 0.204892 0.20858727 0 0.29013 13 0.35192 0.42010 0.065724 0.06472026 0 0.29013 14 0.29582 0.42010 0.415930 0.05463383 0 0.95012 16 0.35192 0.40345 0.275984 0.37495370 0 0.29013 17 0.29582 0.40345 0.109434 0.03840755 0 0.53827 19 0.35192 0.40191 0.146343 0.08504570 0 0.29013 20 0.29582 0.40191 0.248476 0.02702003 0 0.29013 21 0.40411 0.40191 0.190187 0.15763614 0 0.29013 22 0.35192 0.42010 0.327230 0.00020991 0 0.29013 23 0.29582 0.42010 0.335015 0.33743343 0 0.95012 25 0.35192 0.40345 0.110386 0.21643068 0 0.29013 26 0.29582 0.40345 0.278210 0.00580864 0 0.53827 $lwr x1 x2 x3 x4 x5 x1:x2 4 -1.06517 -0.87582 -1.35028 -0.1339794 0 -0.19943 5 -0.34144 -0.87582 -0.16292 -0.0919303 0 -3.56872 7 -1.06517 -0.86049 -0.30953 -0.2206655 0 -0.19943 8 -0.34144 -0.86049 -0.12335 -0.7455812 0 -1.62581 10 -1.06517 -1.03588 -1.19231 -1.0732591 0 -0.19943 11 -0.34144 -1.03588 -0.41513 -0.0929593 0 -0.19943 12 -1.14880 -1.03588 -0.39840 -0.2063844 0 -0.19943 13 -1.06517 -0.87582 -0.12780 -0.2287780 0 -0.19943 14 -0.34144 -0.87582 -1.07305 -0.0540569 0 -3.56872 16 -1.06517 -0.86049 -0.71200 -1.3254145 0 -0.19943 17 -0.34144 -0.86049 -0.21279 -0.0380019 0 -1.62581 19 -1.06517 -1.03588 -0.28455 -0.3006259 0 -0.19943 20 -0.34144 -1.03588 -0.64104 -0.0267347 0 -0.19943 21 -1.14880 -1.03588 -0.49066 -0.1559714 0 -0.19943 22 -1.06517 -0.87582 -0.63628 -0.0007420 0 -0.19943 23 -0.34144 -0.87582 -0.65142 -0.3338699 0 -3.56872 25 -1.06517 -0.86049 -0.28478 -0.7650554 0 -0.19943 26 -0.34144 -0.86049 -0.71775 -0.0057473 0 -1.62581 attr(,"constant") [1] 0.32347 $upr x1 x2 x3 x4 x5 x1:x2 4 0.52701 1.02486 1.01770 0.47865527 0 1.11321 5 0.99693 1.02486 0.21616 0.02573203 0 0.72993 7 0.52701 0.96483 0.23329 0.78835059 0 1.11321 8 0.99693 0.96483 0.16366 0.20869402 0 0.80949 10 0.52701 0.78250 1.58195 0.30041366 0 1.11321 11 0.99693 0.78250 0.55079 0.33210673 0 1.11321 12 0.67950 0.78250 0.52860 0.73732993 
0 1.11321 13 0.52701 1.02486 0.16956 0.06403677 0 1.11321 14 0.99693 1.02486 0.80875 0.19312376 0 0.72993 16 0.52701 0.96483 0.53663 0.37099391 0 1.11321 17 0.99693 0.96483 0.28233 0.13576588 0 0.80949 19 0.52701 0.78250 0.37755 0.08414755 0 1.11321 20 0.99693 0.78250 0.48315 0.09551244 0 1.11321 21 0.67950 0.78250 0.36981 0.55722407 0 1.11321 22 0.52701 1.02486 0.84421 0.00020769 0 1.11321 23 0.99693 1.02486 0.86430 1.19278501 0 0.72993 25 0.52701 0.96483 0.21464 0.21414502 0 1.11321 26 0.99693 0.96483 0.54096 0.02053283 0 0.80949 attr(,"constant") [1] 0.32347 $df [1] 9 $residual.scale [1] 0.92726 > #proj(rm1) ## fails "FIXME" > residuals(rm1) 4 5 7 8 10 11 12 13 1.003436 1.029645 -0.321738 0.691394 -0.498376 0.342960 -0.359752 -1.145548 14 16 17 19 20 21 22 23 -1.458427 -0.043483 -0.395061 0.498376 -0.342960 0.359752 0.092640 0.232325 25 26 0.366908 -0.270349 > #rstandard(rm1) > #rstudent(rm1) > #simulate(rm1) ## just $weights needs to be changed to prior weights > V1 <- vcov(rm1, complete=FALSE) > ## but don't show the "eigen" part {vectors may flip sign}: > attributes(V1) <- attributes(V1)[c("dim","dimnames", "weights")]; V1 (Intercept) x1b x1c x2B x2C x3 (Intercept) 0.296312 -0.321429 -0.338842 -0.238010 -0.3289125 0.1357438 x1b -0.321429 0.412501 0.369763 0.253038 0.3616767 -0.1594475 x1c -0.338842 0.369763 0.473317 0.274811 0.3497592 -0.1464335 x2B -0.238010 0.253038 0.274811 0.513277 0.2342086 -0.0640599 x2C -0.328913 0.361677 0.349759 0.234209 0.4721185 -0.2294044 x3 0.135744 -0.159448 -0.146434 -0.064060 -0.2294044 0.1726038 x4 -0.035258 0.039598 0.060587 0.035359 0.0024864 0.0037187 x1b:x2B 0.305162 -0.399754 -0.273260 -0.557840 -0.5219539 0.3087350 x1b:x2C 0.321423 -0.414159 -0.349092 -0.233097 -0.4394078 0.2039253 x4 x1b:x2B x1b:x2C (Intercept) -0.0352579 0.305162 0.321423 x1b 0.0395980 -0.399754 -0.414159 x1c 0.0605871 -0.273260 -0.349092 x2B 0.0353593 -0.557840 -0.233097 x2C 0.0024864 -0.521954 -0.439408 x3 0.0037187 0.308735 0.203925 x4 0.0286797 0.063743 -0.012435 x1b:x2B 0.0637434 1.476860 0.498060 x1b:x2C -0.0124347 0.498060 0.557368 attr(,"weights") 4 5 7 8 10 11 12 13 14 16 0.89614 0.89081 0.98906 0.94998 0.97385 0.98757 0.98633 0.86577 0.78729 0.99980 17 19 20 21 22 23 25 26 0.98353 0.97385 0.98757 0.98633 0.99909 0.99429 0.98578 0.99227 > set.seed(12); sc <- simulate(cm1, 64) > set.seed(12); rc <- simulate(rm1, 64) > > stopifnot(all.equal(sqrt(diag(V1)), coef(summary(rm1))[,"Std. Error"], tol=1e-15), + all.equal(sc, rc, tolerance = 0.08),# dimension *and* approx. values (no NA) + identical(variable.names(rm1), variable.names(cm1)), + all.equal(residuals(rm1), residuals(cm1), tolerance = 0.05),# incl. names + all.equal(rstudent (rm1), rstudent (cm1), tolerance = 0.06), + identical(dimnames(rm1), dimnames(cm1)), + all.equal(dummy.coef(rm1), dummy.coef(cm1), tolerance= .5)) ## check mostly structure > > ## other helper functions > stopifnot(identical(case.names(rm1), case.names(cm1)), + all.equal(family(rm1), family(cm1)),# identical() upto environment + identical(formula(rm1), formula(cm1)), + nobs(rm1) == nobs(cm1)) > #add1(rm0, ~ . + x3 + x4 + x5) ## does not return valid results (yet)! 
> > > ## test other initial estimators > lmrob(y ~ x1*x2 + x3 + x4 + x5, data, init="M-S") Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data, init = "M-S") \--> method = "M-SM" Coefficients: (Intercept) x1b x1c x2B x2C x3 0.4358 0.5996 0.0346 0.2005 0.1877 -0.1395 x4 x5 x1b:x2B x1c:x2B x1b:x2C x1c:x2C -0.2185 NA -1.8957 NA -0.8698 NA Warning message: In lmrob.M.S(x, y, control, mf = mf) : Skipping design matrix equilibration (DGEEQU): row 12 is exactly zero. > lmrob(y ~ x1*x2 + x3 + x4 + x5, data, init=lmrob.lar) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data, init = lmrob.lar) \--> method = "lM" Coefficients: (Intercept) x1b x1c x2B x2C x3 0.561131 0.444339 0.000184 0.530303 -0.251794 0.236541 x4 x5 x1b:x2B x1c:x2B x1b:x2C x1c:x2C -0.082680 NA -1.298418 NA -0.597602 NA > > ## test all zero design matrix > data <- data.frame(y=1:10,x1=0,x2=0,os=2,w=c(0.5, 1)) > (m5 <- lmrob(y ~ 1+x1+x2+offset(os), data, weights=w)) Call: lmrob(formula = y ~ 1 + x1 + x2 + offset(os), data = data, weights = w) \--> method = "MM" Coefficients: (Intercept) x1 x2 3.64 NA NA > (sm5 <- summary(m5)) Call: lmrob(formula = y ~ 1 + x1 + x2 + offset(os), data = data, weights = w) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -4.641 -2.391 -0.141 2.109 4.359 Coefficients: (2 not defined because of singularities) Estimate Std. Error t value Pr(>|t|) (Intercept) 3.64 1.03 3.53 0.0064 ** x1 NA NA NA NA x2 NA NA NA NA --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 3.24 Convergence in 8 IRWLS iterations Robustness weights: 1 2 3 4 5 6 7 8 9 10 0.909 0.889 0.970 0.977 0.998 0.999 0.992 0.952 0.952 0.842 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.55e+00 5.00e-01 4.69e+00 1.00e-07 rel.tol scale.tol solve.tol eps.outlier 1.00e-07 1.00e-10 1.00e-07 1.00e-02 eps.x warn.limit.reject warn.limit.meanrw 1.82e-12 5.00e-01 5.00e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) > (m6 <- lmrob(y ~ 0+x1+x2+offset(os), data, weights=w)) Call: lmrob(formula = y ~ 0 + x1 + x2 + offset(os), data = data, weights = w) \--> method = "MM" Coefficients: x1 x2 NA NA > (sm6 <- summary(m6)) Call: lmrob(formula = y ~ 0 + x1 + x2 + offset(os), data = data, weights = w) \--> method = "MM" Weighted Residuals: Min 1Q Median 3Q Max -3.83 -1.37 1.09 3.54 6.00 Coefficients: (2 not defined because of singularities) Estimate Std. 
Error t value Pr(>|t|) x1 NA NA NA NA x2 NA NA NA NA Robust residual standard error: NA Convergence in 0 IRWLS iterations Robustness weights: [1] NA NA NA NA NA NA NA NA NA NA Algorithmic parameters: tuning.psi rel.tol scale.tol solve.tol 4.69e+00 1.00e-07 1.00e-10 1.00e-07 eps.outlier warn.limit.reject warn.limit.meanrw 1.00e-02 5.00e-01 5.00e-01 max.it maxit.scale trace.lev compute.rd fast.s.large.n 50 200 0 0 2000 eps.x 0 psi cov compute.outlier.stats "bisquare" ".vcov.avar1" "SM" seed : int(0) > > sc5 <- summary(cm5 <- lm(y ~ 1+x1+x2+offset(os), data, weights=w)) > sc6 <- summary(cm6 <- lm(y ~ 0+x1+x2+offset(os), data, weights=w)) > > if(getRversion() <= "3.5.1" && as.numeric(R.version$`svn rev`) < 74993) + ## in the past, lm() returned logical empty matrix + storage.mode(sc6$coefficients) <- "double" > > stopifnot(all.equal(coef(m5), coef(cm5), tolerance = 0.01), + all.equal(coef(m6), coef(cm6), tolerance = 1e-14), + all.equal(coef(sm5), coef(sc5), tolerance = 0.05), + all.equal(coef(sm6), coef(sc6), tolerance = 1e-14), + identical(sm5$df, sc5$df), + identical(sm6$df, sc6$df)) > > proc.time() user system elapsed 0.857 0.073 1.008 robustbase/tests/weights.Rout.save0000644000176200001440000005155213326344173017065 0ustar liggesusers R version 3.5.1 Patched (2018-07-24 r75005) -- "Feather Spray" Copyright (C) 2018 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. 
> ## test handing of weights and offset argument > require(robustbase) Loading required package: robustbase > > ## generate simple example data (extension of the one in ./NAcoef.R ) > data <- expand.grid(x1=letters[1:3], x2=LETTERS[1:4], rep=1:3) > ## generate offset column > data$os <- 1:nrow(data) > set.seed(1) > data$y <- data$os + rnorm(nrow(data)) > ## add collinear variables > data$x3 <- rnorm(nrow(data)) > data$x4 <- rnorm(nrow(data)) > data$x5 <- data$x3 + data$x4 ## lm() will have 'x5' "aliased" (and give coef = NA) > ## add some NA terms > data$y[1] <- NA > data$x4[2:3] <- NA ## to test anova > ## generate weights > ## some obs with weight 0 > data$weights <- as.numeric(with(data, x1 != 'c' | (x2 != 'B' & x2 != 'C'))) > ## some obs with weight 2 > data$weights[data$x1 == 'b'] <- 2 > ## data2 := {data + weights}, encoded in "data2" (-> "ok" for coef(), not for SE) > data2 <- rbind(subset(data, weights > 0), + subset(data, weights == 2)) > ## using these parameters we're essentially forcing lmrob() to > ## fit a classic model --> easier to compare to lm() > ctrl <- lmrob.control(psi="optimal", tuning.chi = 20, bb = 0.0003846154, + tuning.psi=20, method="SM", cov=".vcov.w") > ## SM = MM == the case where .vcov.avar1 was also defined for > > ## Classical models start with 'cm', robust just with 'rm' (or just 'm'): > (cm0 <- lm (y ~ x1*x2 + x3 + x4 + x5 + offset(os), data)) Call: lm(formula = y ~ x1 * x2 + x3 + x4 + x5 + offset(os), data = data) Coefficients: (Intercept) x1b x1c x2B x2C x2D 0.01008 -1.14140 0.48156 0.01357 0.86985 0.15178 x3 x4 x5 x1b:x2B x1c:x2B x1b:x2C -0.01655 -0.02388 NA 1.05416 -0.32889 0.69954 x1c:x2C x1b:x2D x1c:x2D -0.73949 1.08478 -1.31578 > (cm1 <- lm (y ~ x1*x2 + x3 + x4 + x5 + offset(os), data, weights=weights)) Call: lm(formula = y ~ x1 * x2 + x3 + x4 + x5 + offset(os), data = data, weights = weights) Coefficients: (Intercept) x1b x1c x2B x2C x2D -0.002961 -1.132857 0.492904 0.017959 0.858031 0.208510 x3 x4 x5 x1b:x2B x1c:x2B x1b:x2C -0.021632 -0.079147 NA 1.040529 NA 0.736944 x1c:x2C x1b:x2D x1c:x2D NA 1.099090 -1.371953 > (cm2 <- lm (y ~ x1*x2 + x3 + x4 + x5, data2, offset=os)) Call: lm(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data2, offset = os) Coefficients: (Intercept) x1b x1c x2B x2C x2D -0.002961 -1.132857 0.492904 0.017959 0.858031 0.208510 x3 x4 x5 x1b:x2B x1c:x2B x1b:x2C -0.021632 -0.079147 NA 1.040529 NA 0.736944 x1c:x2C x1b:x2D x1c:x2D NA 1.099090 -1.371953 > (rm0 <- lmrob(y ~ x1*x2 + x3 + x4 + x5 + offset(os), data, control=ctrl)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5 + offset(os), data = data, control = ctrl) \--> method = "MM" Coefficients: (Intercept) x1b x1c x2B x2C x2D 0.01008 -1.14140 0.48156 0.01357 0.86985 0.15178 x3 x4 x5 x1b:x2B x1c:x2B x1b:x2C -0.01655 -0.02388 NA 1.05416 -0.32889 0.69954 x1c:x2C x1b:x2D x1c:x2D -0.73949 1.08478 -1.31578 > set.seed(2) > (rm1 <- lmrob(y ~ x1*x2 + x3 + x4 + x5 + offset(os), data, weights=weights, control=ctrl)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5 + offset(os), data = data, weights = weights, control = ctrl) \--> method = "MM" Coefficients: (Intercept) x1b x1c x2B x2C x2D -0.002961 -1.132857 0.492904 0.017959 0.858031 0.208510 x3 x4 x5 x1b:x2B x1c:x2B x1b:x2C -0.021632 -0.079147 NA 1.040529 NA 0.736944 x1c:x2C x1b:x2D x1c:x2D NA 1.099090 -1.371953 > set.seed(2) > (rm2 <- lmrob(y ~ x1*x2 + x3 + x4 + x5, data2, offset=os, control=ctrl)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data2, offset = os, control = ctrl) \--> method = "MM" Coefficients: (Intercept) x1b 
x1c x2B x2C x2D -0.002961 -1.132857 0.492904 0.017959 0.858031 0.208510 x3 x4 x5 x1b:x2B x1c:x2B x1b:x2C -0.021632 -0.079147 NA 1.040529 NA 0.736944 x1c:x2C x1b:x2D x1c:x2D NA 1.099090 -1.371953 > > sc0 <- summary(cm0) > sc1 <- summary(cm1) > sc2 <- summary(cm2) > (sr0 <- summary(rm0)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5 + offset(os), data = data, control = ctrl) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -1.50524 -0.48219 0.01663 0.42714 1.59122 Coefficients: (1 not defined because of singularities) Estimate Std. Error t value Pr(>|t|) (Intercept) 0.01008 0.76421 0.013 0.990 x1b -1.14140 1.02228 -1.117 0.278 x1c 0.48156 1.01891 0.473 0.642 x2B 0.01357 0.95276 0.014 0.989 x2C 0.86985 0.94762 0.918 0.370 x2D 0.15178 0.99480 0.153 0.880 x3 -0.01655 0.22284 -0.074 0.942 x4 -0.02388 0.25629 -0.093 0.927 x5 NA NA NA NA x1b:x2B 1.05416 1.30705 0.807 0.430 x1c:x2B -0.32889 1.30044 -0.253 0.803 x1b:x2C 0.69954 1.37279 0.510 0.616 x1c:x2C -0.73949 1.30141 -0.568 0.577 x1b:x2D 1.08478 1.32102 0.821 0.422 x1c:x2D -1.31578 1.33335 -0.987 0.336 Robust residual standard error: 1.007 (3 observations deleted due to missingness) Multiple R-squared: 0.9933, Adjusted R-squared: 0.9887 Convergence in 1 IRWLS iterations Robustness weights: All 33 weights are ~= 1. Algorithmic parameters: bb refine.tol rel.tol scale.tol 3.846e-04 1.000e-07 1.000e-07 1.000e-10 solve.tol eps.outlier eps.x warn.limit.reject 1.000e-07 3.030e-03 4.369e-12 5.000e-01 warn.limit.meanrw 5.000e-01 nResample tuning.chi tuning.psi max.it best.r.s 500 20 20 50 2 k.fast.s k.max maxit.scale trace.lev mts 1 200 200 0 1000 compute.rd fast.s.large.n 0 2000 psi subsampling cov "optimal" "nonsingular" ".vcov.w" compute.outlier.stats "SM" seed : int(0) > (sr1 <- summary(rm1)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5 + offset(os), data = data, weights = weights, control = ctrl) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -1.52261 -0.57370 -0.07248 0.39247 1.61986 Coefficients: (3 not defined because of singularities) Estimate Std. Error t value Pr(>|t|) (Intercept) -0.002961 0.977109 -0.003 0.998 x1b -1.132857 1.133342 -1.000 0.333 x1c 0.492904 1.297399 0.380 0.709 x2B 0.017959 1.213927 0.015 0.988 x2C 0.858031 1.204169 0.713 0.487 x2D 0.208510 1.275792 0.163 0.872 x3 -0.021632 0.284226 -0.076 0.940 x4 -0.079147 0.324629 -0.244 0.811 x5 NA NA NA NA x1b:x2B 1.040529 1.443384 0.721 0.482 x1c:x2B NA NA NA NA x1b:x2C 0.736944 1.530596 0.481 0.637 x1c:x2C NA NA NA NA x1b:x2D 1.099090 1.461384 0.752 0.464 x1c:x2D -1.371953 1.698858 -0.808 0.432 Robust residual standard error: 1.281 (3 observations deleted due to missingness) Multiple R-squared: 0.9923, Adjusted R-squared: 0.9866 Convergence in 1 IRWLS iterations Robustness weights: 6 observations c(3,6,15,18,27,30) are outliers with |weight| = 0 ( < 0.0037); 27 weights are ~= 1. 
Algorithmic parameters: bb refine.tol rel.tol scale.tol 3.846e-04 1.000e-07 1.000e-07 1.000e-10 solve.tol eps.outlier eps.x warn.limit.reject 1.000e-07 3.704e-03 5.094e-12 5.000e-01 warn.limit.meanrw 5.000e-01 nResample tuning.chi tuning.psi max.it best.r.s 500 20 20 50 2 k.fast.s k.max maxit.scale trace.lev mts 1 200 200 0 1000 compute.rd fast.s.large.n 0 2000 psi subsampling cov "optimal" "nonsingular" ".vcov.w" compute.outlier.stats "SM" seed : int(0) > (sr2 <- summary(rm2)) Call: lmrob(formula = y ~ x1 * x2 + x3 + x4 + x5, data = data2, offset = os, control = ctrl) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -1.52261 -0.51773 0.06925 0.38640 1.61986 Coefficients: (3 not defined because of singularities) Estimate Std. Error t value Pr(>|t|) (Intercept) -0.002961 0.742168 -0.004 0.997 x1b -1.132857 0.860835 -1.316 0.200 x1c 0.492904 0.985445 0.500 0.621 x2B 0.017959 0.922044 0.019 0.985 x2C 0.858031 0.914632 0.938 0.357 x2D 0.208510 0.969033 0.215 0.831 x3 -0.021632 0.215885 -0.100 0.921 x4 -0.079147 0.246574 -0.321 0.751 x5 NA NA NA NA x1b:x2B 1.040529 1.096329 0.949 0.351 x1c:x2B NA NA NA NA x1b:x2C 0.736944 1.162571 0.634 0.532 x1c:x2C NA NA NA NA x1b:x2D 1.099090 1.110001 0.990 0.331 x1c:x2D -1.371953 1.290375 -1.063 0.297 Robust residual standard error: 0.9728 (4 observations deleted due to missingness) Multiple R-squared: 0.9923, Adjusted R-squared: 0.989 Convergence in 1 IRWLS iterations Robustness weights: All 38 weights are ~= 1. Algorithmic parameters: bb refine.tol rel.tol scale.tol 3.846e-04 1.000e-07 1.000e-07 1.000e-10 solve.tol eps.outlier eps.x warn.limit.reject 1.000e-07 2.632e-03 4.369e-12 5.000e-01 warn.limit.meanrw 5.000e-01 nResample tuning.chi tuning.psi max.it best.r.s 500 20 20 50 2 k.fast.s k.max maxit.scale trace.lev mts 1 200 200 0 1000 compute.rd fast.s.large.n 0 2000 psi subsampling cov "optimal" "nonsingular" ".vcov.w" compute.outlier.stats "SM" seed : int(0) > > ## test Estimates, Std. Errors, ... > stopifnot(all.equal(coef(cm1), coef(cm2)), + all.equal(coef(rm1), coef(rm2)), + all.equal(coef(sc0), coef(sr0)), + all.equal(coef(sc1), coef(sr1)), + all.equal(coef(sc2), coef(sr2))) > > ## test class "lm" methods that do not depend on weights > meths1 <- c("family", + "formula", + "labels", + "model.matrix", + "na.action", + "terms") > for (meth in meths1) + stopifnot(all.equal(do.call(meth, list(rm0)), + do.call(meth, list(rm1)))) > > ## class "lm" methods that depend on weights > ## FIXME: > meths2 <- c(#"AIC", + "alias", + #"BIC", + "case.names", + "coef", + "confint", + #"cooks.distance", + #"deviance", + "df.residual", + #"dfbeta", + #"dfbetas", + #"drop1", + "dummy.coef", + #"effects", + #"extractAIC", + #"hatvalues", + #"influence", + "kappa", + #"logLik", + #"model.frame", ## disable because of zero.weights attribute + "nobs", + "predict", + #"proj", + #"rstandard", + #"rstudent", + #"simulate", + ##"summary", ## see above + "variable.names", + ##"vcov", ## see below + "weights") > op <- options(warn = 1)# print immediately > for (meth in meths2) { + cat(meth,":") + .SW. <- if(meth == "weights") suppressWarnings else identity # for suppressing + ## No weights defined for this object. Use type="robustness" .... 
+ stopifnot(all.equal(do.call(meth, list(cm1)), + do.call(meth, list(rm1))), + all.equal(do.call(meth, list(cm2)), + .SW.(do.call(meth, list(rm2))))) + cat("\n") + } alias : case.names : coef : confint : df.residual : dummy.coef : kappa : nobs : predict : variable.names : weights : > options(op)# reverting > > ## further tests: > anova(rm1, update(rm1, ~ . - x4 - x5)) Robust Wald Test Table Model 1: y ~ x1 * x2 + x3 + x4 + x5 + offset(os) Model 2: y ~ x1 + x2 + x3 + x1:x2 + offset(os) Largest model fitted by lmrob(), i.e. SM pseudoDf Test.Stat Df Pr(>chisq) 1 18 2 22 0.059442 1 0.8074 > anova(rm2, update(rm2, ~ . - x4 - x5)) Robust Wald Test Table Model 1: y ~ x1 * x2 + x3 + x4 + x5 Model 2: y ~ x1 + x2 + x3 + x1:x2 Largest model fitted by lmrob(), i.e. SM pseudoDf Test.Stat Df Pr(>chisq) 1 23 2 27 0.10303 1 0.7482 > > stopifnot(all.equal(fitted(cm0), fitted(rm0)), + all.equal(fitted(cm1), fitted(rm1)), + all.equal(fitted(cm2), fitted(rm2))) > > nd <- expand.grid(x1=letters[1:3], x2=LETTERS[1:4]) > set.seed(3) > nd$x3 <- rnorm(nrow(nd)) > nd$x4 <- rnorm(nrow(nd)) > nd$x5 <- rnorm(nrow(nd)) > nd$os <- nrow(nd):1 > wts <- runif(nrow(nd)) > stopifnot(all.equal(predict(cm0, nd, interval="prediction"), + predict(rm0, nd, interval="prediction")), + all.equal(predict(cm1, nd, interval="prediction"), + predict(rm1, nd, interval="prediction")), + all.equal(predict(cm2, nd, interval="prediction"), + predict(rm2, nd, interval="prediction")), + all.equal(predict(cm0, nd, interval="prediction", weights=wts), + predict(rm0, nd, interval="prediction", weights=wts)), + all.equal(predict(cm1, nd, interval="prediction", weights=wts), + predict(rm1, nd, interval="prediction", weights=wts)), + all.equal(predict(cm2, nd, interval="prediction", weights=wts), + predict(rm2, nd, interval="prediction", weights=wts), + tolerance=1e-7)) There were 14 warnings (use warnings() to see them) > > ## Padding can lead to differing values here > ## so test only full rank part > qrEQ <- function(m1, m2) { + q1 <- qr(m1) + q2 <- qr(m2) + r <- 1:q1$rank + stopifnot(q1$rank == q2$rank, + all.equal(q1$pivot, q2$pivot), + all.equal(q1$qraux[r],q2$qraux[r]), + all.equal(q1$qr[r,r], q2$qr[r,r])) + } > qrEQ(cm0, rm0) > qrEQ(cm1, rm1) > qrEQ(cm2, rm2) > > stopifnot(all.equal(residuals(cm0), residuals(rm0)), + all.equal(residuals(cm1), residuals(rm1)), + all.equal(residuals(cm2), residuals(rm2)), + all.equal(resid(cm0, type="pearson"), resid(rm0, type="pearson")), + all.equal(resid(cm1, type="pearson"), resid(rm1, type="pearson")), + all.equal(resid(cm2, type="pearson"), resid(rm2, type="pearson"))) > > ## R 3.5.0: vcov(*, complete=TRUE) new default ==> same NA's as coef() > if(interactive()) withAutoprint({ + op <- options(width = 130, digits = 2) # --> vcov() rows fit on 1 line + vcov(cm0) # 'x5' is NA + vcov(cm2) # 'x5', 'x1c:2B', 'x1c:2C' rows & columns are NA + options(op) + }) > > (no.C <- is.na(match("complete", names(formals(stats:::vcov.lm))))) ## temporary _FIXME_ [1] FALSE > vcovC <- if(no.C) function(M, ...) vcov(M, complete=FALSE, ...) 
else vcov # (complete=TRUE) > stopifnot(all.equal(vcov(cm0), vcovC(rm0), check.attributes=FALSE), + all.equal(vcov(cm1), vcovC(rm1), check.attributes=FALSE), + all.equal(vcov(cm2), vcovC(rm2), check.attributes=FALSE)) > > ## "clean": > cln <- function(vc) structure(vc, weights=NULL, eigen=NULL) > ## .vcov.avar1() is not recommended here, but also should work with singular / NA coef case: > ok0 <- !is.na(coef(rm0)) > vr0.NA<- vcov(rm0, cov=".vcov.avar1", complete=NA) # "almost singular" warning Warning messages: 1: In lf.cov(object, complete = complete, ...) : X'WX is almost singular. Consider using cov = ".vcov.w" 2: In lf.cov(object, complete = complete, ...) : .vcov.avar1: negative diag() fixed up; consider 'cov=".vcov.w."' instead > vr0.T <- vcov(rm0, cov=".vcov.avar1", complete=TRUE) > vr0.F <- vcov(rm0, cov=".vcov.avar1", complete=FALSE) > stopifnot(identical(dim(vr0.NA), dim(vr0.T)), + identical(dim(vr0.F), dim(vr0.T) - 1L), dim(vr0.F) == 14, + all.equal(cln(vr0.F), vr0.T[ok0,ok0], tol = 1e-15)) > > if(!no.C) { + vc0.T <- vcov(cm0, complete=TRUE) + vc0.F <- vcov(cm0, complete=FALSE) + } > > ok1 <- !is.na(coef(rm1)) > ## cannot work because init/fit residuals are not of full length > tools::assertError(vr1.NA<- vcov(rm1, cov=".vcov.avar1", complete=NA)) > tools::assertError(vr1.T <- vcov(rm1, cov=".vcov.avar1", complete=TRUE )) > tools::assertError(vr1.F <- vcov(rm1, cov=".vcov.avar1", complete=FALSE)) > ## instead, must refit > rm1. <- update(rm1, control = within(ctrl, cov <- ".vcov.avar1")) > vr1.NA<- vcov(rm1., complete=NA) > vr1.T <- vcov(rm1., complete=TRUE) > vr1.F <- vcov(rm1., complete=FALSE) > > stopifnot(identical(vr1.F, vr1.NA), # in this case + identical(dim(vr1.F), dim(vr1.T) - 3L), dim(vr1.F) == 12, isSymmetric(vr1.T), + identical(rownames(vr1.F), rownames(vr1.T)[ok1]), + all.equal(cln(vr1.F), vr1.T[ok1,ok1], tol=1e-15)) > > if(FALSE) ## ERROR "exact singular" (probably *NOT* to fix, as TRUE/FALSE do work !) + vr2.NA<- vcov(rm2, cov=".vcov.avar1", complete=NA) # "almost singular" warning > vr2.T <- vcov(rm2, cov=".vcov.avar1", complete=TRUE) > vr2.F <- vcov(rm2, cov=".vcov.avar1", complete=FALSE) > stopifnot(TRUE, # identical(dim(vr2.NA), dim(vr2.T)), + identical(dim(vr2.F), dim(vr2.T) - 3L), dim(vr2.F) == 12, + identical(rownames(vr2.F), rownames(vr1.F)), + identical(rownames(vr2.T), rownames(vr1.T)), + all.equal(cln(vr2.F), vr2.T[ok1,ok1], tol=1e-15)) > > ## Hmm, the supposedly heteroscedastic-robust estimates *are* very different: > all.equal(vcov(cm0), vcovC(rm0, cov = ".vcov.avar1"), check.attributes=FALSE) # rel.diff. 0.5367564 [1] "Mean relative difference: 0.5367564" > if(FALSE) # does not make sense + all.equal(vcov(cm1), vcovC(rm1, cov = ".vcov.avar1"), check.attributes=FALSE) > all.equal(vcov(cm2), vcovC(rm2, cov = ".vcov.avar1"), check.attributes=FALSE) # rel.diff. 0.5757642 [1] "Mean relative difference: 0.5757642" > > > ## Null fits (rank(X)==0) are tested in NAcoef.R > > ## testing weight=0 bug > lmrob(y ~ x3, data, weights=weights) Call: lmrob(formula = y ~ x3, data = data, weights = weights) \--> method = "MM" Coefficients: (Intercept) x3 18.7474 0.1751 > > proc.time() user system elapsed 0.329 0.049 0.371 robustbase/tests/lmrob-data.R0000644000176200001440000002127413774561564015762 0ustar liggesusers### lmrob() with "real data" ----------------------- ## testing functions: source(system.file("test-tools-1.R", package = "Matrix", mustWork=TRUE))# -> assert.EQ(), .. 
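## (assert.EQ() comes from Matrix's test-tools-1.R: in spirit a stopifnot()-style
##  wrapper around all.equal().  A rough stand-in -- an assumed sketch, *not* the
##  original definition -- would look like:)
if(FALSE) assert.EQ <- function(target, current, tol = 1e-12, giveRE = FALSE, ...) {
    ae <- all.equal(target, current, tolerance = tol, ...)
    if(!isTRUE(ae)) stop(ae)               # fail loudly, as stopifnot() would
    if(giveRE) ## report the achieved (relative) difference
        cat("all.equal() ok;", format(all.equal(target, current, tolerance = 0)), "\n")
}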
(doExtras <- robustbase:::doExtras()) showProc.time() library(robustbase) ##' short form to get "pure" robustness weights rw <- function(fm) unname(weights(fm, type="robustness")) set.seed(0) data(salinity) summary(m0.sali <- lmrob(Y ~ . , data = salinity)) (A1 <- anova(m0.sali, Y ~ X1 + X3)) ## -> X2 is not needed (m1.sali <- lmrob(Y ~ X1 + X3, data = salinity)) (A2 <- anova(m0.sali, m1.sali)) # the same as before stopifnot(all.equal(A1[2,"Pr(>chisq)"], A2[2,"Pr(>chisq)"], tolerance=1e-14)) anova(m0.sali, m1.sali, test = "Deviance") ## whereas 'X3' is highly significant: m2 <- update(m0.sali, ~ . -X3) (A3 <- anova(m0.sali, m2)) (A4 <- anova(m0.sali, m2, test = "Deviance")) cX3 <- c(Estimate = -0.627327396, `Std. Error` = 0.15844971, `t value` = -3.9591577, `Pr(>|t|)` = 0.000584156) stopifnot(all.equal(cX3, coef(summary(m0.sali))["X3",], tolerance = 1e-6)) showProc.time() ## example(lmrob) set.seed(7) data(coleman) summary( m1 <- lmrob(Y ~ ., data=coleman) ) stopifnot(identical(which(rw(m1) < 0.2), c(3L, 18L))) if(FALSE) # to find out *why setting = "KS201x" fails trace(lmrob.S, exit = quote({cat("coef:\n"); print(b$coefficients)})) if(FALSE) # to find out via setting = "KS201x" fails here in the *initial* estimate debug(lmrob.S) data(starsCYG) lmST <- lm(log.light ~ log.Te, data = starsCYG) (RlmST <- lmrob(log.light ~ log.Te, data = starsCYG, control=lmrob.control(trace = 1))) summary(RlmST) ## Least Sq. w/ negative slope, where robust has slope ~= 2.2 : stopifnot(exprs = { coef(lmST)[["log.Te"]] < 0 all.equal(coef(RlmST), c("(Intercept)" = -4.969, log.Te=2.253), tol = 1e-3) identical(which(rw(RlmST) < 0.01), as.integer( c(11,20,30,34) )) }) showProc.time() ## ==> Now see that "KS2011" and "KS2014" both break down -- and it is the fault of "lqq" *only* : (RlmST.11 <- update(RlmST, control = lmrob.control("KS2011", trace= 1))) (RlmST.14 <- update(RlmST, control = lmrob.control("KS2014", trace= 1))) (RlmSTM11 <- update(RlmST, control = lmrob.control("KS2011", method="MM", trace= 1))) (RlmSTM14 <- update(RlmST, control = lmrob.control("KS2014", method="MM", trace= 1))) ## using "biweight" instead of "lqq" fixes the problem : (RlmSTbM11 <- update(RlmST,control = lmrob.control("KS2011", method="MM", psi="biweight",trace= 1))) (RlmSTbM14 <- update(RlmST,control = lmrob.control("KS2014", method="MM", psi="biweight",trace= 1))) (RlmSTb.11 <- update(RlmST,control = lmrob.control("KS2011", psi="biweight",trace= 1))) (RlmSTb.14 <- update(RlmST,control = lmrob.control("KS2014", psi="biweight",trace= 1))) ## NB: RlmST has component 'init.S' the others have "init" -- documented in ?lmrob.fit == ../man/lmrob.fit.Rd R.ini.cf <- t(sapply(mget(ls(patt = "^RlmST")), function(OB) OB$init$coef)) R..cf <- t(sapply(mget(ls(patt = "^RlmST")), coef)) cbind(R.ini.cf, R..cf) ##---> "lqq" is *NOT* robust enough here -- but "biweight" is !! showProc.time() options(digits = 5)# less platform dependence ## Directly look at init.S(): x.s <- model.matrix(~ log.Te, data = starsCYG) y.s <- model.response(model.frame(log.light ~ log.Te, data = starsCYG)) ini.df <- lmrob.S(x.s, y.s, control=lmrob.control()) ini.11 <- lmrob.S(x.s, y.s, control=lmrob.control("KS2011")) ini.14 <- lmrob.S(x.s, y.s, control=lmrob.control("KS2014")) ## but these are fine !! 
: rbind(deflt = ini.df$coef, KS.11 = ini.11$coef, KS.14 = ini.14$coef) ##==> No, it is *not* the init.S() ini.14$scale # 0.48144 ## More clearly shows how M-estimate is converging to *WRONG* solution: (RlmST.lqq <- update(RlmST, init=ini.14, control = lmrob.control(method="MM", psi="lqq", trace= 4))) ## --> break down ## The 10 largest residuals from the robust init. S-estim: (i10 <- head(order(abs(residuals(ini.14)), decreasing=TRUE), 10)) residuals(ini.14)[i10] ## ==> and their weights for the different psi() and their default (95% efficiency) tuning: PSIs <- names(.Mchi.tuning.defaults) sapply(PSIs, function(PSI) Mwgt(residuals(ini.14)[i10], cc = .Mpsi.tuning.defaults[[PSI]], psi=PSI)) ## All MM: RlmST.MM <- lapply(setNames(,PSIs), function(PSI) update(RlmST, init=ini.14, control = lmrob.control(method="MM", psi = PSI))) cf.MM <- t(sapply(RlmST.MM, coef)) cf.MM[order(cf.MM[,1], cf.MM[,2]),] ## only 'bisquare' and 'optimal' are robust enough showProc.time() ##=== Werner's analysis: Sensitivity curves for the most-left obs ========================================= dd <- starsCYG dd <- dd[order(dd[,"log.Te"]),] # ==> leverage points come first (and easier plotting) (rr <- lmrob(log.light ~ log.Te, data = dd)) (rr14 <- update(rr, control = lmrob.control("KS2014"))) dd[1,2] # 6.05 will be replaced for sensitivity curve leg.s <- c("default, biweight" ,"KS14, lqq" ,"KS14, biweight" ,"KS14, optimal" ,"KS14, Hampel" ,"KS14, GGW" ,"KS14, Welsh" ) nEst <- length(leg.s) # == number of estimators used below nn <- length(y1 <- c(NA, seq(2,9, length=if(doExtras) 64 else 8))) nCf <- length(coef(rr)) + 1 # +1: sigma r.coef <- matrix(NA, length(y1), nEst*nCf) t.d <- dd oo <- options(warn = 1) showProc.time() ## vary the left-most observation and fit all three for (i in seq_along(y1)) { cat(sprintf("i=%3d, y1[i]=%11.6g: -- ", i, y1[i])) t.d[1,2] <- y1[i] ## the (old) default does not converge in 4 cases lr <- update(rr, data=t.d, control = lmrob.control(maxit=500)) ; cat(" 1") lr14 <- update(rr14, data=t.d, control = lmrob.control("KS2014", psi="lqq") ) ; cat(" 2") lr14b <- update(rr14, data=t.d, control = lmrob.control("KS2014", psi="biweight") ); cat(" 3") lr14o <- update(rr14, data=t.d, control = lmrob.control("KS2014", psi="optimal" ) ); cat(" 4") lr14h <- update(rr14, data=t.d, control = lmrob.control("KS2014", psi="hampel" ) ); cat(" 5") lr14g <- update(rr14, data=t.d, control = lmrob.control("KS2014", psi="ggw" ) ); cat(" 6") lr14a <- update(rr14, data=t.d, control = lmrob.control("KS2014", psi="welsh" ) ); cat(" 7") r.coef[i,] <- c(coef(lr ), sigma(lr), coef(lr14 ), sigma(lr14), coef(lr14b), sigma(lr14b), coef(lr14o), sigma(lr14o), coef(lr14h), sigma(lr14h), coef(lr14g), sigma(lr14g), coef(lr14a), sigma(lr14a)) cat("\n") } showProc.time() options(oo) ## cbind(y=y.1, r.coef) ## y1[1] = where the NA is pMat <- function(j, main, x.legend, col = 1:8, lty=1:6, lwd = 2, ylab=NA, ...) { stopifnot(j %in% seq_len(ncol(r.coef))) matplot(y1, r.coef[, j], type="l", xlab = quote("varying obs." ~ ~ y[1]), ylab=ylab, main=main, col=col, lty=lty, lwd=lwd, ...) 
xx <- par("usr")[1:2]; yL <- .99* xx[1] + .01*xx[2] matpoints(yL, r.coef[1, j, drop=FALSE], pch = 20, col=col, lwd=lwd) abline(h = r.coef[1, j, drop=FALSE], col = col, lwd=1, lty=3) legend(x.legend, leg.s, lty=lty, col=col, lwd=lwd, bty = "n") abline(v = dd[1,2], col=adjustcolor("tomato", 1/4)) # true y value } (jj0 <- nCf*(seq_len(nEst)-1L)) op <- { if(requireNamespace("sfsmisc", quietly=TRUE)) sfsmisc::mult.fig(2)$old.par else par(mfrow = 2:1, mar = .1+ c(4,4,2,1), mgp = c(1.5, 0.6, 0)) } pMat(j = 2+jj0, main = quote("slope" ~~ hat(beta[2])), "bottomleft") pMat(j = 3+jj0, main = quote(hat(sigma)), "topleft") par(op) showProc.time() ## -------------------------------- set.seed(47) data(hbk) m.hbk <- lmrob(Y ~ ., data = hbk) summary(m.hbk) stopifnot(1:10 == which(rw(m.hbk) < 0.01)) data(heart) summary(mhrt <- lmrob(clength ~ ., data = heart)) # -> warning 'maxit.scale=200' too small stopifnot(8 == which(rw(mhrt) < 0.15), 11 == which(0.61 < rw(mhrt) & rw(mhrt) < 0.62), c(1:7,9:10,12) == which(rw(mhrt) > 0.90)) iN <- c(3,5,7,11) heartN <- heart; heartN[iN, "clength"] <- NA lmN <- lm (clength ~ ., data = heartN) # default na.action=na.omit mhN <- lmrob(clength ~ ., data = heartN) # default na.action=na.omit # ==> everything just uses the n=8 complete obs summary(mhN) # now *does* note the 4 omitted obs. mhNex <- lmrob(clength ~ ., data = heartN, na.action=na.exclude) summary(mhNex) mhNx1 <- update(mhNex, ~ . - weight) mhNx0 <- update(mhNex, ~ 1) stopifnot( length(r.mNex <- resid(mhNex)) == nrow(heartN) , iN == which(iNAr <- is.na(r.mNex)) , identical(iNAr, is.na(r.mN1 <- residuals(mhNx1))) , identical(iNAr, is.na(r.mN0 <- residuals(mhNx0))) ) showProc.time() data(stackloss) mSL <- lmrob(stack.loss ~ ., data = stackloss) summary(mSL) showProc.time() robustbase/tests/tlts.Rout.save0000644000176200001440000003505113162677266016407 0ustar liggesusers R Under development (unstable) (2017-09-26 r73351) -- "Unsuffered Consequences" Copyright (C) 2017 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. 
> library(robustbase) > ## library(MASS)## MASS::lqs > > source(system.file("xtraR/test_LTS.R", package = "robustbase")) > ## ../inst/test_LTS.R > > y20 <- c(2:4, 8, 12, 22, 28, 29, 33, 34, 38, 40, 41, 47:48, 50:51, 54, 56, 59) > > test_location <- function() { + ## Improve: print less, and test equality explicitly + Y <- y20 + print(ltsReg(y=Y)) + print(ltsReg(y=Y, intercept=TRUE)) + print(ltsReg(y=Y, intercept=FALSE)) + print(ltsReg(y=Y, alpha=1)) + print(ltsReg(Y ~ 1)) + print(ltsReg(Y ~ 0))# = Y ~ 1 - 1 : empty model (no coefficients) + print(ltsReg(Y ~ 1, alpha=1)) + } > > test_rsquared <- function() { + x1 <- y20 + y1 <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 3.5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5) + ll1 <- ltsReg(x1,y1, alpha = 0.8) + ## print() ing is platform-dependent, since only ~= 0 + stopifnot(all.equal(unname(coef(ll1)), c(1,0), tolerance=1e-12), + ll1$scale < 1e-14) + print(ltsReg(y1,x1, alpha = 0.8)) + print(ltsReg(y1,x1, alpha = 0.8, intercept = FALSE)) + } > > options(digits = 5) > set.seed(101) # <<-- sub-sampling algorithm now based on R's RNG and seed > > doLTSdata() Call: doLTSdata() ======================================================== Data Set n p Half obj Time [ms] ======================================================== heart 12 2 8 0.065810 Best subsample: [1] 1 2 4 5 6 7 11 12 Outliers: 4 [1] 3 8 9 10 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept height weight 63.353 -1.227 0.688 Scale estimate 1.52 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): 1 2 3 4 5 6 7 8 9 10 11 -1.393 0.169 0.000 0.443 -0.341 0.165 -0.115 0.000 0.000 0.000 0.666 12 0.404 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept 63.3528 4.0227 15.75 1.9e-05 *** height -1.2265 0.1403 -8.74 0.00032 *** weight 0.6884 0.0528 13.04 4.7e-05 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 0.765 on 5 degrees of freedom Multiple R-Squared: 0.991, Adjusted R-squared: 0.988 F-statistic: 286 on 2 and 5 DF, p-value: 6.99e-06 -------------------------------------------------------- starsCYG 47 1 25 1.880169 Best subsample: [1] 2 4 6 10 13 15 17 19 21 22 25 27 28 29 33 35 36 38 39 41 42 43 44 45 46 Outliers: 6 [1] 7 9 11 20 30 34 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept log.Te -8.50 3.05 Scale estimate 0.456 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -0.784 -0.214 0.000 0.227 0.592 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept -8.500 1.926 -4.41 7.8e-05 *** log.Te 3.046 0.437 6.97 2.4e-08 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 0.341 on 39 degrees of freedom Multiple R-Squared: 0.554, Adjusted R-squared: 0.543 F-statistic: 48.5 on 1 and 39 DF, p-value: 2.39e-08 -------------------------------------------------------- phosphor 18 2 11 0.245377 Best subsample: [1] 1 2 3 4 6 7 11 12 14 15 18 Outliers: 1 [1] 17 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept inorg organic 60.9149 1.2110 0.0883 Scale estimate 13.5 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -30.297 -3.591 -0.692 4.251 17.116 Coefficients: Estimate Std. 
Error t value Pr(>|t|) Intercept 60.9149 10.1995 5.97 3.4e-05 *** inorg 1.2110 0.3549 3.41 0.0042 ** organic 0.0883 0.2574 0.34 0.7366 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 12.7 on 14 degrees of freedom Multiple R-Squared: 0.519, Adjusted R-squared: 0.45 F-statistic: 7.55 on 2 and 14 DF, p-value: 0.00597 -------------------------------------------------------- stackloss 21 3 13 0.083378 Best subsample: [1] 5 6 7 8 9 10 11 12 15 16 17 18 19 Outliers: 4 [1] 1 3 4 21 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept Air.Flow Water.Temp Acid.Conc. -37.6525 0.7977 0.5773 -0.0671 Scale estimate 1.92 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -2.506 -0.424 0.000 0.576 1.934 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept -37.6525 4.7321 -7.96 2.4e-06 *** Air.Flow 0.7977 0.0674 11.83 2.5e-08 *** Water.Temp 0.5773 0.1660 3.48 0.0041 ** Acid.Conc. -0.0671 0.0616 -1.09 0.2961 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 1.25 on 13 degrees of freedom Multiple R-Squared: 0.975, Adjusted R-squared: 0.969 F-statistic: 169 on 3 and 13 DF, p-value: 1.16e-10 -------------------------------------------------------- coleman 20 5 13 0.028344 Best subsample: [1] 1 2 6 7 8 9 11 13 14 15 16 19 20 Outliers: 2 [1] 3 18 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept salaryP fatherWc sstatus teacherSc motherLev 29.7577 -1.6985 0.0851 0.6662 1.1840 -4.0668 Scale estimate 1.12 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -1.216 -0.389 0.000 0.306 0.984 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept 29.7577 5.5322 5.38 0.00017 *** salaryP -1.6985 0.4660 -3.64 0.00336 ** fatherWc 0.0851 0.0208 4.09 0.00149 ** sstatus 0.6662 0.0382 17.42 6.9e-10 *** teacherSc 1.1840 0.1643 7.21 1.1e-05 *** motherLev -4.0668 0.8487 -4.79 0.00044 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 0.782 on 12 degrees of freedom Multiple R-Squared: 0.988, Adjusted R-squared: 0.983 F-statistic: 203 on 5 and 12 DF, p-value: 3.65e-11 -------------------------------------------------------- salinity 28 3 16 0.065610 Best subsample: [1] 2 3 4 6 7 12 14 15 17 18 19 20 21 22 26 27 Outliers: 4 [1] 5 16 23 24 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept X1 X2 X3 38.063 0.443 -0.206 -1.373 Scale estimate 1.23 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -2.482 -0.390 0.000 0.339 1.701 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept 38.063 5.172 7.36 4.1e-07 *** X1 0.443 0.086 5.15 4.9e-05 *** X2 -0.206 0.138 -1.50 0.15 X3 -1.373 0.195 -7.06 7.7e-07 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 Residual standard error: 1.03 on 20 degrees of freedom Multiple R-Squared: 0.899, Adjusted R-squared: 0.884 F-statistic: 59.3 on 3 and 20 DF, p-value: 3.92e-10 -------------------------------------------------------- aircraft 23 4 14 0.298554 Best subsample: [1] 1 5 6 7 8 9 10 11 13 14 15 17 20 23 Outliers: 2 [1] 16 22 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept X1 X2 X3 X4 9.500740 -3.048797 1.210033 0.001381 -0.000555 Scale estimate 5.69 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -6.67 -2.43 0.00 2.79 6.79 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept 9.500740 5.577532 1.70 0.1078 X1 -3.048797 0.919147 -3.32 0.0044 ** X2 1.210033 0.649230 1.86 0.0808 . X3 0.001381 0.000392 3.52 0.0028 ** X4 -0.000555 0.000328 -1.69 0.1102 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 4.35 on 16 degrees of freedom Multiple R-Squared: 0.826, Adjusted R-squared: 0.782 F-statistic: 19 on 4 and 16 DF, p-value: 6.47e-06 -------------------------------------------------------- delivery 25 2 14 0.112945 Best subsample: [1] 2 5 6 7 8 10 12 13 14 15 17 21 22 25 Outliers: 3 [1] 1 9 24 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept n.prod distance 3.7196 1.4058 0.0163 Scale estimate 2.38 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -5.0321 -1.0306 -0.0124 0.3474 4.2371 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept 3.71959 0.91011 4.09 0.00063 *** n.prod 1.40578 0.13128 10.71 1.7e-09 *** distance 0.01625 0.00301 5.40 3.3e-05 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 2.32 on 19 degrees of freedom Multiple R-Squared: 0.962, Adjusted R-squared: 0.958 F-statistic: 243 on 2 and 19 DF, p-value: 2.9e-14 -------------------------------------------------------- wood 20 5 13 0.070258 Best subsample: [1] 2 3 9 10 11 12 13 14 15 16 17 18 20 Outliers: 4 [1] 4 6 8 19 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept x1 x2 x3 x4 x5 0.377 0.217 -0.085 -0.564 -0.400 0.607 Scale estimate 0.0124 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -0.00928 -0.00177 0.00000 0.00115 0.01300 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept 0.3773 0.0540 6.99 3.8e-05 *** x1 0.2174 0.0421 5.16 0.00042 *** x2 -0.0850 0.1977 -0.43 0.67634 x3 -0.5643 0.0435 -12.98 1.4e-07 *** x4 -0.4003 0.0654 -6.12 0.00011 *** x5 0.6074 0.0786 7.73 1.6e-05 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 Residual standard error: 0.00745 on 10 degrees of freedom Multiple R-Squared: 0.958, Adjusted R-squared: 0.937 F-statistic: 46 on 5 and 10 DF, p-value: 1.4e-06 -------------------------------------------------------- hbk 75 3 40 3.724554 Best subsample: [1] 11 12 14 16 17 18 20 25 26 30 31 32 33 34 35 36 37 39 40 41 42 44 45 46 48 [26] 50 55 56 58 59 60 61 63 64 66 67 69 71 72 74 Outliers: 10 [1] 1 2 3 4 5 6 7 8 9 10 ------------- Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Coefficients: Intercept X1 X2 X3 -0.1805 0.0814 0.0399 -0.0517 Scale estimate 0.744 Call: ltsReg.formula(formula = form, data = dataset, mcd = FALSE) Residuals (from reweighted LS): Min 1Q Median 3Q Max -0.926 -0.396 0.000 0.397 1.011 Coefficients: Estimate Std. Error t value Pr(>|t|) Intercept -0.1805 0.1044 -1.73 0.089 . X1 0.0814 0.0667 1.22 0.227 X2 0.0399 0.0405 0.99 0.328 X3 -0.0517 0.0354 -1.46 0.149 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Residual standard error: 0.557 on 61 degrees of freedom Multiple R-Squared: 0.0428, Adjusted R-squared: -0.00429 F-statistic: 0.909 on 3 and 61 DF, p-value: 0.442 -------------------------------------------------------- ======================================================== > if(FALSE) { ## FIXME: These *FAIL* ! + doLTSdata(nrep = 12, time = FALSE) + doLTSdata(nrep = 12, time = FALSE, method = "MASS") + } > > test_rsquared() Call: ltsReg.default(x = y1, y = x1, alpha = 0.8) Coefficients: Intercept y1 25.9 5.3 Scale estimate 18 Call: ltsReg.default(x = y1, y = x1, intercept = FALSE, alpha = 0.8) Coefficients: y1 31.4 Scale estimate 24.6 Warning messages: 1: In covMcd(X, alpha = alpha, use.correction = use.correction) : Initial scale 0 because more than 'h' (=16) observations are identical. 2: In covMcd(X, alpha = alpha, use.correction = use.correction) : Initial scale 0 because more than 'h' (=16) observations are identical. > test_location() Call: ltsReg.default(y = Y) Coefficients: [1] 44.6 Scale estimate 19.7 Call: ltsReg.default(y = Y, intercept = TRUE) Coefficients: [1] 44.6 Scale estimate 19.7 Call: ltsReg.default(y = Y, intercept = FALSE) Coefficients: [1] 44.6 Scale estimate 20 Call: ltsReg.default(y = Y, alpha = 1) Coefficients: [1] 33 Scale estimate 19.3 Call: ltsReg.formula(formula = Y ~ 1) Coefficients: [1] 44.6 Scale estimate 19.7 Call: ltsReg.formula(formula = Y ~ 0) No coefficients Call: ltsReg.formula(formula = Y ~ 1, alpha = 1) Coefficients: [1] 33 Scale estimate 19.3 > > if(length(W <- warnings())) print(if(getRversion() >= "3.5") summary(W) else W) 2 identical warnings: In covMcd(X, alpha = alpha, use.correction = use.correction) : Initial scale 0 because more than 'h' (=16) observations are identical. > > cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' Time elapsed: 0.332 0.058 0.463 0.002 0.001 > > proc.time() user system elapsed 0.334 0.059 0.463 robustbase/tests/psi-rho-etc.Rout.save0000644000176200001440000002031612271657124017540 0ustar liggesusers R version 3.0.2 Patched (2014-01-26 r64896) -- "Frisbee Sailing" Copyright (C) 2014 The R Foundation for Statistical Computing Platform: x86_64-unknown-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. 
Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > require(robustbase) Loading required package: robustbase > ## see also ./lmrob-psifns.R <<<<<<<< > source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) > > EQ <- function(x,y) all.equal(x,y, tolerance = 1e-13) > > ## Demonstrate that one of tukeyChi() / tukeyPsi1() is superfluous > x <- seq(-4,4, length=201) > suppressWarnings(## as tukeyPsi1(), tukeyChi() are deprecated + for(c. in c(0.1, 1:2, pi, 100)) { + ix <- abs(x) != c. + stopifnot(EQ(tukeyChi(x, c.), + 6/c.^2* tukeyPsi1(x, c., deriv=-1)), + EQ(tukeyChi(x, c., deriv= 1), + 6/c.^2* tukeyPsi1(x, c., deriv= 0)), + EQ(tukeyChi(x, c., deriv= 2), + 6/c.^2* tukeyPsi1(x, c., deriv= 1)), + ## Now show equivalence with Mpsi(): + EQ(tukeyPsi1(x, c.), Mpsi(x, c., "tukey")), + EQ(tukeyPsi1(x, c., d=1), Mpsi(x, c., "tukey", d=1)), + EQ(tukeyPsi1(x[ix], c., d=2), Mpsi(x[ix], c., "tukey", d=2)) + ) + } + ) > ## Test if default arguments are used > h2Psi <- chgDefaults(huberPsi, k = 2) > > x <- 1:10 > stopifnot(h2Psi@ rho(x, k=2) == h2Psi@ rho(x), + h2Psi@ psi(x, k=2) == h2Psi@ psi(x), + h2Psi@Dpsi(x, k=2) == h2Psi@Dpsi(x), + h2Psi@ wgt(x, k=2) == h2Psi@ wgt(x), + h2Psi@Dwgt(x, k=2) == h2Psi@Dwgt(x)) > > ## Test default arguments for E... slots > stopifnot(EQ(h2Psi@Erho (), 0.49423127328548), + EQ(h2Psi@Epsi2(), 0.920536925636323), + EQ(h2Psi@EDpsi(), 0.954499736103642)) > > stopifnot(EQ(1, huberPsi@psi(1, k = 1e16)), + huberPsi@wgt(0.1591319494080224, 0.5 + 1/13) <= 1) > ## both used to fail because of numeric instability in pmin2/pmax2 > > f1 <- function(.) rep.int(1, length(.)) > F1 <- function(x, .) rep.int(1, length(x)) > ## correct "classical psi": > cPs <- psiFunc(rho = function(x,.) x^2 / 2, psi = function(x, .) x, + wgt = F1, Dpsi = F1, Erho = function(.) rep.int(1/2, length(.)), + Epsi2 = f1, EDpsi = f1, . = Inf) > validObject(cPs); cPs [1] TRUE psi function > ## incorrect dummy psi > cP <- psiFunc(rho = F1, psi = F1, wgt = F1, Dpsi = F1, + Erho = f1, Epsi2 = f1, EDpsi = f1, . 
= Inf) > cP psi function > ## Check the autogenerated Dwgt(): > x <- seq(0,2, by=1/4) > stopifnot(## strict symmetry { including Dwgt(0) == 0 } : + huberPsi @Dwgt(-x) == -huberPsi @Dwgt(x), + hampelPsi@Dwgt(-x) == -hampelPsi@Dwgt(x), + huberPsi @Dwgt(x)[x < 1.345] == 0, + hampelPsi@Dwgt(x)[x < 1.487] == 0, + EQ(huberPsi @Dwgt(x[x >= 1.5]), + c(-0.597777777777778, -0.439183673469388, -0.33625)), + EQ(hampelPsi@Dwgt(x[x >= 1.5]), + c(-0.660883932259397, -0.485547378802822, -0.371747211895911)) + ) > > .defDwgt <- robustbase:::.defDwgt > (ddd <- .defDwgt(psi = function(u, k) pmin.int(k, pmax.int(-k, u)), + Dpsi = function(u, k) abs(u) <= k)) function (u, k) { y <- u u <- u[not0 <- u != 0] y[not0] <- (Dpsi(u, k) - psi(u, k)/u)/u y } > stopifnot(is.function(ddd), names(formals(ddd)) == c("u","k"), + EQ(ddd(x, 1.345), huberPsi@Dwgt(x))) > > ## TODO: Provide some functionality of this as a Plot+Check function > ## ---- and then call the function for all our psiFunc objects (with different 'k') > kk <- c(1.5, 3, 8) > psiH.38 <- chgDefaults(hampelPsi, k = kk) > c1 <- curve(psiH.38@psi(x), -10, 10, n=512, col=2) > abline(h=0, v=0, lty=3, lwd=.5, col="gray25") > c2 <- curve(x * psiH.38@wgt(x), add=TRUE, n=512, col=adjustcolor("blue", .5), lwd=2) > title("psi_Hampel_(1.5, 3, 8) : psi(x) = x * wgt(x)") > axis(1, at=kk, expression(k[1], k[2], k[3]), pos=0) > axis(2, at=kk[1], quote(k[1]), pos=0, las=1) > stopifnot(all.equal(c1,c2, tolerance= 1e-15)) > > r1 <- curve(psiH.38@rho(x), -10, 10, col=2, + main = quote(rho(x) == integral(phi(t) * dt, 0, x))) > axis(1, at=kk, expression(k[1], k[2], k[3]), pos=0) > curve(psiH.38@psi(x), add=TRUE, n=512, col=adjustcolor("blue", .5), lwd=2) > abline(h=0, v=0, lty=3, lwd=.5, col="gray25") > ## check rho(x) = \int_0^x psi(x) dx {slightly *more* than rho' = psi !} > rhoH.38.int <- function(x) integrate(function(u) psiH.38@psi(u), 0, x, rel.tol=1e-10)$value > r2 <- curve(sapply(x, rhoH.38.int), add = TRUE, + lwd=4, col=adjustcolor("red", 1/4)) > ## numerical integration == "formula" : > stopifnot(all.equal(r1,r2, tolerance=1e-10)) > > curve(psiH.38@Dpsi(x), -10, 10, n=512, col=2, + main = quote(psi*minute(x))) > abline(h=0, v=0, lty=3, lwd=.5, col="gray25") > > ## check rho'(x) = phi(x) etc {TODO: for all our psiFun.} > head(xx <- seq(-10, 10, length=1024)) [1] -10.000000 -9.980450 -9.960899 -9.941349 -9.921799 -9.902248 > FrhoH.38 <- splinefun(xx, rho.x <- psiH.38@rho (xx)) > FpsiH.38 <- splinefun(xx, psi.x <- psiH.38@psi (xx)) > F1psH.38 <- splinefun(xx, Dps.x <- psiH.38@Dpsi(xx)) > > curve(FpsiH.38(x, deriv=1), -10,10, n=512) > curve(F1psH.38, add=TRUE, col=4, n=512) > stopifnot(all.equal(FpsiH.38(xx, deriv=1), Dps.x, + tolerance = 0.02))# not better because of discontinuities > > curve(FrhoH.38(x, deriv=1), -10,10, n=512) > curve(FpsiH.38, add=TRUE, col=4, n=512) > stopifnot(all.equal(FrhoH.38(xx, deriv=1), psi.x, tolerance = 1e-4)) > > E.norm <- function(FUN, tol=1e-12, ...) { + integrate(function(x) FUN(x) * dnorm(x), -Inf, Inf, + rel.tol=tol, ...)$value + } > > ##' asymptotic efficiency -- both integrate + "formula"(@Epsi, @EDpsi) version > aeff.P <- function(psiF, k, ...) { + stopifnot(is(psiF, "psi_func")) + if(!missing(k)) + psiF <- chgDefaults(psiF, k = k) + ## E[ psi'(X) ] ^2 / E[ psi(X) ^ 2 ] : + c(int = E.norm(psiF@Dpsi, ...)^2 / E.norm(function(x) psiF@psi(x)^2, ...), + form= psiF@EDpsi()^2 / psiF@Epsi2()) + } > > > ## Breakdown Point --- for redescenders only, > ## both integrate + "formula"(@Erho) version > bp.P <- function(psiF, k, ...) 
{ + stopifnot(is(psiF, "psi_func")) + if(!missing(k)) + psiF <- chgDefaults(psiF, k = k) + if(!is.finite( rhoInf <- psiF@rho(Inf) )) + stop("rho(Inf) is not finite: ", rhoInf) + integ <- function(x) psiF@rho(x) + c(int = E.norm(integ, ...), form= psiF@Erho()) / rhoInf + } > > ## Print & Check the result of aeff.P() or bp.P() > chkP <- function(rp, tol = 1e-9) { + print(rp) + ae <- all.equal(rp[[1]], rp[[2]], tolerance=tol) + if(isTRUE(ae)) invisible(rp) else stop(ae) + } > > chkP(aeff.P(huberPsi)) int form 0.9500003 0.9500003 > chkP(aeff.P(huberPsi, k = 1.5)) int form 0.9642358 0.9642358 > chkP(aeff.P(huberPsi, k = 2)) int form 0.9897156 0.9897156 > chkP(aeff.P(huberPsi, k = 2.5)) int form 0.9977041 0.9977041 > > chkP(aeff.P(hampelPsi)) int form 0.9613126 0.9613126 > chkP(aeff.P(hampelPsi, k = c(1.5, 3, 8))) int form 0.9632396 0.9632396 > chkP(aeff.P(hampelPsi, k = c(2, 4, 8), tol=1e-10),# fails with tol=1e-11 + tol = 1e-4) int form 0.989679 0.989679 > > ## Now works too: > chkP(bp.P(hampelPsi)) int form 0.08615786 0.08615786 > chkP(bp.P(hampelPsi, k = c(1.5, 3, 8))) int form 0.06696027 0.06696027 > chkP(bp.P(hampelPsi, k = c(2, 4, 8))) int form 0.04942297 0.04942297 > > > ## test derivatives (adapted from ./lmrob-psifns.R) > head(x. <- seq(-5, 10, length=1501)) [1] -5.00 -4.99 -4.98 -4.97 -4.96 -4.95 > ## [separate lines, for interactive "play": ] > stopifnot(chkPsiDeriv(plot(huberPsi, x.))) > ## ToDo: improve accuracy of derivative check > stopifnot(chkPsiDeriv(plot(hampelPsi, x.), tol=c(1e-4, 1e-1))) > > > proc.time() user system elapsed 1.167 0.079 1.301 robustbase/tests/OGK-ex.Rout.save0000644000176200001440000001160412271657124016440 0ustar liggesusers R version 2.4.0 Patched (2006-10-03 r39576) Copyright (C) 2006 The R Foundation for Statistical Computing ISBN 3-900051-07-0 R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > library(robustbase) > > ## minimal testing only > data(ruspini, package = "cluster") > > rub1 <- covOGK(ruspini, 1, scaleTau2, covGK, hard.rejection, consistency=FALSE) > rub2 <- covOGK(ruspini, 2, scaleTau2, covGK, hard.rejection, consistency=FALSE) > > AE <- function(x,y) all.equal(x,y, tolerance = 2e-15) > ## The following test is already fulfilled by Kjell Konis' original code: > stopifnot(AE(c(rub1$wcov)[c(1,3:4)], + c(917.99893333333, 94.9232, 2340.319288888888)), + all.equal(rub1$wcov, rub2$wcov, tolerance=0) + , + AE(c(rub1$cov)[c(1,3:4)], + c(923.5774514441657, 91.5385216376565, 2342.4556232436971)) + , + AE(c(rub2$cov)[c(1,3:4)], + c(927.2465953711782, 91.8009184487779, 2346.5790105548940)) + ) > > data(milk) > cM1 <- covOGK(milk, 1, sigmamu = scaleTau2, weight.fn = hard.rejection) > cM2 <- covOGK(milk, 2, sigmamu = scaleTau2, weight.fn = hard.rejection) > > symnum(cov2cor(cM1 $cov)) [1,] 1 [2,] 1 [3,] . . 1 [4,] . * 1 [5,] . . * * 1 [6,] . . * * * 1 [7,] . . . . . . 1 [8,] . , . . . . 1 attr(,"legend") [1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1 > symnum(cov2cor(cM2 $cov)) [1,] 1 [2,] 1 [3,] . . 1 [4,] . . B 1 [5,] . . * * 1 [6,] . . B * * 1 [7,] . , . . . . 1 [8,] . . . . . . 
1 attr(,"legend") [1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1 > symnum(cov2cor(cM1 $wcov)) X1 X2 X3 X4 X5 X6 X7 X8 X1 1 X2 1 X3 1 X4 B 1 X5 * * 1 X6 * * * 1 X7 . . 1 X8 . . . . . . 1 attr(,"legend") [1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1 > symnum(cov2cor(cM2 $wcov)) X1 X2 X3 X4 X5 X6 X7 X8 X1 1 X2 1 X3 1 X4 . B 1 X5 * B 1 X6 B B B 1 X7 . , . . . . 1 X8 . . . . . . 1 attr(,"legend") [1] 0 ‘ ’ 0.3 ‘.’ 0.6 ‘,’ 0.8 ‘+’ 0.9 ‘*’ 0.95 ‘B’ 1 > > cMQn <- covOGK(milk, sigmamu = s_Qn, weight.fn = hard.rejection) > cMSn <- covOGK(milk, sigmamu = s_Sn, weight.fn = hard.rejection) > cMiqr <- covOGK(milk, sigmamu = s_IQR, weight.fn = hard.rejection) > cMmad <- covOGK(milk, sigmamu = s_mad, weight.fn = hard.rejection) > > as.dist(round(cov2cor(cMQn$wcov), 3)) X1 X2 X3 X4 X5 X6 X7 X2 0.091 X3 0.227 0.187 X4 0.288 0.176 0.964 X5 0.256 0.132 0.943 0.952 X6 0.241 0.196 0.954 0.956 0.957 X7 0.445 0.634 0.360 0.372 0.377 0.370 X8 0.014 0.452 0.440 0.380 0.340 0.350 0.479 > as.dist(round(cov2cor(cMSn$wcov), 3)) X1 X2 X3 X4 X5 X6 X7 X2 0.096 X3 0.242 0.219 X4 0.305 0.200 0.960 X5 0.269 0.142 0.945 0.952 X6 0.260 0.233 0.948 0.953 0.964 X7 0.445 0.636 0.391 0.399 0.395 0.408 X8 0.020 0.448 0.453 0.384 0.331 0.360 0.484 > as.dist(round(cov2cor(cMiqr$wcov), 3)) X1 X2 X3 X4 X5 X6 X7 X2 0.162 X3 0.181 0.215 X4 0.225 0.199 0.964 X5 0.210 0.140 0.945 0.954 X6 0.187 0.239 0.950 0.951 0.954 X7 0.453 0.660 0.350 0.354 0.355 0.367 X8 0.111 0.454 0.470 0.407 0.345 0.404 0.516 > as.dist(round(cov2cor(cMmad$wcov), 3)) X1 X2 X3 X4 X5 X6 X7 X2 0.077 X3 0.228 0.175 X4 0.289 0.159 0.962 X5 0.257 0.092 0.945 0.952 X6 0.238 0.189 0.954 0.956 0.962 X7 0.451 0.588 0.345 0.358 0.353 0.358 X8 -0.003 0.392 0.488 0.412 0.353 0.380 0.439 > > > cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' Time elapsed: 1.925 0.07 2.512 0 0 > robustbase/tests/nlrob-tst.R0000644000176200001440000002612413772570740015655 0ustar liggesuserslibrary(robustbase) source(system.file("test-tools-1.R", package = "Matrix", mustWork=TRUE)) ## -> assert.EQ(), identical3(), .. DNase1 <- DNase[ DNase$Run == 1, ] Y <- DNase1[,"density"] # for convenience below ## classical fm1 <- nls(density ~ Asym/(1 + exp(( xmid - log(conc) )/scal ) ), data = DNase1, start = list(Asym = 3, xmid = 0, scal = 1), trace=TRUE) summary(fm1) wm1 <- update(fm1, weights = sqrt(conc)) # (weights as function of ) ## robust rm1 <- nlrob(formula(fm1), data = DNase1, trace = TRUE, start = list(Asym = 3, xmid = 0, scal = 1)) (sm1 <- summary(rm1)) stopifnot(all.equal(Y, fitted(fm1) + residuals(fm1), check.attributes=FALSE), ## fitted() has "label" attribute identical3(c(fitted(fm1)), predict(fm1), predict(fm1, newdata=DNase1)), ## robust fit : identical3(fitted(rm1), predict(rm1), predict(rm1, newdata=DNase1)), all.equal(Y, unname(fitted(rm1) + residuals(rm1)))) print(coef(rm1), digits=12) ## 2.35963008460 1.49945088410 1.04506391722 F19 Lx 64b ## 2.35963008460 1.49945088410 1.04506391722 Win(Serv.2003) 64b ## 2.35963008613 1.49945088600 1.04506391793 F19 Lx 32b ## 2.35963008613 1.49945088600 1.04506391793 Win(Serv.2003) 32b assert.EQ(coef(rm1), giveRE=TRUE, c(Asym=2.35963008, xmid=1.49945088, scal=1.04506392), tol = 4e-8) assert.EQ(sqrt(diag(sm1$cov)), giveRE=TRUE, ## 32b 0.08626872273, 0.0902194541, 0.03503833759 c(Asym=0.0862687305, xmid=0.0902194608, scal=0.0350383389), tol = 7e-7) ## examples with weights: rm. <- update(rm1, weights = NULL)# 'NULL' but not missing() ww <- sqrt(DNase1[,"conc"]) wr1 <- update(rm1, weights = sqrt(conc), trace=FALSE) wr1. 
<- update(rm1, weights = ww, trace=FALSE) ii <- names(rm1) != "call" stopifnot(all.equal(rm1[ii], rm.[ii], tol = 1e-15), all.equal(wr1[ii],wr1.[ii], tol = 1e-15)) ## From: "Pascal A. Niklaus" ## To: ## Subject: nlrob problem ## Date: Tue, 20 Dec 2011 07:04:38 +0100 ## For psi-functions that can become zero (e.g. psi.bisquare), weights in ## the internal call to nls can become zero. ## Was ## psiTuk <- robustbase:::psi.bisquare ## psiHamp <- robustbase:::psi.hampel lmrob.control(psi="bisquare")$tuning.psi psiTuk <- function(x, der=0) { ## cc: dput( lmrob.control(psi="bisquare")$tuning.psi ) if(der == 0) Mwgt(x, cc=4.685061, psi="Tukey") else Mpsi(x, cc=4.685061, psi="Tukey", deriv=1) } c.Ha <- lmrob.control(psi="hampel"); c.Ha$tuning.psi psiHamp <- function(x, der=0) { ## cc: dput( lmrob.control(psi="hampel")$tuning.psi ) if(der == 0) Mwgt(x, cc=c(1.35241275, 3.15562975, 7.212868), psi="Hampel") else Mpsi(x, cc=c(1.35241275, 3.15562975, 7.212868), psi="Hampel", deriv=1) } d <- data.frame(x = -6:9, y = 43 + c(7, 52, 21, 12, 10, -4, -5, -4, 0, -77, -8, -10, 22, 33, 38, 51)) nlr1 <- nlrob(y ~ a*(x + b*exp(-c*x)), start=list(a= 4, b= 1, c= 1.2), data = d, maxit = 50, # default 20 is *not* sufficient model = TRUE, trace=TRUE) ## These failed in robustbase version 0.8-0 and earlier nlr2 <- update(nlr1, psi = psiTuk) # now *does* converge... ## check 'model' and dataClasses stopifnot(is.list(mod <- nlr2$model), is.data.frame(mod), inherits(attr(mod, "terms"), "terms"), identical(dCl <- attr(attr(mod, "terms"),"dataClasses"), nlr2$dataClasses), identical(dCl, c(y = "numeric", x = "numeric"))) ## 'port' ditto: nlr2. <- update(nlr2, algorithm= "port") nlr3 <- update(nlr1, psi = psiHamp) # *does* converge, too... nlr3. <- update(nlr3, algorithm= "port") summary(nlr2.) summary(nlr3.) i. <- -c(2, 15) # <- drop 'call' and 'iter' components stopifnot(all.equal(nlr2[i.], nlr2.[i.], tolerance = 2e-5, check.environment = FALSE), all.equal(nlr3[i.], nlr3.[i.], tolerance = 1e-4, check.environment = FALSE), ## The redescending psi() give some exact 0 weights : identical(which(abs(nlr2$rweights) < 1e-9), c(1L, 10 :12)), identical(which(abs(nlr3$rweights) < 1e-9), c(1L, 10L,12L)) ) ## Different example with more data: pp <- list(a=10, b=4, c=1/4) x <- seq(-6,9, by = 1/8) f.x <- with(pp, a*(x + b*exp(-c*x))) set.seed(6); y <- y0 <- f.x + 4*rnorm(x) iO <- c(2:3,20,70:71,90); y[iO] <- y[iO] + 32*c(-1,-1,1)*(2+rlnorm(iO)); y <- round(y) plot(x,y); lines(x, f.x, col="tomato", lty = 2) dd <- data.frame(x,y) nlc1 <- nls(formula(nlr1), start = coef(nlr1), data=dd, trace=TRUE) nlR1 <- update(nlr1, data = dd)# update the model with the new data summary(nlR1) lines(x, predict(nlc1), col=3) lines(x, predict(nlR1), col=4) legend("top", c("f(x)", "least squares", "robust"), col=c("tomato", palette()[3:4]), lty=c(2,1,1)) ## These both now *do* converge, but failed earlier (nlbi <- update(nlR1, psi = psiTuk)) (nlFH <- update(nlR1, psi = psiHamp)) lines(x, predict(nlbi), col=5) lines(x, predict(nlFH), col=6) stopifnot(nlR1$status == "converged", nlbi$status == "converged", nlFH$status == "converged") assert.EQ(coef(nlR1), c(a=9.914874, b=3.98612416, c=0.250896252), tol = 4e-9) assert.EQ(coef(nlbi), c(a=9.947458207, b=3.954210623, c=0.2535835248), tol = 4e-9) ## This is suddently quite different : ???!?!?? 
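## (Editor's aside on the psi helpers defined above -- an illustrative check, not part
## of the original test:)  psiTuk()/psiHamp() return robustness *weights* when der == 0;
## for a redescending psi such as Tukey's bisquare that weight is exactly 0 beyond the
## tuning constant, which is why some of the internal nls() weights can become zero.
## Minimal check with the same tuning constant 4.685061 as above:
stopifnot(Mwgt(6, cc = 4.685061, psi = "Tukey") == 0,
          Mwgt(1, cc = 4.685061, psi = "Tukey") > 0)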
## assert.EQ(coef(nlFH), c(a=9.94242831, b=3.97370746, c=0.252907618)) assert.EQ(coef(nlFH), c(a=9.952893755,b=3.949047387,c=0.2536216541), tol = 1e-7) assert.EQ(1000*diag(vcov(nlR1)), c(a=16.167493, b=10.0986644, c=0.0200814189), tol = 7e-7, giveRE=TRUE) assert.EQ(1000*local({V <- vcov(nlFH); V[lower.tri(V, diag=TRUE)]}), c(16.33774615, -9.704702857, 0.3149189329, 10.03560556, -0.4079936961, 0.02039106329), tol = 7e-7) assert.EQ(predict(nlR1), predict(nlbi), tol = 0.05, giveRE=TRUE) assert.EQ(predict(nlR1), predict(nlFH), tol = 0.05, giveRE=TRUE) nlFH2 <- update(nlFH, psi = .Mwgt.psi1("Hampel", c(2,4,8))) ## TODO: and compare ## TODO: same with Tukey ##----- *Vector* parameters indexed by factor levels ------------- ##----- MM: ~/R/MM/Pkg-ex/robustbase/nlrob-vectorpar.R data(biomassTill)## see also smaller example in ../man/biomassTill.Rd if(!dev.interactive(orNone=TRUE)) pdf("nlrob-biomT.pdf") require(lattice) xyplot(Biomass ~ DVS | Tillage, data = biomassTill) xyplot(Biom.2 ~ DVS | Tillage, data = biomassTill) ## starting values m0.st <- list(Wm = rep(200, 3), a = rep( 1, 3), b = rep( 2, 3)) ##-> nls(), even with -expm1(.) fails to start properly (and hence nlrob() fails too): try( m.c0 <- nls(Biomass ~ Wm[Tillage] * (- expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m0.st, trace=TRUE) ) ## several other versions of the above fail similarly. This works: m00st <- list(Wm = rep(300, 3), a = rep( 1.5, 3), b = rep( 2.2, 3)) m.c00 <- nls(Biomass ~ Wm[Tillage] * (- expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE) ## These were the "true" beta for simulating in creation of {Biomass, Biom.2}: m1.st <- list(Wm = c(219.8, 265.9, 343.4), a = c(1.461, 1.493, 1.294), b = c(2.889, 2.838, 4.046)) m.cl <- nls(Biomass ~ Wm[Tillage] * (1 - exp(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE) ## this now fails to converge: try( # "singular gradient" m.c2 <- nls(Biom.2 ~ Wm[Tillage] * (1 - exp(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE) ) str(C1 <- nls.control(minFactor=1e-6, warnOnly=TRUE, printEval=TRUE, maxiter=500)) try( m.c2 <- nls(Biom.2 ~ Wm[Tillage] * (1 - exp(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE, control=C1) ) ## fails (!) too {numericDeriv() in iteration 129} even though we have ## 'warnOnly' ! ==> bug in nls() !!!!!!!!!!!!!!!!!!!!!!!!!!! ## -expm1(u) is better than (1 - exp(u)) : m.c00 <- nls(Biom.2 ~ Wm[Tillage] * (- expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE, control=C1) ## "fails" but returns .. very bad.. 
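## (Editor's aside, a minimal sketch illustrating the "-expm1(u) is better than
## (1 - exp(u))" remark above:)  for u near 0, 1 - exp(-u) suffers catastrophic
## cancellation, whereas -expm1(-u) keeps full relative accuracy:
c(naive = 1 - exp(-1e-12), expm1 = -expm1(-1e-12)) # naive value is off in ~ the 5th digit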
m.c00 ## Use better starting values, as we have such problems: m.c2 <- nls(Biom.2 ~ Wm[Tillage] * (- expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m1.st, trace=TRUE, control=C1) ## "fails" but returns at least: singular gradient iteration 126 m.c2 ## Robust: not converging in 20 steps (only warning) mrob <- nlrob(Biomass ~ Wm[Tillage] * (-expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE) stopifnot(identical(mrob$dataClasses, c(Biomass = "numeric", Tillage = "factor", DVS = "numeric"))) try(## now: singular gradient in nls mr.2 <- nlrob(Biom.2 ~ Wm[Tillage] * (-expm1(-(DVS/a[Tillage])^b[Tillage])), data = biomassTill, start = m00st, trace=TRUE) ) ## Compare coeffs: rbind(c.true = unlist(m1.st), cl0 = coef(m.c00), cl = coef(m.cl), rob = coef(mrob), c2 = coef(m.c2))#, r.2 = coef(mr.2)) ## Compare fit ## Now for plotting --- nice would be xyplot, but I don't easily see how: (yl2 <- range(biomassTill[,"Biom.2"])) (ylim <- range(biomassTill[,"Biomass"]))# --> *not* showing the two outliers! ## or even a bit more robustly: ## sfsmisc::rrange(biomassTill[,"Biom.2"]) ##-> -201.3064 394.0914 ## using global data + fits from above p.biomass.fits <- function(ylim = c(-200, 400), n = 257, f.DVS = 0.1, leg.txt = c(outer(c("nls() ", "nlrob()"), c("", "[ + 2 outl.]"), paste)), col = c("blue2","blue3","tomato","red3"), lty = c(2,1,2,1), lwd = 2) { ## more and equispaced DVS values for nice plot: rr <- extendrange(biomassTill[,"DVS"], f=f.DVS) bbDVS <- seq(rr[1], rr[2], length = n) b.Till <- biomassTill[,"Tillage"] nP <- nlevels(b.Till) # == 3 m <- length(leg.txt) col <- rep_len(col, m) lwd <- rep_len(lwd, m) lty <- rep_len(lty, m) ## Prefer xyplot() - this is ugly but works (and tests predict(*, )): op <- par(mfrow = c(nP,1), mar = .1 + c(3, 3, 2, 1), mgp = c(1.25, 0.6, 0)) on.exit(par(op)) for(lev in levels(b.Till)) { cat(lev,":\n--------\n") dsub <- subset(biomassTill, Tillage == lev) plot(Biom.2 ~ DVS, data = dsub, ylim=ylim, main = paste("Tillage = ", lev)) grid() dd <- data.frame(Tillage = factor(rep.int(lev, n), levels=levels(b.Till)), DVS = bbDVS) lines(predict(m.cl, dd) ~ DVS, data=dd, col=col[1], lty=lty[1], lwd=lwd[1]) lines(predict(mrob, dd) ~ DVS, data=dd, col=col[2], lty=lty[2], lwd=lwd[2]) lines(predict(m.c2, dd) ~ DVS, data=dd, col=col[3], lty=lty[3], lwd=lwd[3]) ## lines(predict(mr.2, dd) ~ DVS, data=dd, col=col[4], lty=lty[4], lwd=lwd[4]) if(lev == "CA-") legend("top", leg.txt, col = col, lty=lty, lwd=lwd, inset=.02, bg = "gray96") #, bty="n") } } ## showing all data points: p.biomass.fits(ylim = yl2) ## more interesting: p.biomass.fits() cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/MT-tst.R0000644000176200001440000001537412413747102015053 0ustar liggesusersrequire("robustbase") ##---> ./poisson-ex.R ## ~~~~~~~~~~~~~~ for more glmrobMT() tests source(system.file("test-tools-1.R", package="Matrix", mustWork=TRUE)) ## -> assertError(), showSys.time(), ... 
source(system.file("xtraR/ex-funs.R", package = "robustbase")) ## -> newer assert.EQ() {TODO: no longer needed in 2015} if(!require("sfsmisc")) { eaxis <- axis # so we can use eaxis() below } (doExtras <- robustbase:::doExtras()) ## Explore the espRho() function: --------------------------------------------- if(!dev.interactive(orNone=TRUE)) pdf("MT-E_rho.pdf") E.rho <- robustbase:::espRho lambdas <- ((1:10)/2)^2 cws <- c(1, 1.5, 1.75, 2, 2.25, 3) (gr <- expand.grid(lam = lambdas, cw = cws)) Egr <- apply(gr, 1, function(r) { lam <- r[["lam"]]; cw <- r[["cw"]]; sL <- sqrt(lam) xx <- seq(lam - 2*sL, lam + 2*sL, length=17) vapply(xx, function(X) E.rho(lam, xx=X, cw=cw), NA_real_) }) str(Egr)# 17 x 60 mLeg <- function(pos, type="o") legend(pos, legend=paste("lambda = ", format(lambdas, digits=2)), lty=1:5, col=1:6, pch= c(1:9, 0, letters, LETTERS), bty="n") matplot(Egr[, gr[,"cw"]== 1.0 ], type="o",main="c_w = 1.0" ); mLeg("bottomright") matplot(Egr[, gr[,"cw"]== 1.5 ], type="o",main="c_w = 1.5" ); mLeg("bottomright") matplot(Egr[, gr[,"cw"]== 1.75], type="o",main="c_w = 1.75"); mLeg("bottomright") matplot(Egr[, gr[,"cw"]== 2.0 ], type="o",main="c_w = 2.0" ); mLeg("bottomright") matplot(Egr[, gr[,"cw"]== 2.25], type="o",main="c_w = 2.25"); mLeg("bottomright") matplot(Egr[, gr[,"cw"]== 3.0 ], type="o",main="c_w = 3.0" ); mLeg("bottomright") dev.off() ## Explore the m() function: --------------------------------------------- if(!dev.interactive(orNone=TRUE)) pdf("MT-m_rho.pdf") mkM <- robustbase:::mk.m_rho # itself calling splinefun(*, "monoH.FC") getSpline.xy <- function(splfun) { ## Depending on the version of R, the ## environment of splinefun() slightly changes: stopifnot(is.function(splfun), length(e <- environment(splfun)) > 0) if("x0" %in% ls(e)) list(x = e$x0, y = e$y0) else list(x = e$x, y = e$y) } m21 <- mkM(2.1, recompute=TRUE)# the default 'cw = 2.1' m16 <- mkM(1.6, recompute=TRUE) p.m2 <- function(mrho, from = 0, to, col=2, addKnots=TRUE, pchK=4, cexK=1.5, ...) { stopifnot(is.function(mrho)) curve(mrho, from, to, col=col, ...) curve(sqrt(x), add=TRUE, col=adjustcolor("gray",.5), lwd=2) if(addKnots) points(getSpline.xy(mrho), pch=pchK, cex=cexK) } p.m.diff <- function(mrho, from = 0, to, col=2, addKnots=TRUE, pchK=4, cexK=1.5, ...) { stopifnot(is.function(mrho)) curve(mrho(x) - sqrt(x), from=from, to=to, n=512, col=col, ...) abline(h=0,lty=3) if(addKnots) { xy <- getSpline.xy(mrho) if(is.numeric(x <- xy$x)) points(x, xy$y - sqrt(x), pch=pchK, cex=cexK) else warning("'addKnots' not available: No knots in function's environment") } } p.m2(m21, to=10) p.m2(m16, to=10) p.m2(m21, to=50) p.m2(m21, to=120, cexK=.8) p.m.diff(m21, to=120, cex=.5)# pchK="." p.m.diff(m16, to=120, cex=.5)# pchK="." mm21 <- function(.) robustbase:::mm(., m21) environment(mm21) <- environment(m21)# <- for p.m() p.m2(mm21, to=120, cexK=.8) p.m.diff(mm21, to=120, cexK=.8)#-- discontinuity at 100 !! ## TODO: ways to improve! ## Here: look at "larger lambda" (and more cw) la2 <- 5*2^seq(0, 10, by = 0.25) c.s <- .25*c(1:10, 15, 50) mL <- lapply(c.s, function(cc) mkM(cc, lambda = la2, recompute=TRUE)) str(mL, max=1) # a list of functions.. assert.EQ(la2, getSpline.xy(mL[[1]])$x) mmL <- sapply(mL, function(F) getSpline.xy(F)$y) matplot(la2, mmL, type ="l") # "all the same" from very far ... mm.d. 
<- mmL - sqrt(la2) matplot(la2, mm.d., type ="l", xlab=quote(lambda)); abline(h=0, lty=3) legend("bottom", legend= paste("cw=",c.s), col=1:6, lty=1:5, ncol = 3, bty="n") matplot(la2, -mm.d., type ="l", xlab=quote(lambda), log = "xy", axes=FALSE) eaxis(1); eaxis(2) legend("bottom", legend= paste("cw=",c.s), col=1:6, lty=1:5, ncol = 3, bty="n") ## ok, that's the correct scale c.s2 <- c.s [c.s >= .75] mm.d2 <- mm.d.[, c.s >= .75] matplot(la2, -mm.d2, type ="l", xlab=quote(lambda), log = "xy", axes=FALSE) eaxis(1); eaxis(2) legend("bottomleft", legend= paste("cw=",c.s2), col=1:6, lty=1:5, ncol = 3, bty="n") ##-> log (sqrt(lam) - m(lam)) = a[c] - beta * log(lam) : dd2 <- data.frame(m.d = c(mm.d2), cw = rep(c.s2, each = length(la2)), lambda = rep(la2, length(c.s2))) ## gives a pretty nice picture: summary(fm <- lm(log(-m.d) ~ 0+factor(cw) + log(lambda), data = dd2, subset = lambda >= 50)) ##=> slope of log(lambda) = -1/2 dd3 <- within(dd2, { ld2 <- log(-m.d) + 1/2 * log(lambda) })[dd2[,"lambda"] >= 50,] plot(ld2 ~ cw, data = dd3, type = "b") plot(ld2 ~ cw, data = dd3, type = "b", log="x") coplot(ld2 ~ cw|lambda, data = dd3) coplot(ld2 ~ cw|log(lambda), data = dd3) coplot(ld2 ~ log10(cw) | log10(lambda), data = dd3) dev.off() ##-------------------------------------------------------- end m(.) ------------- ## The simple intercept example from ./glmrob-1.R set.seed(113) y <- rpois(17, lambda = 4) y[1:2] <- 99:100 # outliers y.1 <- y x.1 <- cbind(rep(1, length(y))) options("robustbase:m_rho_recompute" = TRUE)#-> recompute in any case: showSys.time( r <- glmrob(y ~ 1, family = poisson, method = "MT", nsubm=100) )# some output str(r) ## was c(ini = 1.30833281965018, est = 1.29369680430613) ## then c(ini = 1.30833281965018, est = 1.29369680422799) ## c(ini = 1.30833281965018, est = 1.29369680430627) r.64b <- c(ini = 1.30833281965018, est = 1.29369680452016) stopifnot(r$converged) assert.EQ(r$initial, r.64b[["ini"]], check.attributes=FALSE, tol = 1e-13)# rel.diff: 3.394.e-16 assert.EQ(r$coefficients, r.64b[["est"]], check.attributes=FALSE, tol = 1e-09)# as long we use different optim()) ## now, as the algorithm has a random start: set.seed(7) nSim <- if(doExtras) 20 else 2 showSys.time(LL <- replicate(nSim, glmrob(y ~ 1, family = poisson, method = "MT"), simplify=FALSE)) ini <- sapply(LL, `[[`, "initial") est <- sapply(LL, `[[`, "coefficients") ## surprise: all the 20 initial estimators are identical: stopifnot(diff(range(ini)) == 0, diff(range(est)) == 0) ## probably too accurate ... but ok, for now assert.EQ(est[1], r.64b[["est"]], check.attributes=FALSE, tol = 1e-10)# Winbuilder needed ~ 2e-11 assert.EQ(ini[1], r.64b[["ini"]], check.attributes=FALSE, tol = 1e-10) ccvv <- sapply(LL, `[[`, "cov") stopifnot(ccvv[1] == ccvv) assert.EQ(print(ccvv[1]), 0.0145309081924157, tol = 1e-7, giveRE=TRUE) cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' ## "Platform" info (SysI <- Sys.info()[c("sysname", "release", "nodename", "machine")]) if(require("sfsmisc") && SysI[["sysname"]] == "Linux") ## not on the Mac (yet) c(SysI, MIPS=Sys.MIPS(), Sys.sizes()) else SysI robustbase/tests/Rsquared.R0000644000176200001440000000426613326360567015521 0ustar liggesusersrequire(robustbase) set.seed(17)# reproducibility! 
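## (Editor's note -- a minimal sketch, not part of the original test:)  the test()
## helper below uses psi = "hampel" with the huge tuning constants c(1000, 2000, 3000);
## within its linear part Hampel's psi is the identity, so -- as long as all scaled
## residuals stay in that part -- the M-estimate coincides with least squares:
stopifnot(all.equal(Mpsi(2, cc = c(1000, 2000, 3000), psi = "hampel"), 2))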
## to check: ## - for the empty model summary(lmrob(Y ~ 0, coleman)) ## - with and without an intercept in the model summary(lmrob(Y ~ 1, coleman)) writeLines(sfm <- capture.output( summary(lmrob(Y ~ ., coleman)))) # and this must be "identical": sfm2 <- capture.output(summary(lmrob(Y ~ ., coleman, model=FALSE, x=FALSE, y=FALSE))) iCall <- grep("lmrob.*coleman", sfm)# the only line that differs stopifnot(sfm[-iCall] == sfm2[-iCall]) ## w/o intercept: summary(lmrob(Y ~ . - 1, coleman, model=FALSE, x=FALSE, y=FALSE)) ## - when prior-weights are included wts <- c(rep(0.05, 10), rep(2, 10)) summary(lmrob(Y ~ . - 1, coleman, model=FALSE, x=FALSE, y=FALSE, weights = wts)) ## - should work for object with NA in the coefficients, and ## - should work for object with NA in the observations --> both in ./NAcoef.R ## check equality with lm() for classical model test <- function(formula, data, items=c("coefficients", "residuals", "df", "scale", "r.squared", "adj.r.squared", "weights"), tol = 1e-4, ...) { lmrCtrl <- lmrob.control(psi = "hampel", tuning.psi = c(1000, 2000, 3000), method="SMDM", ...) sc <- summary(lm (formula, data)) sr <- summary(lmrob(formula, data, control= lmrCtrl)) names(sc)[names(sc) == "sigma"] <- "scale" if(sc$df[1] == 0 && getRversion() <= "3.5.1" && as.numeric(R.version$`svn rev`) < 74993) ## in the past, lm() returned logical empty matrix storage.mode(sc$coefficients) <- "double" ret <- all.equal(sc[items], sr[items], tolerance=tol) if (!isTRUE(ret)) { print(sr) for (i in seq_along(items)) { print(sc[items[i]]) print(sr[items[i]]) } print(ret) stop(sprintf("all.equal(sc[items], sr[items], tol.. = %g) are not all TRUE", tol)) } ret } set.seed(101) test(Y ~ 0, coleman, c("residuals", "df", "coefficients", "r.squared", "adj.r.squared"), tol=1e-10) test(Y ~ 1, coleman, tol = 2e-4) test(Y ~ ., coleman, tol = 4e-4) test(Y ~ . - 1, coleman, tol = 4e-4) robustbase/tests/tlts.R0000644000176200001440000000262113162677266014717 0ustar liggesuserslibrary(robustbase) ## library(MASS)## MASS::lqs source(system.file("xtraR/test_LTS.R", package = "robustbase")) ## ../inst/test_LTS.R y20 <- c(2:4, 8, 12, 22, 28, 29, 33, 34, 38, 40, 41, 47:48, 50:51, 54, 56, 59) test_location <- function() { ## Improve: print less, and test equality explicitly Y <- y20 print(ltsReg(y=Y)) print(ltsReg(y=Y, intercept=TRUE)) print(ltsReg(y=Y, intercept=FALSE)) print(ltsReg(y=Y, alpha=1)) print(ltsReg(Y ~ 1)) print(ltsReg(Y ~ 0))# = Y ~ 1 - 1 : empty model (no coefficients) print(ltsReg(Y ~ 1, alpha=1)) } test_rsquared <- function() { x1 <- y20 y1 <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 3.5, 1, 1, 1, 1, 1, 1, 1, 1, 1, 5) ll1 <- ltsReg(x1,y1, alpha = 0.8) ## print() ing is platform-dependent, since only ~= 0 stopifnot(all.equal(unname(coef(ll1)), c(1,0), tolerance=1e-12), ll1$scale < 1e-14) print(ltsReg(y1,x1, alpha = 0.8)) print(ltsReg(y1,x1, alpha = 0.8, intercept = FALSE)) } options(digits = 5) set.seed(101) # <<-- sub-sampling algorithm now based on R's RNG and seed doLTSdata() if(FALSE) { ## FIXME: These *FAIL* ! 
doLTSdata(nrep = 12, time = FALSE) doLTSdata(nrep = 12, time = FALSE, method = "MASS") } test_rsquared() test_location() if(length(W <- warnings())) print(if(getRversion() >= "3.5") summary(W) else W) cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/nlregrob-tst.R0000644000176200001440000004075013530741352016343 0ustar liggesusersstopifnot(require("robustbase")) source(system.file("xtraR", "platform-sessionInfo.R", # moreSessionInfo() etc package = "robustbase", mustWork=TRUE)) ## testing functions: source(system.file("test-tools-1.R", package = "Matrix", mustWork=TRUE))# -> assert.EQ(), ## showProc.time(), showSys.time() ... S.time <- showSys.time # "back compatible" mS <- moreSessionInfo(print.=TRUE) ## as long as we don't export these (nor provide an nlrob(., method=.) interface: nlrob.MM <- robustbase:::nlrob.MM nlrob.tau <- robustbase:::nlrob.tau nlrob.CM <- robustbase:::nlrob.CM nlrob.mtl <- robustbase:::nlrob.mtl (doExtras <- robustbase:::doExtras()) if(doExtras) { NP <- 30 ; tol <- 1e-11 } else { ## "fast" NP <- 15 ; tol <- 1e-7 } start.from.true <- !doExtras # (but not necessarily ..) if(start.from.true) { # population size = NP (random) + 1 (true parameters) init_p <- c(1, 0.2) init_p_sigma <- c(1, 0.2, 1) } else { init_p <- init_p_sigma <- NULL } if(!dev.interactive(orNone=TRUE)) pdf("nlregrob-tst.pdf") RNGversion("3.5.0") # -- TODO once R >> 3.5.0 : update results !! ## Stromberg, Arnold J. (1993). ## Computation of high breakdown nonlinear regression parameters. ## J. Amer. Statist. Assoc. 88(421), 237-244. ## exponential regression Expo <- function(x, a, b) exp(a + b*x) set.seed(2345) # for reproducibility ## data without outliers: d.exp30 <- data.frame(x = sort( runif(30, 0, 10) ), err = rnorm(30)) d.exp30 <- transform(d.exp30, y = Expo(x, 1, 0.2) + err) ## classical (starting at truth .. hmm) op <- options(digits=12) Cfit <- nls(y ~ Expo(x, a, b), data = d.exp30, start = c(a = 1, b = 0.2), control = nls.control(tol = 8e-8, printEval = TRUE), trace=TRUE) showProc.time()# ---- OS X needing 6e-8 options(op) ## robust Rfit.MM.S.bisquare <- nlrob.MM(y ~ Expo(x, a, b), data = d.exp30, lower = c(a = -10, b = -2), upper = c(10, 2), NP = NP, tol = tol, add_to_init_pop = init_p ) if(doExtras) { Rfit.MM.S.lqq <- update(Rfit.MM.S.bisquare, psi = "lqq") Rfit.MM.S.optimal <- update(Rfit.MM.S.bisquare, psi = "optimal") Rfit.MM.S.hampel <- update(Rfit.MM.S.bisquare, psi = "hampel") } showProc.time() Rfit.MM.lts.bisquare <- update(Rfit.MM.S.bisquare, init = "lts") Rfit.MM.lts.lqq <- update(Rfit.MM.S.bisquare, init = "lts", psi = "lqq") Rfit.MM.lts.optimal <- update(Rfit.MM.S.bisquare, init = "lts", psi = "optimal") Rfit.MM.lts.hampel <- update(Rfit.MM.S.bisquare, init = "lts", psi = "hampel") showProc.time() S.time(Rfit.tau.bisquare <- nlrob.tau( y ~ Expo(x, a, b), data = d.exp30, lower = c(a = -10, b = -2), upper = c(10, 2), NP = NP, add_to_init_pop = init_p )) S.time(Rfit.tau.optimal <- update(Rfit.tau.bisquare, psi = "optimal")) S.time(Rfit.CM <- nlrob.CM( y ~ Expo(x, a, b), data = d.exp30, lower = c(a = -10, b = -2, sigma = 0), upper = c( 10, 2, 10), NP = NP, add_to_init_pop = init_p_sigma )) S.time(Rfit.mtl <- nlrob.mtl(y ~ Expo(x, a, b), data = d.exp30, lower = c(a = -10, b = -2, sigma = 0), upper = c( 10, 2, 3), NP = ceiling(NP*1.33), # <- higher prob. 
to get close tol = tol, trace=TRUE, details=TRUE, add_to_init_pop = init_p_sigma )) showProc.time() plot(y ~ x, d.exp30, main = "Data = d.exp30") cTr <- adjustcolor("red4", 0.5) cLS <- adjustcolor("blue2", 0.5) cE <- curve(Expo(x, a=1, b=0.2), 0, 10, n=1+2^9, col=cTr, lwd=2, lty=2, add=TRUE) lines(d.exp30$x, fitted(Cfit), col=cLS, lwd=3) ll <- length(m1 <- sapply(ls.str(patt="^Rfit"), get, simplify=FALSE)) .tmp <- lapply(m1, function(.) lines(d.exp30$x, fitted(.))) legend("topleft", c("true", "LS", names(m1)), lwd=c(2,3, rep(1,ll)), lty=c(2,1, rep(1,ll)), col=c(cTr,cLS, rep(par("fg"),ll)), bty="n", inset=.01) showProc.time() ## 40% outliers present {use different data name: seen in print() d.exp40out <- within(d.exp30, y[15:27] <- y[15:27] + 100) op <- options(digits=12) Cfit.40out <- update(Cfit, data = d.exp40out, trace=TRUE, control = nls.control(tol = Cfit$control$tol)) if(FALSE) ## this fails for "bad" non-R BLAS/LAPACK Cfit.no.out <- update(Cfit.40out, subset = -(15:27)) ## help giving it a good start *and* raise tolerance (from 8e-8): ## still fails for all three of {ATLAS, MKL, OpenBLAS} with ## Error in nls(formula = y ~ Expo(x, a, b), data = d.exp.Hlev, start = c(a = 1, : ## step factor 0.000488281 reduced below 'minFactor' of 0.000976562 Cfit.no.out <- tryCatch(error = function(e) e, update(Cfit.40out, subset = -(15:27), start = c(a = 1, b = 0.2), trace=TRUE, control = nls.control(maxiter = 1000, tol = 5e-7, printEval=TRUE)) ) Cfit.no..ok <- !inherits(Cfit.no.out, "error") options(op) if(doExtras) { Rf.out.MM.S.bisquare <- update(Rfit.MM.S.bisquare, data=d.exp40out) Rf.out.MM.S.lqq <- update(Rf.out.MM.S.bisquare, psi = "lqq") Rf.out.MM.S.optimal <- update(Rf.out.MM.S.bisquare, psi = "optimal") Rf.out.MM.S.hampel <- update(Rf.out.MM.S.bisquare, psi = "hampel") showProc.time() } Rf.out.MM.lts.bisquare <- update(Rfit.MM.S.bisquare, data=d.exp40out, init= "lts") Rf.out.MM.lts.lqq <- update(Rf.out.MM.lts.bisquare, psi= "lqq") #----------- Rf.out.MM.lts.optimal <- update(Rf.out.MM.lts.bisquare, psi= "optimal") Rf.out.MM.lts.hampel <- update(Rf.out.MM.lts.bisquare, psi= "hampel") showProc.time() Rf.out.tau.bisquare <- update(Rfit.tau.bisquare, data=d.exp40out) Rf.out.tau.optimal <- update(Rfit.tau.bisquare, data=d.exp40out, psi = "optimal") Rf.out.CM <- update(Rfit.CM, data=d.exp40out) Rf.out.mtl <- update(Rfit.mtl, data=d.exp40out) showProc.time() plot(y ~ x, d.exp40out, main = "Data = d.exp40out") cE <- curve(Expo(x, a=1, b=0.2), 0, 10, n=1+2^9, col=cTr, lwd=2, lty=2, add=TRUE) ll <- length(m1 <- sapply(ls.str(patt="^Rf.out"), get, simplify=FALSE)) .tmp <- lapply(m1, function(.) 
lines(d.exp40out$x, fitted(.))) xx <- local({p <- par("usr"); seq(p[1],p[2], length.out=256)}) if(Cfit.no..ok) lines(xx, predict(Cfit.no.out, list(x=xx)), col=cLS, lwd=3) lines(xx, predict(Cfit.40out , list(x=xx)), col=cLS, lty=2) legend("topleft", c("true", "LS [w/o outl]", "LS", names(m1)), lwd=c(2,3, rep(1,1+ll)), lty=c(2,1,2, rep(1,ll)), col=c(cTr,cLS,cLS, rep(par("fg"),ll)), bty="n", inset=.01) showProc.time() ## presence of high leverage point outliers d.exp.Hlev <- within(d.exp40out, { x[28:30] <- x[28:30] + 10 ## shift 10 y <- Expo(x, 1, 0.2) + err y[28:30] <- y[28:30] + 500 }) op <- options(digits=12) Cfit.Hlev <- tryCatch(error = function(e) e, update(Cfit.40out, data = d.exp.Hlev, start = c(a = 1, b = 0.2), trace=TRUE, control = nls.control(maxiter = 100, tol = 5e-7, printEval=TRUE)) ) if(Cfit.Hlev..ok <- !inherits(Cfit.Hlev, "error")) { Cfit.no.Hlev <- update(Cfit.Hlev, subset = -(28:30)) } else { ## substitute -- better? Cfit.no.Hlev <- update(Cfit, subset = -(28:30)) } showProc.time() options(op) if(doExtras) { Rf.Hlev.MM.S.bisquare <- update(Rfit.MM.S.bisquare, data = d.exp.Hlev) Rf.Hlev.MM.S.lqq <- update(Rf.Hlev.MM.S.bisquare, psi = "lqq") Rf.Hlev.MM.S.optimal <- update(Rf.Hlev.MM.S.bisquare, psi = "optimal") Rf.Hlev.MM.S.hampel <- update(Rf.Hlev.MM.S.bisquare, psi = "hampel") showProc.time() } Rf.Hlev.MM.lts.bisquare <- update(Rfit.MM.S.bisquare, data = d.exp.Hlev, init="lts") Rf.Hlev.MM.lts.lqq <- update(Rf.Hlev.MM.lts.bisquare, psi= "lqq") Rf.Hlev.MM.lts.optimal <- update(Rf.Hlev.MM.lts.bisquare, psi="optimal") Rf.Hlev.MM.lts.hampel <- update(Rf.Hlev.MM.lts.bisquare, psi= "hampel") showProc.time() Rf.Hlev.tau.bisquare <- update(Rfit.tau.bisquare, data = d.exp.Hlev) Rf.Hlev.tau.optimal <- update(Rf.Hlev.tau.bisquare, psi = "optimal") Rf.Hlev.CM <- update(Rfit.CM, data = d.exp.Hlev) Rf.Hlev.mtl <- update(Rfit.mtl, data = d.exp.Hlev) showProc.time() plot(y ~ x, d.exp.Hlev, main = "Data = d.exp.Hlev") cE <- curve(Expo(x, a=1, b=0.2), 0, par("usr")[2], n=1+2^9, col=cTr, lwd=2, lty=2, add=TRUE) x.H <- seq(par("usr")[1], par("usr")[2], length.out = 256) ll <- length(m1 <- sapply(ls.str(patt="^Rf.Hlev"), get, simplify=FALSE)) .tmp <- lapply(m1, function(.) lines(x.H, predict(., list(x=x.H)))) lines(x.H, predict(Cfit.no.Hlev, list(x=x.H)), col=cLS, lwd=3)## L.S.() if(Cfit.Hlev..ok) { lines(x.H, predict(Cfit.Hlev, list(x=x.H)), col=cLS, lty=2)## L.S. legend("topleft", c("true", "LS [w/o outl]", "LS", names(m1)), lwd=c(2,3, rep(1,1+ll)), lty=c(2,1,2, rep(1,ll)), col=c(cTr, cLS,cLS, rep(par("fg"),ll)), bty="n", inset=.01) } else { cat("no Cfit.Hlev lines as nls() failed there\n") cat(" : legend(...) 
!?\n") } showProc.time() cfcl <- coef(Cfit) if(Cfit.no..ok) { cfcl.n.o <- coef(Cfit.no.out) } else { cfcl.n.o <- cfcl } # use substitute - for code below cfcl.n.H <- coef(Cfit.no.Hlev) ## no outliers present assert.EQ(coef(Rfit.MM.S.bisquare), cfcl, tol = 0.01, giveRE=TRUE) if(doExtras) { assert.EQ(coef(Rfit.MM.S.lqq), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.MM.S.optimal), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.MM.S.hampel), cfcl, tol = 0.01, giveRE=TRUE) } assert.EQ(coef(Rfit.MM.lts.bisquare), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.MM.lts.lqq), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.MM.lts.optimal), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.MM.lts.hampel), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.tau.bisquare), cfcl, tol = 0.02, giveRE=TRUE)# 0.009873 assert.EQ(coef(Rfit.tau.optimal), cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.CM)[-3], cfcl, tol = 0.01, giveRE=TRUE) assert.EQ(coef(Rfit.mtl)[-3], cfcl, tol = 0.02, giveRE=TRUE) ## 40% outliers present -- compare with L.S.(good.data) if(doExtras) { assert.EQ(coef(Rf.out.MM.S.bisquare), cfcl.n.o, tol = 7e-4, giveRE=TRUE) assert.EQ(coef(Rf.out.MM.S.lqq), cfcl.n.o, tol = 1e-5, giveRE=TRUE) assert.EQ(coef(Rf.out.MM.S.optimal), cfcl.n.o, tol = 1e-5, giveRE=TRUE) assert.EQ(coef(Rf.out.MM.S.hampel), cfcl.n.o, tol = 1e-5, giveRE=TRUE) } assert.EQ(coef(Rf.out.MM.lts.bisquare), cfcl.n.o, tol = 6e-4, giveRE=TRUE) assert.EQ(coef(Rf.out.MM.lts.lqq), cfcl.n.o, tol = 1e-5, giveRE=TRUE) assert.EQ(coef(Rf.out.MM.lts.optimal), cfcl.n.o, tol = 1e-5, giveRE=TRUE) assert.EQ(coef(Rf.out.MM.lts.hampel), cfcl.n.o, tol = 1e-5, giveRE=TRUE) assert.EQ(coef(Rf.out.tau.bisquare), cfcl.n.o, tol = .007, giveRE=TRUE) assert.EQ(coef(Rf.out.tau.optimal), cfcl.n.o, tol = .002, giveRE=TRUE) assert.EQ(coef(Rf.out.CM)[-3], cfcl.n.o, tol = .012, giveRE=TRUE)# 0.00708,0.01079 assert.EQ(coef(Rf.out.mtl)[-3], cfcl.n.o, tol = .002, giveRE=TRUE)# better in 64b ## presence of high leverage point outliers -- compare with LS(good.data) if(doExtras) { assert.EQ(coef(Rf.Hlev.MM.S.bisquare), cfcl.n.H, tol = .01, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.MM.S.lqq), cfcl.n.H, tol = .02, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.MM.S.optimal), cfcl.n.H, tol = .005, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.MM.S.hampel), cfcl.n.H, tol = .02, giveRE=TRUE) } assert.EQ(coef(Rf.Hlev.MM.lts.bisquare),cfcl.n.H, tol = .01, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.MM.lts.lqq), cfcl.n.H, tol = .015, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.MM.lts.optimal), cfcl.n.H, tol = .002, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.MM.lts.hampel), cfcl.n.H, tol = .02, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.tau.bisquare), cfcl.n.H, tol = .05, giveRE=TRUE)# 0.0363, 0.0415 assert.EQ(coef(Rf.Hlev.tau.optimal), cfcl.n.H, tol = .03, giveRE=TRUE) assert.EQ(coef(Rf.Hlev.CM)[-3], cfcl.n.H, tol = .12, giveRE=TRUE)# 0.032, 0.082 assert.EQ(coef(Rf.Hlev.mtl)[-3], cfcl.n.H, tol = .08, giveRE=TRUE) length(mods <- sapply(ls.str(patt="^Rf"), get, simplify=FALSE)) # 36 is.conv <- sapply(mods, `[[`, "status") == "converged" prblm <- mods[!is.conv] if(length(prblm)) { cat("\n*** NON-converged model fits:\n") print(prblm) mods <- mods[is.conv] } else cat("\n All models converged\n") ## Now, all mods are converged ----------- dKnd <- as.factor(vapply(mods, function(.m.) 
as.character(getCall(.m.)[["data"]]), "")) table(dKnd) ## (iKnd <- setNames(seq_len(nlevels(dKnd)), levels(dKnd))) ## Coefficients: Some have 'sigma', some not: pcf <- vapply(lcf <- lapply(mods, coef), length, 1) table(pcf) ## 2 and 3 stopifnot(min(pcf) + 1 == max(pcf)) # +1 : those which have 'sigma pp <- min(pcf) ccf <- t(simplify2array(lapply(lcf, `[`, 1:max(pcf)))) ## take the "Scale" for those that do not have 'sigma' among coef(): i.n <- is.na(ccf[,"sigma"]) ccf[i.n, "sigma"] <- vapply(mods[i.n], `[[`, 0, "Scale") ## not yet: vapply(mods[i.n], sigma, 0.) ccf ## well, the 'sigma's are definitely *not* unbiased estimates of ## true sqrt(var(eps)) ... [FIXME] ## --> indeed, this can be found in the CM paper [TODO: write more here] plot(ccf[,1:2], pch = as.integer(dKnd))## use 'method' to get color legend("topright", inset=.01, names(iKnd), pch = iKnd) points(rbind(cfcl.n.H, cfcl, cfcl.n.o), # <- order from iKind col=adjustcolor("tomato",.5), cex=2, pch=1:3, lwd=5) ## optional labs <- sub("^Rfit\\.", '', sub("^Rf\\.[A-Za-z]+\\.", '', rownames(ccf))) labs <- sub("hampel$", "Ham", sub("optimal$", "opt", sub("bisquare$", "biS", labs))) labs text(ccf[,1:2], labs, cex=0.75, col=adjustcolor(1, 0.5), adj= -1/5, srt=75, xpd=NA) points(rbind(cfcl), col=adjustcolor("tomato",.5), cex=2, pch=3, lwd=5) showProc.time() ###------- Extended Tests for the DNase1 example from >>>> ../man/nlrob-algos.Rd <<<< ### ===================== DNase1 <- DNase[DNase$Run == 1,] form <- density ~ Asym/(1 + exp(( xmid -log(conc) )/scal )) pnms <- c("Asym", "xmid", "scal") psNms <- c(pnms, "sigma") ##' a version that recycles x: setNames. <- function(x, nm) setNames(rep_len(x, length(nm)), nm) ## for comparisons, later: all.eq.mod <- function(m1, m2, sub=FALSE, excl = c("call", "ctrl"), ...) { nm1 <- names(m1) stopifnot(if(sub) nm1 %in% names(m2) else nm1 == names(m2)) ni <- if(sub) nm1[is.na(match(nm1, c("call","ctrl")))] else is.na(match(names(m1), excl))## <<- all but those with names in 'excl' all.equal(m1[ni], m2[ni], ...) } set.seed(47) # as these by default use randomized optimization: fMM <- robustbase:::nlrob.MM(form, data = DNase1, lower = setNames.(0, pnms), upper = 3, ## call to nlrob.control to pass 'optim.control': ctrl = nlrob.control("MM", optim.control = list(trace = 1), optArgs = list(trace = TRUE))) showProc.time() if(doExtras) { ftau <- robustbase:::nlrob.tau(form, data = DNase1, lower= setNames.(0, pnms), upper= 3, trace=TRUE) ## fCM <- robustbase:::nlrob.CM (form, data = DNase1, lower= setNames.(0, psNms), upper= 3, trace=TRUE) ## fmtl <- robustbase:::nlrob.mtl(form, data = DNase1, lower= setNames.(0, psNms), upper= 3, trace=TRUE) ## mods <- list(MM=fMM, tau=ftau, CM=fCM, MTL=fmtl) print(sts <- sapply(mods, `[[`, "status")) stopifnot(sts == "converged") print(sapply(mods, `[[`, "data")) # currently 'language' %% FIXME print(sapply(mods, `[[`, "coefficients")) # nice matrix showProc.time() } ## Compare with traditional M-estimate, a) started robustly b) psi = Tukey's: fM <- nlrob(formula(fMM), data=eval(fMM$data), start = coef(fMM), psi = .Mwgt.psi1("bisquare"), trace = TRUE) rbind(M=coef(fM), MM=coef(fMM)) # "similar" ... 
well, no: the sigma's get much different ## stopifnot(%%____FIXME___ all.equal(coef(fM), coef(fMM), tolerance = 1e-4) ## ) # had 3.26e-5 ## FIXME: nlrob( "M") should allow to keep specify an initial sigma *and* keep that fixed showProc.time() if(doExtras) { ### Now call the above methods via nlrob(): set.seed(47) # (same as above) ## without "sigma" gMM <- nlrob(form, data = DNase1, method = "MM", lower = setNames(c(0,0,0), pnms), upper = 3) gtau <- nlrob(form, data = DNase1, method = "tau", lower = setNames(c(0,0,0), pnms), upper = 3) ## those with "sigma" -> must be in (lower, upper), too : gCM <- nlrob(form, data = DNase1, method = "CM", lower = setNames(c(0,0,0,0), psNms), upper = 3) gmtl <- nlrob(form, data = DNase1, method = "mtl", lower = setNames(c(0,0,0,0), psNms), upper = 3) showProc.time() ## list {and test print() for these}: (mod2 <- list(MM=gMM, tau=gtau, CM=gCM, MTL=gmtl)) stopifnot(mapply(all.eq.mod, mods, mod2, sub=TRUE)) }# only if(doExtras) robustbase/tests/exact-fit-categorical.R0000644000176200001440000000145511726415440020060 0ustar liggesusers## recreating exact fit problem for categorical data require(robustbase) ## some simple balanced dataset with one grouping variable ngrp <- 10 nrep <- 10 set.seed(2) data <- data.frame(y = rnorm(ngrp*nrep), grp=rep(letters[1:ngrp], each=nrep)) ## this works fine m1 <- lmrob(y ~ grp, data) ## now contaminate the dataset data2 <- data data2$y[1:48] <- 1e10 try(m2 <- lmrob(y ~ grp, data2, trace.lev = 3)) ## All observations of group "e" get rob. weight of 0: weights <- c(1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) ## from trace output weights %*% m1$x robustbase/tests/mc-strict.R0000644000176200001440000003107014055507253015624 0ustar liggesusers #### Testing medcouple mc() and related functions ### here, we do "strict tests" -- hence no *.Rout.save ### hence, can also produce non-reproducible output such as timing library(robustbase) for(f in system.file("xtraR", c("mcnaive.R", # -> mcNaive() "platform-sessionInfo.R"), package = "robustbase", mustWork=TRUE)) { cat("source(",f,"):\n", sep="") source(f) } source(system.file("test-tools-1.R", package="Matrix", mustWork=TRUE)) assertEQm12 <- function(x,y, giveRE=TRUE, ...) assert.EQ(x,y, tol = 1e-12, giveRE=giveRE, ...) ## ^^ shows *any* difference ("tol = 0") unless there is no difference at all ## c.time <- function(...) cat('Time elapsed: ', ..., '\n') S.time <- function(expr) c.time(system.time(expr)) DO <- function(...) 
S.time(stopifnot(...)) mS <- moreSessionInfo(print.=TRUE) (doExtras <- robustbase:::doExtras())# TRUE if interactive() or activated by envvar n.set <- c(1:99, 1e5L+ 0:1) # large n gave integer overflow in earlier versions DO(0 == sapply(n.set, function(n) mc(seq_len(n)))) DO(0 == sapply(n.set, function(n) mc(seq_len(n), doRefl=FALSE))) DO(0 == sapply(1:100, function(n) mcNaive(seq_len(n), "simple"))) DO(0 == sapply(1:100, function(n) mcNaive(seq_len(n), "h.use" ))) x1 <- c(1, 2, 7, 9, 10) mcNaive(x1) # = -1/3 assertEQm12(-1/3, mcNaive(x1, "simple")) assertEQm12(-1/3, mcNaive(x1, "h.use")) assertEQm12(-1/3, mc(x1)) x2 <- c(-1, 0, 0, 0, 1, 2) mcNaive(x2, meth="simple") # = 0 - which is wrong mcNaive(x2, meth="h.use") # = 1/6 = 0.16666 assertEQm12(1/6, mc(x2)) assertEQm12(1/6, mcNaive(x2, "h.use")) x4 <- c(1:5,7,10,15,25, 1e15) ## - bombed in orignal algo mcNaive(x4,"h.use") # 0.5833333 assertEQm12( 7/12, mcNaive(x4, "h.use")) assertEQm12( 7/12, mcNaive(x4, "simple")) assertEQm12( 7/12, mc( x4, doRefl= FALSE)) assertEQm12(-7/12, mc(-x4, doRefl= FALSE)) set.seed(17) for(n in 3:50) { cat(" ") for(k in 1:5) { x <- rlnorm(n) mc1 <- mc(x) mc2 <- mcNaive(x, method = "simple") mc3 <- mcNaive(x, method = "h.use" ) stopifnot(all.equal(mc1, mc3, tolerance = 1e-10),# 1e-12 not quite ok mc2 == mc3) cat(".") } }; cat("\n") ###---- Strict tests of adjOutlyingness(): ### ================= changed after long-standing bug fix in Oct.2014 ## For longley, note n < 4p and hence "random nonsense" numbers set.seed(1); S.time(a1.1 <- adjOutlyingness(longley)) set.seed(11); S.time(a1.2 <- adjOutlyingness(longley)) ## set.seed(2); S.time(a2 <- adjOutlyingness(hbk)) # 75 x 4 set.seed(3); S.time(a3 <- adjOutlyingness(hbk[, 1:3]))# the 'X' space set.seed(4); S.time(a4 <- adjOutlyingness(milk)) # obs.63 = obs.64 set.seed(5); S.time(a5 <- adjOutlyingness(wood)) # 20 x 6 ==> n < 4p set.seed(6); S.time(a6 <- adjOutlyingness(wood[, 1:5]))# ('X' space) 20 x 5: n = 4p (ok!) ## 32-bit <-> 64-bit different results {tested on Linux only} is32 <- .Machine$sizeof.pointer == 4 ## <- should work for Linux/MacOS/Windows isMac <- Sys.info()[["sysname"]] == "Darwin" isSun <- Sys.info()[["sysname"]] == "SunOS" Rnk <- function(u) rank(unname(u), ties.method = "first") ## to use for testing below: cat("\nRnk(a3 $ adjout): "); dput(Rnk(a3$adjout), control= {}) cat("\nRnk(a4 $ adjout): "); dput(Rnk(a4$adjout), control= {}) (i.a4Out <- which( ! a4$nonOut)) # the outliers -- varies "wildly" stopifnot(70 %in% i.a4Out) { if(is32 && !isMac) all.equal(i.a4Out, c(1, 2, 41, 70)) ## and this is "typically" true, but not for a 64-bit Linux version bypassing BLAS in matprod else if(isSun || isMac) TRUE else if(grepl("^Fedora", osVersion) && !is32) identical(i.a4Out, 70L) # since Dec 2020 (F 32) else all.equal(i.a4Out, c(9:19, 23:27,57, 59, 70, 77)) } ## only for ATLAS (BLAS/Lapack), not all are TRUE; which ones [but n < 4p] if(!all(a5$nonOut)) print(which(!a5$nonOut)) # if we know, enable check below stopifnot(exprs = { which(!a2$nonOut) == 1:14 which(!a3$nonOut) == 1:14 ## 'longley', 'wood' have no outliers in the "adjOut" sense: if(doExtras && !isMac) { ## longley also has n < 4p (!) if(mS$ strictR) sum(a1.2$nonOut) >= 15 # sum(.) = 16 [nb-mm3, Oct.2014] else ## however, openBLAS Fedora Linux /usr/bin/R gives sum(a1.2$nonOut) = 13 sum(a1.2$nonOut) >= 13 } else TRUE if(doExtras) { ## have n < 4p (!) 
if(mS$ strictR) a5$nonOut else ## not for ATLAS sum(a5$nonOut) >= 18 # 18: OpenBLAS } else TRUE a6$nonOut[-20] ## hbk (n = 75, p = 3) should be "stable" (but isn't quite) abs(Rnk(a3$adjout) - c(62, 64, 69, 71, 70, 66, 65, 63, 68, 67, 73, 75, 72, 74, 35, 60, 55, 4, 22, 36, 6, 33, 34, 28, 53, 16, 13, 9, 27, 31, 49, 39, 20, 50, 14, 2, 24, 40, 54, 21, 17, 37, 52, 23, 58, 19, 61, 11, 25, 8, 46, 59, 48, 47, 29, 44, 43, 42, 7, 30, 18, 51, 41, 15, 10, 38, 3, 56, 57, 5, 1, 12, 26, 32, 45) ) <= 3 ## all 0 on 64-bit (F 32) Linux }) ## milk (n = 86) : -- Quite platform dependent! r <- Rnk(a4$adjout) r64 <- ## the 64-bit (ubuntu 14.04, nb-mm3) values: c(65, 66, 61, 56, 47, 51, 19, 37, 74, 67, 79, 86, 83, 84, 85, 82, 81, 73, 80, 55, 27, 3, 70, 68, 78, 76, 77, 53, 48, 8, 29, 33, 6, 32, 28, 31, 36, 40, 22, 58, 64, 52, 39, 63, 44, 30, 57, 46, 43, 45, 25, 54, 12, 1, 9, 2, 71, 14, 75, 23, 4, 10, 34, 35, 17, 24, 15, 20, 38, 72, 42, 13, 50, 60, 62, 26, 69, 18, 5, 21, 7, 49, 11, 41, 59, 16) r32 <- ## Linux 32bit (florence: 3.14.8-100.fc19.i686.PAE) c(78, 79, 72, 66, 52, 61, 22, 41, 53, 14, 74, 85, 82, 83, 84, 80, 81, 56, 73, 65, 30, 3, 16, 17, 68, 57, 58, 63, 54, 8, 32, 37, 6, 36, 31, 35, 40, 44, 25, 69, 77, 62, 43, 76, 48, 34, 67, 51, 47, 49, 28, 64, 12, 1, 9, 2, 33, 15, 59, 26, 4, 10, 38, 39, 20, 27, 18, 23, 42, 86, 46, 13, 60, 71, 75, 29, 50, 21, 5, 24, 7, 55, 11, 45, 70, 19) d <- (r - if (is32) r32 else r64) cbind(r, d) table(abs(d)) cumsum(table(abs(d))) # <=> unscaled ecdf(d) ## For the biggest part (79 out of 86), the ranks are "close": ## 2014: still true, but in a different sense.. ## ^ typically, but e.g., *not* when using non-BLAS matprod(): sum(abs(d) <= 17) >= 78 sum(abs(d) <= 13) >= 75 ## check of adjOutlyingness *free* bug ## reported by Kaveh Vakili set.seed(-37665251) X <- matrix(rnorm(100*5), 100, 5) Z <- matrix(rnorm(10*5)/100, 10, 5) Z[,1] <- Z[,1] + 5 X[91:100,] <- Z # if anything these should be outliers, but ... for (i in 1:10) { ## this would produce an error in the 6th iteration aa <- adjOutlyingness(x=X, ndir=250) if(any(is.out <- !aa$nonOut)) cat("'outliers' at obs.", paste(which(is.out), collapse=", "),"\n") stopifnot(1/4 < aa$adjout & aa$adjout < 16) } ## Check "high"-dimensional Noise ... typically mc() did *not* converge for some re-centered columns ## Example by Valentin Todorov: n <- 50 p <- 30 set.seed(1) # MM a <- matrix(rnorm(n * p), nrow=n, ncol=p) str(a) kappa(a) # 20.42 (~ 10--20 or so; definitely not close to singular) a.a <- adjOutlyingness(a, mcScale=FALSE, # <- my own recommendation trace.lev=1) a.s <- adjOutlyingness(a, mcScale=TRUE, trace.lev=1) ## a.a : str(a.a) # high 'adjout' values "all similar" -> no outliers .. hmm .. ??? (hdOut <- which( ! a.a$nonOut)) ## indices of "outlier" -- very platform dependent ! a.a$MCadjout; all.equal(a.a$MCadjout, 0.136839766177, tol = 1e-12) # seen 7.65e-14 and "big" differences on non-default platforms ## a.s : which(! a.s$nonOut ) # none [all TRUE] a.s$MCadjout # platform dependent; saw all.equal(a.s$MCadjout, 0.32284906741568, tol = 1e-13) # seen 2.2e-15 .. # and big diffs on non-default platforms ## ## The adjout values are all > 10^15 !!! why ?? ## Now (2021) I know: n < 4*p ==> can find 1D-projection where 1 of the 2 {Q3-Q2, Q2-Q1} is 0 ! 
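## (Editor's sketch of the n < 4*p remark above; toy numbers, not data from this test:)
## once more than a quarter of the projected values tie at the median, one of the two
## quartile gaps is exactly 0, and any outlyingness measure dividing by that gap blows up:
z.toy <- c(0, 1, 1, 1, 2)                 # a hypothetical 1-D projection with ties
q.toy <- unname(quantile(z.toy, (1:3)/4)) # 1 1 1
q.toy[3] - q.toy[2]                       # = 0  ==>  (x - Q2)/(Q3 - Q2) would be Inf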
##--------------------------------------------------------------------------------------------- ###-- Back to mc() checks for "hard" cases ### ===== ----------------------- ## "large n" (this did overflow sum_p, sum_q earlier ==> had inf.loop): set.seed(3); x <- rnorm(2e5) (mx <- mc(x, trace.lev=3)) stopifnot(print(abs(mx - -0.000772315846101988)) < 1e-15) # 3.252e-19, 64b Linux # 1.198e-16, 32b Windows ### Some platform info : local({ nms <- names(Si <- Sys.info()) dropNms <- c("nodename", "machine", "login") structure(Si[c("nodename", nms[is.na(match(nms, dropNms))])], class="simple.list") }) if(identical(1L, grep("linux", R.version[["os"]]))) { ##----- Linux - only ---- ## Sys.procinfo <- function(procfile) { l2 <- strsplit(readLines(procfile),"[ \t]*:[ \t]*") r <- sapply(l2[sapply(l2, length) == 2], function(c2)structure(c2[2], names= c2[1])) attr(r,"Name") <- procfile class(r) <- "simple.list" r } ## Scpu <- Sys.procinfo("/proc/cpuinfo") Smem <- Sys.procinfo("/proc/meminfo") print(Scpu[c("model name", "cpu MHz", "cache size", "bogomips")]) print(Smem[c("MemTotal", "SwapTotal")]) } ##' Checking the breakdown point of mc() --- Hubert et al. theory said : 25% ##' using non-default doReflect=FALSE as that corresponds to original Hubert et al. ##' ##' @title Medcouple mc() checking ##' @param x ##' @param Xfun ##' @param eps ##' @param NAiferror ##' @param doReflect ##' @param ... ##' @return mc(*,..) or NaN in case mc() signals an error [non-convergence] ##' @author Martin Maechler mcX <- function(x, Xfun, eps=0, NAiferror=FALSE, doReflect=FALSE, ...) { stopifnot(is.numeric(x), is.function(Xfun), "eps" %in% names(formals(Xfun))) myFun <- if(NAiferror) function(u) tryCatch(mc(Xfun(u, eps=eps), doReflect=doReflect, ...), error = function(e) NaN) else function(u) mc(Xfun(u, eps=eps), doReflect=doReflect, ...) vapply(x, myFun, 1.) } X1. <- function(u, eps=0) c(1,2,3, 7+(-10:10)*eps, u + (-1:1)*eps) ## ==> This *does* breakdown [but points are not "in general position"]: r.mc1 <- curve(mcX(x, X1.), 10, 1e35, log="x", n=1001) rt1 <- uniroot(function(x) mcX(exp(x), X1.) - 1/2, lower=0, upper=500) exp(rt1$root) # 4.056265e+31 ## eps > 0 ==> No duplicated points ==> theory says breakdown point = 0.25 ## ------- but get big numerical problems: if(FALSE) { # ==> convergence problem [also in maxit = 1e5] .. really an *inf* loop! r.mc1.1 <- curve(mcX(x, X1., eps= .1 ), 10, 1e35, log="x", n=1001) r.mc1.2 <- curve(mcX(x, X1., eps= .01 ), 10, 1e35, log="x", n=1001) r.mc1.3 <- curve(mcX(x, X1., eps= .001), 10, 1e35, log="x", n=1001) r.mc1.5 <- curve(mcX(x, X1., eps= 1e-5), 10, 1e35, log="x", n=1001) r.mc1.8 <- curve(mcX(x, X1., eps= 1e-8), 10, 1e35, log="x", n=1001) r.mc1.15 <- curve(mcX(x, X1., eps=1e-15), 10, 1e35, log="x", n=1001)# still! } ## practically identical to eps = 0 where we have breakdown (see above) r.mc1.16 <- curve(mcX(x, X1., eps=1e-16), 10, 1e35, log="x", n=1001) all.equal(r.mc1, r.mc1.16, tol=1e-15)#-> TRUE ## Quite bad case: Non convergence X2. <- function(u) c(1:3, seq(6, 8, by = 1/8), u, u, u) try(mc(X2.(4.3e31)))## -> error: no convergence if(FALSE) # and the same here -- after longer waiting: mc(X2.(4.3e31), eps1=1e-7, eps2=1e-100, maxit = 1e6)## -> error: no convergence ## related, more direct: X3. <- function(u) c(10*(1:3), 60:80, (4:6)*u) mc(X3.(1e31), trace=5) # fine convergence in one iter. try( mc(X3.(1e32), trace=3) # no convergence... )# bad try(mc(X3.(1e32), trace=5, maxit=6)) # no convergence... 
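## (Editor's note -- a small sketch; h.mc() is an ad-hoc helper, not from robustbase:)
## the medcouple kernel h(x_i, x_j) = ((x_j - m) - (m - x_i)) / (x_j - x_i) is ~ 1 for
## every pair whose x_j is huge, which is why the scaled-up examples above sit near
## complete breakdown (mc ~ 1) when they do converge:
h.mc <- function(xi, xj, m) ((xj - m) - (m - xi)) / (xj - xi)
h.mc(xi = 10, xj = 5e32, m = 70)   # essentially 1  (numbers as in X3.(1e32) above)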
### TODO : find example with *smaller* sample size -- with no convergence X4. <- function(u, eps, ...) c(10, 70:75, (2:3)*u) mc(X4.(1e34))# "fine" ## whoa: jump down and up: r.mc4 <- curve(mcX(x, X4.), 100, 1e35, log="x", n=2^12) X5. <- function(u) c(10*(1:3), 70:78, (4:6)*u) try(mc(X5.(1e32), maxit=1000)) X5. <- function(u, eps,...) c(5*(1:12), (4:6)*u) (r.mc5 <- mc(X5.(1e32), doReflect=FALSE, maxit=1000)) all.equal(1, ## <- i.e. complete breakdown r.mc5) ## platform dependent! yes, on 64-bit try(mc(X5.(5e31), maxit=10000)) # no convergence.. r.mc5Sml <- curve(mcX(x, X5.), 1, 100, log="x", n=1024) ## quite astonishing r.mc5Lrg <- curve(mcX(x, X5.), 1, 1e30, log="x", n=1024) ## ok.. ## but then going higher -- we have problems: r.mc5Big <- curve(mcX(x, X5., NAiferror=TRUE), 1, 1e38, log="x", n = 2^12, type = "o", cex = 1/4) warnings() summary(r.mc5Big$y) ## 15 NA's at x : with(r.mc5Big, x[is.na(y)]) ## ~= [4.3, 5.8] * 10^31 c.time(proc.time()) summary(warnings()) # seen 15 x In mcComp(....) : ## maximal number of iterations (100 =? 100) reached prematurely robustbase/tests/wgt-himed.R0000644000176200001440000000163414004270422015574 0ustar liggesusershimed <- function(x) { n2 <- 1 + length(x) %/% 2; sort(x, partial = n2)[n2] } ## Tolerance 2e-7 {13 * higher than default 1.49e-8 } is.all.equal <- function(x,y, tol = 2e-7) { is.logical(r <- all.equal(x,y, tolerance = tol)) && r } library(robustbase) stopifnot(is.na(wgt.himedian(numeric()))) ## hi-median() seg.faulted or inf.looped till Jan.3, 2021 options(digits = 7)# single precision! set.seed(15) cat(" n | range(x) | wgt.Himed\n", "------------------------------\n",sep="") for(i in 1:100) { n <- rpois(1, lam = 10) cat(formatC(n,wid=3)," ") x <- round(rnorm(n),3) iw <- 1 + rpois(n, lam = 2) him <- himed(rep(x, iw)) ## == naive R solution whim <- wgt.himedian (x, iw) if(!is.all.equal(whim, him)) cat("whim != him: ", whim, "!=", him,"\n") cat(formatC(range(x), wid = 6, flag="-"), "", formatC(whim, wid = 6, flag="+"), "\n") } robustbase/tests/glmrob-1.R0000644000176200001440000001667012553432042015342 0ustar liggesuserslibrary(robustbase) source(system.file("xtraR/ex-funs.R", package = "robustbase")) source(system.file("test-tools-1.R", package = "Matrix", mustWork=TRUE))# assert.EQ ###>> 1 ------------------- family = poisson ------------------------------------ ### very simple model [with outliers] set.seed(113) y <- rpois(17, lambda = 4) ## -> target: beta_0 = log(E[Y]) = log(4) = 1.386294 y[1:2] <- 99:100 # outliers y rm1 <- glmrob(y ~ 1, family = poisson, trace = TRUE, acc = 1e-13) # default is just 1e-4 ## and check the robustness weights: assert.EQ(c(0.0287933850640724, 0.0284930623638766, 0.950239140568007, 0.874115394740014), local({w <- rm1$w.r; w[ w != 1 ] }), tol = 1e-14) assert.EQ(coef(rm1), c("(Intercept)" = 1.41710946076738),tol = 1e-14) cm1 <- glm (y ~ 1, family = poisson, trace = TRUE) rmMT <- glmrob(y ~ 1, family = poisson, trace = TRUE, method="MT") (sMT <- summary(rmMT)) if(FALSE) # for manual digging: debug(robustbase:::glmrobMqle) allresid <- function(obj, types = c("deviance", "pearson", "working", "response")) { sapply(types, residuals, object = obj) } okFit <- function(obj, check.attr=FALSE, ...) { all.equal(obj$y, obj$fitted.values + residuals(obj, "response"), check.attributes=check.attr, ...) } ## check validity of several methods simultaneously: y. <- model.response(model.frame(rm1)) stopifnot(okFit(cm1), okFit(rm1), y. 
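## (Editor's aside -- a minimal sketch of the "target" comment above:)  for an
## intercept-only Poisson GLM the classical MLE is simply log(mean(y)), so the two
## outliers pull the non-robust estimate far above the target log(4) ~= 1.386 :
log(mean(y))          # inflated by y[1:2]
log(mean(y[-(1:2)]))  # without the outliers: close to log(4)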
== y) alr.c <- allresid(cm1) alr.r <- allresid(rm1) ## MM --- just for now -- plot(resid(cm1), resid(rm1), asp=1); abline(0,1, col=2) plot(resid(cm1,type="pearson"), resid(rm1, type="pearson"), asp=1); abline(0,1, col=2) plot(resid(cm1,type="working"), resid(rm1, type="working"), asp=1); abline(0,1, col=2) ## leave away the outliers -- cm0 <- glm (y ~ 1, family = poisson, trace = TRUE, subset = -(1:2)) plot(resid(cm0), resid(rm1)[-(1:2)], asp=1); abline(0,1, col=2) plot(resid(cm0,type="pearson"), resid(rm1, type="pearson")[-(1:2)], asp=1); abline(0,1, col=2) plot(resid(cm0,type="working"), resid(rm1, type="working")[-(1:2)], asp=1); abline(0,1, col=2) plot(resid(cm0,type="response"), resid(rm1, type="response")[-(1:2)], asp=1); abline(0,1, col=2) ## Use weights (slow convergence !) w2 <- c(rep(1,8), rep(10,9)) rm2 <- glmrob(y ~ 1, family = poisson, trace = TRUE, weights = w2, maxit = 500, acc = 1e-10) # default is just 1e-4 ## slow convergence stopifnot(okFit(rm2)) ###>> 2 ------------------- family = binomial ----------------------------------- ## Using *factor* y ... x <- seq(0,5, length = 120) summary(px <- plogis(-5 + 2*x)) set.seed(7) (f <- factor(rbinom(length(x), 1, prob=px))) summary(m.c0 <- glm (f ~ x, family = binomial)) summary(m.r0 <- glmrob(f ~ x, family = binomial)) ## add outliers --- in y: f. <- f f.[i1 <- 2:3] <- 1 f.[i0 <- 110+c(1,7)] <- 0 m.c1 <- glm (f. ~ x, family = binomial) summary(m.r1 <- glmrob(f. ~ x, family = binomial)) ## hmm, not so robust? stopifnot(m.r1$w.r[c(i0,i1)] < 1/3, # well, at least down weighted ## and coefficients change less : (coef(m.r1) - coef(m.c0)) / (coef(m.c1) - coef(m.c0)) < 1) assert.EQ(c("(Intercept)" = -3.10817337603974, x = 1.31618564057790), coef(m.r1), tol= 1e-14, giveRE=TRUE) y <- as.numeric(as.character(f.)) m.r2 <- BYlogreg(x0=x, y=y, trace=TRUE, maxhalf= 10) m.r2A <- BYlogreg(x0=x, y=y, trace= 2 , maxhalf= 15) ## different.. but not so much: iB <- 1:5 assert.EQ(m.r2A[iB], m.r2[iB], tol = .003, giveRE=TRUE) assert.EQ(c("Intercept" = -2.9554950286, x = 1.2574679132), ## 32-bit{ada-5} -2.95549502890363 1.25746791332613 m.r2$coef, tol=8e-10, giveRE=TRUE)# seen 5.316e-10 for --disable-long-double assert.EQ( c(0.685919891749065, 0.256419206157062), ## 32-bit{ada-5}: ## 0.685919891858219, 0.256419206203016) m.r2$sterror, tol=4e-9)# seen 1.025e-9 for --disable-long-double data(foodstamp) str(foodstamp) ## Model with 'income' instead of log(income+1) is "interesting" ## because BYlogreg() needs maxhalf > 10 for convergence! m.fs0 <- glm (participation ~ ., family=binomial, data=foodstamp) m.fs0QL <- glmrob(participation ~ ., family=binomial, data=foodstamp) y.fs <- foodstamp[,"participation"] X.fs0 <- model.matrix(m.fs0) head(X.fs0) ## (former default) maxhalf = 10 leads to too early convergence: m.fsWBY. <- BYlogreg(x0=X.fs0, y=y.fs, addIntercept=FALSE, trace=TRUE, maxhalf=10) m.fs.BY. 
<- BYlogreg(x0=X.fs0, y=y.fs, initwml=FALSE, addIntercept=FALSE, trace=TRUE, maxhalf=10) m.fsWBY <- BYlogreg(x0=X.fs0, y=y.fs, addIntercept=FALSE, trace=TRUE, maxhalf=18) m.fs.BY <- BYlogreg(x0=X.fs0, y=y.fs, initwml=FALSE, addIntercept=FALSE, trace=TRUE, maxhalf=18) assert.EQ(m.fsWBY.[iB], m.fsWBY[iB], tol= 0.07)## almost 7% different assert.EQ(m.fs.BY.[iB], m.fs.BY[iB], tol= 0.08) foodSt <- within(foodstamp, { logInc <- log(1 + income) ; rm(income) }) m.fsML <- glm (participation ~ ., family=binomial, data=foodSt) m.fsQL <- glmrob(participation ~ ., family=binomial, data=foodSt) X.fs <- model.matrix(m.fsML) stopifnot(dim(X.fs) == c(150, 4)) # including intercept! try(## FIXME -- Mahalanobis fails with singular matrix, here: m.fsWBY <- BYlogreg(x0=X.fs, y=y.fs, addIntercept=FALSE, trace=TRUE, maxhalf=18) ) ## maxhalf=18 is too much --> no convergence (in 1000 steps) m.fs.BY <- BYlogreg(x0=X.fs, y=y.fs, initwml=FALSE, addIntercept=FALSE, trace=TRUE, maxhalf=18) signif( rbind(ML = coef(m.fsML), QL =coef(m.fsQL), WBY0=coef(m.fsWBY.), BY0=coef(m.fs.BY.), WBY =coef(m.fsWBY ), BY =coef(m.fs.BY) ) , 4) if(FALSE) { ## *scaling* of X ( ?? <==> ?? 'sigma1' ) ------------------ ## no "W" (Mahalanobis fail because of *singular* X): m.fs.BY100 <- BYlogreg(x0=100*X.fs, initwml=FALSE, y=y.fs, addIntercept=FALSE, trace=TRUE, maxhalf=18) ## ==> no convergence X1c <- cbind(1, 100*X.fs[,-1]) m.fsWBY1c <- BYlogreg(x0=X1c, y=y.fs, addIntercept=FALSE, trace=TRUE, maxhalf=18) ## ==> illegal singularity$kind }## not yet ###-------- Gamma ------------ ## Realistic "data" {from help(glmrob)}: mu <- c(122.131, 53.0979, 39.9039, 33.9232, 28.007, 24.923, 21.5747, 19.6971, 18.4516) ns.resid <- c(-0.0338228, 0.0923228, 0.0525284, 0.0317426, -0.035954, 0.00308925, -0.026637, -0.0353932, -0.0244761) Vmu <- c(14915.9, 2819.38, 1592.32, 1150.78, 784.39, 621.156, 465.467, 387.978, 340.462) Hp2 <- robustbase:::Huberprop2 ## Hp2. <- robustbase:::Huberprop2. ## was: phis <- 2^(-70:-1) -- but that was *not* reliable (on 32-bit e.g.) 
phis <- 2^(-42:-1) H1 <- sapply(phis, function(phi) Hp2(phi, ns.resid=ns.resid, mu=mu, Vmu=Vmu, tcc = 1.345)) ## H2 <- sapply(phis, function(phi) ## Hp2.(phi, ns.resid=ns.resid, mu=mu, Vmu=Vmu, tcc = 1.345)) dput(signif(H1)) H2 <- c(9.91741, 9.88674, 9.89438, 9.88674, 9.88961, 9.88961, 9.88961, 9.88984, 9.88973, 9.88964, 9.8897, 9.88975, 9.88976, 9.88975, 9.88974, 9.88974, 9.88974, 9.88974, 9.88974, 9.88974, 9.88974, 9.88974, 9.88975, 9.88975, 9.88975, 9.33161, 8.70618, 8.39347, 8.23714, 8.15902, 8.12006, 7.16275, 3.38703, -0.0879886, -2.3322, -4.16929, -5.26821, -5.80526, -6.04822, -6.11538, -6.02613, -5.66718) all.equal(H1,H2, tolerance = 0) # -> see 8.869e-7 stopifnot(all.equal(H1,H2, tolerance = 1e-5)) if(dev.interactive(TRUE)) # shows that phi < 1e-12 is doubtful matplot(phis, cbind(H1,H2), log="x", ylim = rrange(H1), type="o") robustbase/tests/MCD-specials.R0000644000176200001440000000141410377104102016107 0ustar liggesusers#### Test special cases for covMcd() library(robustbase) ### 1) p = 1 ---------------------------------------------------- set.seed(1) x <- c(rnorm(50),100, 1e10) (r1 <- covMcd(x)) str(r1) summary(r1) ## with alpha = 1 (r1.1 <- covMcd(x, alpha = 1)) str(r1.1) summary(r1.1) ### 1b) p = 1, constant scale (rc <- covMcd(rep(1,12))) str(rc) summary(rc) ## with alpha = 1 (rc1 <- covMcd(rep(1,12), alpha = 1)) str(rc1) summary(rc1) ### 2) constant observations { multivariate scale == 0 } ----------- (X <- matrix(rep(2*(1:4), 12), nrow = 12, byrow = TRUE)) (rC <- covMcd(X)) summary(rC) (rC1 <- covMcd(X, alpha = 1)) summary(rC1) ### 3) alpha = 1 : classical estimates --- for general cases -------- cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/lmrob-methods.R0000644000176200001440000000375213772570740016507 0ustar liggesusers### tests methods argument of lmrob.control library(robustbase) data(stackloss) ## S set.seed(0) summary(m0 <- lmrob(stack.loss ~ ., data = stackloss, method = "S", compute.outlier.stats = "S")) set.seed(0) m0a <- lmrob.S(m0$x, stack.loss, lmrob.control()) all.equal(m0[c('coefficients', 'scale', 'rweights')], m0a[c('coefficients', 'scale', 'rweights')]) ## MM set.seed(0) summary(m1 <- lmrob(stack.loss ~ ., data = stackloss, method = "MM", compute.outlier.stats = "S")) set.seed(0) m2 <- update(m1, method = "SM") all.equal(m1[c('coefficients', 'scale', 'cov')], m2[c('coefficients', 'scale', 'cov')]) set.seed(0) m3 <- update(m0, method = "SM", cov = '.vcov.w') ## SMD set.seed(0) summary(m4 <- lmrob(stack.loss ~ ., data = stackloss, method = "SMD", psi = 'bisquare', compute.outlier.stats = "S")) summary(m4a <- lmrob..D..fit(m3)) ## rearrange m4a and update call m4a <- m4a[names(m4)] class(m4a) <- class(m4) m4a$call <- m4$call all.equal(m4, m4a, check.environment = FALSE) ## SMDM set.seed(0) summary(m5 <- lmrob(stack.loss ~ ., data = stackloss, method = "SMDM", psi = 'bisquare', compute.outlier.stats = "S")) summary(m5a <- lmrob..M..fit(obj=m4)) ## rearrange m5a m5a <- m5a[names(m5)] class(m5a) <- class(m5) all.equal(m5, m5a, check.environment = FALSE) #-> 3 string mismatch ## Fast S large n strategy (sped up) model <- model.frame(LNOx ~ . 
,data = NOxEmissions) control <- lmrob.control(fast.s.large.n = 10, n.group = 341, groups = 2) set.seed(0) try(ret <- lmrob.S(model.matrix(model, NOxEmissions)[1:682,], NOxEmissions$LNOx[1:682], control)) ## do what the error says control <- lmrob.control(fast.s.large.n = Inf) try(ret <- lmrob.S(model.matrix(model, NOxEmissions)[1:682,], NOxEmissions$LNOx[1:682], control)) ##-> Error ...... DGEEQU: column 30 of the design matrix is exactly zero. ## ## still fails, but this error is to be expected since only a part ## of the design matrix is given robustbase/tests/subsample.R0000644000176200001440000001276212271657124015722 0ustar liggesusers### test subsample ### LU decomposition and singular subsamples handling require(robustbase) source(system.file("xtraR/subsample-fns.R", package = "robustbase", mustWork=TRUE)) source(system.file("test-tools-1.R", package="Matrix", mustWork=TRUE)) require(Matrix) cat("doExtras:", doExtras <- robustbase:::doExtras(),"\n") showProc.time() A <- matrix(c(0.001, 1, 1, 2), 2) set.seed(11) str(sa <- tstSubsample(A)) A <- matrix(c(3, 2, 6, 17, 4, 18, 10, -2, 12), 3) tstSubsample(A) ## test some random matrix set.seed(1002) A <- matrix(rnorm(100), 10) tstSubsample(A) ## test singular matrix handling A <- matrix(c(1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1), 4, byrow=TRUE) tstSubsample(A) ## test subsample with mts > 0 data <- data.frame(y = rnorm(9), expand.grid(A = letters[1:3], B = letters[1:3])) x <- model.matrix(y ~ ., data) y <- data$y ## this should produce a warning and return status == 2 showSys.time(z <- Rsubsample(x, y, mts=2)) stopifnot(z$status == 2) ## test equilibration ## columns only X <- matrix(c(1e-7, 2, 1e-10, 0.2), 2) y <- 1:2 tstSubsample(t(X), y) ## rows only X <- matrix(c(1e-7, 2, 1e10, 0.2), 2) y <- 1:2 tstSubsample(X, y) ## both X <- matrix(c(1e-7, 1e10, 2, 2e12), 2) y <- 1:2 tstSubsample(X, y) showProc.time() ## test real data example data(possumDiv)## 151 * 9; the last two variables are factors with(possumDiv, table(eucalyptus, aspect)) mf <- model.frame(Diversity ~ .^2, possumDiv) X <- model.matrix(mf, possumDiv) y <- model.response(mf) stopifnot(qr(X)$rank == ncol(X)) ## this used to fail: different pivots in step 37 str(s1 <- tstSubsample(X, y)) s2 <- tstSubsample(X / max(abs(X)), y / max(abs(X))) s3 <- tstSubsample(X * 2^-50, y * 2^-50) ## all components *BUT* x, y, lu, Dr, Dc, rowequ, colequ : nm <- names(s1); nm <- nm[is.na(match(nm, c("x","y","lu", "Dr", "Dc", "rowequ", "colequ")))] stopifnot(all.equal(s1[nm], s2[nm], tolerance=1e-10), all.equal(s1[nm], s3[nm], tolerance=1e-10)) showProc.time() set.seed(10) nsing <- sum(replicate(if(doExtras) 200 else 20, tstSubsampleSing(X, y))) stopifnot(nsing == 0) showProc.time() ## test example with many categorical predictors set.seed(10) r1 <- lmrob(Diversity ~ .^2 , data = possumDiv, cov="none") ## lmrob.S used to fail for this seed: set.seed(108) r2 <- lmrob(Diversity ~ .^2 , data = possumDiv, cov="none") #, trace=4) showProc.time() ## investigate problematic subsample: idc <- 1 + c(140, 60, 12, 13, 89, 90, 118, 80, 17, 134, 59, 94, 36, 43, 46, 93, 107, 62, 57, 116, 11, 45, 35, 38, 120, 34, 29, 33, 147, 105, 115, 92, 61, 91, 104, 141, 138, 129, 130, 84, 119, 132, 6, 135, 112, 16, 67, 41, 102, 76, 111, 82, 148, 24, 131, 10, 96, 0, 87, 21, 127, 56, 124) rc <- lm(Diversity ~ .^2 , data = possumDiv, subset = idc) X <- model.matrix(rc) y <- possumDiv$Diversity[idc] tstSubsample(X, y)## have different pivots ... 
could not find non-singular lu <- LU.gaxpy(t(X)) stopifnot(lu$sing) zc <- Rsubsample(X, y) stopifnot(zc$status > 0) ## column 52 is linearly dependent and should have been discarded ## qr(t(X))$pivot image(as(round(zc$lu - (lu$L + lu$U - diag(nrow(lu$U))), 10), "Matrix")) image(as( sign(zc$lu) - sign(lu$L + lu$U - diag(nrow(lu$U))), "Matrix")) showProc.time() ## test equilibration ## colequ only X <- matrix(c(1e-7, 2, 1e-10, 0.2), 2) y <- 1:2 tstSubsample(t(X), y) ## rowequ only X <- matrix(c(1e-7, 2, 1e10, 0.2), 2) y <- 1:2 tstSubsample(X, y) ## both X <- matrix(c(1e-7, 1e10, 2, 2e12), 2) y <- 1:2 tstSubsample(X, y) showProc.time() ### real data, see MM's ~/R/MM/Pkg-ex/robustbase/hedlmrob.R ## close to singular cov(): attach(system.file("external", "d1k27.rda", package="robustbase", mustWork=TRUE)) fm1 <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k27) ## ^^^^^ gave error, earlier, now with a warning -- use ".vcov.w" ## --> cov = ".vcov.w" fm2 <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k27, cov = ".vcov.w", trace = TRUE) showProc.time()# 2.77 if(doExtras) {##----------------------------------------------------------------- ## Q: does it change to use numeric instead of binary factors ? ## A: not really .. d1k.n <- d1k27 d1k.n[-(1:5)] <- lapply(d1k27[,-(1:5)], as.numeric) fm1.n <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k.n) fm2.n <- lmrob(y ~ a + I(a^2) + tf + I(tf^2) + A + I(A^2) + . , data = d1k.n, cov = ".vcov.w", trace = 2) print(summary(weights(fm1, type="robustness"))) hist(weights(fm1, type="robustness"), main="robustness weights of fm1") rug(weights(fm1, type="robustness")) showProc.time()## 2.88 ## fmc <- lm (y ~ poly(a,2)-a + poly(tf, 2)-tf + poly(A, 2)-A + . , data = d1k27) print(summary(fmc)) ## -> has NA's for 'a, tf, A' --- bad that it did *not* work to remove them nform <- update(formula(fm1), ~ . +poly(A,2) -A -I(A^2) +poly(a,2) -a -I(a^2) +poly(tf,2) -tf -I(tf^2)) fm1. <- lmrob(nform, data = d1k27)# now w/o warning !? !! fm2. <- lmrob(nform, data = d1k27, cov = ".vcov.w", trace = TRUE) ## now lmrob takes care of NA coefficients automatically print(lmrob(y ~ poly(a,2)-a + poly(tf, 2)-tf + poly(A, 2)-A + . , data = d1k27)) showProc.time() ## 4.24 } ## only if(doExtras) ##-------------------------------------------------------- ## test exact fit property set.seed(20) data <- data.frame(y=c(rep.int(0, 20), rnorm(5)), group=rep(letters[1:5], each=5)) x <- model.matrix(y ~ group, data) lmrob.S(x, data$y, lmrob.control()) (ret <- lmrob(y ~ group, data)) summary(ret) showProc.time() robustbase/tests/Rsquared.Rout.save0000644000176200001440000002436613326544553017210 0ustar liggesusers R version 3.5.1 (2018-07-02) -- "Feather Spray" Copyright (C) 2018 The R Foundation for Statistical Computing Platform: x86_64-pc-linux-gnu (64-bit) R is free software and comes with ABSOLUTELY NO WARRANTY. You are welcome to redistribute it under certain conditions. Type 'license()' or 'licence()' for distribution details. R is a collaborative project with many contributors. Type 'contributors()' for more information and 'citation()' on how to cite R or R packages in publications. Type 'demo()' for some demos, 'help()' for on-line help, or 'help.start()' for an HTML browser interface to help. Type 'q()' to quit R. > require(robustbase) Loading required package: robustbase > > set.seed(17)# reproducibility! 
> ## to check: > ## - for the empty model > summary(lmrob(Y ~ 0, coleman)) Call: lmrob(formula = Y ~ 0, data = coleman) \--> method = "" Residuals: Min 1Q Median 3Q Max 22.70 32.78 35.86 39.95 43.10 No Coefficients > ## - with and without an intercept in the model > summary(lmrob(Y ~ 1, coleman)) Call: lmrob(formula = Y ~ 1, data = coleman) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -12.8605 -2.7855 0.2945 4.3895 7.5395 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 35.560 1.342 26.5 <2e-16 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 5.48 Convergence in 9 IRWLS iterations Robustness weights: one weight is ~= 1. The remaining 19 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.5611 0.8986 0.9553 0.9044 0.9918 0.9987 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 5.000e-03 eps.x warn.limit.reject warn.limit.meanrw 1.819e-12 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) > writeLines(sfm <- capture.output( + summary(lmrob(Y ~ ., coleman)))) # and this must be "identical": Call: lmrob(formula = Y ~ ., data = coleman) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -4.16181 -0.39226 0.01611 0.55619 7.22766 Coefficients: Estimate Std. Error t value Pr(>|t|) (Intercept) 30.50232 6.71260 4.544 0.000459 *** salaryP -1.66615 0.43129 -3.863 0.001722 ** fatherWc 0.08425 0.01467 5.741 5.10e-05 *** sstatus 0.66774 0.03385 19.726 1.30e-11 *** teacherSc 1.16778 0.10983 10.632 4.35e-08 *** motherLev -4.13657 0.92084 -4.492 0.000507 *** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 1.134 Multiple R-squared: 0.9814, Adjusted R-squared: 0.9747 Convergence in 11 IRWLS iterations Robustness weights: observation 18 is an outlier with |weight| = 0 ( < 0.005); The remaining 19 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.1491 0.9412 0.9847 0.9279 0.9947 0.9982 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 5.000e-03 eps.x warn.limit.reject warn.limit.meanrw 1.569e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) > sfm2 <- capture.output(summary(lmrob(Y ~ ., coleman, model=FALSE, x=FALSE, y=FALSE))) > iCall <- grep("lmrob.*coleman", sfm)# the only line that differs > stopifnot(sfm[-iCall] == sfm2[-iCall]) > ## w/o intercept: > summary(lmrob(Y ~ . - 1, coleman, model=FALSE, x=FALSE, y=FALSE)) Call: lmrob(formula = Y ~ . - 1, data = coleman, model = FALSE, x = FALSE, y = FALSE) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -4.86146 -0.59195 -0.04679 0.87826 5.40639 Coefficients: Estimate Std. Error t value Pr(>|t|) salaryP -1.97540 0.45262 -4.364 0.000555 *** fatherWc 0.03388 0.02220 1.526 0.147749 sstatus 0.55922 0.07590 7.367 2.34e-06 *** teacherSc 1.60446 0.19039 8.427 4.51e-07 *** motherLev -0.48903 0.90805 -0.539 0.598097 --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 
0.1 ' ' 1 Robust residual standard error: 1.344 Multiple R-squared: 0.9987, Adjusted R-squared: 0.9983 Convergence in 14 IRWLS iterations Robustness weights: 3 weights are ~= 1. The remaining 17 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.06899 0.89030 0.95860 0.82750 0.98700 0.99820 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 5.000e-03 eps.x warn.limit.reject warn.limit.meanrw 1.569e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) > > ## - when prior-weights are included > wts <- c(rep(0.05, 10), rep(2, 10)) > summary(lmrob(Y ~ . - 1, coleman, model=FALSE, x=FALSE, y=FALSE, + weights = wts)) Call: lmrob(formula = Y ~ . - 1, data = coleman, weights = wts, model = FALSE, x = FALSE, y = FALSE) \--> method = "MM" Residuals: Min 1Q Median 3Q Max -4.53960 -0.03032 0.10984 1.55271 6.36480 Coefficients: Estimate Std. Error t value Pr(>|t|) salaryP -2.68644 0.05871 -45.761 < 2e-16 *** fatherWc 0.04761 0.00721 6.603 8.39e-06 *** sstatus 0.58362 0.00314 185.842 < 2e-16 *** teacherSc 1.77115 0.07918 22.369 6.20e-13 *** motherLev -1.03171 0.34154 -3.021 0.0086 ** --- Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1 Robust residual standard error: 0.423 Multiple R-squared: 0.9985, Adjusted R-squared: 0.998 Convergence in 5 IRWLS iterations Robustness weights: 3 observations c(12,16,18) are outliers with |weight| = 0 ( < 0.005); 5 weights are ~= 1. The remaining 12 ones are summarized as Min. 1st Qu. Median Mean 3rd Qu. Max. 0.5441 0.9257 0.9833 0.9303 0.9956 0.9985 Algorithmic parameters: tuning.chi bb tuning.psi refine.tol 1.548e+00 5.000e-01 4.685e+00 1.000e-07 rel.tol scale.tol solve.tol eps.outlier 1.000e-07 1.000e-10 1.000e-07 5.000e-03 eps.x warn.limit.reject warn.limit.meanrw 2.219e-10 5.000e-01 5.000e-01 nResample max.it best.r.s k.fast.s k.max 500 50 2 1 200 maxit.scale trace.lev mts compute.rd fast.s.large.n 200 0 1000 0 2000 psi subsampling cov "bisquare" "nonsingular" ".vcov.avar1" compute.outlier.stats "SM" seed : int(0) > ## - should work for object with NA in the coefficients, and > ## - should work for object with NA in the observations --> both in ./NAcoef.R > > ## check equality with lm() for classical model > test <- function(formula, data, + items=c("coefficients", "residuals", "df", "scale", + "r.squared", "adj.r.squared", "weights"), + tol = 1e-4, ...) + { + lmrCtrl <- lmrob.control(psi = "hampel", tuning.psi = c(1000, 2000, 3000), + method="SMDM", ...) + sc <- summary(lm (formula, data)) + sr <- summary(lmrob(formula, data, control= lmrCtrl)) + names(sc)[names(sc) == "sigma"] <- "scale" + if(sc$df[1] == 0 && getRversion() <= "3.5.1" && as.numeric(R.version$`svn rev`) < 74993) + ## in the past, lm() returned logical empty matrix + storage.mode(sc$coefficients) <- "double" + ret <- all.equal(sc[items], sr[items], tolerance=tol) + if (!isTRUE(ret)) { + print(sr) + for (i in seq_along(items)) { + print(sc[items[i]]) + print(sr[items[i]]) + } + print(ret) + stop(sprintf("all.equal(sc[items], sr[items], tol.. 
= %g) are not all TRUE", + tol)) + } + ret + } > > set.seed(101) > > test(Y ~ 0, coleman, c("residuals", "df", "coefficients", + "r.squared", "adj.r.squared"), tol=1e-10) [1] TRUE > test(Y ~ 1, coleman, tol = 2e-4) [1] TRUE > test(Y ~ ., coleman, tol = 4e-4) [1] TRUE > test(Y ~ . - 1, coleman, tol = 4e-4) [1] TRUE > > > proc.time() user system elapsed 0.277 0.039 0.311 robustbase/tests/huber-etc.R0000644000176200001440000000230710377104055015572 0ustar liggesuserslibrary(robustbase) ### Test sets (all kinds odd/even, constant/regular/outlier) ## n = 0,1,2,3 : x0 <- numeric(0) x1 <- 3 x2 <- 1:2 x3 <- c(1:2,10) ## constant (0 mad) + 0--2 outliers xC <- rep(1, 12) xC. <- rep(1, 11) xC1 <- c(xC, 10) xC1. <- c(xC., 10) xC2 <- c(xC1, 100) xC2. <- c(xC1., 100) ## "uniform" + 0--2 outliers y <- 1:10 y. <- 1:11 y1 <- c(y, 100) y1. <- c(y., 100) y2 <- c(y1, 1000) y2. <- c(y1., 1000) nms <- ls(pat="^[xy]"); nms; names(nms) <- nms lx <- lapply(nms, function(n) { x <- get(n) m <- mad(x) hx <- if(!is.na(m) && m > 0) MASS::huber(x) else list(m=NA, s=NA) hMx <- huberM(x) list(loc = c(median = median(x), huber = hx$m, huberM = hMx$m), scale= c(mad = m, huber = hx$s, huberM = hMx$s)) }) r <- list(mu = sapply(lx, function(x) x$loc), s = sapply(lx, function(x) x$scale)) r cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/psi-rho-etc.R0000644000176200001440000001464712271657124016065 0ustar liggesusersrequire(robustbase) ## see also ./lmrob-psifns.R <<<<<<<< source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) EQ <- function(x,y) all.equal(x,y, tolerance = 1e-13) ## Demonstrate that one of tukeyChi() / tukeyPsi1() is superfluous x <- seq(-4,4, length=201) suppressWarnings(## as tukeyPsi1(), tukeyChi() are deprecated for(c. in c(0.1, 1:2, pi, 100)) { ix <- abs(x) != c. stopifnot(EQ(tukeyChi(x, c.), 6/c.^2* tukeyPsi1(x, c., deriv=-1)), EQ(tukeyChi(x, c., deriv= 1), 6/c.^2* tukeyPsi1(x, c., deriv= 0)), EQ(tukeyChi(x, c., deriv= 2), 6/c.^2* tukeyPsi1(x, c., deriv= 1)), ## Now show equivalence with Mpsi(): EQ(tukeyPsi1(x, c.), Mpsi(x, c., "tukey")), EQ(tukeyPsi1(x, c., d=1), Mpsi(x, c., "tukey", d=1)), EQ(tukeyPsi1(x[ix], c., d=2), Mpsi(x[ix], c., "tukey", d=2)) ) } ) ## Test if default arguments are used h2Psi <- chgDefaults(huberPsi, k = 2) x <- 1:10 stopifnot(h2Psi@ rho(x, k=2) == h2Psi@ rho(x), h2Psi@ psi(x, k=2) == h2Psi@ psi(x), h2Psi@Dpsi(x, k=2) == h2Psi@Dpsi(x), h2Psi@ wgt(x, k=2) == h2Psi@ wgt(x), h2Psi@Dwgt(x, k=2) == h2Psi@Dwgt(x)) ## Test default arguments for E... slots stopifnot(EQ(h2Psi@Erho (), 0.49423127328548), EQ(h2Psi@Epsi2(), 0.920536925636323), EQ(h2Psi@EDpsi(), 0.954499736103642)) stopifnot(EQ(1, huberPsi@psi(1, k = 1e16)), huberPsi@wgt(0.1591319494080224, 0.5 + 1/13) <= 1) ## both used to fail because of numeric instability in pmin2/pmax2 f1 <- function(.) rep.int(1, length(.)) F1 <- function(x, .) rep.int(1, length(x)) ## correct "classical psi": cPs <- psiFunc(rho = function(x,.) x^2 / 2, psi = function(x, .) x, wgt = F1, Dpsi = F1, Erho = function(.) rep.int(1/2, length(.)), Epsi2 = f1, EDpsi = f1, . = Inf) validObject(cPs); cPs ## incorrect dummy psi cP <- psiFunc(rho = F1, psi = F1, wgt = F1, Dpsi = F1, Erho = f1, Epsi2 = f1, EDpsi = f1, . 
= Inf) cP ## Check the autogenerated Dwgt(): x <- seq(0,2, by=1/4) stopifnot(## strict symmetry { including Dwgt(0) == 0 } : huberPsi @Dwgt(-x) == -huberPsi @Dwgt(x), hampelPsi@Dwgt(-x) == -hampelPsi@Dwgt(x), huberPsi @Dwgt(x)[x < 1.345] == 0, hampelPsi@Dwgt(x)[x < 1.487] == 0, EQ(huberPsi @Dwgt(x[x >= 1.5]), c(-0.597777777777778, -0.439183673469388, -0.33625)), EQ(hampelPsi@Dwgt(x[x >= 1.5]), c(-0.660883932259397, -0.485547378802822, -0.371747211895911)) ) .defDwgt <- robustbase:::.defDwgt (ddd <- .defDwgt(psi = function(u, k) pmin.int(k, pmax.int(-k, u)), Dpsi = function(u, k) abs(u) <= k)) stopifnot(is.function(ddd), names(formals(ddd)) == c("u","k"), EQ(ddd(x, 1.345), huberPsi@Dwgt(x))) ## TODO: Provide some functionality of this as a Plot+Check function ## ---- and then call the function for all our psiFunc objects (with different 'k') kk <- c(1.5, 3, 8) psiH.38 <- chgDefaults(hampelPsi, k = kk) c1 <- curve(psiH.38@psi(x), -10, 10, n=512, col=2) abline(h=0, v=0, lty=3, lwd=.5, col="gray25") c2 <- curve(x * psiH.38@wgt(x), add=TRUE, n=512, col=adjustcolor("blue", .5), lwd=2) title("psi_Hampel_(1.5, 3, 8) : psi(x) = x * wgt(x)") axis(1, at=kk, expression(k[1], k[2], k[3]), pos=0) axis(2, at=kk[1], quote(k[1]), pos=0, las=1) stopifnot(all.equal(c1,c2, tolerance= 1e-15)) r1 <- curve(psiH.38@rho(x), -10, 10, col=2, main = quote(rho(x) == integral(phi(t) * dt, 0, x))) axis(1, at=kk, expression(k[1], k[2], k[3]), pos=0) curve(psiH.38@psi(x), add=TRUE, n=512, col=adjustcolor("blue", .5), lwd=2) abline(h=0, v=0, lty=3, lwd=.5, col="gray25") ## check rho(x) = \int_0^x psi(x) dx {slightly *more* than rho' = psi !} rhoH.38.int <- function(x) integrate(function(u) psiH.38@psi(u), 0, x, rel.tol=1e-10)$value r2 <- curve(sapply(x, rhoH.38.int), add = TRUE, lwd=4, col=adjustcolor("red", 1/4)) ## numerical integration == "formula" : stopifnot(all.equal(r1,r2, tolerance=1e-10)) curve(psiH.38@Dpsi(x), -10, 10, n=512, col=2, main = quote(psi*minute(x))) abline(h=0, v=0, lty=3, lwd=.5, col="gray25") ## check rho'(x) = phi(x) etc {TODO: for all our psiFun.} head(xx <- seq(-10, 10, length=1024)) FrhoH.38 <- splinefun(xx, rho.x <- psiH.38@rho (xx)) FpsiH.38 <- splinefun(xx, psi.x <- psiH.38@psi (xx)) F1psH.38 <- splinefun(xx, Dps.x <- psiH.38@Dpsi(xx)) curve(FpsiH.38(x, deriv=1), -10,10, n=512) curve(F1psH.38, add=TRUE, col=4, n=512) stopifnot(all.equal(FpsiH.38(xx, deriv=1), Dps.x, tolerance = 0.02))# not better because of discontinuities curve(FrhoH.38(x, deriv=1), -10,10, n=512) curve(FpsiH.38, add=TRUE, col=4, n=512) stopifnot(all.equal(FrhoH.38(xx, deriv=1), psi.x, tolerance = 1e-4)) E.norm <- function(FUN, tol=1e-12, ...) { integrate(function(x) FUN(x) * dnorm(x), -Inf, Inf, rel.tol=tol, ...)$value } ##' asymptotic efficiency -- both integrate + "formula"(@Epsi, @EDpsi) version aeff.P <- function(psiF, k, ...) { stopifnot(is(psiF, "psi_func")) if(!missing(k)) psiF <- chgDefaults(psiF, k = k) ## E[ psi'(X) ] ^2 / E[ psi(X) ^ 2 ] : c(int = E.norm(psiF@Dpsi, ...)^2 / E.norm(function(x) psiF@psi(x)^2, ...), form= psiF@EDpsi()^2 / psiF@Epsi2()) } ## Breakdown Point --- for redescenders only, ## both integrate + "formula"(@Erho) version bp.P <- function(psiF, k, ...) 
{ stopifnot(is(psiF, "psi_func")) if(!missing(k)) psiF <- chgDefaults(psiF, k = k) if(!is.finite( rhoInf <- psiF@rho(Inf) )) stop("rho(Inf) is not finite: ", rhoInf) integ <- function(x) psiF@rho(x) c(int = E.norm(integ, ...), form= psiF@Erho()) / rhoInf } ## Print & Check the result of aeff.P() or bp.P() chkP <- function(rp, tol = 1e-9) { print(rp) ae <- all.equal(rp[[1]], rp[[2]], tolerance=tol) if(isTRUE(ae)) invisible(rp) else stop(ae) } chkP(aeff.P(huberPsi)) chkP(aeff.P(huberPsi, k = 1.5)) chkP(aeff.P(huberPsi, k = 2)) chkP(aeff.P(huberPsi, k = 2.5)) chkP(aeff.P(hampelPsi)) chkP(aeff.P(hampelPsi, k = c(1.5, 3, 8))) chkP(aeff.P(hampelPsi, k = c(2, 4, 8), tol=1e-10),# fails with tol=1e-11 tol = 1e-4) ## Now works too: chkP(bp.P(hampelPsi)) chkP(bp.P(hampelPsi, k = c(1.5, 3, 8))) chkP(bp.P(hampelPsi, k = c(2, 4, 8))) ## test derivatives (adapted from ./lmrob-psifns.R) head(x. <- seq(-5, 10, length=1501)) ## [separate lines, for interactive "play": ] stopifnot(chkPsiDeriv(plot(huberPsi, x.))) ## ToDo: improve accuracy of derivative check stopifnot(chkPsiDeriv(plot(hampelPsi, x.), tol=c(1e-4, 1e-1))) robustbase/tests/Qn-Sn-plots.R0000644000176200001440000000524114055507253016013 0ustar liggesuserslibrary(robustbase) ### Back-compatibility check of consistency & finite sample correction for Qn() : set.seed(153) x <- sort(c(rnorm(80), rt(20, df = 1))) ix <- c(27, 57, 15, 1, 26, 13, 23, 70, 9, 54, 6, 12, 8, 80, 11, 69, 41, 53, 10, 37, 50, 21, 48, 51, 71, 17, 30, 16, 45, 28, 55, 5, 59, 77, 61, 40, 63, 42, 72, 66) QnA. <- c(0, 0.72307295, 1.2926498, 1.596857, 1.0979815, 0.84209457, 1.0719335, 0.88620416, 1.0905118, 0.99056842, 1.2229216, 1.0626517, 1.1738174, 1.1433873, 1.2071829, 1.1562513, 1.2182886, 1.1587793, 1.1585524, 1.1555462, 1.1376428, 1.0532134, 1.0447343, 1.0200998, 1.0495224, 1.0120569, 1.0094172, 0.9749928, 0.9530458, 0.92767184, 0.90922667, 0.98987601, 0.98223857, 1.0053697, 0.98792848, 0.951908, 0.92226488, 0.92312857, 0.92313406, 0.92733413) QnAx <- sapply(seq_along(ix), function(n) Qn(x[ix[1:n]])) stopifnot( all.equal(QnA., QnAx) ) ### -------------- Plots ----------------------------------------------- if(!dev.interactive(orNone=TRUE)) pdf("Qn-Sn-plots.pdf") plot(QnA., type="o", main="Qn()") abline(h = 1, lty=2) n <- 1:50 (qnn <- sapply(n, function(n)Qn(1:n, const=1))) plot(n, qnn, type = 'b', col = 2, ylab = "Qn", main = "Qn(1:n) [unscaled]") (snn <- sapply(n, function(n)Sn(1:n, const=1))) plot(n, snn, type = 'b', col = 2, ylab = "Sn", main = "Sn(1:n) [unscaled]") matplot(n, cbind(qnn, snn), type = 'b', ylab = "Qn & Sn", main = "Qn(1:n) & Sn(1:n) [unscaled]") legend("topleft", c("Qn", "Sn"), col=1:2, lty=1:2, bty="n", pch=paste(1:2)) (sdn <- c(1, sapply(n[-1], function(n)sd(1:n)/n))) ## sd(1) => NA for(Sample in c(function(n) ppoints(n), function(n) qnorm(ppoints(n)))) { ##mult.fig(2) : op <- par(mfrow=c(2,1), mgp = c(1.5,.6,0), mar = .1 + c(4,4,2,1)) for(N in c(50, 200)) { n <- 1:N sdn <- c(1, sapply(n[-1], function(m)sd(Sample(m)))) r <- cbind(Qn = sapply(n, function(m)Qn(Sample(m))), Sn = sapply(n, function(m)Sn(Sample(m)))) / sdn matplot(n, r, type = 'b', col = 2:3, lty = 1, ylab = "Qn & Sn", main = "Qn(Sample(n)) & Sn(..) [consistently scaled]") legend(.85*N, 0.4, c("Qn()", "Sn()"), col = 2:3, lty = 1, pch = paste(1:2)) abline(h=1, col = "gray", lty = 2) } par(op) ## Hmm, the above does not look 100% consistent to *my* eyes... 
## Investigate: matplot(n, r, ylim = c(0.9, 1.1), type = 'b', col = 2:3, lty = 1) abline(h=1, col = "gray", lty = 2) matplot(n, r^2, ylim = c(0.7, 1.3), type = 'b', col = 2:3, lty = 1) abline(h=1, col = "gray", lty = 2) } rownames(r) <- paste(n) r robustbase/tests/LTS-specials.R0000644000176200001440000000351113774561564016175 0ustar liggesusers#### Test special cases for ltsReg() library(robustbase) ## Platform - and other such info -- so we find it in old saved outputs .libPaths() SysI <- Sys.info() structure(Sys.info()[c(4,5,1:3)], class="simple.list") library(lib.loc = .libPaths()[1]) # the "R CMD check specific tmp-library" sessionInfo() c(robustbase = packageDescription("robustbase")$Built, DEoptimR = packageDescription("DEoptimR")$Built) if(SysI[["sysname"]] == "Linux" && require("sfsmisc")) local({ nn <- names(.Sc <- sfsmisc::Sys.cpuinfo()) nn <- names(.Sc <- .Sc[nn != "flags"]) print(.Sc[grep("\\.[0-9]$", nn, invert=TRUE)]) }) ### 1) p = 1 ---------------------------------------------------- set.seed(1) x <- c(rnorm(50),100, 1e10) (r1 <- ltsReg(x ~ 1)) # failed in Valentin's 1.0-3 (pre-version) summary(r1) (r1. <- ltsReg(y = x)) i1 <- 15:17; ii <- (1:20)[-i1] UN <- function(lis) lapply(lis, unname) dimnames(r1.$X)[1] <- dimnames(r1$X)[1] stopifnot(all.equal( r1[ii], r1.[ii], tolerance= 1e-15), all.equal(UN(r1[i1]), UN(r1.[i1]), tolerance= 1e-15)) ## intercept=FALSE, p > 1 -- coefficients were switched once n <- 100; theta <- c(x=10, x2=40) set.seed(7) X <- cbind(x = rt(n, 4), x2 = rnorm(n)) dat <- data.frame(X, y = X %*% theta + rt(n, df=3)/10) summary(M <- ltsReg(y ~ . -1, data = dat)) stopifnot(all.equal(coef(M), theta, tolerance = 1e-3)) ## with alpha = 1 (r1.1 <- ltsReg(x ~ 1, alpha = 1)) summary(r1.1) ### 1b) p = 1, constant scale (rc <- ltsReg(y = rep(1,12))) str(rc) summary(rc) ## with alpha = 1 (rc1 <- ltsReg(y = rep(1,12), alpha = 1)) summary(rc1) stopifnot(residuals(rc) == 0, all.equal(unname(coef(rc )), 1), residuals(rc1) == 0, all.equal(unname(coef(rc1)), 1)) ### 2) alpha = 1 : classical estimates --- for general cases -------- cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/large-values.R0000644000176200001440000000530514000141110016260 0ustar liggesusers### Have had cases where differences between large numbers lose precision, or even give Inf, ### which lead to NA require(robustbase) stopifnot(exprs = { all.equal(scaleTau2(c(-4:4, 10000), consistency=FALSE), (scaleTau2(c(-4:4, 1e300), consistency=FALSE) -> sT), # <- gave NaN, now fine ! 
tol = 1e-15) # even 0 (exact equality; Linux 64b) all.equal(3.41103800034854, sT, tol = 1e-14) # seen 6.5e-16 }) mkMx <- function(M, ngood = 10, left = floor(ngood/3)) { stopifnot(is.numeric(ngood), ngood >= 3, is.numeric(M), length(M) == 1L, M >= 1000, is.numeric(left), 0 <= left, left <= ngood) right <- ngood-left res <- list( c(rep(-M, left), seq_len(ngood - 1L), rep(M, right)) # < 50% "good" , c(rep(-M, left), seq_len(ngood ), rep(M, right)) # half "good" , c(rep(-M, left), seq_len(ngood + 1L), rep(M, right)) # > 50% "good" ) nM <- gsub("[-+]", "", formatC(M, digits=2, width=1)) names(res) <- paste0("M", nM,"_n", c("m1", "eq", "p1")) res } exL <- c( list( xNA = c(NA, 1:6) , xMe9 = c(-4:4, 1e9) , xM = c(-4:4, .Machine$double.xmax) , xI = c(-4:4, Inf) , IxI = c(-Inf, -4:4, Inf) , IxI2 = c(-Inf, -4:4, Inf,Inf)) ## , mkMx(M = .Machine$double.xmax) , mkMx(M = 1e6) , mkMx(M = 1e9) , mkMx(M = 1e12) , mkMx(M = 1e14) , mkMx(M = 1e16) , mkMx(M = 1e20) , mkMx(M = 1e40) ) madL <- vapply(exL, mad, pi) ## Initially, scaleTau2() "works" but gives NaN everywhere -- now fine! sT2.L <- vapply(exL, scaleTau2, FUN.VALUE=1, sigma0 = 1, consistency=FALSE) sT2.i2.L <- vapply(exL, scaleTau2, FUN.VALUE=1, sigma0 = 1, consistency=FALSE, iter = 2) sT2.i5.L <- vapply(exL, scaleTau2, FUN.VALUE=1, sigma0 = 1, consistency=FALSE, iter = 5) cbind(madL, sT2.L) stopifnot(exprs = { is.na(madL [1]) is.na(sT2.L[1]) 2.3 < sT2.L[-1] sT2.L[-1] < 2.71 }) xI <- exL$xI stopifnot(exprs = { mad(exL$xI, constant = 1) == 2.5 }) ## FIXME: should not give NaN : scaleTau2(xI) ## FIXME: even give Error in ..... : NA/NaN/Inf in foreign function call (arg 1) try( Sn(xI) ) try( Qn(xI) ) ## From example(mc) {by MM} : ## Susceptibility of the current algorithm to large outliers : dX10 <- function(X) c(1:5,7,10,15,25, X) # generate skewed size-10 with 'X' (Xs <- c(10,20,30, 60, 10^(2:10), 1000^(4:19), 1e6^c(10:20,10*(3:5)), Inf)) (mc10x <- vapply(Xs, function(x) mc(dX10(x)), 1)) plot(Xs, mc10x, type="b", main = "mc( c(1:5,7,10,15,25, X) )", xlab="X", log="x") ##--- FIXME: the above must change! ## so, Inf does work, indeed for mc() dX10(Inf) set.seed(2020-12-04) stopifnot(exprs = { is.finite(mc(dX10(Inf))) # 0.5 currently mc(c(-Inf, rlnorm(100), Inf)) == 0 }) robustbase/tests/lmrob-ex12.R0000644000176200001440000001637114052244671015615 0ustar liggesusers library(robustbase) source(system.file("test-tools-1.R", package="Matrix", mustWork=TRUE)) ##-> assertError(), etc set.seed(1) # since now .Random.seed is used by default! ## EX 1 data(coleman) ## "empty model" (not really a lot of sense) (m0 <- lmrob(Y ~ 0, data = coleman)) summary(m0) stopifnot(is.numeric(coef(m0)), length(coef(m0)) == 0, residuals(m0) == coleman[,"Y"]) ## "Intercept" only: robust mean (m1 <- lmrob(Y ~ 1, data = coleman)) summary(m1) stopifnot(all.equal(coef(m1), c("(Intercept)" = 35.56048875388), tol = 1e-11)) (mC <- lmrob(Y ~ ., data = coleman, control = lmrob.control(refine.tol = 1e-8, rel.tol = 1e-9))) summary(mC) ## Values will change once we use R's random number generator ! stopifnot( all.equal(unname(coef(mC)), c(30.50232, -1.666147, 0.08425381, 0.6677366, 1.167777, -4.136569), tolerance = 2e-7)# 6.112 e-8 (32-b) ) dput(signif(unname(coef(mC)), 7)) ## 64b(0.2-0): c(30.50232, -1.666147, 0.08425381, 0.6677366, 1.167777, -4.136569) ## 32b(0.2-0): "exactly" same ! 
## Full precision: dput(unname(coef(mC))) ## 2012-06-04: ## 32-bit:c(30.5023184450149, -1.66614687548007, 0.0842538074792178, 0.667736590070332, 1.16777744029117, -4.13656885405815) ## 64-bit:c(30.5023184450148, -1.66614687548008, 0.0842538074792178, 0.667736590070332, 1.16777744029117, -4.13656885405814) ## ## 32-bit:c(30.5023183940104, -1.66614687550933, 0.0842538074635567, 0.667736589938547, 1.16777744089398, -4.13656884777543) ## 64-bit:c(30.5023184150851, -1.66614687537736, 0.0842538074722959, 0.667736589980183, 1.16777744061092, -4.1365688503035) str(mC) ## EX 2 gen <- function(n,p, n0, y0, x0, beta = rep(1, p)) { stopifnot(n >= 1, p >= 1, n0 >= 0, length(beta) == p) x <- matrix(rnorm(n*p),n,p) # iid x's y <- x %*% beta + rnorm(n) xc <- matrix(0,n0,p) xc[,1] <- x0 xc <- xc + 0.1*matrix(rnorm(n0*p),n0,p) x[1:n0,] <- xc y[1:n0] <- y0 + .001*rnorm(n0) list(x=x, y=y) } ## generate --a sample of n observations with p variables ## and 10% of outliers near (x1,y) = (10,10) n <- 500 ; n0 <- n %/% 10 p <- 7 ## p = 20 is more impressive but too slow for "standard test" set.seed(17) a <- gen(n=n, p=p, n0= n0, y0=10, x0=10) plot(a$x[,1], a$y, col = c(rep(2, n0), rep(1, n-n0))) system.time( m1 <- lmrob(y~x, data = a, control = lmrob.control(compute.rd = TRUE, trace.lev=4))) plot(m1, ask=FALSE) ##-> currently 5 plots; MM:I don't like #3 (Response vs fitted) S1 <- m1$init.S resS1 <- drop(a$y - model.matrix(m1, data=a) %*% coef(S1)) all.equal(S1$residuals, resS1)## hmm, but still close ## "Mean relative difference: 2.655326e-07" ctr.t3 <- lmrob.control(trace.lev = 3) (mS <- lmrob.S(x=a$x, y=residuals(S1), only.scale=TRUE, control = ctr.t3)) all.equal(S1$scale, mS) ## "Mean relative difference: 0.003015849" -- too different, why? (mS. <- lmrob.S(x=a$x, y=resS1, only.scale=TRUE, control = ctr.t3)) all.equal(mS, mS., tol=0)# 2.401 e -10 -- ok/perfect stopifnot(all.equal(mS, mS.), all.equal(mS, S1$scale, tol = 0.008)) # at least that ## don't compute robust distances --> faster by factor of two: system.time(m2 <- lmrob(y~x, data = a, control = lmrob.control(compute.rd = FALSE))) ## ==> half of the CPU time is spent in covMcd()! (sm2 <- summary(m2)) l1 <- lm(y~x, data = a) cbind(robust = coef(sm2)[,1:2], lm = coef(summary(l1))[,1:2]) m2.S1 <- with(a, lmrob.S(cbind(1,x), y, trace.lev = 2, ## trace.lev = 2 : quite a bit of output control= lmrob.control(seed = .Random.seed, nRes = 80, k.max = 20, refine.tol = 1e-4))) S.ctrl <- lmrob.control(seed = .Random.seed,## << keeps .Random.seed unchanged nResample = 1000, best.r.s = 15, refine.tol = 1e-9) m2.S <- with(a, lmrob.S(cbind(1,x), y, control = S.ctrl, trace.lev = 1)) str(m2.S) ##--- Now use n > 2000 --> so we use C internal fast_s_large_n(...) n <- 2500 ; n0 <- n %/% 10 a2 <- gen(n=n, p = 3, n0= n0, y0=10, x0=10) plot(a2$x[,1], a2$y, col = c(rep(2, n0), rep(1, n-n0))) rs <- .Random.seed system.time( m3 <- lmrob(y~x, data = a2) ) m3 nrs <- .Random.seed # <-- to check that using 'seed' keeps .Random.seed system.time( m4 <- lmrob(y~x, data = a2, seed = rs, compute.rd = FALSE)) (sm4 <- summary(m4)) ## random seed must be the same because we used 'seed = *' : stopifnot(nrs == .Random.seed, identical(coef(m3), coef(m4))) dput(signif(cf <- unname(coef(m3)), 7)) ## 2012-06-04:c(-0.05108914, 1.005971, 1.003201, 0.9833263) - 32 AND 64 bit ## ## 0.2-0: c(0.007446546, 1.000712, 1.027921, 0.9896527) ## 0.2-1: c(0.03148659, 0.9980933, 1.016364, 1.03243) ## both for 32 and 64 bit dput(signif(100 * (sd <- unname(coef(sm4)[, "Std. 
Error"])), 7)) ## 2012-06-04:c(2.213815, 0.2864678, 2.202318, 2.180886) - 32 AND 64 bit ## ## 0.2-0: c(2.219388, 0.274644, 2.196982, 2.26253) ## 0.2-1: c(2.194914, 0.2737579, 2.371728, 2.206261) ## both for 32 and 64 bit stopifnot( all.equal(cf, c(-0.05108914, 1.00597115, 1.00320052, 0.98332632), tolerance= 7e-7) , # ... e-7 needed on 64b all.equal(100*sd,c(2.2138147, 0.2864678, 2.2023182, 2.1808862),tolerance= 7e-7) ) # 1.334 e-7 needed on 64b cat('Time elapsed: ', proc.time(),'\n') # "stats" ## rm(a,m1, m2, m3, m4, sm2, l1) ## Small examples from R-SIG-robust ## First example from René Locher : dat1 <- data.frame(lconc= log(c(21.8, 23.7, 12.2, 38.5, 21, 38.9)), dist = c( 100, 180, 280, 30, 220, 6)) m5 <- lmrob(lconc ~ dist, data = dat1) ## Warning messages: ## ... S refinements did not converge (to tol=1e-07) in 200 iterations ## " " " m5$init.S$converged # FALSE m5. <- lmrob(lconc ~ dist, data = dat1, control = lmrob.control(refine.tol = 1e-5)) m5.$init.S$converged # TRUE ## gives TRUE as the IRWLS iterations after the lmrob.S() have converged. ## 2nd example from René Locher , 6 Jun 2007 dat2 <- data.frame(lconc=log(c(29.5,40.1,21.1,25.3,27.3,25.2,26.9,19.1,16.4)), dist = c(520, 1480,1780, 740, 540,1050,1100,1640,1860)) res2 <- lmrob(lconc~dist, data = dat2) ## Used to give Warning messages: ## 1: rwls(): not converged in 1000 lambda iterations ## ... ## 4: rwls(): ............ res2 <- lmrob(lconc~dist, data = dat2, trace.lev = 3) ## ------------- summary(res2) stopifnot(dim(model.matrix(res2)) == c(9,2)) ## Check predict(): dd <- seq(300, 2000, by = 50) with(dat2, plot(dist, lconc, pch=20, cex=2, xlim = range(dd))) new.d <- data.frame(dist=dd) fit.dd <- predict(res2, new.d) lines(dd, fit.dd, col=2, type="o") predict(res2, new.d, se=TRUE)$se.fit matlines(dd, predict(res2, new.d, interval="confidence")[, 2:3], col=3) ## Check handling of X of not full rank test <- function(n, ...) 
{ X <- cbind(rep_len(1:3, n), rnorm(n), rnorm(n), NA) y <- rnorm(n) X[,4] <- X[,2] + X[,3] X <- data.frame(X) X$X1 <- factor(X$X1) fm <- tryCatch(suppressWarnings(lmrob(y ~ ., X, ...)), error=identity) stopifnot(inherits(fm, "lmrob")) } set.seed(0) test(12) ## fast_S() test(2500) ## fast_S_large_n() test(200, trace.lev = TRUE) ## Check a case, where cov() matrix needs "posdefify": coleman16 <- coleman[ -c(2, 7, 16, 19),] (m16 <- lmrob(Y ~ ., data = coleman16, tuning.psi = 3.44, trace.lev = TRUE)) ## failed in 0.9_0 assertWarning(verbose = TRUE, lmrob(Y ~ ., data = coleman, setting = "KS2011", control = lmrob.control()) ) cat('Time elapsed: ', proc.time(),'\n') # "stats" robustbase/tests/OGK-ex.R0000644000176200001440000000300012271657124014742 0ustar liggesuserslibrary(robustbase) ## minimal testing only data(ruspini, package = "cluster") rub1 <- covOGK(ruspini, 1, scaleTau2, covGK, hard.rejection, consistency=FALSE) rub2 <- covOGK(ruspini, 2, scaleTau2, covGK, hard.rejection, consistency=FALSE) AE <- function(x,y) all.equal(x,y, tolerance = 2e-15) ## The following test is already fulfilled by Kjell Konis' original code: stopifnot(AE(c(rub1$wcov)[c(1,3:4)], c(917.99893333333, 94.9232, 2340.319288888888)), all.equal(rub1$wcov, rub2$wcov, tolerance=0) , AE(c(rub1$cov)[c(1,3:4)], c(923.5774514441657, 91.5385216376565, 2342.4556232436971)) , AE(c(rub2$cov)[c(1,3:4)], c(927.2465953711782, 91.8009184487779, 2346.5790105548940)) ) data(milk) cM1 <- covOGK(milk, 1, sigmamu = scaleTau2, weight.fn = hard.rejection) cM2 <- covOGK(milk, 2, sigmamu = scaleTau2, weight.fn = hard.rejection) symnum(cov2cor(cM1 $cov)) symnum(cov2cor(cM2 $cov)) symnum(cov2cor(cM1 $wcov)) symnum(cov2cor(cM2 $wcov)) cMQn <- covOGK(milk, sigmamu = s_Qn, weight.fn = hard.rejection) cMSn <- covOGK(milk, sigmamu = s_Sn, weight.fn = hard.rejection) cMiqr <- covOGK(milk, sigmamu = s_IQR, weight.fn = hard.rejection) cMmad <- covOGK(milk, sigmamu = s_mad, weight.fn = hard.rejection) as.dist(round(cov2cor(cMQn$wcov), 3)) as.dist(round(cov2cor(cMSn$wcov), 3)) as.dist(round(cov2cor(cMiqr$wcov), 3)) as.dist(round(cov2cor(cMmad$wcov), 3)) cat('Time elapsed: ', proc.time(),'\n') # for ``statistical reasons'' robustbase/tests/small-sample.R0000644000176200001440000000216111757675047016322 0ustar liggesuserslibrary(robustbase) ## testing functions: source(system.file("xtraR/ex-funs.R", package = "robustbase")) set.seed(152) Nmax <- 12 nn <- length(nset <- c(2:Nmax, 20, 50))## NOTA BENE: n == 1 etc are NOT YET TREATED! Sim <- 2^9 # = 512 sn <- qn <- numeric(Sim) cpu <- numeric(nn) names(cpu) <- as.character(nset) for(n in nset) { nS <- Sim ## if(n < 20) Sim else round(10*Sim/n) cat("\nn = ",n,"\n------\nno.Sim. = ",nS,"\n") cpu[as.character(n)] <- system.time(for(i in 1:nS) { x <- rnorm(n) sn[i] <- Sn0R(x) qn[i] <- Qn0R(x) Sn.x <- Sn(x, const = 1) Qn.x <- Qn(x, const = 1) if(!is.all.equal(Sn.x, sn[i], tol = 1e-5)) cat("i=",i," Sn() != Sn0R(): ", Sn.x, "!=", sn[i],"\n") if(!is.all.equal(Qn.x, qn[i], tol = 1e-5)) cat("i=",i," Qn() != Qn0R(): ", Qn.x, "!=", qn[i],"\n") })[1] cat("Mean and its. std.err; Quartiles of Sn(x_1 .. 
x_n) and Qn(...):\n") print(c(mean(sn), sd(sn)/sqrt(nS), quantile(sn, p = (1:3)/4))) print(c(mean(qn), sd(qn)/sqrt(nS), quantile(qn, p = (1:3)/4))) } rbind("Time (CPU) used:" = summary(cpu)) robustbase/src/0000755000176200001440000000000014124272534013216 5ustar liggesusersrobustbase/src/qn_sn.c0000644000176200001440000003533614004270422014501 0ustar liggesusers/* * Copyright (C) 2005--2007, 2021 Martin Maechler, ETH Zurich * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* This is a merge of the C version of original files qn.f and sn.f, * translated by f2c (version 20010821). ==== ==== * and then by f2c-clean,v 1.9 2000/01/13 13:46:53 * and further clean-edited manually by Martin Maechler. * * Further added interface functions to be called via .C() from R or S-plus * Note that Peter Rousseeuw has explicitely given permission to * use his code under the GPL for the R project. */ /* Original comments by the authors of the Fortran original code, * (merged for Qn & Sn in one file by M.M.): This file contains fortran functions for two new robust estimators of scale denoted as Qn and Sn, decribed in Rousseeuw and Croux (1993). These estimators have a high breakdown point and a bounded influence function. The implementation given here is very fast (running in O(n logn) time) and needs little storage space. Rousseeuw, P.J. and Croux, C. (1993) Alternatives to the Median Absolute Deviation", Journal of the American Statistical Association, Vol. 88, 1273-1283. For both estimators, implementations in the pascal language can be obtained from the original authors. This software may be used and copied freely for scientific and/or non-commercial purposes, provided reference is made to the abovementioned paper. Note by MM: We have explicit permission from P.Rousseeuw to licence it under the GNU Public Licence. See also ../inst/Copyrights */ #include /* ^^^^^^^^^^ is supposedly more common and standard than * #include * or #include */ /* --> int64_t ; if people don't have the above, they can forget about it.. */ /* #include "int64.h" */ #include /* -> and much more */ /* Interface routines to be called via .C() : */ #include "robustbase.h" /* ----------------- Further Declarations ------------------------------ */ /* sn0() and qn0() --- but also mc_C() in ./mc.c * ----- ---- ------ use pull(a,n,k): finds the k-th order statistic of an array a[] of length n (preserving a[]) */ /* whimed_i(a,iw,n): finds the weighted high median of an array a[] of length n, with positive int weights iw[] (using auxiliary arrays acand[], a_srt[] & iw_cand[] all of length n). 
*/ /* qn0() uses (and for C API:) */ /* Main routines for C API */ double qn(double *x, int n, int h, int finite_corr); double sn(double *x, int n, int is_sorted, int finite_corr); /* these have no extra factors (no consistency factor & finite_corr): */ void qn0(const double x[], int n, const int64_t k[], int len_k, /* ==> */ double *res); double sn0(double *x, int n, int is_sorted, double *a2); /* ----------- Implementations -----------------------------------*/ // === called from R ( ../R/qnsn.R ) via .C() : void Qn0(double *x, Sint *n, double *k, Sint *len_k, double *res) { int l_k = (int)*len_k; // "hack" as R / .C() have no int_64 : copy k[] to int64 ik[]: int64_t *ik = (int64_t *) R_alloc(l_k, sizeof(int64_t)); for(int i=0; i < l_k; i++) ik[i] = (int64_t) k[i]; qn0(x, (int)*n, ik, l_k, res); } void Sn0(double *x, Sint *n, Sint *is_sorted, double *res, double *a2) { char *vmax = vmaxget(); *res = sn0(x, (int)*n, (int)*is_sorted, a2); #ifdef DEBUG_Sno REprintf("Sn0(* -> res=%g)\n", *res); #endif vmaxset(vmax); } void qn0(const double x[], int n, const int64_t k[], int len_k, /* ==> */ double *res) { /*-------------------------------------------------------------------- Efficient algorithm for the scale estimator: Q*_n = { |x_i - x_j|; i= 2) */ double *y = (double *)R_alloc(n, sizeof(double)); double *work = (double *)R_alloc(n, sizeof(double)); double *a_srt = (double *)R_alloc(n, sizeof(double)); double *a_cand = (double *)R_alloc(n, sizeof(double)); int *left = (int *)R_alloc(n, sizeof(int)); int *right = (int *)R_alloc(n, sizeof(int)); int *p = (int *)R_alloc(n, sizeof(int)); int *q = (int *)R_alloc(n, sizeof(int)); int *weight = (int *)R_alloc(n, sizeof(int)); const int64_t nn2 = (int64_t) n * (n + 1) / 2, // = choose(n+1, 2) n2 = (int64_t) n * n, // k >= k_L ==> right[] = n (and smaller k can start with smaller right[], hence more efficiently) /* NB: The correct k_L seems hard to find, till only from largish simulations and "regression fit": * 2021, Jan. 3 -- 19 : k_L = 2 + ((int64_t)(n-1) * (n-2))/2; * 2021, Jan. 19 -- 27, 11:13: k_L = ((int64_t)(n-1) * (n-2))/2; * 2021, Jan. 27 -- ... : from .../Pkg-ex/robustbase/Qn-multi-k-debugging.R bnd1 <- function(n) 5 - 1.75*(n %% 2) + (0.3939 - 0.0067*(n %% 2)) * n*(n-1) */ k_L = 5 - 1.75*(n % 2) + (0.3939 - 0.0067*(n % 2)) * (int64_t) n*(n-1) ; int h = n / 2 + 1; // really use, only to set right[] below ? for (int i = 0; i < n; ++i) y[i] = x[i]; R_qsort(y, 1, n); /* y := sort(x) */ #ifdef DEBUG_qn REprintf("qn0(x, n=%2d, .. |k|=%d): \n", n, len_k); #endif // k = (int64_t)h * (h - 1) / 2; for(int i_k=0; i_k < len_k; i_k++) { // compute k'th quantile k' := k[i_k] ------------------ /* Following should be `long long int' : they can be of order n^2 */ int64_t nl = nn2, nr = n2, knew = k[i_k] + nl;/* = k + (n+1 \over 2) */ #ifdef DEBUG_qn REprintf(" qn0(x, ..): nl,nr= %d, %d; k[%d] = %5d :\n", nl,nr, i_k, k[i_k]); #endif /* L200: */ Rboolean found = FALSE; double trial = R_NaReal;/* -Wall */ int i, j; for (int i = 0; i < n; ++i) left [i] = n - i + 1; if(k[i_k] >= k_L) { // for these large "quantiles", need "large" right[] boundary for (int i = 0; i < n; ++i) right[i] = n; } else { for (int i = 0; i < n; ++i) right[i] = (i <= h) ? n : n - (i - h); /* the n - (i-h) is from the paper (Cr. + Ro. '92 "Time-efficient"); original code had 'n' */ } while(!found && nr - nl > n) { j = 0; /* Truncation to float : try to make sure that the same values are got later (guard bits !) 
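	       (the comparisons against 'trial' further below apply the same
	        (float) cast to the differences, so a difference that equals
	        'trial' after rounding is classified consistently in both passes)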
*/ for (i = 1; i < n; ++i) { if (left[i] <= right[i]) { weight[j] = right[i] - left[i] + 1; int jh = left[i] + weight[j] / 2; work[j] = (float)(y[i] - y[n - jh]); ++j; } } trial = whimed_i(work, weight, j, a_cand, a_srt, /*iw_cand*/ p); #ifdef DEBUG_qn REprintf(" ..!found: whimed("); # ifdef DEBUG_long REprintf("wrk=c("); for(i=0; i < j; i++) REprintf("%s%g", (i>0)? ", " : "", work[i]); REprintf("),\n wgt=c("); for(i=0; i < j; i++) REprintf("%s%d", (i>0)? ", " : "", weight[i]); REprintf("), j= %3d) -> trial= %7g\n", j, trial); # else REprintf("j=%3d) -> trial= %g:", j, trial); # endif #endif j = 0; for (i = n - 1; i >= 0; --i) { while (j < n && ((float)(y[i] - y[n - j - 1])) < trial) ++j; p[i] = j; } #ifdef DEBUG_qn REprintf(" f_1: j=%2d", j); #endif j = n + 1; for (i = 0; i < n; ++i) { while ((float)(y[i] - y[n - j + 1]) > trial) --j; q[i] = j; } int64_t sump = 0, sumq = 0; for (i = 0; i < n; ++i) { sump += p[i]; sumq += q[i] - 1; } #ifdef DEBUG_qn REprintf(" f_2 -> j=%2d, sump|q= %lld,%lld; ", j, sump,sumq); #endif if (knew <= sump) { for (i = 0; i < n; ++i) right[i] = p[i]; nr = sump; #ifdef DEBUG_qn REprintf("knew <= sump =: nr , new right[]\n"); #endif } else if (knew > sumq) { for (i = 0; i < n; ++i) left[i] = q[i]; nl = sumq; #ifdef DEBUG_qn REprintf("knew > sumq =: nl , new left[]\n"); #endif } else { /* sump < knew <= sumq */ found = TRUE; #ifdef DEBUG_qn REprintf("sump < knew <= sumq ---> FOUND\n"); #endif } } /* while */ if (found) res[i_k] = trial; else { #ifdef DEBUG_qn REprintf(".. not fnd -> new work[]"); #endif j = 0; for (i = 1; i < n; ++i) { for (int jj = left[i]; jj <= right[i]; ++jj) { work[j] = y[i] - y[n - jj]; j++; }/* j will be = sum_{i=2}^n (right[i] - left[i] + 1)_{+} */ } #ifdef DEBUG_qn REprintf(" of length %d; k' = knew-nl = %d\n", j, knew-nl); #endif /* return pull(work, j - 1, knew - nl) : */ knew -= (nl + 1); /* -1: 0-indexing */ if(knew > j-1) { // see this happening when the quantile number k[i_k] is close to the right end! knew = j-1; #ifdef DEBUG_qn REprintf("knew >= j should never happen, setting it to j-1=%d\n", knew); #endif } else if(knew < 0) { knew = 0; #ifdef DEBUG_qn REprintf("knew < 0 should never happen, setting it to 0\n"); #endif } rPsort(work, j, knew); res[i_k] = work[knew]; } } // for(int i_k=0, i_k < len_k ...) { k_ = k[i_k] ; .... 
return; } /* qn0 */ #ifdef __never__ever__ /* NB: "old version" -- with *wrong* constant and "inaccurate" finite-sample corr * -- equivalent to Qn.old() in ../R/qnsn.R , see also ../man/Qn.Rd * * (This is *not* called from our R code anyway) */ double qn(double *x, int n, int finite_corr) { /* Efficient algorithm for the scale estimator: Qn = dn * 2.2219 * {|x_i-x_j|; i= n) containing the observations n : number of observations (n>=2) is_sorted: logical indicating if x is already sorted a2 : to contain a2[i] := LOMED_{j != i} | x_i - x_j |, for i=1,...,n */ /* Local variables */ double medA, medB; int i, diff, half, Amin, Amax, even, length; int leftA,leftB, nA,nB, tryA,tryB, rightA,rightB; int n1_2; if(!is_sorted) R_qsort(x, 1, n); a2[0] = x[n / 2] - x[0]; n1_2 = (n + 1) / 2; /* first half for() loop : */ for (i = 2; i <= n1_2; ++i) { nA = i - 1; nB = n - i; diff = nB - nA; leftA = leftB = 1; rightA = rightB = nB; Amin = diff / 2 + 1; Amax = diff / 2 + nA; while (leftA < rightA) { length = rightA - leftA + 1; even = 1 - length % 2; half = (length - 1) / 2; tryA = leftA + half; tryB = leftB + half; if (tryA < Amin) { rightB = tryB; leftA = tryA + even; } else { if (tryA > Amax) { rightA = tryA; leftB = tryB + even; } else { medA = x[i - 1] - x[i - tryA + Amin - 2]; medB = x[tryB + i - 1] - x[i - 1]; if (medA >= medB) { rightA = tryA; leftB = tryB + even; } else { rightB = tryB; leftA = tryA + even; } } } } /* while */ if (leftA > Amax) { a2[i - 1] = x[leftB + i - 1] - x[i - 1]; } else { medA = x[i - 1] - x[i - leftA + Amin - 2]; medB = x[leftB + i - 1] - x[i - 1]; a2[i - 1] = fmin2(medA,medB); } } /* second half for() loop : */ for (i = n1_2 + 1; i <= n - 1; ++i) { nA = n - i; nB = i - 1; diff = nB - nA; leftA = leftB = 1; rightA = rightB = nB; Amin = diff / 2 + 1; Amax = diff / 2 + nA; while (leftA < rightA) { length = rightA - leftA + 1; even = 1 - length % 2; half = (length - 1) / 2; tryA = leftA + half; tryB = leftB + half; if (tryA < Amin) { rightB = tryB; leftA = tryA + even; } else { if (tryA > Amax) { rightA = tryA; leftB = tryB + even; } else { medA = x[i + tryA - Amin] - x[i - 1]; medB = x[i - 1] - x[i - tryB - 1]; if (medA >= medB) { rightA = tryA; leftB = tryB + even; } else { rightB = tryB; leftA = tryA + even; } } } } /* while */ if (leftA > Amax) { a2[i - 1] = x[i - 1] - x[i - leftB - 1]; } else { medA = x[i + leftA - Amin] - x[i - 1]; medB = x[i - 1] - x[i - leftB - 1]; a2[i - 1] = fmin2(medA,medB); } } a2[n - 1] = x[n - 1] - x[n1_2 - 1]; return pull(a2, n, n1_2); } /* sn0 */ // C API version -- *not* called from our R code : double sn(double *x, int n, int is_sorted, int finite_corr) { /* Efficient algorithm for the scale estimator: Sn = cn * 1.1926 * LOMED_{i} HIMED_{j} |x_i-x_j| which can equivalently be written as Sn = cn * 1.1926 * LOMED_{i} LOMED_{j != i} |x_i-x_j|*/ double cn, r; double *a2 = (double *)R_alloc(n, sizeof(double)); r = 1.1926 * /* asymptotic consistency for sigma^2 */ sn0(x, n, is_sorted, a2); /* === */ cn = 1.; /* n >= 10 even, or no finite_corr[ection] */ if (finite_corr) { if (n <= 9) { if (n == 2) cn = 0.743; else if (n == 3) cn = 1.851; else if (n == 4) cn = 0.954; else if (n == 5) cn = 1.351; else if (n == 6) cn = 0.993; else if (n == 7) cn = 1.198; else if (n == 8) cn = 1.005; else if (n == 9) cn = 1.131; } else if (n % 2 == 1) /* n odd, >= 11 */ cn = n / (n - 0.9); } return cn * r; } /* sn */ /* pull(): auxiliary routine for (Qn and) Sn * ====== ======== ------------------------ */ double pull(double *a_in, int n, int k) { /* Finds the k-th 
order statistic of an array a[] of length n * -------------------- */ int j; double *a, ax; char* vmax = vmaxget(); a = (double *)R_alloc(n, sizeof(double)); /* Copy a[] and use copy since it will be re-shuffled: */ for (j = 0; j < n; j++) a[j] = a_in[j]; k--; /* 0-indexing */ rPsort(a, n, k); ax = a[k]; vmaxset(vmax); return ax; } /* pull */ /* Local variables section * Local variables: * mode: c * kept-old-versions: 12 * kept-new-versions: 20 * End: */ robustbase/src/rob-utils.c0000644000176200001440000000574212440116711015303 0ustar liggesusers/* * Copyright (C) 2014 Martin Maechler, ETH Zurich * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, a copy is available at * http://www.r-project.org/Licenses/ */ #include /* ^^^^^^^^^^ is supposedly more common and standard than * #include * or #include */ /* --> int64_t ; if people don't have the above, they can forget about it.. */ /* #include "int64.h" */ #include /* -> and much more */ // Interface routines to be called via .C(), .Call() : #include "robustbase.h" //-> , -> XLENGTH, R_xlen_t /* Smooth Weighting Function -- typically for computing weights from large distances * ------------------------- * \ <-- quartic polynomial here * \ * --------------------------- * In fact a 2-parameter generalization of Tukey's 1-parameter "biweight" * * --- see also psi, rho, ... utilities in ./lmrob.c */ double wgt_flex(double x, double c, double h) { double h2 = h/2.; x = fabs(x); if (x >= c+h2) return 0. ; if (x <= c-h2) return 1. ; // non-trivial {biweight like} down weighting: x = (x - (c-h2)) / h; // is in (0, 1) x = 1 - x*x; return x*x; // = (1 - ((|x| - (c - h/2))/ h)^2)^2 {in original 'x'} } SEXP R_wgt_flex(SEXP x_, SEXP c_, SEXP h_) { // TODO?: add , SEXP keep_attributes /* * Calculate Flexible weight function for vectorized x */ int nprot = 1; if (isInteger(x_)) { x_ = PROTECT(coerceVector(x_, REALSXP)); nprot++; } if (isInteger(c_)) { c_ = PROTECT(coerceVector(c_, REALSXP)); nprot++; } if (isInteger(h_)) { h_ = PROTECT(coerceVector(h_, REALSXP)); nprot++; } if (!isReal(x_)) error(_("Argument '%s' must be numeric or integer"), "x"); if (!isReal(c_) || LENGTH(c_) != 1) error(_("Argument '%s' must be numeric or integer of length 1"), "c"); if (!isReal(h_) || LENGTH(h_) != 1) error(_("Argument '%s' must be numeric or integer of length 1"), "h"); R_xlen_t i, n = XLENGTH(x_); SEXP res = PROTECT(allocVector(REALSXP, n)); // the result double *x = REAL(x_), *r = REAL(res), c = asReal(c_), h = asReal(h_); for(i = 0; i < n; i++) r[i] = ISNAN(x[i]) ? x[i] : wgt_flex(x[i], c, h); /* if(asLogical(keep_attributes)) { */ // do the "no exception" version of copyMostAttrib() in ..R/src/main/attrib.c /* } */ UNPROTECT(nprot); return res; } robustbase/src/lmrob.c0000644000176200001440000026102014123653602014474 0ustar liggesusers/* -*- mode: c; kept-new-versions: 40; kept-old-versions: 40 -*- * Indentation (etc) style: C-c . 
gnu */ /* file lmrob.c * was roblm/src/roblm.c - version 0.6 by Matias Salibian-Barreras * Includes the stable correct asymptotic variance estimators * of Croux, Dhaene, Hoorelbeke * Includes the fast-s algorithm */ /* Robust MM regression estimates * * ------------------------------ */ /* comment code * * * adapt other sampler <<<<<<<<<< R's random number generator !!!! * replace abort for too many singular resamples by * returning the number of singular ones */ /* MM: - Done: fast_s[_large_n]() both had FIXED seed (= 37), and effectively discarded the seed_rand argument below - Done: drop 'register' : today's compilers do optimize well! - Done: using Calloc() / Free() instead of malloc()/free() */ /* kollerma: Added alternative psi functions callable via psifun, chifun and wgtfun. ipsi is used to distinguish between the different types. The ipsi argument works for the S-estimator as well as for the MM-estimator. - Added implementation of M-S algorithm. - Modified subsampling behaviour: avoiding singular resamples by using customized LU decomposition. - Replaced C style matrices with Fortran style matrices, with as little copying as possible. - Using LAPACK's DGELS instead of local lu() decomposition. - Code clean up: removed all subroutines that were unused. */ #ifndef USE_FC_LEN_T # define USE_FC_LEN_T #endif #include #include #include #ifndef FCONE # define FCONE #endif #include #include #include "robustbase.h" //-> , -> XLENGTH, R_xlen_t /* these will also move to "lmrob.h" --- * but first make many of these 'static' <<< FIXME! */ void fast_s_large_n(double *X, double *y, int *nn, int *pp, int *nRes, int *max_it_scale, double *res, int *ggroups, int *nn_group, int *K, int *max_k, double rel_tol, double inv_tol, double scale_tol, int *converged, int *best_r, double *bb, double *rrhoc, int *iipsi, double *bbeta, double *sscale, int trace_lev, int mts, Rboolean ss); void fast_s(double *X, double *y, int *nn, int *pp, int *nRes, int *max_it_scale, double *res, int *K, int *max_k, double rel_tol, double inv_tol, double scale_tol, int *converged, int *best_r, double *bb, double *rrhoc, int *iipsi, double *bbeta, double *sscale, int trace_lev, int mts, Rboolean ss); Rboolean rwls(const double X[], const double y[], int n, int p, double *estimate, double *i_estimate, double *resid, double *loss, double scale, double epsilon, int *max_it, double *rho_c, const int ipsi, int trace_lev); static void sample_noreplace(int *x, int n, int k, int *ind_space); double norm2 (double *x, int n); double norm (double *x, int n); double norm1(double *x, int n); double norm_diff2 (double *x, double *y, int n); double norm_diff (double *x, double *y, int n); double norm1_diff(double *x, double *y, int n); /* moved to robustbase.h * * double normcnst(const double c[], int ipsi); * double rho_inf (const double c[], int ipsi); * double rho(double x, const double c[], int ipsi); * double psi(double x, const double c[], int ipsi); * double psip(double x, const double c[], int ipsi);// psi' * double psi2(double x, const double c[], int ipsi);// psi'' * double wgt(double x, const double c[], int ipsi); */ double rho_huber(double x, const double c[]); double psi_huber(double x, const double c[]); double psip_huber(double x, const double c[]); double psi2_huber(double x, const double c[]); double wgt_huber(double x, const double c[]); double rho_biwgt(double x, const double c[]); double psi_biwgt(double x, const double c[]); double psip_biwgt(double x, const double c[]); double psi2_biwgt(double x, const double c[]); 
double wgt_biwgt(double x, const double c[]); double rho_gwgt(double x, const double c[]); double psi_gwgt(double x, const double c[]); double psip_gwgt(double x, const double c[]); double wgt_gwgt(double x, const double c[]); double rho_opt(double x, const double c[]); double psi_opt(double x, const double c[]); double psip_opt(double x, const double c[]); double wgt_opt(double x, const double c[]); double rho_hmpl(double x, const double c[]); double psi_hmpl(double x, const double c[]); double psip_hmpl(double x, const double c[]); double psi2_hmpl(double x, const double c[]); double wgt_hmpl(double x, const double c[]); double rho_ggw(double x, const double c[]); void psi_ggw_vec(double *x, int n, void *k); double psi_ggw(double x, const double c[]); double psip_ggw(double x, const double c[]); double wgt_ggw(double x, const double c[]); double rho_lqq(double x, const double c[]); double psi_lqq(double x, const double c[]); double psip_lqq(double x, const double c[]); double psi2_lqq(double x, const double c[]); double wgt_lqq(double x, const double c[]); double sum_rho_sc(const double r[], double scale, int n, int p, const double c[], int ipsi); void get_weights_rhop(const double r[], double s, int n, const double rrhoc[], int ipsi, /* --> */ double *w); int refine_fast_s(const double X[], double *wx, const double y[], double *wy, double *weights, int n, int p, double *res, double *work, int lwork, double *beta_cand, int kk, Rboolean *conv, int max_k, double rel_tol, int trace_lev, double b, double *rrhoc, int ipsi, double initial_scale, double *beta_ref, double *scale); void m_s_subsample(double *X1, double *y, int n, int p1, int p2, int nResample, int max_it_scale, double rel_tol, double inv_tol, double scale_tol, double *bb, double *rrhoc, int ipsi, double *sscale, int trace_lev, double *b1, double *b2, double *t1, double *t2, double *y_tilde, double *res, double *x1, double *x2, int *NIT, int *K, int *KODE, double *SIGMA, double *BET0, double *SC1, double *SC2, double *SC3, double *SC4, int mts, Rboolean ss); Rboolean m_s_descent(double *X1, double *X2, double *y, int n, int p1, int p2, int K_m_s, int max_k, int max_it_scale, double rel_tol, double scale_tol, double *bb, double *rrhoc, int ipsi, double *sscale, int trace_lev, double *b1, double *b2, double *t1, double *t2, double *y_tilde, double *res, double *res2, double *x1, double *x2, int *NIT, int *K, int *KODE, double *SIGMA, double *BET0, double *SC1, double *SC2, double *SC3, double *SC4); int subsample(const double x[], const double y[], int n, int m, double *beta, int *ind_space, int *idc, int *idr, double *lu, double *v, int *p, double *Dr, double *Dc, int rowequ, int colequ, Rboolean sample, int mts, Rboolean ss, double tol_inv, Rboolean solve); Rboolean fast_s_with_memory(double *X, double *y, double *res, int *nn, int *pp, int *nRes, int *max_it_scale, int *K, int *max_k, double rel_tol, double inv_tol, double scale_tol, int trace_lev, int *best_r, double *bb, double *rrhoc, int *iipsi, double **best_betas, double *best_scales, int mts, Rboolean ss); /* for "tracing" only : */ void disp_mat(double **a, int n, int m); void disp_vec(double *a, int n); void disp_veci(int *a, int n); double kthplace(double *, int, int); int find_max(double *a, int n); double find_scale(const double r[], double b, const double rrhoc[], int ipsi, double initial_scale, int n, int p, int* iter, // input: max_iter, output: #{iterations used} double scale_tol, Rboolean trace); double median_abs(double *, int, double *); double MAD(double *a, 
int n, double center, double *tmp, double *tmp2); void zero_mat(double **a, int n, int m); #define INIT_WLS(_X_, _y_, _n_, _p_) \ /* Determine optimal block size for work array*/ \ F77_CALL(dgels)("N", &_n_, &_p_, &one, _X_, &_n_, _y_, \ &_n_, &work0, &lwork, &info FCONE); \ if (info) { \ warning(_(" Problem determining optimal block size, using minimum")); \ lwork = 2*_p_; \ } else \ lwork = (int)work0; \ \ if (trace_lev >= 4) \ Rprintf(" Optimal block size for DGELS: %d\n", lwork); \ \ /* allocate */ \ work = (double *) Calloc(lwork, double); \ weights = (double *) Calloc(n, double); #define CLEANUP_WLS \ Free(work); Free(weights); #define CLEANUP_EQUILIBRATION \ Free(Dr); Free(Dc); Free(Xe); #define CLEANUP_SUBSAMPLE \ Free(ind_space); Free(idc); Free(idr); Free(pivot); \ Free(lu); Free(v); \ CLEANUP_EQUILIBRATION; #define FIT_WLS(_X_, _x_, _y_, _n_, _p_) \ /* add weights to _y_ and _x_ */ \ for (j=0; j<_n_; j++) { \ wtmp = sqrt(weights[j]); \ _y_[j] *= wtmp; \ for (k=0; k<_p_; k++) \ _x_[_n_*k+j] = _X_[_n_*k+j] * wtmp; \ } \ /* solve weighted least squares problem */ \ F77_CALL(dgels)("N", &_n_, &_p_, &one, _x_, &_n_, _y_, \ &_n_, work, &lwork, &info FCONE); \ if (info) { \ if (info < 0) { \ CLEANUP_WLS; \ error(_("DGELS: illegal argument in %i. argument."), info); \ } else { \ if (trace_lev >= 4) { \ Rprintf(" Robustness weights in failing step: "); \ disp_vec(weights, _n_); \ } \ CLEANUP_WLS; \ error(_("DGELS: weighted design matrix not of full rank (column %d).\nUse control parameter 'trace.lev = 4' to get diagnostic output."), info); \ } \ } #define SETUP_EQUILIBRATION(_n_, _p_, _X_, _large_n_) \ /* equilibration of matrix _X_ */ \ /* solve (Dr X Dc) b = Dr y with beta = Dc b instead of */ \ /* X beta = y */ \ /* see Demmel (1997) APPLIED NUMERICAL LINEAR ALGEBRA */ \ /* Section 2.5.2 Equilibration */ \ double *Dr, *Dc, *Xe, rowcnd, colcnd, amax; \ int rowequ = 0 , colequ = 0; \ Dr = (double *) Calloc(_n_, double); \ Dc = (double *) Calloc(_p_, double); \ Xe = (double *) Calloc(_n_*_p_, double); \ COPY(_X_, Xe, _n_*_p_); \ F77_CALL(dgeequ)(&_n_, &_p_, Xe, &_n_, Dr, Dc, &rowcnd, \ &colcnd, &amax, &info); \ if (info) { \ if (info < 0) { \ CLEANUP_EQUILIBRATION; \ error(_("DGEEQ: illegal argument in %i. argument"), -1 * info); \ } else if (info > _n_) { \ if (_large_n_) { \ error(_("Fast S large n strategy failed. 
Use control parameter 'fast.s.large.n = Inf'.")); \ } else { \ error(_("DGEEQU: column %i of the design matrix is exactly zero."), info - _n_); \ } \ } else { \ /* FIXME: replace dgeequ by our own version */ \ /* that does not treat this as error */ \ warning(_(" Skipping design matrix equilibration (DGEEQU): row %i is exactly zero."), info); \ } \ } else { \ /* scale _X_ */ \ char equed; \ F77_CALL(dlaqge)(&_n_, &_p_, Xe, &_n_, Dr, Dc, &rowcnd, \ &colcnd, &amax, &equed FCONE); \ rowequ = equed == 'B' || equed == 'R'; \ colequ = equed == 'B' || equed == 'C'; \ } #define SETUP_SUBSAMPLE(_n_, _p_, _X_, _large_n_) \ /* (Pointers to) Arrays - to be allocated */ \ int *ind_space, *idc, *idr, *pivot; \ double *lu, *v; \ ind_space = (int *) Calloc(_n_, int); \ idc = (int *) Calloc(_n_, int); \ idr = (int *) Calloc(_p_, int); \ pivot = (int *) Calloc(_p_-1, int); \ lu = (double *) Calloc(_p_*_p_, double); \ v = (double *) Calloc(_p_, double); \ SETUP_EQUILIBRATION(_n_, _p_, _X_, _large_n_); #define COPY(from, to, len) Memcpy(to, from, len) /* This assumes that 'p' is correctly defined, and 'j' can be used in caller: */ /* #define COPY(BETA_FROM, BETA_TO, _p_) \ */ /* for(j=0; j < _p_; j++) BETA_TO[j] = BETA_FROM[j]; */ /* In theory BLAS should be fast, but this seems slightly slower, * particularly for non-optimized BLAS :*/ /* static int one = 1; */ /* #define COPY(BETA_FROM, BETA_TO, _p_) \ */ /* F77_CALL(dcopy)(&_p_, BETA_FROM, &one, BETA_TO, &one); */ #define EPS_SCALE 1e-10 #define INFI 1e+20 /* Called from R's lmrob.S() in ../R/lmrob.MM.R, * help() in ../man/lmrob.S.Rd, this function computes an S-regression estimator ~~~~~~~~~~~~~~~~~ */ void R_lmrob_S(double *X, double *y, int *n, int *P, int *nRes, // = nResample ( = 500, by default) double *scale, double *beta_s, double *rrhoc, int *iipsi, double *bb, int *best_r, int *Groups, int *N_group, int *K_s, int *max_k, int *max_it_scale, double *rel_tol, double *inv_tol, double *scale_tol, // <- new, was hardwired to EPS_SCALE := 1e-10 int *converged, int *trace_lev, int *mts, int *ss, int *cutoff) { /* best_r = 't' of Salibian-Barrera_Yohai(2006), * = no. 
of best candidates to be iterated further ("refined") * = 2, by default */ if (*nRes > 0) { double *res = (double *) R_alloc(*n, sizeof(double)); // residuals if (*n > *cutoff) { if(*trace_lev > 0) Rprintf("lmrob_S(n = %d, nRes = %d): fast_s_large_n():\n", *n, *nRes); fast_s_large_n(X, y, n, P, nRes, max_it_scale, res, Groups, N_group, K_s, max_k, *rel_tol, *inv_tol, *scale_tol, converged, best_r, bb, rrhoc, iipsi, beta_s, scale, *trace_lev, *mts, (Rboolean)*ss); } else { if(*trace_lev > 0) Rprintf("lmrob_S(n = %d, nRes = %d): fast_s() [non-large n]:\n", *n, *nRes); fast_s(X, y, n, P, nRes, max_it_scale, res, K_s, max_k, *rel_tol, *inv_tol, *scale_tol, converged, best_r, bb, rrhoc, iipsi, beta_s, scale, *trace_lev, *mts, *ss); } COPY(res, y, *n); // return the 'residuals' in 'y' } else { // nRes[] <= 0 <==> 'only.scale = TRUE' if(*trace_lev > 0) Rprintf("lmrob_S(nRes = 0, n = %d): --> find_scale(*, scale=%g) only:", *n, *scale); *scale = find_scale(y, *bb, rrhoc, *iipsi, *scale, *n, *P, max_it_scale, *scale_tol, *trace_lev >= 3); if(*trace_lev > 0) Rprintf(" used %d iterations\n", *max_it_scale); } } /* Called from R, this function computes an M-S-regression estimator */ // not only called from ../R/lmrob.M.S.R, but also ../inst/xtraR/m-s_fns.R // ~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~ void R_lmrob_M_S(double *X1, double *X2, double *y, double *res, int *nn, int *pp1, int *pp2, int *nRes, int *max_it_scale, double *scale, double *b1, double *b2, double *rho_c, int *ipsi, double *bb, int *K_m_s, int *max_k, double *rel_tol, double *inv_tol, double *scale_tol, int *converged, int *trace_lev, int *orthogonalize, int *subsample, int *descent, int *mts, int *ss) { /* Initialize (some of the) memory here, * so that we have to do it only once */ int i, n = *nn, p1 = *pp1, p2 = *pp2, one = 1; /* (Pointers to) Arrays - to be allocated */ double *t1, *t2, *y_tilde, *y_work, done = 1., dmone = -1.; double *x1, *x2, *ot1, *oT2, *ptr; if(*trace_lev > 0) Rprintf( "lmrob_M_S(n = %d, nRes = %d, (p1,p2)=(%d,%d), (orth,subs,desc)=(%d,%d,%d))\n", n, *nRes, p1, p2, *orthogonalize, *subsample, *descent); t1 = (double *) R_alloc(n, sizeof(double)); /* size n needed for rllarsbi */ t2 = (double *) R_alloc(p2, sizeof(double)); ot1 = (double *) R_alloc(p1, sizeof(double)); oT2 = (double *) R_alloc(p2*p1, sizeof(double)); y_work = (double *) R_alloc(n, sizeof(double)); COPY(y, y_work, n); y_tilde = (double *) R_alloc(n, sizeof(double)); x1 = (double *) R_alloc(n*p1, sizeof(double)); x2 = (double *) R_alloc(n*p2, sizeof(double)); COPY(X2, x2, n*p2); /* Variables required for rllarsbi * (l1 / least absolut residuals - estimate) */ int NIT=0, K=0, KODE=0; double SIGMA = 0., *SC1 = (double *) R_alloc(n, sizeof(double)), *SC2 = (double *) R_alloc(p1, sizeof(double)), *SC3 = (double *) R_alloc(p1, sizeof(double)), *SC4 = (double *) R_alloc(p1, sizeof(double)); double BET0 = 0.773372647623; /* = pnorm(0.75) */ /* STEP 1: Orthgonalize X2 and y from X1 */ if (*orthogonalize) { COPY(X1, x1, n*p1); F77_CALL(rllarsbi)(x1, y_work, &n, &p1, &n, &n, rel_tol, &NIT, &K, &KODE, &SIGMA, t1, y_tilde, SC1, SC2, SC3, SC4, &BET0); COPY(t1, ot1, p1); for (i=0; i < p2; i++) { COPY(X1, x1, n*p1); ptr = X2+i*n; COPY(ptr, y_work, n); F77_CALL(rllarsbi)(x1, y_work, &n, &p1, &n, &n, rel_tol, &NIT, &K, &KODE, &SIGMA, t1, x2+i*n, SC1, SC2, SC3, SC4, &BET0); ptr = oT2+i*p1; COPY(t1, ptr, p1); } COPY(y_tilde, y_work, n); /* compare with Maronna & Yohai 2000: * y_work and y_tilde now contain \tilde y, ot1 -> t_1, * x2 -> \tilde x2, oT2 -> T_2 */ } 
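    /* With *orthogonalize, the subsampling and descent steps below operate on the
     * orthogonalized ("tilde") quantities of Maronna & Yohai (2000): y_work holds
     * \tilde y and x2 holds \tilde X2, while ot1 and oT2 keep the L1 fits needed
     * in STEP 3 to transform the estimate back:  b1 <- ot1 + b1 - oT2 %*% b2 . */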
/* STEP 2: Subsample */ if (*subsample) { m_s_subsample(X1, y_work, n, p1, p2, *nRes, *max_it_scale, *rel_tol, *inv_tol, *scale_tol, bb, rho_c, *ipsi, scale, *trace_lev, b1, b2, t1, t2, y_tilde, res, x1, x2, &NIT, &K, &KODE, &SIGMA, &BET0, SC1, SC2, SC3, SC4, *mts, *ss); if (*scale < 0) error(_("m_s_subsample() stopped prematurely (scale < 0).")); } /* STEP 3: Transform back */ if (*orthogonalize) { /* t1 = ot1 + b1 - oT2 %*% b2 */ for(int i=0; i < p1; i++) t1[i] = ot1[i] + b1[i]; F77_CALL(dgemv)("N", &p1, &p2, &dmone, oT2, &p1, b2, &one, &done, t1, &one FCONE); COPY(t1, b1, p1); /* restore x2 */ COPY(X2, x2, n*p2); } /* update / calculate residuals */ COPY(y, res, n); F77_CALL(dgemv)("N", &n, &p1, &dmone, X1, &n, b1, &one, &done, res, &one FCONE); F77_CALL(dgemv)("N", &n, &p2, &dmone, X2, &n, b2, &one, &done, res, &one FCONE); /* STEP 4: Descent procedure */ if (*descent) { *converged = m_s_descent( X1, X2, y, n, p1, p2, *K_m_s, *max_k, *max_it_scale, *rel_tol, *scale_tol, bb, rho_c, *ipsi, scale, *trace_lev, b1, b2, t1, t2, y_tilde, res, y_work, x1, x2, &NIT, &K, &KODE, &SIGMA, &BET0, SC1, SC2, SC3, SC4); } } /* This function performs RWLS iterations starting from * an S-regression estimator (and associated residual scale). * So, in itself, this is ``just'' an M-estimator -- called from R's * lmrob..M..fit() [ ../R/lmrob.MM.R ] * ~~~~~~~~~~~~~~~ * NOTE: rel_tol now controls the *relative* changes in beta, * instead of being hard-wired to EPS = 1e-7 and bounding the * absolute || beta_1 - beta_2 || */ void R_lmrob_MM(double *X, double *y, int *n, int *P, double *beta_initial, double *scale, double *beta_m, double *resid, int *max_it, double *rho_c, int *ipsi, double *loss, double *rel_tol, int *converged, int *trace_lev, int *mts, int *ss) { /* starting from the S-estimate (beta_initial), use * irwls to compute the MM-estimate (beta_m) */ if(*trace_lev > 0) Rprintf("lmrob_MM(): rwls():\n"); *converged = (int)rwls(X,y,*n,*P,beta_m, beta_initial, resid, loss, *scale, *rel_tol, max_it, rho_c, *ipsi, *trace_lev); if (!converged) COPY(beta_initial, beta_m, *P); } /* Call subsample() from R, for testing purposes only */ void R_subsample(const double x[], const double y[], int *n, int *m, double *beta, int *ind_space, int *idc, int *idr, double *lu, double *v, int *p, double *_Dr, double *_Dc, int *_rowequ, int *_colequ, int *status, int *sample, int *mts, int *ss, double *tol_inv, int *solve) { int info; /* set the seed */ GetRNGstate(); SETUP_EQUILIBRATION(*n, *m, x, 0); *status = subsample(Xe, y, *n, *m, beta, ind_space, idc, idr, lu, v, p, Dr, Dc, rowequ, colequ, (Rboolean)*sample, *mts, (Rboolean)*ss, *tol_inv, (Rboolean)*solve); COPY(Dr, _Dr, *n); COPY(Dc, _Dc, *m); *_rowequ = rowequ; *_colequ = colequ; CLEANUP_EQUILIBRATION; PutRNGstate(); } //---- Psi(), Rho(), Functions----------------------------------------------------------- SEXP R_psifun(SEXP x_, SEXP c_, SEXP ipsi_, SEXP deriv_) { /* * Calculate psi for vectorized x, scaled to get psi'(0) = 1 * deriv -1: rho(x) {*not* normalized} * deriv 0: psi(x) = rho'(x) * deriv 1: psi'(x) = rho''(x) {we always have psip(0) == 1} * deriv 2: psi''(x)= rho'''(x) */ int nprot = 1, ipsi = asInteger(ipsi_), deriv = asInteger(deriv_); if (isInteger(x_)) { x_ = PROTECT(coerceVector(x_, REALSXP)); nprot++; } if (!isReal(x_)) error(_("Argument '%s' must be numeric or integer"), "x"); if (!isReal(c_)) error(_("Argument '%s' must be numeric or integer"), "cc"); R_xlen_t i, n = XLENGTH(x_); SEXP res = PROTECT(allocVector(REALSXP, n)); // the result double 
*x = REAL(x_), *r = REAL(res), *cc = REAL(c_); // put the for() loop *inside* the switch (--> speed for llength >> 1) : #define for_i_n_NA for(i = 0; i < n; i++) r[i] = ISNAN(x[i]) ? x[i] : switch(deriv) { // our rho() is rho~(), i.e., scaled to max = 1 case -1: if(is_redescender(ipsi)) { double rho_Inf = rho_inf(cc, ipsi); for_i_n_NA rho(x[i], cc, ipsi) * rho_Inf; } else { // huber, .. for_i_n_NA rho(x[i], cc, ipsi); } break; case 0: for_i_n_NA psi (x[i], cc, ipsi); break; case 1: for_i_n_NA psip(x[i], cc, ipsi); break; case 2: for_i_n_NA psi2(x[i], cc, ipsi); break; default: error(_("'deriv'=%d is invalid"), deriv); } UNPROTECT(nprot); return res; } SEXP R_chifun(SEXP x_, SEXP c_, SEXP ipsi_, SEXP deriv_) { /* * Calculate chi for vectorized x, i.e. rho~(.) with rho~(inf) = 1: * deriv 0: chi (x) = \rho(x) / \rho(Inf) =: \rho(x) * nc == our rho() C-function * deriv 1: chi'(x) = psi(x) * nc * deriv 2: chi''(x) = psi'(x) * nc */ int nprot = 1, ipsi = asInteger(ipsi_), deriv = asInteger(deriv_); if (isInteger(x_)) { x_ = PROTECT(coerceVector(x_, REALSXP)); nprot++; } if (!isReal(x_)) error(_("Argument '%s' must be numeric or integer"), "x"); if (!isReal(c_)) error(_("Argument '%s' must be numeric or integer"), "cc"); R_xlen_t i, n = XLENGTH(x_); SEXP res = PROTECT(allocVector(REALSXP, n)); // the result double *x = REAL(x_), *r = REAL(res), *cc = REAL(c_); // our rho() is rho~() == chi(), i.e., scaled to max = 1 double rI = (deriv > 0) ? rho_inf(cc, ipsi) : 0./* -Wall */; switch(deriv) { case 0: for_i_n_NA rho(x[i], cc, ipsi); break; case 1: for_i_n_NA psi (x[i], cc, ipsi) / rI; break; case 2: for_i_n_NA psip(x[i], cc, ipsi) / rI; break; case 3: for_i_n_NA psi2(x[i], cc, ipsi) / rI; break; default: error(_("'deriv'=%d is invalid"), deriv); } UNPROTECT(nprot); return res; } SEXP R_wgtfun(SEXP x_, SEXP c_, SEXP ipsi_) { /* * Calculate wgt(x) = psi(x)/x for vectorized x */ int nprot = 1, ipsi = asInteger(ipsi_); if (isInteger(x_)) { x_ = PROTECT(coerceVector(x_, REALSXP)); nprot++; } if (!isReal(x_)) error(_("Argument '%s' must be numeric or integer"), "x"); if (!isReal(c_)) error(_("Argument '%s' must be numeric or integer"), "cc"); R_xlen_t i, n = XLENGTH(x_); SEXP res = PROTECT(allocVector(REALSXP, n)); // the result double *x = REAL(x_), *r = REAL(res), *cc = REAL(c_); for_i_n_NA wgt(x[i], cc, ipsi); UNPROTECT(nprot); return res; } #undef for_i_n_NA SEXP R_rho_inf(SEXP cc, SEXP ipsi) { if (!isReal(cc)) error(_("Argument 'cc' must be numeric")); if (!isInteger(ipsi)) error(_("Argument 'ipsi' must be integer")); return ScalarReal(rho_inf(REAL(cc), INTEGER(ipsi)[0])); } double rho_inf(const double k[], int ipsi) { /* * Compute \rho(\infty) for psi functions * (Note that our C function rho() is "rho~" and has rho(Inf) = 1) */ double c = k[0]; switch(ipsi) { default: error(_("rho_inf(): ipsi=%d not implemented."), ipsi); case 0: return(R_PosInf); // huber case 1: return(c*c/6.); // biweight case 2: return(c*c); // GaussWeight / "Welsh" case 3: return(3.25*c*c); // Optimal case 4: return(0.5*k[0]*(k[1]+k[2]-k[0])); // Hampel case 5: // GGW (Generalized Gauss Weight) switch((int)c) { default: case 0: return(k[4]); break; // k[4] == cc[5] in R -- must be correct! 
case 1: return(5.309853); break; case 2: return(2.804693); break; case 3: return(0.3748076); break; case 4: return(4.779906); break; case 5: return(2.446574); break; case 6: return(0.4007054); break; }; case 6: // LQQ aka 'lin psip' return (k[2]*k[1]*(3*k[1]+2*k[0]) + (k[0]+k[1])*(k[0]+k[1])) / (6.*(k[2]-1.)); } } // rho_inf() double normcnst(const double k[], int ipsi) { /* * return normalizing constant for psi functions := 1 / \rho(\infty) */ double c = k[0]; switch(ipsi) { default: error(_("normcnst(): ipsi=%d not implemented."), ipsi); case 0: return(0.); // huber {normcnst() should never be used for that!} case 1: return(6./(c*c)); // biweight case 2: return(1./(c*c)); // GaussWeight / "Welsh" case 3: return(1./3.25/(c*c)); // Optimal case 4: return(2./(k[0]*(k[1]+k[2]-k[0]))); // Hampel case 5: // GGW switch((int)c) { default: case 0: return(1./ k[4]); break; // k[4] == cc[5] in R -- must be correct! case 1: return(1./5.309853); break; case 2: return(1./2.804693); break; case 3: return(1./0.3748076); break; case 4: return(1./4.779906); break; case 5: return(1./2.446574); break; case 6: return(1./0.4007054); break; }; case 6: // LQQ aka 'lin psip' return((6*(k[2]-1))/(k[2]*k[1]*(3*k[1]+2*k[0])+(k[0]+k[1])*(k[0]+k[1]))); } } // normcnst() double rho(double x, const double c[], int ipsi) { /* * return the correct rho according to ipsi * This rho() is normalized to 1, called rho~() or chi() in other contexts */ switch(ipsi) { default: error(_("rho(): ipsi=%d not implemented."), ipsi); case 0: return(rho_huber(x, c)); // huber case 1: return(rho_biwgt(x, c)); // biweight case 2: return(rho_gwgt(x, c)); // GaussWeight / "Welsh" case 3: return(rho_opt(x, c)); // Optimal case 4: return(rho_hmpl(x, c)); // Hampel case 5: return(rho_ggw(x, c)); // GGW (Generalized Gauss Weight) case 6: return(rho_lqq(x, c)); // LQQ := Linear-Quadratic-Quadratic // was LGW := "lin psip" := piecewise linear psi'() } } double psi(double x, const double c[], int ipsi) { /* * return the correct psi according to ipsi * this is actually rho' and not psi */ switch(ipsi) { default: error(_("psi(): ipsi=%d not implemented."), ipsi); case 0: return(psi_huber(x, c)); // huber case 1: return(psi_biwgt(x, c)); // biweight case 2: return(psi_gwgt(x, c)); // GaussWeight / "Welsh" case 3: return(psi_opt(x, c)); // Optimal case 4: return(psi_hmpl(x, c)); // Hampel case 5: return(psi_ggw(x, c)); // GGW case 6: return(psi_lqq(x, c)); // LQQ (piecewise linear psi') } } double psip(double x, const double c[], int ipsi) { /* * return the correct ppsi according to ipsi * this is actually rho'' and not psip */ switch(ipsi) { default: error(_("psip(): ipsi=%d not implemented."), ipsi); case 0: return(psip_huber(x, c)); // huber case 1: return(psip_biwgt(x, c)); // biweight case 2: return(psip_gwgt(x, c)); // GaussWeight / "Welsh" case 3: return(psip_opt(x, c)); // Optimal case 4: return(psip_hmpl(x, c)); // Hampel case 5: return(psip_ggw(x, c)); // GGW case 6: return(psip_lqq(x, c)); // LQQ (piecewise linear psi') } } double psi2(double x, const double c[], int ipsi) { /* Compute psi''(x) == rho'''(x) */ switch(ipsi) { // default: error(_("psi2: ipsi=%d not implemented."), ipsi); case 0: return(psi2_huber(x, c)); // huber case 1: return(psi2_biwgt(x, c)); // biweight case 4: return(psi2_hmpl(x, c)); // Hampel case 6: return(psi2_lqq(x, c)); // LQQ (piecewise linear psi') default: error(_("psi2(): ipsi=%d not implemented."), ipsi); /* case 2: return(psi2_gwgt(x, c)); // GaussWeight / "Welsh" case 3: return(psi2_opt(x, c)); // Optimal 
case 5: return(psi2_ggw(x, c)); // GGW */ } } double wgt(double x, const double c[], int ipsi) { /* * return the correct wgt according to ipsi * wgt: rho'(x) / x */ switch(ipsi) { default: case 0: return(wgt_huber(x, c)); // huber case 1: return(wgt_biwgt(x, c)); // biweight case 2: return(wgt_gwgt(x, c)); // GaussWeight / "Welsh" case 3: return(wgt_opt(x, c)); // Optimal case 4: return(wgt_hmpl(x, c)); // Hampel case 5: return(wgt_ggw(x, c)); // GGW case 6: return(wgt_lqq(x, c)); // LQQ (piecewise linear psi') } } //--- Huber's rho / psi / ... //--- ------- /* Huber's rho(): contrary to all the redescenders below, this can NOT be scaled to rho(Inf)=1 : */ double rho_huber(double x, const double c[]) { return (fabs(x) <= c[0]) ? x*x*0.5 : c[0]*(fabs(x) - c[0]/2); } double psi_huber(double x, const double c[]) { // Huber's psi = rho'() return (x <= -c[0]) ? -c[0] : ((x < c[0]) ? x : c[0]); } double psip_huber(double x, const double c[]) { // psi' = rho'' : Second derivative of Huber's loss function return (fabs(x) >= c[0]) ? 0. : 1.; } double psi2_huber(double x, const double c[]) { // psi'' = rho''' : Third derivative of Huber's loss function return 0; // FIXME? return NaN when |x| == c ?? -- then also for psi2_hmpl() } double wgt_huber(double x, const double c[]) { /* * Weights for Huber's loss function w(x) = psi(x)/x */ return (fabs(x) >= c[0]) ? c[0]/fabs(x) : 1.; } //--- Biweight = Bisquare = Tukey's Biweight ... //--- -------------------------------------- double rho_biwgt(double x, const double c[]) { /* * Tukey's bisquare loss function == R's tukeyChi() */ if (fabs(x) > (*c)) return(1.); else { double t = x / (*c); t *= t; /* = t^2 */ return( t*(3. + t*(-3. + t)) ); } } double psi_biwgt(double x, const double c[]) { /* * First derivative of Tukey's bisquare loss function */ if (fabs(x) > (*c)) return(0.); else { double a = x / (*c), u = 1. - a*a; return( x * u * u ); } } double psip_biwgt(double x, const double c[]) { /* * Second derivative of Tukey's bisquare loss function */ if (fabs(x) > (*c)) return(0.); else { x /= *c; double x2 = x*x; return( (1. - x2) * (1 - 5 * x2)); } } double psi2_biwgt(double x, const double c[]) { /** 3rd derivative of Tukey's bisquare loss function rho() *= 2nd derivative of psi() : */ if (fabs(x) >= c[0]) // psi''() *is* discontinuous at x = c[0]: use "middle" value there: return (fabs(x) == c[0]) ? 4*x/c[0] : 0.; else { x /= c[0]; double x2 = x*x; return 4*x/c[0] * (5 * x2 - 3.); } } double wgt_biwgt(double x, const double c[]) { /* * Weights for Tukey's bisquare loss function */ if( fabs(x) > *c ) return(0.); else { double a = x / (*c); a = (1. - a)*(1. + a); return( a * a ); } } //---------- gwgt == Gauss Weight Loss function =: "Welsh" -------------------- double rho_gwgt(double x, const double c[]) { /* * Gauss Weight Loss function */ double ac = x / (*c); return(-expm1(-(ac*ac)/2)); } // Largest x such that exp(-x) does not underflow : static double MIN_Exp = -708.4; // ~ = M_LN2 * DBL_MIN_EXP = -log(2) * 1022 = -708.3964 */ // Largest x such that exp(-x^2/2) does not underflow : static double MAX_Ex2 = 37.7; // ~ = sqrt(- 2. 
* M_LN2 * DBL_MIN_EXP); /* max {x | exp(-x^2/2) < .Machine$double.xmin } = * min {x | x^2 > -2*log(2)* .Machine$double.min.exp } = * = sqrt(-2*log(2)* .Machine$double.min.exp) = {IEEE double} * = sqrt(log(2) * 2044) = 37.64031 */ double psi_gwgt(double x, const double c[]) { /* * Gauss Weight Psi() */ double a = x / (*c); if(fabs(a) > MAX_Ex2) return 0.; else return x*exp(-(a*a)/2); } double psip_gwgt(double x, const double c[]) { /* * Gauss Weight Psi'() */ x /= (*c); if(fabs(x) > MAX_Ex2) return 0.; else { double ac = x*x; return exp(-ac/2) * (1. - ac); } } double wgt_gwgt(double x, const double c[]) { /* * Gauss Weight Loss function */ double a = x / (*c); return(exp(-(a*a)/2)); } double rho_opt(double x, const double c[]) { /* * Optimal psi Function, thank you robust package */ double ac = x / (*c), // AX=S/XK ax = fabs(ac); // AX=ABST/XK if (ax > 3) // IF (AX .GT. 3.D0) THEN return(1); // rlRHOm2=3.25D0*XK*XK else if (ax > 2.) { const double R1 = -1.944/2., R2 = 1.728/4., R3 = -0.312/6., R4 = 0.016/8.; ax *= ax; // = |x/c| ^ 2 return (ax*(R1+ ax*(R2+ ax*(R3+ ax*R4))) +1.792)/3.25; // rlRHOm2=XK*XK*(R1*AX**2+R2*AX**4+R3*AX**6+R4*AX**8+1.792D0) } else return(ac*ac/6.5); // rlRHOm2=S2/2.D0 } double psi_opt(double x, const double c[]) { /* * Optimal psi Function, thank you robust package */ double R1 = -1.944, R2 = 1.728, R3 = -0.312, R4 = 0.016; double ax, ac; ac = x / (*c); // AX=S/XK ax = fabs(ac); // AX=ABST/XK if (ax > 3.) // IF (AX .GT. 3.D0) THEN return(0); // rlPSIm2=0.D0 else if (ax > 2.) { // ELSE IF(AX .GT. 2.D0) THEN double a2 = ac*ac; if (ac > 0.) // IF (AX .GT. 0.D0) THEN return fmax2(0., (*c)*((((R4*a2 +R3)*a2 +R2)*a2 +R1)*ac)); // rlPSIm2=DMAX1(0.D0,XK*(R4*AX**7+R3*AX**5+R2*AX**3+R1*AX)) else return -fabs((*c)*((((R4*a2 +R3)*a2 +R2)*a2 +R1)*ac)); // rlPSIm2=-DABS(XK*(R4*AX**7+R3*AX**5+R2*AX**3+R1*AX)) } else return x; } double psip_opt(double x, const double c[]) { /* * psi'() for Optimal psi Function, thank you robust package */ double ac = x / (*c), ax = fabs(ac); if (ax > 3.) return 0; else if (ax > 2.) { const double R1 = -1.944, R2 = 1.728, R3 = -0.312, R4 = 0.016; ax *= ax; // = |x/c| ^ 2 return R1 + ax*(3*R2 + ax*(5*R3 + ax * 7*R4)); } else return 1; } double wgt_opt(double x, const double c[]) { /* * w(.) for optimal psi Function, thank you robust package */ double ac = x / (*c), ax = fabs(ac); if (ax > 3.) return 0.; else if (ax > 2.) { const double R1 = -1.944, R2 = 1.728, R3 = -0.312, R4 = 0.016; ax *= ax; // = |x/c| ^ 2 return fmax2(0., R1+ ax*(R2 + ax*(R3 + ax*R4))); } else return 1.; } double rho_hmpl(double x, const double k[]) { /* * rho() for Hampel's redescending psi function * constants (a, b, r) == k[0:2] s.t. slope of psi is 1 in the center * * This function is normalized s.t. rho(inf) = 1 */ double u = fabs(x), nc = k[0]*(k[1]+k[2]-k[0])/2; if (u <= k[0]) return( x*x/2 / nc ); else if (u <= k[1]) return( ( u - k[0]/2 ) * k[0] / nc ); else if (u <= k[2]) return( ( k[1] - k[0]/2 + (u - k[1]) * (1 - ( u - k[1] ) / ( k[2] - k[1] ) / 2 )) * k[0] / nc); else return( 1 ); } double psi_hmpl(double x, const double k[]) { /* * psi() for Hampel's redescending psi function * constants (a, b, r) == k[0:2] s.t. 
slope of psi is 1 in the center */ // double sx = sign(x), u = fabs(x); : double sx, u; if (x < 0) { sx = -1; u = -x; } else { sx = +1; u = x; } if (u <= k[0]) return( x ); else if (u <= k[1]) return sx * k[0]; else if (u <= k[2]) return sx * k[0] * (k[2] - u) / (k[2] - k[1]); else return 0.; } double psip_hmpl(double x, const double k[]) { /* * psi'() for Hampel's redescending psi function * constants (a, b, r) == k[0:2] s.t. slope of psi is 1 in the center */ double u = fabs(x); if (u <= k[0]) return( 1 ); else if (u <= k[1]) return( 0 ); else if (u <= k[2]) return( k[0] / ( k[1] - k[2] ) ); else return( 0 ); } double psi2_hmpl(double x, const double k[]) { /* * psi''() for Hampel's redescending psi function * constants (a, b, r) == k[0:2] s.t. slope of psi is 1 in the center */ return 0.; // even though psi'() is already discontinuous at k[j] } double wgt_hmpl(double x, const double k[]) { /* * w(x) = psi(x)/x for Hampel's redescending psi function * Hampel redescending psi function * constants (a, b, r) == k[0:2] s.t. slope of psi is 1 in the center */ double u = fabs(x); if (u <= k[0]) return( 1 ); else if (u <= k[1]) return( k[0] / u ); else if (u <= k[2]) return( k[0] * ( k[2] - u ) / ( k[2] - k[1] ) / u ); else return( 0 ); } //--- GGW := Generalized Gauss-Weight Koller and Stahel (2011) //--- --- // rho() & chi() need to be calculated by numerical integration -- apart from 6 pre-stored cases double rho_ggw(double x, const double k[]) { /* * Gauss Weight with constant center */ if (k[0] > 0) { // for hard-coded constants --- use a *polynomial* approximation const double C[6][20] = { // 0: b = 1, 95% efficiency {0.094164571656733, -0.168937372816728, 0.00427612218326869, 0.336876420549802, -0.166472338873754, 0.0436904383670537, -0.00732077121233756, 0.000792550423837942, -5.08385693557726e-05, 1.46908724988936e-06, -0.837547853001024, 0.876392734183528, -0.184600387321924, 0.0219985685280105, -0.00156403138825785, 6.16243137719362e-05, -7.478979895101e-07, -3.99563057938975e-08, 1.78125589532002e-09, -2.22317669250326e-11}, // 1: b = 1, 85% efficiency {0.174505224068561, -0.168853188892986, 0.00579250806463694, 0.624193375180937, -0.419882092234336, 0.150011303015251, -0.0342185249354937, 0.00504325944243195, -0.0004404209084091, 1.73268448820236e-05, -0.842160072154898, 1.19912623576069, -0.345595777445623, 0.0566407000764478, -0.00560501531439071, 0.000319084704541442, -7.4279004383686e-06, -2.02063746721802e-07, 1.65716101809839e-08, -2.97536178313245e-10}, // 2: b = 1, bp 0.5 {1.41117142330711, -0.168853741371095, 0.0164713906344165, 5.04767833986545, -9.65574752971554, 9.80999125035463, -6.36344090274658, 2.667031271863, -0.662324374141645, 0.0740982983873332, -0.84794906554363, 3.4315790970352, -2.82958670601597, 1.33442885893807, -0.384812004961396, 0.0661359078129487, -0.00557221619221031, -5.42574872792348e-05, 4.92564168111658e-05, -2.80432020951381e-06}, // 3: b = 1.5, 95% efficiency {0.104604570079252, 0.0626649856211545, -0.220058184826331, 0.403388189975896, -0.213020713708997, 0.102623342948069, -0.0392618698058543, 0.00937878752829234, -0.00122303709506374, 6.70669880352453e-05, 0.632651530179424, -1.14744323908043, 0.981941598165897, -0.341211275272191, 0.0671272892644464, -0.00826237596187364, 0.0006529134641922, -3.23468516804340e-05, 9.17904701930209e-07, -1.14119059405971e-08}, // 4: b = 1.5, 85% efficiency {0.205026436642222, 0.0627464477520301, -0.308483319391091, 0.791480474953874, -0.585521414631968, 0.394979618040607, -0.211512515412973, 
0.0707208739858416, -0.0129092527174621, 0.000990938134086886, 0.629919019245325, -1.60049136444912, 1.91903069049618, -0.933285960363159, 0.256861783311473, -0.0442133943831343, 0.00488402902512139, -0.000338084604725483, 1.33974565571893e-05, -2.32450916247553e-07}, // 5: b = 1.5, bp 0.5 {1.35010856132000, 0.0627465630782482, -0.791613168488525, 5.21196700244212, -9.89433796586115, 17.1277266427962, -23.5364159883776, 20.1943966645350, -9.4593988142692, 1.86332355622445, 0.62986381140768, -4.10676399816156, 12.6361433997327, -15.7697199271455, 11.1373468568838, -4.91933095295458, 1.39443093325178, -0.247689078940725, 0.0251861553415515, -0.00112130382664914}}; double end[6] = {18.5527638190955, 13.7587939698492, 4.89447236180905, 11.4974874371859, 8.15075376884422, 3.17587939698492}; int j = ((int)k[0]) - 1; double c; switch(j) { // c : identical numbers to those in SET_ABC_GGW below case 0: c = 1.694; break; case 1: c = 1.2442567; break; case 2: c = 0.4375470; break; case 3: c = 1.063; break; case 4: c = 0.7593544; break; case 5: c = 0.2959132; break; default: error(_("rho_ggw(): case (%i) not implemented."), j+1); } x = fabs(x); if (x <= c) return(C[j][0]*x*x); else if (x <= 3*c) return(C[j][1] + x*(C[j][2] + x*(C[j][3] + x*(C[j][4] + x*(C[j][5] + x*(C[j][6] + x*(C[j][7] + x*(C[j][8] + x*(C[j][9]))))))))); else if (x <= end[j]) return(C[j][10] + x*(C[j][11] + x*(C[j][12] + x*(C[j][13] + x*(C[j][14] + x*(C[j][15] + x*(C[j][16] + x*(C[j][17] + x*(C[j][18]+ x*(C[j][19])))))))))); else return(1.); } else { // k[0] == 0; k[1:4] = (a, b, c, rho(Inf)) = "general parameters" x = fabs(x); double a = 0., epsabs = R_pow(DOUBLE_EPS, 0.25), result, abserr; int neval, ier, last, limit = 100, lenw = 4 * limit; int *iwork = (int *) R_alloc(limit, sizeof(int)); double *work = (double *) R_alloc(lenw, sizeof(double)); // --> calculate integral of psi(.); Rdqags() is from R's official API ("Writing R Extensions") Rdqags(psi_ggw_vec, (void *)k, &a, &x, &epsabs, &epsabs, &result, &abserr, &neval, &ier, &limit, &lenw, &last, iwork, work); if (ier >= 1) error(_("Error from Rdqags(psi_ggw*, k, ...): ier = %i"), ier); return(result/k[4]); } } void psi_ggw_vec(double *x, int n, void *k) { for (int i = 0; i piecewise constant psi''(): double psi2_lqq (double x, const double k[]) { // double sx = sign(x), ax = fabs(x); : double sx, ax; if (x < 0) { sx = -1; ax = -x; } else { sx = +1; ax = x; } // k[0:2] == (b, c, s) : if (ax <= k[1]) return 0.; else { double k01 = k[0] + k[1]; if (/*k[1] < ax && */ ax <= k01) return sx * (- k[2]/k[0]); else { double s5 = 1. - k[2], // = (1-s) a = (k[0] * k[2] - 2 * k01)/ s5; if (/* k01 < ax && */ ax < k01 + a) return sx * (- s5 / a); else return 0.; } } } double psi_lqq (double x, const double k[]) { double ax = fabs(x); if (ax <= k[1]) return(x); else { // k[0:2] == (b, c, s) : double k01 = k[0] + k[1]; if (ax <= k01) return((double) (x>0 ? 1 : (x<0 ? -1 : 0)) * (ax - k[2] * pow(ax - k[1], 2.) / k[0] / 2.)); else { double s5 = k[2] - 1., // s - 1 s6 = -2 * k01 + k[0] * k[2]; // numerator( -a ) ==> s6/s5 = -a if (/* k01 < ax && */ ax < k01 - s6 / s5) return((double) (x>0 ? 1 : -1) * (-s6/2. - pow(s5, 2.) / s6 * (pow(ax - k01, 2.) / 2. + s6 / s5 * (ax - k01)))); else return 0.; } } } double rho_lqq (double x, const double k[]) { double ax = fabs(x), k01 = k[0] + k[1]; if (ax <= k[1]) return((3. * k[2] - 3.) / (k[2] * k[1] * (3. * k[1] + 2. * k[0]) + pow(k01, 2.)) * x * x); else if (/* k[1] < ax && */ ax <= k01) { double s0 = ax - k[1]; return((6. * k[2] - 6.) / (k[2] * k[1] * (3. 
* k[1] + 2. * k[0]) + pow(k01, 2.)) * (x * x / 2. - k[2] / k[0] * pow(s0, 3.) / 6.)); } else { double s5 = k[2] - 1., s6 = -2 * k01 + k[0] * k[2]; if (/* k01 < ax && */ ax < k01 - s6 / s5) { double s7 = ax - k01, k01_2 = pow(k01, 2.); return((6. * s5) / (k[2] * k[1] * (3. * k[1] + 2. * k[0]) + k01_2) * (k01_2 / 2. - k[2] * k[0] * k[0] / 6. - s7/2. * (s6 + s7 * (s5 + s7 * s5 * s5 / 3. / s6)))); } else return 1.; } } double wgt_lqq (double x, const double k[]) { double ax = fabs(x); if (ax <= k[1]) return(1.); else { double k01 = k[0] + k[1]; if (ax <= k01) { double s0 = ax - k[1]; return(1. - k[2] * s0 * s0 / (2 * ax * k[0])); } else { double s5 = k[2] - 1., s6 = -2 * k01 + k[0] * k[2]; if (ax < k01 - s6 / s5) { double s7 = ax - k01; return(-(s6/2. + s5 * s5 / s6 * s7 * (s7/2. + s6 / s5)) / ax); } else return(0.); } } } /*============================================================================*/ /* this function finds the k-th place in the * vector a, in the process it permutes the * elements of a */ double kthplace(double *a, int n, int k) { int jnc,j; int l,lr; double ax,w; k--; l=0; lr=n-1; while (l < lr) { ax=a[k]; jnc=l; j=lr; while (jnc <= j) { while (a[jnc] < ax) jnc++; while (a[j] > ax) j--; if (jnc <= j) { w=a[jnc]; a[jnc]=a[j]; a[j]=w; jnc++; j--; } } if (j < k) l=jnc; if (k < jnc) lr=j; } return(a[k]); } /* This is from VR's bundle, MASS package VR/MASS/src/lqs.c : */ /* Sampling k from 0:n-1 without replacement. */ static void sample_noreplace(int *x, int n, int k, int *ind_space) { int i, j, nn=n; #define II ind_space for (i = 0; i < n; i++) II[i] = i; for (i = 0; i < k; i++) { j = nn * unif_rand(); x[i] = II[j]; II[j] = II[--nn]; } #undef II } /* RWLS iterations starting from i_estimate, * ---- the workhorse of the "lmrob_MM" algorithm, called only from R_lmrob_MM(), * which itself is called only from R's lmrob..M..fit(). * In itself, ``just'' an M-estimator : */ Rboolean rwls(const double X[], const double y[], int n, int p, double *estimate, double *i_estimate, double *resid, double* loss, double scale, double epsilon, int *max_it, /* on Input: maximal number of iterations; on Output: number of iterations */ double *rho_c, const int ipsi, int trace_lev) { int lwork = -1, one = 1, info = 1; double work0, *work, wtmp, *weights; double done = 1., dmone = -1., d_beta = 0.; int j, k, iterations = 0; Rboolean converged = FALSE; double *wx = (double *) R_alloc(n*p, sizeof(double)), *wy = (double *) R_alloc(n, sizeof(double)), *beta0 = (double *) R_alloc(p, sizeof(double)); INIT_WLS(wx, wy, n, p); COPY(i_estimate, beta0, p); /* calculate residuals */ COPY(y, resid, n); F77_CALL(dgemv)("N", &n, &p, &dmone, X, &n, beta0, &one, &done, resid, &one FCONE); /* main loop */ while(!converged && ++iterations < *max_it) { R_CheckUserInterrupt(); /* compute weights */ get_weights_rhop(resid, scale, n, rho_c, ipsi, weights); /* solve weighted least squares problem */ COPY(y, wy, n); FIT_WLS(X, wx, wy, n, p); COPY(wy, estimate, p); /* calculate residuals */ COPY(y, resid, n); F77_CALL(dgemv)("N", &n, &p, &dmone, X, &n, estimate, &one, &done, resid, &one FCONE); d_beta = norm1_diff(beta0,estimate, p); if(trace_lev >= 3) { /* get the loss for the new estimate */ *loss = sum_rho_sc(resid,scale,n,0,rho_c,ipsi); Rprintf(" it %4d: L(b1) = %#.12g ", iterations, *loss); if(trace_lev >= 4) { Rprintf("\n b1 = ("); for(j=0; j < p; j++) Rprintf("%s%.11g", (j > 0)? 
", " : "", estimate[j]); Rprintf(");"); } Rprintf(" ||b0 - b1||_1 = %g\n", d_beta); } /* check for convergence */ converged = d_beta <= epsilon * fmax2(epsilon, norm1(estimate, p)); COPY(estimate, beta0, p); } /* end while(!converged & iter <=...) */ if(0 < trace_lev) { if(trace_lev < 3) *loss = sum_rho_sc(resid,scale,n,0,rho_c,ipsi); Rprintf(" rwls() used %2d it.; last ||b0 - b1||_1 = %#g, L(b1) = %.12g; %sconvergence\n", iterations, d_beta, *loss, (converged ? "" : "NON-")); } *max_it = iterations; CLEANUP_WLS; return converged; } /* rwls() */ /* sets the entries of a matrix to zero */ void zero_mat(double **a, int n, int m) { int i,j; for(i=0; i < n; i++) for(j=0; j < m; j++) a[i][j] = 0.; } /* * * 2004 / 5 -- Matias Salibian-Barrera & Victor Yohai * Department of Statistics, University of British Columbia * matias@stat.ubc.ca * Department of Mathematics, University of Buenos Aires * vyohai@uolsinectis.com.ar * * * Reference: A fast algorithm for S-regression estimates, * 2005, Salibian-Barrera and Yohai. */ /* This function implements the "large n" strategy */ void fast_s_large_n(double *X, double *y, int *nn, int *pp, int *nRes, int *max_it_scale, double *res, int *ggroups, int *nn_group, int *K, int *max_k, double rel_tol, double inv_tol, double scale_tol, int *converged, int *best_r, double *bb, double *rrhoc, int *iipsi, double *bbeta, double *sscale, int trace_lev, int mts, Rboolean ss) { /* *X = the n x p design matrix (incl. intercept if appropriate), * in column order as sent by R) * *y = the ( n ) response vector * *nn =: n = the length of y * *pp =: p = the number of columns in X * *nRes = number of re-sampling candidates to be used in each partition * *ggroups = number of groups in which to split the * random subsample * *nn_group = size of each of the (*ggroups) groups * to use in the random subsample * *K = number of refining steps for each candidate (typically 1 or 2) * *max_k = number of refining steps for each candidate (typically 1 or 2) [used to be hard coded to MAX_ITER_REFINE_S = 50 ] * *rel_tol= convergence tolerance for iterative refinement iterations [used to be hard coded to EPS = 1e-7 ] * *converged: will become 0(FALSE) iff at least one of the best_r iterations * did not converge (in max_k steps to rel_tol precision) * *best_r = no. 
of best candidates to be iterated further ("refined") * *bb = right-hand side of S-equation (typically 1/2) * *rrhoc = tuning constant for loss function * (this should be associated with *bb) * *iipsi = indicator for type of psi function to be used * *bbeta = final estimator * *sscale = associated scale estimator (or -1 when problem) */ int i,j,k, ij, freedsamp = 0, initwls = 0; int n = *nn, p = *pp, kk = *K, ipsi = *iipsi; int groups = *ggroups, n_group = *nn_group, sg = groups * n_group; double b = *bb, sc, best_sc, worst_sc; /* (Pointers to) Arrays - to be allocated */ int *indices, *ind_space; double **best_betas, *best_scales; double *xsamp, *ysamp, *beta_ref; double **final_best_betas, *final_best_scales; #define CALLOC_MAT(_M_, _n_, _d_) \ _M_ = (double **) Calloc(_n_, double *); \ for(int i=0; i < _n_; i++) \ _M_[i] = (double *) Calloc(_d_, double) beta_ref = (double *) Calloc(p, double); CALLOC_MAT(final_best_betas, *best_r, p); final_best_scales = (double *) Calloc(*best_r, double); k = *best_r * groups; best_scales = (double *) Calloc(k, double ); CALLOC_MAT(best_betas, k, p); indices = (int *) Calloc(sg, int); ind_space = (int *) Calloc(n, int); xsamp = (double *) Calloc(n_group*p, double); ysamp = (double *) Calloc(n_group, double); /* assume that n > 2000 */ /* set the seed */ GetRNGstate(); /* get a sample of k indices */ sample_noreplace(indices, n, sg, ind_space); /* FIXME: define groups using nonsingular subsampling? */ /* would also need to allow observations to be part */ /* of multiple groups at the same time */ Free(ind_space); /* FIXME: Also look at lqs_setup(), * ----- and xr[.,.] "fortran-like" matrix can be used from there!*/ /* For each (of 'groups') group : get the *best_r best betas : */ #define X(_k_, _j_) X[_j_*n+_k_] #define xsamp(_k_, _j_) xsamp[_j_*n_group+_k_] for(i=0; i < groups; i++) { /* populate matrix */ for(j = 0; j < n_group; j++) { ij = i*n_group + j; for (k = 0; k < p; k++) xsamp(j, k) = X(indices[ij], k); ysamp[j] = y[indices[ij]]; } if (trace_lev) Rprintf(" Subsampling to find candidate betas in group %d:\n", i); if(fast_s_with_memory(xsamp, ysamp, res, &n_group, pp, nRes, max_it_scale, K, max_k, rel_tol, inv_tol, scale_tol, trace_lev, best_r, bb, rrhoc, iipsi, best_betas + i* *best_r, best_scales+ i* *best_r, mts, ss)) { *sscale = -1.; /* problem */ goto cleanup_and_return; } } Free(xsamp); Free(ysamp); freedsamp = 1; #undef xsamp /* now iterate (refine) these "best_r * groups" * best betas in the (xsamp,ysamp) sample * with kk C-steps and keep only the "best_r" best ones */ /* initialize new work matrices */ double *wx, *wy; wx = (double *) R_alloc(n*p, sizeof(double)); // need only k here, wy = (double *) R_alloc(n, sizeof(double)); // but n in the last step xsamp = (double *) Calloc(sg*p, double); ysamp = (double *) Calloc(sg, double); freedsamp = 0; #define xsamp(_k_,_j_) xsamp[_j_*sg+_k_] for (ij = 0; ij < sg; ij++) { for (k = 0; k < p; k++) xsamp(ij, k) = X(indices[ij],k); ysamp[ij] = y[indices[ij]]; } int lwork = -1, one = 1, info = 1; double work0, *work, *weights; INIT_WLS(wx, wy, n, p); initwls = 1; Rboolean conv = FALSE; int pos_worst_scale = 0; for(i=0; i < *best_r; i++) final_best_scales[i] = INFI; worst_sc = INFI; /* set the matrix to zero */ zero_mat(final_best_betas, *best_r, p); for(i=0; i < (*best_r * groups); i++) { if(trace_lev >= 3) { Rprintf(" Sample[%3d]: before refine_(*, conv=FALSE):\n", i); if(i > 0) { Rprintf(" beta_cand : "); disp_vec(best_betas[i],p); Rprintf(" with scale %.15g\n", best_scales[i]); } } 
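	/* Refine this group-level candidate with kk C-steps (conv = FALSE, so no
	 * convergence check), then keep it among the 'best_r' overall fits only if
	 *    sum_rho_sc(res, worst_sc, sg, p, rrhoc, ipsi) < b ,
	 * i.e. the (normalized) average of rho(res_i / worst_sc) is below b; since
	 * that average is non-increasing in the scale, this guarantees that the
	 * candidate's M-scale -- computed exactly by find_scale() below -- beats
	 * the currently worst retained scale. */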
refine_fast_s(xsamp, wx, ysamp, wy, weights, sg, p, res, work, lwork, best_betas[i], kk, &conv/* = FALSE*/, *max_k, rel_tol, trace_lev, b, rrhoc, ipsi, best_scales[i], /* -> */ beta_ref, &sc); if(trace_lev >= 3) { Rprintf(" after refine: beta_ref : "); disp_vec(beta_ref,p); Rprintf(" with scale %.15g\n", sc); } if ( sum_rho_sc(res, worst_sc, sg, p, rrhoc, ipsi) < b ) { int scale_iter = *max_it_scale; /* scale will be better */ sc = find_scale(res, b, rrhoc, ipsi, sc, sg, p, &scale_iter, scale_tol, trace_lev >= 3); int k2 = pos_worst_scale; final_best_scales[ k2 ] = sc; COPY(beta_ref, final_best_betas[k2], p); pos_worst_scale = find_max(final_best_scales, *best_r); worst_sc = final_best_scales[pos_worst_scale]; } } Free(xsamp); Free(ysamp); freedsamp = 1; /* now iterate the best "best_r" * betas in the whole sample until convergence (max_k, rel_tol) */ best_sc = INFI; *converged = 1; k = 0; if(trace_lev) Rprintf(" Now refine() to convergence for %d very best ones:\n", *best_r); for(i=0; i < *best_r; i++) { conv = TRUE; int it_k = refine_fast_s(X, wx, y, wy, weights, n, p, res, work, lwork, final_best_betas[i], kk, &conv/* = TRUE */, *max_k, rel_tol, trace_lev, b, rrhoc, ipsi, final_best_scales[i], /* -> */ beta_ref, &sc); if(trace_lev) Rprintf(" Best[%d]: %sconvergence (%d iter.)", i, conv ? "" : "NON ", it_k); if(best_sc > sc) { if(trace_lev) Rprintf(": -> improved scale to %.15g", sc); best_sc = sc; COPY(beta_ref, bbeta, p); } if (trace_lev) Rprintf("\n"); if (!conv && *converged) *converged = 0; if (k < it_k) k = it_k; } *sscale = best_sc; *max_k = k; /* Done. Now clean-up. */ cleanup_and_return: PutRNGstate(); Free(best_scales); k = *best_r * groups; for(i=0; i < k; i++) Free( best_betas[i] ); Free(best_betas); Free(indices); for(i=0; i < *best_r; i++) Free(final_best_betas[i]); Free(final_best_betas); Free(final_best_scales); Free(beta_ref); if (freedsamp == 0) { Free(xsamp); Free(ysamp); } if (initwls) { CLEANUP_WLS; } #undef X #undef xsamp } /* fast_s_large_n() */ Rboolean fast_s_with_memory(double *X, double *y, double *res, int *nn, int *pp, int *nRes, int *max_it_scale, int *K, int *max_k, double rel_tol, double inv_tol, double scale_tol, int trace_lev, int *best_r, double *bb, double *rrhoc, int *iipsi, double **best_betas, double *best_scales, int mts, Rboolean ss) { /* * Called from fast_s_large_n(), the adjustment for large "n", * same as fast_s, but it returns the best_r best betas, * and their associated scales. * * x : an n x p design matrix (including intercept if appropriate) * y : an n vector * res : an n vector of residuals * *nn = n, *pp = p * *nRes = number of re-sampling candidates to be taken * *K = number of refining steps for each candidate * *best_r = number of (refined) to be retained for full iteration * *bb = right-hand side of the S-equation (typically 1/2) * *rrhoc = tuning constant for loss function * (this should be associated with *bb) * *iipsi = indicator for type of loss function to be used * *best_betas = returning the best ... 
coefficient vectors * *best_scales = returning their associated residual scales */ int i,j,k; int n = *nn, p = *pp, nResample = *nRes; Rboolean conv = FALSE, sing = FALSE; // sing = TRUE|FALSE the final result int ipsi = *iipsi; double b = *bb, sc, worst_sc = INFI; double work0, *weights, *work; int lwork = -1, one = 1, info = 1; SETUP_SUBSAMPLE(n, p, X, 1); INIT_WLS(X, y, n, p); double *wx = (double *) Calloc(n*p, double), *wy = (double *) Calloc(n, double), *beta_cand = (double *) Calloc(p, double), *beta_ref = (double *) Calloc(p, double); for(i=0; i < *best_r; i++) best_scales[i] = INFI; int pos_worst_scale = 0; /* resampling approximation */ for(i=0; i < nResample; i++) { R_CheckUserInterrupt(); /* find a candidate */ sing = (Rboolean) // 0 |--> FALSE (= success); {1,2} |-> TRUE subsample(Xe, y, n, p, beta_cand, ind_space, idc, idr, lu, v, pivot, Dr, Dc, rowequ, colequ, 1, mts, ss, inv_tol, 1); if (sing) { for (k=0; k< *best_r; k++) best_scales[i] = -1.; goto cleanup_and_return; } /* FIXME: is_ok ?? */ /* improve the re-sampling candidate */ /* conv = FALSE : do *K refining steps */ refine_fast_s(X, wx, y, wy, weights, n, p, res, work, lwork, beta_cand, *K, &conv/* = FALSE*/, *max_k, rel_tol, trace_lev, b, rrhoc, ipsi, -1., /* -> */ beta_ref, &sc); /* FIXME: if sc ~ 0 ---> return beta_cand and be done */ if ( sum_rho_sc(res, worst_sc, n, p, rrhoc, ipsi) < b ) { int scale_iter = *max_it_scale; /* scale will be better */ sc = find_scale(res, b, rrhoc, ipsi, sc, n, p, &scale_iter, scale_tol, trace_lev >= 3); k = pos_worst_scale; best_scales[ k ] = sc; for(j=0; j < p; j++) best_betas[k][j] = beta_ref[j]; pos_worst_scale = find_max(best_scales, *best_r); worst_sc = best_scales[pos_worst_scale]; if (trace_lev >= 2) { Rprintf(" Sample[%3d]: found new candidate with scale %.7g in %d iter.\n", i, sc, scale_iter); Rprintf(" worst scale is now %.7g\n", worst_sc); } } } /* for(i ) */ cleanup_and_return: CLEANUP_SUBSAMPLE; CLEANUP_WLS; Free(wx); Free(wy); Free(beta_cand); Free(beta_ref); return sing; } /* fast_s_with_memory() */ void fast_s(double *X, double *y, int *nn, int *pp, int *nRes, int *max_it_scale, double *res, int *K, int *max_k, double rel_tol, double inv_tol, double scale_tol, int *converged, int *best_r, double *bb, double *rrhoc, int *iipsi, double *bbeta, double *sscale, int trace_lev, int mts, Rboolean ss) { /* *X = the n x p design matrix (incl. 
intercept if appropriate), * in column order as sent by R) * *y = the ( n ) response vector * *nn =: n = the length of y * *pp =: p = the number of columns in X * *nRes = number of re-sampling candidates to be taken * *K = number of refining steps for each candidate * *best_r = number of (refined) to be retained for full iteration * *converged: will become FALSE iff at least one of the best_r iterations * did not converge (in max_k steps to rel_tol precision) * *bb = right-hand side of the S-equation (typically 1/2) * *rrhoc = tuning constant for loss function * (this should be associated with *bb) * *iipsi = indicator for type of loss function to be used * *bbeta = final estimator * *sscale = associated scale estimator (or -1 when problem) */ int i,k; int n = *nn, p = *pp, nResample = *nRes, ipsi = *iipsi; double b = *bb; double sc, best_sc, aux; int lwork = -1, one = 1, info = 1; double work0, *work, *weights; /* Rprintf("fast_s %d\n", ipsi); */ SETUP_SUBSAMPLE(n, p, X, 0); // More arrays, allocated: double *wx = (double *) R_alloc(n*p, sizeof(double)), *wy = (double *) R_alloc(n, sizeof(double)), *beta_cand = (double *) Calloc(p, double), *beta_ref = (double *) Calloc(p, double), *best_scales = (double *) Calloc(*best_r, double), // matrix: **best_betas = (double **) Calloc(*best_r, double *); for(i=0; i < *best_r; i++) { best_betas[i] = (double*) Calloc(p, double); best_scales[i] = INFI; } INIT_WLS(wx, wy, n, p); /* disp_mat(x, n, p); */ int pos_worst_scale = 0; Rboolean conv = FALSE; double worst_sc = INFI; /* srand((long)*seed_rand); */ GetRNGstate(); /* resampling approximation */ if (trace_lev) Rprintf(" Subsampling to find candidate betas:\n", i); for(i=0; i < nResample; i++) { R_CheckUserInterrupt(); /* find a candidate */ Rboolean sing = (Rboolean) // 0 |--> FALSE (= success); {1,2} |-> TRUE subsample(Xe, y, n, p, beta_cand, ind_space, idc, idr, lu, v, pivot, Dr, Dc, rowequ, colequ, 1, mts, ss, inv_tol, 1); if (sing) { *sscale = -1.; goto cleanup_and_return; } if (trace_lev >= 5) { Rprintf(" Sample[%3d]: idc = ", i); disp_veci(idc, p); } /* disp_vec(beta_cand,p); */ /* improve the re-sampling candidate */ /* conv = FALSE : do *k refining steps */ refine_fast_s(X, wx, y, wy, weights, n, p, res, work, lwork, beta_cand, *K, &conv/* = FALSE*/, *max_k, rel_tol, trace_lev, b, rrhoc, ipsi, -1., /* -> */ beta_ref, &sc); if(trace_lev >= 3) { double del = norm_diff(beta_cand, beta_ref, p); Rprintf(" Sample[%3d]: after refine_(*, conv=FALSE):\n", i); Rprintf(" beta_ref : "); disp_vec(beta_ref,p); Rprintf(" with ||beta_ref - beta_cand|| = %.12g, --> sc = %.15g\n", del, sc); } if(fabs(sc) == 0.) 
{ /* exact zero set by refine_*() */ if(trace_lev >= 1) Rprintf(" Too many exact zeroes -> leaving refinement!\n"); *sscale = sc; COPY(beta_cand, bbeta, p); goto cleanup_and_return; } if ( sum_rho_sc(res, worst_sc, n, p, rrhoc, ipsi) < b ) { int scale_iter = *max_it_scale; /* scale will be better */ sc = find_scale(res, b, rrhoc, ipsi, sc, n, p, &scale_iter, scale_tol, trace_lev >= 3); k = pos_worst_scale; best_scales[ k ] = sc; COPY(beta_ref, best_betas[k], p); pos_worst_scale = find_max(best_scales, *best_r); worst_sc = best_scales[pos_worst_scale]; if (trace_lev >= 2) { Rprintf(" Sample[%3d]: found new candidate with scale %.7g in %d iter.\n", i, sc, scale_iter); Rprintf(" worst scale is now %.7g\n", worst_sc); } } } /* for(i ) */ /* now look for the very best */ if(trace_lev) Rprintf(" Now refine() to convergence for %d very best ones:\n", *best_r); best_sc = INFI; *converged = 1; k = 0; for(i=0; i < *best_r; i++) { conv = TRUE; if(trace_lev >= 4) Rprintf(" i=%d:\n", i); int it_k = refine_fast_s(X, wx, y, wy, weights, n, p, res, work, lwork, best_betas[i], *K, &conv /* = TRUE */, *max_k, rel_tol, trace_lev, b, rrhoc, ipsi, best_scales[i], /* -> */ beta_ref, &aux); if(trace_lev) Rprintf(" Best[%d]: %sconvergence (%d iter.)", i, (conv) ? "" : "NON ", it_k); if(aux < best_sc) { if(trace_lev) Rprintf(": -> improved scale to %.15g", aux); best_sc = aux; COPY(beta_ref, bbeta, p); } if(trace_lev) Rprintf("\n"); if (!conv && *converged) *converged = 0; if (k < it_k) k = it_k; } *sscale = best_sc; *max_k = k; cleanup_and_return: PutRNGstate(); CLEANUP_SUBSAMPLE; CLEANUP_WLS; Free(best_scales); Free(beta_cand); Free(beta_ref); for(i=0; i < *best_r; i++) Free(best_betas[i]); Free(best_betas); return; } /* fast_s() */ int refine_fast_s(const double X[], double *wx, const double y[], double *wy, double *weights, int n, int p, double *res, double *work, int lwork, double *beta_cand, int kk, Rboolean *conv, int max_k, double rel_tol, int trace_lev, double b, double *rrhoc, int ipsi, double initial_scale, double *beta_ref, double *scale) { /* * X = matrix (n x p) of explanatory variables * y = vector ( n ) of responses * weights = robustness weights wt[] * y[] (of length n) * res = residuals y[] - x[,] * beta (of length n) * conv: FALSE means do kk refining steps (and conv stays FALSE) * TRUE means refine until convergence(rel_tol, max_k) * and in this case, 'conv' *returns* TRUE if refinements converged * beta_cand= candidate beta[] (of length p) Input *and* Output * is = initial scale input * beta_ref = resulting beta[] (of length p) Output * scale = final scale Output * for FIT_WLS, DGELS: * wx = matrix (n x p) * wy = vector of length n * work = vector of length lwork * lwork = length of vector work */ int i,j,k, zeroes=0, one = 1, info = 1; Rboolean converged = FALSE;/* Wall */ double s0, done = 1., dmone = -1., wtmp; if (trace_lev >= 4) { Rprintf(" beta_cand before refinement : "); disp_vec(beta_cand,p); } /* calculate residuals */ COPY(y, res, n); F77_CALL(dgemv)("N", &n, &p, &dmone, X, &n, beta_cand, &one, &done, res, &one FCONE); for(j=0; j < n; j++) { if( fabs(res[j]) < EPS_SCALE ) zeroes++; } /* if "perfect fit", return it with a 0 assoc. scale */ if( zeroes > (((double)n + (double)p)/2.) ) /* <<- FIXME: depends on 'b' ! */ { COPY(beta_cand, beta_ref, p); *scale = 0.; return 0; } if( initial_scale < 0. 
) initial_scale = MAD(res, n, 0., wy, weights);// wy and weights used as work arrays s0 = initial_scale; if(*conv) kk = max_k; for(i=0; i < kk; i++) { /* one step for the scale */ s0 = s0 * sqrt( sum_rho_sc(res, s0, n, p, rrhoc, ipsi) / b ); /* compute weights for IRWLS */ get_weights_rhop(res, s0, n, rrhoc, ipsi, weights); /* solve weighted least squares problem */ COPY(y, wy, n); FIT_WLS(X, wx, wy, n, p); COPY(wy, beta_ref, p); if(*conv) { /* check for convergence */ double del = norm_diff(beta_cand, beta_ref, p); double nrmB= norm(beta_cand, p); if(trace_lev >= 4) Rprintf(" it %4d, ||b[i]||= %#.12g, ||b[i] - b[i-1]|| = %#.15g\n", i, nrmB, del); converged = (del <= rel_tol * fmax2(rel_tol, nrmB)); if(converged) break; } /* calculate residuals */ COPY(y, res, n); F77_CALL(dgemv)("N", &n, &p, &dmone, X, &n, beta_ref, &one, &done, res, &one FCONE); COPY(beta_ref, beta_cand, p); } /* for(i = 0; i < kk ) */ if(*conv) { /* was "if(0)", since default lead to 'NOT converged' */ if(!converged) { *conv = FALSE; warning(_("S refinements did not converge (to refine.tol=%g) in %d (= k.max) steps"), rel_tol, i); } } *scale = s0; return i; /* number of refinement steps */ } /* refine_fast_s() */ /* Subsampling part for M-S algorithm */ /* Recreates RLFRSTML function found in src/lmrobml.f */ /* of the robust package */ void m_s_subsample(double *X1, double *y, int n, int p1, int p2, int nResample, int max_it_scale, double rel_tol, double inv_tol, double scale_tol, double *bb, double *rrhoc, int ipsi, double *sscale, int trace_lev, double *b1, double *b2, double *t1, double *t2, double *y_tilde, double *res, double *x1, double *x2, int *NIT, int *K, int *KODE, double *SIGMA, double *BET0, double *SC1, double *SC2, double *SC3, double *SC4, int mts, Rboolean ss) { int i, one = 1, p = p1 + p2, info; double b = *bb, sc = INFI, done = 1., dmone = -1.; *sscale = INFI; if (trace_lev >= 2) Rprintf(" Starting subsampling procedure.. "); SETUP_SUBSAMPLE(n, p2, x2, 0); /* set the seed */ GetRNGstate(); if (trace_lev >= 2) Rprintf(" [setup Ok]\n"); for(i=0; i < nResample; i++) { R_CheckUserInterrupt(); /* STEP 1: Draw a subsample of size p2 from (X2, y) */ Rboolean sing = (Rboolean) // 0 |--> FALSE (= success); {1,2} |-> TRUE subsample(Xe, y, n, p2, t2, ind_space, idc, idr, lu, v, pivot, Dr, Dc, rowequ, colequ, /* sample= */ TRUE, mts, ss, inv_tol, /*solve = */ TRUE); if (sing) { *sscale = -1.; goto cleanup_and_return; } /* calculate partial residuals */ COPY(y, y_tilde, n); F77_CALL(dgemv)("N", &n, &p2, &dmone, x2, &n, t2, &one, &done, y_tilde, &one FCONE); /* STEP 3: Obtain L1-estimate of b1 */ COPY(X1, x1, n*p1); F77_CALL(rllarsbi)(x1, y_tilde, &n, &p1, &n, &n, &rel_tol, NIT, K, KODE, SIGMA, t1, res, SC1, SC2, SC3, SC4, BET0); if (*KODE > 1) { REprintf("m_s_subsample(): Problem in RLLARSBI (RILARS). KODE=%d. 
Exiting.\n", *KODE); *sscale = -1.; goto cleanup_and_return; } /* STEP 4: Check if candidate looks promising */ if (sum_rho_sc(res, *sscale, n, p, rrhoc, ipsi) < b) { int scale_iter = max_it_scale; /* scale will be better */ /* STEP 5: Solve for sc */ sc = find_scale(res, b, rrhoc, ipsi, sc, n, p, &scale_iter, scale_tol, trace_lev >= 4); if(trace_lev >= 2) Rprintf(" Sample[%3d]: new candidate with sc = %#10.5g in %d iter\n", i, sc, scale_iter); /* STEP 6: Update best fit */ *sscale = sc; COPY(t1, b1, p1); COPY(t2, b2, p2); if (sc < EPS_SCALE) { REprintf("\nScale too small\n", "Aborting m_s_subsample()\n\n"); *sscale = -1.; goto cleanup_and_return; } } } /* for(i ) */ /* STEP 7: Clean up and return */ if (trace_lev >= 1) { Rprintf(" Finished M-S subsampling with scale = %.5f\n",*sscale); #define maybe_SHOW_b1_b2 \ if (trace_lev >= 3) { \ Rprintf(" b1: "); disp_vec(b1,p1);\ Rprintf(" b2: "); disp_vec(b2,p2);\ } maybe_SHOW_b1_b2; } cleanup_and_return: CLEANUP_SUBSAMPLE; PutRNGstate(); } /* m_s_subsample() */ /* Descent step for M-S algorithm * Return value: convergence; note that convergence is *not* guaranteed */ Rboolean m_s_descent(double *X1, double *X2, double *y, int n, int p1, int p2, int K_m_s, int max_k, int max_it_scale, double rel_tol, double scale_tol, double *bb, double *rrhoc, int ipsi, double *sscale, int trace_lev, double *b1, double *b2, double *t1, double *t2, double *y_tilde, double *res, double *res2, double *x1, double *x2, int *NIT, int *K, int *KODE, double *SIGMA, double *BET0, double *SC1, double *SC2, double *SC3, double *SC4) { int j, k, nnoimpr = 0, nref = 0; int p = p1 + p2; Rboolean converged = FALSE; double b = *bb; double sc = *sscale, done = 1., dmone = -1.; int lwork = -1, one = 1, info = 1; double work0, *work, wtmp, *weights; COPY(b1, t1, p1); COPY(b2, t2, p2); COPY(res, res2, n); if (trace_lev >= 2) Rprintf(" Starting descent procedure...\n"); INIT_WLS(x2, y, n, p2); if (trace_lev >= 3) { Rprintf(" Scale: %.5f\n", *sscale); if (trace_lev >= 5) { Rprintf(" res2: "); disp_vec(res2,n); } } /* Do descent steps until there is no improvement for */ /* K_m_s steps or we are converged */ /* (convergence is not guaranteed) */ while ( (nref++ < max_k) & (!converged) & (nnoimpr < K_m_s) ) { R_CheckUserInterrupt(); /* STEP 1: update b2 (save it to t2) */ /* y_tilde = y - x1 %*% t1 */ COPY(y, y_tilde, n); COPY(X1, x1, n*p1); F77_CALL(dgemv)("N", &n, &p1, &dmone, x1, &n, t1, &one, &done, y_tilde, &one FCONE); /* compute weights */ get_weights_rhop(res2, sc, n, rrhoc, ipsi, weights); /* solve weighted least squares problem */ FIT_WLS(X2, x2, y_tilde, n, p2); COPY(y_tilde, t2, p2); /* get (intermediate) residuals */ COPY(y, res2, n); F77_CALL(dgemv)("N", &n, &p2, &dmone, X2, &n, t2, &one, &done, res2, &one FCONE); /* STEP 2: Obtain L1-estimate of b1 */ COPY(res2, y_tilde, n); F77_CALL(rllarsbi)(x1, y_tilde, &n, &p1, &n, &n, &rel_tol, NIT, K, KODE, SIGMA, t1, res2, SC1, SC2, SC3, SC4, BET0); if (*KODE > 1) { CLEANUP_WLS; error(_("m_s_descent(): Problem in RLLARSBI (RILARS). KODE=%d. Exiting."), *KODE); } /* STEP 3: Compute the scale estimate */ int scale_iter = max_it_scale; sc = find_scale(res2, b, rrhoc, ipsi, sc, n, p, &scale_iter, scale_tol, trace_lev >= 4); // <- here only if higher trace_lev /* STEP 4: Check for convergence */ /* FIXME: check convergence using scale ? 
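      (currently the test below uses the coefficients only: it declares convergence
       when the distance between the new iterate (t1,t2) and the currently best fit
       (b1,b2),  del = sqrt(||t1-b1||^2 + ||t2-b2||^2),  is smaller than
       rel_tol * max(rel_tol, ||(t1,t2)||); the scale 'sc' itself does not enter)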
*/ double del = sqrt(norm_diff2(b1, t1, p1) + norm_diff2(b2, t2, p2)); double nrmB = sqrt(norm2(t1, p1) + norm2(t2, p2)); converged = (del < rel_tol * fmax2(rel_tol, nrmB)); if (trace_lev >= 3) { if(converged) Rprintf(" -->> converged\n"); if (trace_lev >= 4) { Rprintf(" Ref.step %3d: #{no-improvements}=%3d; (del,dB)=(%12.7g,%12.7g)\n", nref, nnoimpr, del, rel_tol * fmax2(rel_tol, nrmB)); if (trace_lev >= 5) { Rprintf(" weights: "); disp_vec(weights,n); Rprintf(" t2: "); disp_vec(t2,p2); Rprintf(" t1: "); disp_vec(t1,p1); Rprintf(" res2: "); disp_vec(res2,n); } } } /* STEP 5: Update best fit */ if (sc < *sscale) { COPY(t1, b1, p1); COPY(t2, b2, p2); COPY(res2, res, n); *sscale = sc; if (trace_lev >= 2) Rprintf(" Refinement step %3d: better fit, scale: %#10.5g\n", nref, sc); nnoimpr = 0; } else { if (trace_lev >= 3) Rprintf(" Refinement step %3d: no improvement, scale: %#10.5g\n", nref, sc); nnoimpr++; } } // while(.) if ( (!converged) & (nref == max_k) ) warning(_(" M-S estimate: maximum number of refinement steps reached.")); if (trace_lev >= 1) { Rprintf(" Descent procedure: %sconverged (best scale: %.5g, last step: %.5g)\n", converged ? "" : "not ", *sscale, sc); if (nnoimpr == K_m_s) Rprintf(" The procedure stopped after %d steps because there was no improvement in the last %d steps.\n To increase this number, use the control parameter 'k.m_s'.\n", nref, nnoimpr); else if (trace_lev >= 2) Rprintf(" No improvements in %d out of %d steps.\n", nnoimpr, nref); maybe_SHOW_b1_b2; } CLEANUP_WLS; return converged; } /* m_s_descent() */ /* draw a subsample of observations and calculate a candidate * * starting value for S estimates * * uses a custom LU decomposition, which acts on the transposed design * * matrix. In case of a singular subsample, the subsample is modified * * until it is non-singular (for ss == TRUE (== 1)). * * * * Parts of the algorithm are based on the Gaxpy version of the LU * * decomposition with partial pivoting by * * Golub G. H., Van Loan C. F. (1996) - MATRIX Computations */ int subsample(const double x[], const double y[], int n, int m, double *beta, int *ind_space, int *idc, int *idr, double *lu, double *v, int *pivot, double *Dr, double *Dc, int rowequ, int colequ, Rboolean sample, int mts, Rboolean ss, double tol_inv, Rboolean solve) { /* x: design matrix (n x m) y: response vector n: length of y, nrow of x m: ncol of x ( == p ) beta: [out] candidate parameters (length m) ind_space: (required in sample_noreplace, length n) holds the index permutation idc: (required in sample_noreplace, !! length n !!) [out] index of observations used in subsample idr: work array of length m lu: [out] LU decomposition of subsample of xt (m x m) Note: U has is not rescaled by 1 / *cf, as it should, this is done R_subsample(). v: work array of length m pivot: [out] pivoting table of LU decomposition (length m-1) Dr: row equilibration (as calculated in SETUP_EQUILIBRATION) Dc: column equilibration rowequ: whether rows were equilibrated coleq: whether cols were equilibrated sample: whether to sample or not mts: the number of singular samples allowed before giving up (Max Try Samples) ss: type of subsampling to be used: 0 (FALSE): simple subsampling 1 (TRUE): nonsingular subsampling tol_inv: tolerance for declaring a matrix singular solve: solve the least squares problem on the subsample? (0: no, 1: yes) return value ('condition'): 0: success 1: singular (matrix xt does not contain a m dim. 
full rank submatrix) 2: too many singular resamples (simple subsampling case) */ int j, k, l, one = 1, mu = 0, tmpi, i = 0, attempt = 0; double tmpd; Rboolean sing; #define xt(_k_, _j_) x[idr[_k_]*n+idc[_j_]] #define U(_k_, _j_) lu[_j_*m+_k_] #define u(_k_, _j_) lu + (_j_*m+_k_) #define L(_k_, _j_) lu[_j_*m+_k_] #define l(_k_, _j_) lu + (_j_*m+_k_) Start: /* STEP 1: Calculate permutation of 1:n */ if (sample) { sample_noreplace(ind_space, n, n, idc); } else for(k=0;k 51: %x\n", fabs(v[46]) > fabs(v[50])); */ /* Rprintf("47 < 51: %x\n", fabs(v[46]) < fabs(v[50])); */ /* } */ /* continue only if pivot is large enough */ if (tmpd >= tol_inv) { pivot[j] = mu; tmpd = v[j]; v[j] = v[mu]; v[mu] = tmpd; tmpi = idr[j]; idr[j] = idr[mu]; idr[mu] = tmpi; for(k=j+1;k 0) { for(k=0;k= mts) { warning(_("Too many singular resamples. Aborting subsample().\n See parameter 'subsampling; in help of lmrob.config().")); return(2); } goto Start; } /* drop observation and try next one */ i++; } else { sing = FALSE; U(j, j) = v[j]; } } while(sing); } /* end for loop */ /* Rprintf("lu:"); disp_vec(lu, m*m); */ /* Rprintf("pivot:"); disp_veci(pivot, m-1); */ /* Rprintf("idc:"); disp_veci(idc, m); */ /* STEP 3: Solve for candidate parameters if requested */ if (solve == 0) { for(k=0;k=0;k--) { tmpd = beta[k]; beta[k] = beta[pivot[k]]; beta[pivot[k]] = tmpd; } } return(0); #undef Xt #undef U #undef u #undef L #undef l } void get_weights_rhop(const double r[], double s, int n, const double rrhoc[], int ipsi, double *w) { for(int i=0; i < n; i++) w[i] = wgt(r[i] / s, rrhoc, ipsi); } double find_scale(const double r[], double b, const double rrhoc[], int ipsi, double initial_scale, int n, int p, int* iter, // input: max_iter, output: #{iterations used} double scale_tol, Rboolean trace) { if(initial_scale <= 0.) 
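    /* find_scale() solves the M-scale equation   1/(n-p) * sum_i rho(r_i / s) = b
     * for s > 0 by the fixed-point iteration
     *     s_{k+1} = s_k * sqrt( sum_rho_sc(r, s_k, ..) / b ),
     * stopping as soon as |s_{k+1} - s_k| <= scale_tol * s_k  (see the loop below). */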
{ warning(_("find_scale(*, initial_scale = %g) -> final scale = 0"), initial_scale); return 0.; } // else double scale = initial_scale; if(trace) Rprintf("find_scale(*, ini.scale =%#15.11g):\nit | new scale\n", scale); for(int it = 0; it < iter[0]; it++) { scale = initial_scale * sqrt( sum_rho_sc(r, initial_scale, n, p, rrhoc, ipsi) / b ) ; if(trace) Rprintf("%2d | %#13.10g\n", it, scale); if(fabs(scale - initial_scale) <= scale_tol*initial_scale) { // converged: *iter = it; return(scale); } initial_scale = scale; } warning(_("find_scale() did not converge in '%s' (= %d) iterations with tol=%g, last rel.diff=%g"), "maxit.scale", /* <- name from lmrob.control() */ *iter, scale_tol, (scale - initial_scale) / initial_scale); return(scale); } // As R's which.max(a), return()ing zero-based k in {0,1,...,n-1} int find_max(double *a, int n) { int k = 0; if(n > 1) { double tt = a[0]; for(int i=1; i < n; i++) if(tt < a[i]) { tt = a[i]; k = i; } } return k; } double sum_rho_sc(const double r[], double scale, int n, int p, const double c[], int ipsi) { double s = 0; for(int i=0; i < n; i++) s += rho(r[i]/scale, c, ipsi); return(s / ((double) n - p)); } /* ||x||_2^2 */ double norm2(double *x, int n) { double s = 0.; int one = 1; s = F77_CALL(dnrm2)(&n, x, &one); return( s*s ); } /* ||x||_2 */ double norm(double *x, int n) { int one = 1; return(F77_CALL(dnrm2)(&n, x, &one)); } double norm1(double *x, int n) { int one = 1; return(F77_CALL(dasum)(&n, x, &one)); } /* ||x-y||_2^2 */ double norm_diff2(double *x, double *y, int n) { int i; double s = 0; for(i=0; i < n; i++) s += (x[i]-y[i])*(x[i]-y[i]); return( s ); } /* ||x-y||_2 */ double norm_diff(double *x, double *y, int n) { int i; double s = 0; for(i=0; i < n; i++) s += (x[i]-y[i])*(x[i]-y[i]); return( sqrt(s) ); } /* ||x-y||_1 */ double norm1_diff(double *x, double *y, int n) { int i; double s = 0; for(i=0; i < n; i++) s += fabs(x[i]-y[i]); return(s); } double MAD(double *a, int n, double center, double *b, double *tmp) { /* if center == 0 then do not center */ int i; /* if( fabs(center) > 0.) 
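       note: subtracting 'center' unconditionally is harmless, since center = 0
       gives b[i] = a[i]; the factor 1.4826 ~= 1/qnorm(3/4) used below is the
       usual consistency factor making the MAD consistent for the standard
       deviation at the normal model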
{ */ for(i=0; i < n; i++) b[i] = a[i] - center; /* } */ return( median_abs(b,n,tmp) * 1.4826 ); } double median(double *x, int n, double *aux) { double t; for(int i=0; i < n; i++) aux[i]=x[i]; if ( (n/2) == (double) n / 2 ) t = ( kthplace(aux,n,n/2) + kthplace(aux,n,n/2+1) ) / 2.0 ; else t = kthplace(aux,n, n/2+1 ) ; return(t); } double median_abs(double *x, int n, double *aux) { double t; for(int i=0; i < n; i++) aux[i]=fabs(x[i]); if ( (n/2) == (double) n / 2 ) t = ( kthplace(aux,n,n/2) + kthplace(aux,n,n/2+1) ) / 2.0 ; else t = kthplace(aux,n, n/2+1 ) ; return(t); } void disp_vec(double *a, int n) { for(int i=0; i < n; i++) Rprintf("%lf ",a[i]); Rprintf("\n"); } void disp_veci(int *a, int n) { for(int i=0; i < n; i++) Rprintf("%d ",a[i]); Rprintf("\n"); } void disp_mat(double **a, int n, int m) { for(int i=0; i < n; i++) { Rprintf("\n"); for(int j=0; j < m; j++) Rprintf("%10.8f ",a[i][j]); } Rprintf("\n"); } void R_find_D_scale(double *rr, double *kkappa, double *ttau, int *llength, double *sscale, double *cc, int *iipsi, int *ttype, double *rel_tol, int *max_k, int *converged) { /* compute D_scale using iterative algorithm type: 1: d1 2: d2 3: dt1 4: dt2 */ *converged = 0; for (int k=0; k < *max_k; k++) { double scale = *sscale, tsum1 = 0, tsum2 = 0; // calculate weights for(int i=0; i < *llength; i++) { double a, w = wgt(rr[i] / ttau[i] / scale, cc, *iipsi); switch(*ttype) { case 1: // d1 a = rr[i]/ttau[i]; tsum1 += a*a*w; tsum2 += w; break; case 2: // d2 a = rr[i]/ttau[i]*w; tsum1 += a*a; tsum2 += w*w; break; default: case 3: // dt1 tsum1 += rr[i]*rr[i]*w; tsum2 += w*ttau[i]*ttau[i]; break; case 4: // dt2 a = rr[i]*w; tsum1 += a*a; a = ttau[i]*w; tsum2 += a*a; break; }; } *sscale = sqrt(tsum1 / tsum2 / *kkappa); // Rprintf("\n type = %d, scale = %10.8f \n", *ttype, *sscale); if (fabs(scale - *sscale) < *rel_tol * fmax2(*rel_tol, scale)) { *converged = 1; break; } } } /* specialized function calc_fitted */ /* calculates fitted values from simulation output array. 
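   Array layout (column-major, fastest-varying index first), as read off the index
   arithmetic in the loops below:
     bbeta[ nrep, p, nproc, nerr ],   XX[ n, p, nrep, nerr ],
     RR   [ n, nrep, nproc, nerr ],
   where n = *nn, p = *pp, nrep = *nnrep, nproc = *nnproc, nerr = *nnerr.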
*/ /* this is used to process simulation output in the */ /* lmrob_simulation vignette */ void R_calc_fitted(double *XX, double *bbeta, double *RR, int *nn, int *pp, int *nnrep, int *nnproc, int *nnerr) { unsigned long A, B, C, D, E; A = (unsigned long)*nnerr; B = (unsigned long)*nnproc; C = (unsigned long)*nnrep; D = (unsigned long)*nn; E = (unsigned long)*pp; // calculate fitted values over errstr, procstr and replicates for(unsigned long a = 0; a < A; a++) { // errstr for(unsigned long b = 0; b < B; b++) { // procstr for(unsigned long c = 0; c < C; c++) { // replicates // check for NAs if (!ISNA(bbeta[c + /* 0*C + */ b*C*E + a*B*E*C])) { for(unsigned long d = 0; d < D; d++) { // observations RR[d + c*D + b*C*D + a*B*C*D] = 0; // initialize result for(unsigned long e = 0; e < E; e++) { // predictors RR[d + c*D + b*C*D + a*B*C*D] += bbeta[c + e*C + b*C*E + a*B*E*C] * XX[d + e*D + c*E*D + a*C*E*D]; } } } } } } } robustbase/src/templates-types.h0000644000176200001440000000212612441664610016530 0ustar liggesusers/* * Sets type-specific macros */ #define CONCAT(x,y) x ##_## y #define CONCAT3(x,y,z) x ##_## y ##_## z #define CONCAT_MACROS(x,y) CONCAT(x,y) #define CONCAT_3MACROS(x,y,z) CONCAT3(x,y,z) /* Data type macros for argument 'x' */ #if X_TYPE == 'i' # ifndef METHOD_NAME # define METHOD_NAME CONCAT_MACROS(METHOD, Integer) # define C_METHOD_NAME CONCAT_3MACROS(C, METHOD, Integer) # endif #define X_C_TYPE int #define X_IN_C INTEGER #define X_ISNAN(x) (x == NA_INTEGER) #elif X_TYPE == 'r' # ifndef METHOD_NAME # define METHOD_NAME CONCAT_MACROS(METHOD, Real) # define C_METHOD_NAME CONCAT_3MACROS(C, METHOD, Real) # endif #define X_C_TYPE double #define X_IN_C REAL #define X_ISNAN(x) ISNAN(x) #endif /* Data type macros for result ('ans') */ #ifndef ANS_TYPE /* Default to same as 'x' */ #define ANS_TYPE X_TYPE #endif #if ANS_TYPE == 'i' #define ANS_SXP INTSXP #define ANS_NA NA_INTEGER #define ANS_C_TYPE int #define ANS_IN_C INTEGER #elif ANS_TYPE == 'r' #define ANS_SXP REALSXP #define ANS_NA NA_REAL #define ANS_C_TYPE double #define ANS_IN_C REAL #endif robustbase/src/rowMedians_TYPE-template.h0000644000176200001440000001437714100267120020152 0ustar liggesusers/* included 2 x from ./rowMedians.c ~~~~~~~~~~~~ *********************************************************************** TEMPLATE: SEXP rowMedians_(...) GENERATES: SEXP rowMedians_Integer(SEXP x, int nrow, int ncol, int narm, int hasna, int byrow) SEXP rowMedians_Real(SEXP x, int nrow, int ncol, int narm, int hasna, int byrow) Arguments: The following macros ("arguments") should be defined for the template to work as intended. - METHOD: the name of the resulting function - X_TYPE: 'i' or 'r' Authors: Adopted from rowQuantiles.c by R. Gentleman. Template by Henrik Bengtsson. Copyright: Martin Maechler 2014-2021; Henrik Bengtsson, 2007-2013. ***********************************************************************/ #include #include #include /* Expand arguments: X_TYPE => (X_C_TYPE, X_IN_C, X_ISNAN, [METHOD_NAME]) */ #include "templates-types.h" #if X_TYPE == 'i' #define PSORT iPsort #elif X_TYPE == 'r' #define PSORT rPsort #endif SEXP METHOD_NAME(SEXP x, int nrow, int ncol, int narm, int hasna, int byrow) { /* R allocate a double vector of length 'nrow' Note that 'nrow' means 'ncol' if byrow=FALSE. 
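     This SEXP wrapper only allocates the result and delegates to the SEXP-free
     worker C_METHOD_NAME() below, which can also be called from pure C.  Each
     row median is obtained by partial sorting (PSORT = iPsort / rPsort): the
     upper middle element is moved into place and, for an even number of
     (non-missing) values, the lower middle one as well, the median then being
     the average of the two.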
*/ SEXP ans = PROTECT(allocVector(REALSXP, nrow)); C_METHOD_NAME(X_IN_C(x), REAL(ans), nrow, ncol, narm, hasna, byrow); UNPROTECT(1); return(ans); } // "C-only" method [SEXP free, can be called from "pure C" : void C_METHOD_NAME(X_C_TYPE *x, double *res, int nrow, int ncol, int narm, int hasna, int byrow) { Rboolean isOdd; int ii, jj, kk, qq; int *colOffset; X_C_TYPE value, /* R allocate memory for the 'rowData'. This will be taken care of by the R garbage collector later on. */ *rowData = (X_C_TYPE *) R_alloc(ncol, sizeof(X_C_TYPE)); if (!hasna) // If there are no missing values, don't try to remove them narm = FALSE; /* When narm is false, isOdd and qq are the same for all rows */ if (!narm) { isOdd = (ncol % 2 == 1); qq = (int)(ncol/2) - 1; } else { isOdd = FALSE; qq = 0; } value = 0; /* Pre-calculate the column offsets */ colOffset = (int *) R_alloc(ncol, sizeof(int)); // HJ begin if (byrow) { for(jj=0; jj < ncol; jj++) colOffset[jj] = (int)jj*nrow; } else { for(jj=0; jj < ncol; jj++) colOffset[jj] = jj; } // HJ end if (hasna) { for(ii=0; ii < nrow; ii++) { if(ii % 1000 == 0) R_CheckUserInterrupt(); int rowIdx = byrow ? ii : ncol*ii; //HJ kk = 0; /* The index of the last non-NA value detected */ for(jj=0; jj < ncol; jj++) { value = x[rowIdx+colOffset[jj]]; //HJ if (X_ISNAN(value)) { if (!narm) { kk = -1; break; } } else { rowData[kk] = value; kk = kk + 1; } } if (kk == 0) { res[ii] = R_NaN; } else if (kk == -1) { res[ii] = R_NaReal; } else { /* When narm is true, isOdd and qq may change with row */ if (narm) { isOdd = (kk % 2 == 1); qq = (int)(kk/2) - 1; } /* Permute x[0:kk-1] so that x[qq] is in the correct place with smaller values to the left, ... */ PSORT(rowData, kk, qq+1); value = rowData[qq+1]; if (isOdd) { res[ii] = (double)value; } else { if (narm || !X_ISNAN(value)) { /* Permute x[0:qq-2] so that x[qq-1] is in the correct place with smaller values to the left, ... */ PSORT(rowData, qq+1, qq); if (X_ISNAN(rowData[qq])) res[ii] = R_NaReal; else res[ii] = ((double)(rowData[qq] + value))/2; } else { res[ii] = (double)value; } } } } // for(..) } else { // no NAs for(ii=0; ii < nrow; ii++) { if(ii % 1000 == 0) R_CheckUserInterrupt(); int rowIdx = byrow ? ii : ncol*ii; //HJ for(jj=0; jj < ncol; jj++) rowData[jj] = x[rowIdx+colOffset[jj]]; //HJ /* Permute x[0:ncol-1] so that x[qq] is in the correct place with smaller values to the left, ... */ PSORT(rowData, ncol, qq+1); value = rowData[qq+1]; if (isOdd) { res[ii] = (double)value; } else { /* Permute x[0:qq-2] so that x[qq-1] is in the correct place with smaller values to the left, ... */ PSORT(rowData, qq+1, qq); res[ii] = (double)((rowData[qq] + value))/2; } } // for(..) } /* if (hasna ...) */ } /* Undo template macros */ #undef PSORT #include "templates-types_undef.h" /*************************************************************************** HISTORY: 2014-12-09 [MMaechler] o do not use '== TRUE' '== FALSE' -- as we have no NA here o resolve REAL(ans) outside for(ii ..) o add "SEXP-free" C routines, others can call: C_rowMedians_(Real|Integer) 2013-04-23 [HB] o BUG FIX: The integer template of rowMedians_() would not handle ties properly. This was because ties were calculated as '(double)((rowData[qq] + value)/2)' instead of '((double)(rowData[qq] + value))/2'. 2013-01-13 [HB] o Merged rowMedians_Integer() and rowMedians_Read() into template rowMedians_(). 2013-01-13 [HB] o Using internal arguments 'by_row' instead of 'by_column'. 2011-12-11 [HB] o BUG FIX: rowMediansReal(..., na.rm=TRUE) did not handle NaN:s, only NA:s. 
Note that NaN:s does not exist for integers. 2011-10-12 [HJ] o Added colMedians(). o Now rowMediansInteger/Real() can operate also by columns, cf. argument 'by_column'. 2007-08-14 [HB] o Added checks for user interrupts every 1000 line. o Added argument 'hasNA' to rowMedians(). 2005-12-07 [HB] o BUG FIX: When calculating the median of an even number (non-NA) values, the length of the second sort was one element too short, which made the method to freeze, i.e. rPsort(rowData, qq, qq) is now (...qq+1, qq). 2005-11-24 [HB] o By implementing a special version for integers, there is no need to coerce to double in R, which would take up twice the amount of memory. o rowMedians() now handles NAs too. o Adopted from rowQuantiles.c in Biobase of Bioconductor. **************************************************************************/ robustbase/src/rfltsreg.f0000644000176200001440000011702013635407354015224 0ustar liggesuserscccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc cc rrcov : Scalable Robust Estimators with High Breakdown Point cc cc This program is free software; you can redistribute it and/or modify cc it under the terms of the GNU General Public License as published by cc the Free Software Foundation; either version 2 of the License, or cc (at your option) any later version. cc cc This program is distributed in the hope that it will be useful, cc but WITHOUT ANY WARRANTY; without even the implied warranty of cc MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the cc GNU General Public License for more details. cc cc You should have received a copy of the GNU General Public License cc along with this program; if not, write to the Free Software cc Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA cc cc I would like to thank Peter Rousseeuw and Katrien van Driessen for cc providing the initial code of this function. cc cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc subroutine rfltsreg(dat,n,nvar, nhalff, krep, inbest, objfct, * intercept,intadjust, nvad, datt, weights, * temp, index1,index2, aw2,aw, residu, y, nmahad, ndist, * am,am2, slutn, * jmiss, xmed,xmad, a,da, h,hvec, * c, cstock, mstock, c1stock, m1stock, * dath, sd, means, bmeans, i_trace) c implicit none c integer n ! n = sample size * , nvad ! nvad = p+1 * , nvar ! nvar = p * , nhalff ! nhalff = 'quan' = quan.f = h.alpha.n(alpha, n, rk) which is ! = (n + p + 1) %/% 2 when alpha= 1/2 double precision dat(n, nvad) ! dat = cbind(x,y) hence n x (p+1) integer krep ! krep = nsamp (e.g. = 5000 for "best") c krep := the total number of trial subsamples to be drawn when n exceeds 2*nmini; c krep = 0 :<==> "exact" <==> all possible subsamples integer inbest(nhalff) double precision objfct integer intercept, intadjust double precision datt(n,nvad) double precision weights(n) integer temp(n) integer index1(n) integer index2(n) double precision aw2(n),aw(n) double precision residu(n) double precision y(n) double precision nmahad(n) double precision ndist(n) double precision am(n), am2(n), slutn(n) integer jmiss(nvad) double precision xmed(nvad) double precision xmad(nvad) double precision a(nvad), da(nvad) double precision h(nvar,nvad), hvec(nvar*nvad) double precision c(nvar,nvad) double precision cstock(10, nvar*nvar) double precision mstock(10, nvar) c parameters for array sizes : integer km10 ! := 10*kmini * , kmini * , nmini * , nmaxi ! 
:= nmini*kmini parameter (kmini = 5) parameter (km10 = 10*kmini) parameter (nmini = 300) parameter (nmaxi = nmini*kmini) double precision c1stock(km10, nvar*nvar) double precision m1stock(km10, nvar) double precision dath(nmaxi, nvad) double precision sd(nvar) double precision means(nvar) double precision bmeans(nvar) integer i_trace c --------------------- end {arguments} ---------------------- integer k1,k2,k3, maxmini integer i_aux(4) ! just for printing when i_trace > 0 c ccc parameter (nvmax=115) ccc parameter (nmax=57000) cc parameter (k1 = 2 ) parameter (k2 = 2 ) parameter (k3 = 100) ccc parameter (nvmax1=nvmax+1) ccc parameter (nvmax2=nvmax*nvmax) ccc parameter (nvm11=nvmax*(nvmax+1)) C-- VT parameter (maxmini=int((3*nmini-1)/2)+1) parameter (maxmini=450) cc integer matz ! , iseed, seed * , tottimes, step * , pnsel, replow integer i,ii,iii, j,jj,jjj, jndex, k,kk, lll, m,mm, nn integer jmin,jmax, jerd,jnc, jreg, kstep,kount c unused integer jdefaul, jbreak integer minigr integer nfac,nerr, ngroup, nhalf,nlen,nmax,nmore, nmore2, nquant integer nvmax1, nvm11, nvmax, nsel, nstop, nrep double precision bstd, dist2, eps, factor, object double precision fckw, fckwi, fckw1, percen double precision MADeps logical all,part,fine,final,rfodd,more1,more2 c unused integer rfnbreak integer rfncomb integer flag(km10) integer mini(kmini) integer subdat(2,nmaxi) integer subndex(maxmini) double precision faclts(11) double precision mcdndex(10,2,kmini) c Function double precision rffindq data faclts/2.6477,2.5092,2.3826,2.2662,2.1587, * 2.0589,1.9660,1.879,1.7973,1.7203,1.6473/ nrep = krep if(i_trace .ge. 2) then i_aux(1) = nrep call intpr('Entering rfltsreg() - krep: ',-1, i_aux, 1) endif call rndstart C -------- == GetRNGstate() in C CCCC 10.10.2005 - substitute the parameters nmax and nvmax nmax = n nvmax = nvar nvmax1 = nvmax+1 nvm11 = nvmax*(nvmax+1) if(nvar .lt.5 ) then eps=1.0D-12 else if(nvar .ge. 5 .and. nvar .le. 8) then eps=1.0D-14 else eps=1.0D-16 endif endif c Tolerance for rfstatis(): |MAD| < MADeps : <==> "problem" MADeps=1.0D-6 cc nhalff=int((n+nvar+1)/2) jmin=(n/2)+1 jmax = max((3*n/4)+(nvar+1)/4, nhalff) nquant=min(11, 1+ nint(40*(dble(nhalff)/n - 0.5))) factor=faclts(nquant) c unused jbreak=rfnbreak(nhalff,n,nvar) c unused jdefaul=(n+nvar+1)/2 percen = (1.D0*nhalff)/(1.D0*n) if(nvad.eq.1) goto 9000 cc CDDD CALL INTPR('>>> Enter RFLTSREG ... iseed=',-1,iseed,1) c seed=iseed matz=1 nsel=nvar ngroup=1 part=.false. fine=.false. final=.false. all=.true. do 21,i=1,nmaxi subdat(1,i)=1000000 subdat(2,i)=1000000 21 continue cc mini(1)=0 mini(2)=0 mini(3)=0 mini(4)=0 mini(5)=0 if(krep.gt.0 .and. n.gt.(2*nmini-1)) then kstep=k1 part=.true. ngroup=int(n/dble(nmini)) if(n.ge.(2*nmini) .and. n.le.(3*nmini-1)) then if(rfodd(n)) then mini(1)=int(n/2) mini(2)=int(n/2)+1 else mini(1)=n/2 mini(2)=n/2 endif else if(n.ge.(3*nmini) .and. n.le.(4*nmini-1)) then if(3*(n/3) .eq. n) then mini(1)=n/3 mini(2)=n/3 mini(3)=n/3 else mini(1)=int(n/3) mini(2)=int(n/3)+1 if(3*(n/3) .eq. n-1) then mini(3)=int(n/3) else mini(3)=int(n/3)+1 endif endif else if(n.ge.(4*nmini) .and. n.le.(5*nmini-1)) then if(4*(n/4) .eq. n) then mini(1)=n/4 mini(2)=n/4 mini(3)=n/4 mini(4)=n/4 else mini(1)=int(n/4) mini(2)=int(n/4)+1 if(4*(n/4) .eq. n-1) then mini(3)=int(n/4) mini(4)=int(n/4) else if(4*(n/4) .eq. 
n-2) then mini(3)=int(n/4)+1 mini(4)=int(n/4) else mini(3)=int(n/4)+1 mini(4)=int(n/4)+1 endif endif endif else mini(1)=nmini mini(2)=nmini mini(3)=nmini mini(4)=nmini mini(5)=nmini endif nhalf=int(mini(1)*percen) if(ngroup.gt.kmini) ngroup=kmini nrep=int(dble(krep)/ngroup) minigr=mini(1)+mini(2)+mini(3)+mini(4)+mini(5) if(i_trace .ge. 2) + call intpr(' rftls.... minigr=',-1,[minigr],1) call rfrdraw(subdat,n,minigr,mini,ngroup,kmini) else c krep == 0 or n <= 2*nmini-1 ( = 599 by default) minigr=n nhalf=nhalff kstep=k1 C VT::25.11.2010 - added krep==0 means "exact" (all combinations) if(krep.eq.0 .or. n.le.replow(nsel)) then c use all combinations; happens iff nsel = nvar = p <= 6 nrep=rfncomb(nsel,n) if(i_trace .ge. 2) call prallc(nrep) else nrep = krep all=.false. endif endif c seed=iseed cc CDDD CALL INTPR('>>> Start initialization ... nrep=',-1,nrep,1) do 31, j=1,nvmax do k=1,10 mstock(k,j)=1000000.D0 do kk=1,kmini m1stock((kk-1)*10+k,j)=1000000.D0 end do do i=1,nvmax do kk=1,kmini c1stock((kk-1)*10+k,(j-1)*nvmax+i)=1000000.D0 end do cstock(k,(j-1)*nvmax+i)=1000000.D0 end do end do means(j)=0.D0 bmeans(j)=0.D0 sd(j)=0.D0 do k=1,nvmax1 c(j,k)=0.D0 h(j,k)=0.D0 end do 31 continue do 41, j=1,nmax nmahad(j)=0.D0 ndist(j)=0.D0 index1(j)=1000000 index2(j)=1000000 temp(j)=1000000 weights(j)=0.D0 aw(j)=0.D0 aw2(j)=0.D0 residu(j)=0.D0 y(j)=0.D0 am(j)=0.D0 am2(j)=0.D0 slutn(j)=0.D0 41 continue do j=1,km10 flag(j)=1 end do do 45, j=1,nvmax1 jmiss(j)=0 xmed(j)=0.D0 xmad(j)=0.D0 a(j)=0.D0 da(j)=0.D0 do k=1,nmaxi dath(k,j)=0.D0 end do 45 continue do j=1,maxmini subndex(j)=0 end do do j=1,nvm11 hvec(j)=0.D0 end do if(i_trace .ge. 2) + call intpr(' rftls.... initialization ready',-1,[0],1) 9000 continue if(nvad.eq.1) then do jj=1,n ndist(jj)=dat(jj,1) end do call rfshsort(ndist,n) call rfmcduni(ndist,n,nhalff,slutn,bstd,am,am2,factor, * n-nhalff+1) goto 9999 endif cc if(.not.fine .and. .not.final) then call rfstatis(dat,xmed,xmad,aw2,intercept,nvad, nvmax1,nmax,n, * nstop,MADeps,weights,y,nvar,index2) if(nstop.eq.1) goto 9999 endif cc jreg=1 call rflsreg(nvmax1, nvmax,nvar,n,a,dat, weights, da, h, * fckw,hvec,nvm11,jmiss,nvad,n) cc nfac=nvad-1 nfac=nvar-1 call rfrtran(nvar,intercept,nfac,nvad,nvmax1,xmed, * xmad,a,nvad,fckw) call rftrc(h,da,nvmax,nvmax1,nvar,intercept,nfac,nvad, * xmed,xmad) jerd=0 tottimes=0 c---- - - - - - - - - Outermost loop - - - - - - - - - - - - - - - - - - - c---- 5555 object=10.D25 if(.not. part .or. final) nn=n if(part .and. fine .and. .not. final) nn=minigr if(fine.or.(.not.part.and.final)) then nrep=10 nsel=nhalf kstep=k2 if (final) then nhalf=nhalff ngroup=1 if (n*nvar .le.100000) then kstep=k3 else if (n*nvar .gt.100000 .and. n*nvar .le.200000) then kstep=10 else if (n*nvar .gt.200000 .and. n*nvar .le.300000) then kstep=9 else if (n*nvar .gt.300000 .and. n*nvar .le.400000) then kstep=8 else if (n*nvar .gt.400000 .and. n*nvar .le.500000) then kstep=7 else if (n*nvar .gt.500000 .and. n*nvar .le.600000) then kstep=6 else if (n*nvar .gt.600000 .and. n*nvar .le.700000) then kstep=5 else if (n*nvar .gt.700000 .and. n*nvar .le.800000) then kstep=4 else if (n*nvar .gt.800000 .and. n*nvar .le.900000) then kstep=3 else if (n*nvar .gt.900000 .and. n*nvar .le.1000000) then kstep=2 else kstep=1 endif if (n .gt. 5000) nrep=1 else nhalf= int(minigr*percen) endif endif if(i_trace .ge. 2) then i_aux(1) = nrep i_aux(2) = kstep i_aux(3) = nhalf call intpr('Main (number of trials nrep, kstep, nhalf):', + -1, i_aux, 3) endif do i=1,nsel-1 index1(i)=i end do index1(nsel)=nsel-1 cc if(.not. 
final) then do i=1,10 do j=1,ngroup mcdndex(i,1,j)=10.D25 mcdndex(i,2,j)=10.D25 end do end do endif if (fine .and. .not. final) then do j=1,minigr do k=1,nvad dath(j,k)=dat(subdat(1,j),k) end do end do endif kount=0 CDDD CALL INTPR('>>> MAIN LOOP BY GROUPS: NGROUP= ',-1,ngroup,1) do 1111 ii = 1,ngroup if(i_trace .ge. 3) + call intpr(' rftls.... looping by group ii=',-1,[ii],1) if(.not.fine) kount=0 if(part .and. .not. fine) nn=mini(ii) do i=1,nn index2(i)=i end do if(part .and. .not. fine) then jndex=0 do j=1,minigr if(subdat(2,j).eq.ii) then jndex=jndex+1 subndex(jndex)=subdat(1,j) endif end do do j=1,mini(ii) do k=1,nvad dath(j,k)=dat(subndex(j),k) end do end do endif do 1000 i=1,nrep if(i_trace .ge. 4) + call intpr(' rftls.... for(i = 1,nrep): i=',-1,[i],1) pnsel=nsel tottimes=tottimes+1 fckwi=0.D0 fckw1=0.D0 step=0 132 if((part.and..not.fine).or.(.not.part.and..not.final)) then if(part) then call rfrangen(mini(ii),nsel,index1) else if(all) then call rfgenpn(n,nsel,index1) else call rfrangen(n,nsel,index1) endif endif endif c 9550 continue if(.not.fine .and. part) then do j=1,pnsel do m=1,nvad c(j,m)=dath(index1(j),m) end do end do else if(.not.part .and. .not.final) then do j=1,pnsel do m=1,nvad c(j,m)=dat(index1(j),m) end do end do endif if((.not.part.and..not.final).or.(.not.fine.and.part)) then if(nvar.gt.1) then call rfequat(c,nvmax,nvmax1,hvec,nvm11,nvar,1,nerr) if(nerr.lt.0) then jerd=jerd+1 if(.not.all .and. i.gt.2) goto 132 goto 1000 endif else if(c(1,1).ne.0.D0) c(1,1)=c(1,2)/c(1,1) endif do jnc=1,nvar a(jnc)=c(jnc,1) end do endif if (final) then if(mstock(i,1).ne.1000000.D0) then do jj=1,nvar a(jj)=mstock(i,jj) end do else goto 1111 endif endif if (fine.and..not.final) then if(m1stock((ii-1)*10+i,1).ne.1000000.D0) then do jj=1,nvar a(jj)=m1stock((ii-1)*10+i,jj) end do else goto 1111 endif endif do jnc=1,nn residu(jnc)=0.D0 do j=1,nvar if(part.and..not.final) then residu(jnc)=residu(jnc)+a(j)*dath(jnc,j) else residu(jnc)=residu(jnc)+a(j)*dat(jnc,j) endif end do if(part.and..not.final) then residu(jnc)=dath(jnc,nvad)-residu(jnc) else residu(jnc)=dat(jnc,nvad)-residu(jnc) endif aw(jnc)=residu(jnc) end do more1=.false. more2=.false. nmore=200 nmore2=nmore/2 if(intadjust.eq.1) then CDDD CALL INTPR('>>> INTERCEPT ADJUSTMENT 1',-1,i,1) if(intercept.eq.1.and.((.not.fine.and.part).or. * .not.part.or.((nn-nhalf).le.nmore))) then call rfshsort(aw,nn) call rfmcduni(aw,nn,nhalf,slutn,bstd,am,am2, * factor,nn-nhalf+1) a(nvar)=a(nvar)+slutn(1) do jnc=1,nn residu(jnc)=residu(jnc)-slutn(1) end do else if(intercept.eq.1) then call rfshsort(aw,nn) do jj=1,nn am2(jj)=abs(aw(jj)) end do dist2=rffindq(am2,nn,nhalf,index1) do jj=1,nhalf aw2(jj)=aw(index1(jj)) end do dist2=rffindq(aw2,nhalf,1,index2) jnc=index1(index2(1)) if(jnc+nmore-nmore2+nhalf-1.gt.nn.or.jnc-nmore2.lt.1) * then call rfmcduni(aw,nn,nhalf,slutn,bstd,am,am2, * factor,nn-nhalf+1) a(nvar)=a(nvar)+slutn(1) do jnc=1,nn residu(jnc)=residu(jnc)-slutn(1) end do else 555 do jj=0,nhalf-1+nmore aw2(jj+1)=aw(jnc-nmore2+jj) end do nlen=nmore+1 call rfmcduni(aw2,nhalf+nmore,nhalf,slutn, * bstd,am,am2,factor,nlen) if(nlen.eq.1.and..not.more1) then if(.not.more2) then nmore=nmore2 nmore2=nmore2+nmore2 more1=.true. if(jnc-nmore2.ge.1) goto 555 endif else if(nlen.eq.(nmore+1).and..not.more2) then if(.not.more1) then nmore=nmore2 nmore2=-nmore2 more2=.true. 
if(jnc+nmore-nmore2+nhalf-1.le.nn) * goto 555 endif else if(nlen.eq.1.and.more1) then if(.not.more2) then nmore2=nmore2+100 if(jnc-nmore2.ge.1) goto 555 endif else if(nlen.eq.(nmore+1).and.more2) then if(.not.more1) then nmore2=nmore2+100 if(jnc+nmore-nmore2+nhalf-1.le.nn) goto 555 endif endif a(nvar)=a(nvar)+slutn(1) do jnc=1,nn residu(jnc)=residu(jnc)-slutn(1) end do endif endif endif do jnc=1,nn residu(jnc)=abs(residu(jnc)) end do dist2=rffindq(residu,nn,nhalf,index2) c 9555 do step=1,kstep tottimes=tottimes+1 do j=1,nhalf temp(j)=index2(j) end do call rfishsort(temp,nhalf) do j=1,nhalf if(.not.part.or.final) then do mm=1,nvad datt(j,mm)=dat(temp(j),mm) end do else do mm=1,nvad datt(j,mm)=dath(temp(j),mm) end do endif end do call rflsreg(nvmax1, nvmax,nvar,n,a,datt, weights, da, h, * fckw,hvec,nvm11,jmiss,nvad,nn) do jnc=1,nn residu(jnc)=0.D0 do j=1,nvar if(part.and..not.final) then residu(jnc)=residu(jnc)+a(j)*dath(jnc,j) else residu(jnc)=residu(jnc)+a(j)*dat(jnc,j) endif end do if(part.and..not.final) then residu(jnc)=dath(jnc,nvad)-residu(jnc) else residu(jnc)=dat(jnc,nvad)-residu(jnc) endif aw(jnc)=residu(jnc) end do more1=.false. more2=.false. nmore=200 nmore2=nmore/2 if(intadjust.eq.1) then CDDD CALL INTPR('>>> INTERCEPT ADJUSTMENT 2',-1,step,1) if(intercept .eq. 1 .and. ((.not.fine.and.part) .or. * .not.part.or.((nn-nhalf).le.nmore))) then call rfshsort(aw,nn) call rfmcduni(aw,nn,nhalf,slutn,bstd,am,am2, * factor,nn-nhalf+1) a(nvar)=a(nvar)+slutn(1) do jnc=1,nn residu(jnc)=residu(jnc)-slutn(1) end do else if(intercept.eq.1) then call rfshsort(aw,nn) do jj=1,nn am2(jj)=abs(aw(jj)) end do dist2=rffindq(am2,nn,nhalf,index1) do jj=1,nhalf aw2(jj)=aw(index1(jj)) end do dist2=rffindq(aw2,nhalf,1,index2) jnc=index1(index2(1)) if(jnc+nmore-nmore2+nhalf-1.gt.nn.or.jnc-nmore2.lt.1) * then call rfmcduni(aw,nn,nhalf,slutn,bstd,am,am2, * factor,nn-nhalf+1) a(nvar)=a(nvar)+slutn(1) do jnc=1,nn residu(jnc)=residu(jnc)-slutn(1) end do else c--- repeat { .... 666 continue do jj=0,nhalf-1+nmore aw2(jj+1)=aw(jnc-nmore2+jj) end do nlen=nmore+1 call rfmcduni(aw2,nhalf+nmore,nhalf,slutn,bstd, * am,am2,factor,nlen) if(nlen.eq.1.and..not.more1) then if(.not.more2) then nmore=nmore2 nmore2=nmore2+nmore2 more1=.true. if(jnc-nmore2.ge.1) goto 666 endif else if(nlen.eq.(nmore+1).and..not.more2) then if(.not.more1) then nmore=nmore2 nmore2=-nmore2 more2=.true. if(jnc+nmore-nmore2+nhalf-1.le.nn) goto 666 endif else if(nlen.eq.1.and.more1) then if(.not.more2) then nmore2=nmore2+100 if(jnc-nmore2.ge.1) goto 666 endif else if(nlen.eq.(nmore+1).and.more2) then if(.not.more1) then nmore2=nmore2+100 if(jnc+nmore-nmore2+nhalf-1.le.nn) goto 666 endif endif c } (end repeat) c update a[nvar] a(nvar)=a(nvar)+slutn(1) do jnc=1,nn residu(jnc)=residu(jnc)-slutn(1) end do endif endif endif do jnc=1,nn residu(jnc)=abs(residu(jnc)) end do dist2=rffindq(residu,nn,nhalf,index2) fckw=0.D0 do jnc=1,nhalf fckw=fckw+residu(jnc)**2 end do if(step.ge.2 .and. fckw.eq.fckw1) exit ! break {step loop} fckw1=fckwi fckwi=fckw if(final .and. ((i.eq.1.and.step.eq.1 .and. .not.fine) * .or.fckw.lt.object)) then if(i_trace .ge. 3) then call dblepr('Setting objfct= ', -1, fckw,1) endif object=fckw objfct=fckw do jjj=1,nhalf inbest(jjj)=index2(jjj) end do call rfcovcopy(a,bmeans,nvar,1) endif end do ! step in 1..kstep c 5000 if(.not. final) then if(part .and. .not. fine) then iii=ii else iii=1 cc At the end of the algorithm, only the ten cc best solutions need to be stored. 
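cc Bookkeeping used below: while subsampling within group iii, the coefficient
cc vectors of the ten best candidates are kept in m1stock(,) together with
cc their objective values (and trial numbers) in mcdndex(,,iii); without
cc partitioning, and in the pooled "fine" stage, mstock(,) is used instead.
cc These stored candidates are the ones that are refined further in the
cc subsequent "fine" and "final" stages.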
endif if( flag((iii-1)*10+1).eq.1) then lll=1 else lll=2 endif do j = lll,10 if (fckw .le. mcdndex(j,2,iii)) then if(fckw.ne.mcdndex(j,2,iii)) then if(.not.fine.and.part) goto 203 goto 205 else do k=j,10 if(fckw.eq.mcdndex(k,2,iii)) then do jjj=1,nvar if(part.and..not.fine) then if(a(jjj).ne.m1stock((iii-1)*10+ * k,jjj)) then goto 203 endif else if(a(jjj).ne.mstock(k,jjj)) goto 205 endif end do endif end do endif exit ! j-loop c ---- cvvv using m1stock[,] 203 do k=10,j+1,-1 do kk=1,nvar m1stock((iii-1)*10+k,kk)= * m1stock((iii-1)*10+k-1,kk) end do mcdndex(k,1,iii)=mcdndex(k-1,1,iii) mcdndex(k,2,iii)=mcdndex(k-1,2,iii) end do do kk=1,nvar m1stock((iii-1)*10+j,kk)=a(kk) end do mcdndex(j,1,iii)=i mcdndex(j,2,iii)=fckw exit ! j-loop c ---- cvvv using mstock[,] 205 do k=10,j+1,-1 do kk=1,nvar mstock(k,kk)= mstock(k-1,kk) end do mcdndex(k,1,iii)=mcdndex(k-1,1,iii) mcdndex(k,2,iii)=mcdndex(k-1,2,iii) end do do kk=1,nvar mstock(j,kk)=a(kk) end do mcdndex(j,1,iii)=i mcdndex(j,2,iii)=fckw exit ! j-loop c ---- endif end do ! j-loop endif 1000 continue c..... end for( i = 1..nrep ) 1111 continue c---- -------- end for( ii = 1..ngroup ) cc if(part .and. .not. fine) then fine=.true. goto 5555 endif if(.not. final .and. (.not.part .or. fine)) then final=.true. goto 5555 endif 9999 continue call rndend C ------ == PutRNGstate() in C return end ccccc end {rfltsreg} =================================================== subroutine rfstatis(x, xmed,xmad * , aw2 * , intercept * , nvad * , nvmax1 * , nmax * , n * , nstop ! nstop=0: success; =1 : "problem": mad ~= 0 * , MADeps * , weights * , y * , nvar * , index2) cc implicit none integer intercept, nvad, nvmax1, nmax, n, nstop double precision x(n, nvad), xmed(nvmax1), xmad(nvmax1) double precision aw2(nmax) double precision MADeps, weights(nmax), y(nmax) integer nvar, index2(nmax) c Var integer j,jnc c Function double precision rfamdan cc nstop=0 ! == success if (intercept.eq.0) then c regression without intercept do 50 j=1,nvad xmed(j)=0.0 do jnc=1,n aw2(jnc) = abs(x(jnc,j)) end do xmad(j)=rfamdan(nmax,aw2,n,index2)*1.4826 if(abs(xmad(j)) .le. MADeps) then xmad(j)=0.0 do jnc=1,n xmad(j)=xmad(j)+aw2(jnc) end do xmad(j)=(xmad(j)/n)*1.2533 if(abs(xmad(j)) .le. MADeps) then nstop=1 return endif endif do jnc=1,n x(jnc,j)=x(jnc,j)/xmad(j) end do 50 continue else c regression with intercept xmed(nvar)=0.D0 xmad(nvar)=1.D0 do 120 j=1,nvad if(j.eq.nvar) goto 120 do jnc=1,n aw2(jnc)=x(jnc,j) end do xmed(j)=rfamdan(nmax,aw2,n,index2) do jnc=1,n aw2(jnc)=abs(aw2(jnc)-xmed(j)) end do xmad(j)=rfamdan(nmax,aw2,n,index2)*1.4826 if(abs(xmad(j)) .le. MADeps) then xmad(j)=0.0 do jnc=1,n xmad(j)=xmad(j)+aw2(jnc) end do xmad(j)=(xmad(j)/n)*1.2533 if(dabs(xmad(j)) .le. 
MADeps) then nstop=1 return endif endif do jnc=1,n x(jnc,j)=(x(jnc,j)-xmed(j))/xmad(j) end do 120 continue endif do jnc=1,n weights(jnc)=1.0 y(jnc)=x(jnc,nvad) end do return end cc function rfamdan(nmax, aa, n ,index2) cc implicit none integer nmax, n, index2(nmax) double precision aa(n) double precision rffindq, rfamdan integer jndl cc jndl=int(n/2.0) if(mod(n,2).eq.0) then rfamdan=(rffindq(aa,n,jndl, index2)+ * rffindq(aa,n,jndl+1,index2))/2.0 else rfamdan=rffindq(aa,n,jndl+1,index2) endif return end c --- subroutine rflsreg(nvmax1, nvmax, k, n, f, x, w, da, h, 1 fckw, hvec, nvm11,jmiss,nvad,nnn) implicit none cc Arguments integer nvmax1, nvmax, k, n, nvm11, nvad double precision f(k), x(n,nvad), w(n), da(k), h(nvmax,nvmax1) 1 , fckw, hvec(nvm11) integer jmiss(nvmax1), nnn cc External Functions double precision rfqlsrg cc Var double precision dfckw, dfact, dwjnc, dyj, dfka double precision ank, anul, hda integer j,l, jnc,ka,kplus, mm cc kplus=k+1 do jnc=1,k do j=1,kplus h(jnc,j)=0.D0 end do end do anul=0.0 do 30 jnc=1,nnn call rffcn(k,f,x,jnc,n,nvad) dwjnc=dble(w(jnc)) anul=anul+w(jnc) dyj=dble(x(jnc,kplus)) do ka=1,k dfka=dble(f(ka)) h(ka,k+1)=h(ka,k+1)+dwjnc*dfka*dyj do l=1,ka h(ka,l)=h(ka,l)+dwjnc*dfka*dble(f(l)) end do end do 30 continue do j=1,k do jnc=1,j h(jnc,j)=h(j,jnc) end do end do call rfmatnv(h,nvmax,nvmax1,hvec,nvm11,k,1,jmiss) mm=k+1 fckw = rfqlsrg(k,n,nvmax1,nvmax,f,x, w,h,mm,nvad,nnn) do jnc=1,k f(jnc)=h(jnc,k+1) end do dfckw=dble(fckw) ank=anul-k dfact=dble(ank) dfact=dfckw/dfact do jnc=1,k do j=1,k h(jnc,j)=h(jnc,j)*dfact end do end do do jnc=1,k hda=h(jnc,jnc) da(jnc)=sqrt(hda) end do return end ccccc ccccc subroutine rffcn(k, f,x, jnc,n,nvad) c implicit none integer k, jnc, n, nvad double precision f(k), x(n,nvad) c integer j do j=1,k f(j)=x(jnc,j) end do return end ccccc ccccc subroutine rfmatnv(an, nvmax,nvmax1, hvec, nvm11, na,nb, jmiss) c implicit none c integer nvmax,nvmax1 double precision an(nvmax,nvmax1) integer nvm11 double precision hvec(nvm11) integer na,nb, jmiss(nvmax1) c Var double precision deter, turn, swap integer j,n, nc, jcl,jdelc,jdla,jdlb,jdm,jhfd, * jnc,jncb,jncc,jncd,jnk,jpaal, nma,npnb, ldel deter=1.0D0 n=na npnb=n+nb jnk=0 do j=1,npnb jnk=(j-1)*nvmax do nc=1,nvmax jnk=jnk+1 hvec(jnk)=an(nc,j) end do end do ldel=0 jdm=nvmax nma=n-1 jdelc=1-jdm do 130 jhfd=1,n turn=0.0D0 jdelc=jdelc+jdm jdla=jdelc+jhfd-1 jdlb=jdelc+nma do jncb=jdla,jdlb if(dabs(hvec(jncb)) .gt. dabs(turn)) then turn=hvec(jncb) ldel=jncb endif end do if (turn .eq. 0) goto 180 jpaal=ldel-jdelc+1 jmiss(jhfd)=jpaal if(jpaal .gt. jhfd) then deter=-deter jpaal=jpaal-jdm jncd=jhfd-jdm do jnc=1,npnb jpaal=jpaal+jdm jncd=jncd+jdm swap=hvec(jncd) hvec(jncd)=hvec(jpaal) hvec(jpaal)=swap end do endif deter=deter*turn turn=1.0D0/turn jncd=jdelc+nma do jnc=jdelc,jncd hvec(jnc)=-hvec(jnc)*turn end do hvec(jdla)=turn jncb=jhfd-jdm jpaal=1-jdm do jnc=1,npnb jpaal=jpaal+jdm jncb=jncb+jdm if(jnc .ne. jhfd) then jcl=jpaal+nma swap=hvec(jncb) jncd=jdelc-1 do jncc=jpaal,jcl jncd=jncd+1 hvec(jncc)=hvec(jncc)+swap*hvec(jncd) end do hvec(jncb)=swap*turn endif end do 130 continue do 160 jncb=1,n jhfd=n+1-jncb ldel=jmiss(jhfd) if(ldel .ne. 
jhfd) then jpaal=(ldel-1)*jdm+1 jcl=jpaal+nma jdelc=(jhfd-1)*jdm+1-jpaal do jncc=jpaal,jcl jncd=jncc+jdelc swap=hvec(jncc) hvec(jncc)=hvec(jncd) hvec(jncd)=swap end do endif 160 continue c--- 180 jnk=0 do j=1,npnb do nc=1,nvmax jnk=jnk+1 an(nc,j)=hvec(jnk) end do end do return end ccccc ccccc double precision * function rfqlsrg(k,n,nvmax1,nvmax,f,x, w,h,mm,nvad,nnn) cc implicit none integer k,n,nvmax1,nvmax, mm,nvad,nnn double precision f(k), x(n,nvad), w(n), h(nvmax,nvmax1) double precision q,hsum integer jnc,jncb cc q=0.D0 do 30 jnc=1,nnn call rffcn(k,f,x,jnc,n,nvad) hsum=0.D0 do jncb=1,k hsum=h(jncb,mm)*f(jncb)+hsum enddo q=(hsum-x(jnc,mm))*(hsum-x(jnc,mm))*w(jnc)+q 30 continue rfqlsrg=q return end ccccc ccccc subroutine rfrtran(nvar,jcst,nfac,nvad,nvmax1, * xmed,xmad, aa,jal,fckw) cc implicit none integer nvar,jcst,nfac,nvad,nvmax1, jal double precision aa(jal), xmed(nvmax1), xmad(nvmax1), fckw c Var integer j if(nvar.le.1) then aa(1)=aa(1)*xmad(nvad)/xmad(1) else do j=1,nfac aa(j)=aa(j)*xmad(nvad)/xmad(j) end do if(jcst.eq.0) then aa(nvar)=aa(nvar)*xmad(nvad)/xmad(nvar) else aa(nvar)=aa(nvar)*xmad(nvad) do j=1,nfac aa(nvar)=aa(nvar)-aa(j)*xmed(j) end do aa(nvar)=aa(nvar)+xmed(nvad) endif endif fckw=fckw*(xmad(nvad)*xmad(nvad)) return end ccccc ccccc subroutine rftrc(h, da, nvmax,nvmax1, nvar,jcst,nfac,nvad, * xmed,xmad) c implicit none integer nvmax, nvmax1 double precision h(nvmax,nvmax1), da(nvmax) integer nvar,jcst,nfac,nvad double precision xmed(nvmax1), xmad(nvmax1) c Var double precision xmp2,hnn integer j,k, k2 cc xmp2=dble(xmad(nvad))*dble(xmad(nvad)) if(jcst.eq.0) then do 10 j=1,nvar do k=1,j h(j,k)=h(j,k)*(xmp2/(dble(xmad(j))*dble(xmad(k)))) end do da(j)=dsqrt(h(j,j)) 10 continue else do j=1,nvar h(j,nvad)=h(j,j) end do do 30, j=1,nvar do k=1,j h(j,k)=h(j,k)*xmp2/(dble(xmad(j))*dble(xmad(k))) end do da(j)=dsqrt(h(j,j)) 30 continue do 50 k=1,nfac h(nvar,k)=h(k,nvar)*xmp2/dble(xmad(k)) do 60 k2=1,nvar if(k.eq.k2) then h(nvar,k)=h(nvar,k)-dble(xmed(k))*xmp2/ * (dble(xmad(k2))*dble(xmad(k)))*h(k2,nvad) else if(k.lt.k2) then h(nvar,k)=h(nvar,k)-(dble(xmed(k2))*xmp2)/ * (dble(xmad(k2))*dble(xmad(k)))*h(k,k2) else ! k > k2 h(nvar,k)=h(nvar,k)-dble(xmed(k2))*xmp2/ * (dble(xmad(k2))*dble(xmad(k)))*h(k2,k) endif 60 continue 50 continue h(nvar,nvar)=h(nvar,nvad)*xmp2 do 70 k=1,nvar h(nvar,nvar)=h(nvar,nvar)+ * (dble(xmed(k))*dble(xmed(k)))*xmp2/ * (dble(xmad(k))*dble(xmad(k)))*h(k,nvad) 70 continue do 80 k=1,nvar if(k.ne.nvar) then h(nvar,nvar)=h(nvar,nvar)-2.0D0*xmp2*dble(xmed(k))/ * (dble(xmad(k)))*h(k,nvar) else h(nvar,nvar)=h(nvar,nvar)-2.0D0*xmp2*dble(xmed(k))/ * (dble(xmad(k)))*h(nvar,nvad) endif 80 continue do j=1,nfac do k=j+1,nvar hnn=2.0D0*dble(xmed(j))*dble(xmed(k))*xmp2 h(nvar,nvar)=h(nvar,nvar)+hnn/ * (dble(xmad(j))*dble(xmad(k)))*h(j,k) end do end do da(nvar)=dsqrt(h(nvar,nvar)) endif return end ccccc ccccc subroutine rfequat(am, nvmax,nvmax1, hvec,nvm11, na,nb, nerr) implicit none integer nvmax,nvmax1 double precision am(nvmax,nvmax1) integer nvm11 double precision hvec(nvm11) integer na,nb, nerr c Var double precision turn,swap,deter integer j,n, ldel, jbegc,jbegx,jdel,jdm,jendc,jendx,jhfd,jmat, * jnc,jncb,jncc,jncd,jnce,jncf,jnk,jrow, lclpl, nc,neqa,nznde ldel=0 jdm=nvmax deter=1.0D0 n=na jmat=n+nb jnk=0 do j=1,jmat jnk=(j-1)*nvmax do nc=1,nvmax jnk=jnk+1 hvec(jnk)=am(nc,j) end do end do nznde=n-1 lclpl=-jdm do 120 jhfd=1,n turn=0.D0 lclpl=lclpl+jdm+1 jdel=lclpl+n-jhfd do jncb=lclpl,jdel if(dabs(hvec(jncb)) .gt. 
dabs(turn)) then turn=hvec(jncb) ldel=jncb endif end do if(dabs(turn) .le. 1D-8) then nerr=-1 goto 180 endif if(ldel .ne. lclpl) then deter=-deter ldel=ldel-jdm jncb=lclpl-jdm do jncc=jhfd,jmat ldel=ldel+jdm jncb=jncb+jdm swap=hvec(jncb) hvec(jncb)=hvec(ldel) hvec(ldel)=swap end do end if deter=deter*turn if(jhfd.eq.n) goto 120 turn=1./turn jncb=lclpl+1 do jncc=jncb,jdel hvec(jncc)=hvec(jncc)*turn end do jncd=lclpl jrow=jhfd+1 do jncb=jrow,n jncd=jncd+1 jnce=lclpl jncf=jncd do jncc=jrow,jmat jnce=jnce+jdm jncf=jncf+jdm hvec(jncf)=hvec(jncf)-hvec(jnce)*hvec(jncd) end do end do 120 continue nerr=0 neqa=n+1 jbegx=nznde*jdm+1 do 150 jnc=neqa,jmat jbegx=jbegx+jdm jendx=jbegx+n jbegc=n*jdm+1 jendc=jbegc+nznde do 140 jncb=1,nznde jendx=jendx-1 jbegc=jbegc-jdm jendc=jendc-jdm-1 hvec(jendx)=hvec(jendx)/hvec(jendc+1) swap=hvec(jendx) jncd=jbegx-1 do jncc=jbegc,jendc jncd=jncd+1 hvec(jncd)=hvec(jncd)-hvec(jncc)*swap end do 140 continue hvec(jbegx)=hvec(jbegx)/hvec(1) 150 continue jnc=-jdm jbegx=nznde*jdm+1 jendx=jbegx+nznde do 160 jncb=neqa,jmat jbegx=jbegx+jdm jendx=jendx+jdm jnc=jnc+jdm jncd=jnc do jncc=jbegx,jendx jncd=jncd+1 hvec(jncd)=hvec(jncc) end do 160 continue 180 jnk=0 do j=1,jmat do nc=1,nvmax jnk=jnk+1 am(nc,j)=hvec(jnk) end do end do return end ccccc C-- VT-- The following functions were added C-- C-- MM: moved to ./rf-common.f - since they are used from ./rffastmcd.f too robustbase/src/robustbase.h0000644000176200001440000001433214107541105015535 0ustar liggesusers/* External and interal API of C and Fortran routines in robustbase */ #include // for SEXP #include /**< For internationalized messages */ #ifdef ENABLE_NLS #include #define _(String) dgettext ("Matrix", String) #else #define _(String) (String) #define dngettext(pkg, String, StringP, N) (N > 1 ? 
StringP : String) #endif /* --------- ./qn_sn.c : -------- */ #define Sint int void Qn0(double *x, Sint *n, double *k, Sint *len_k, double *res); void Sn0(double *x, Sint *n, Sint *is_sorted, double *res, double *a2); /* * void Qn (double *x, Sint *n, Sint *h, Sint *finite_corr, double *res); * void Sn (double *x, Sint *n, Sint *finite_corr, double *res); */ /* call via .C() from R : */ void wgt_himed_i(double *x, Sint *n, Sint *iw, double *res); void wgt_himed (double *x, Sint *n, double *w, double *res); /* call from C: */ double pull(double *a, int n, int k); double whimed_i(double *a, int *iw, int n, double *acand, double *a_srt, int *iw_cand); double whimed(double *a, double *w, int n, double *acand, double *a_srt, double *w_cand); /* --------- ./mc.c -------- */ /* call via .C() from R : */ void mc_C(double *z, int *in, double *eps, int *iter, double *out, int *scale); /* call from C: *iter is both input and output */ double mc_C_d(const double z[], int n, const double eps[], int *iter, int scale); /* --------- ./lmrob.c --------- */ static inline Rboolean is_redescender(int ipsi) {// a simple wrapper for readability // for now, fastest: if(ipsi == 0) return FALSE; return TRUE; /* if have many more, maybe switch(ipsi) { default: error(_("ipsi=%d not implemented."), ipsi); case 0: // huber and future other non-redescenders return FALSE; case 1: case 2: case 3: case 4: case 5: case 6: return TRUE; } */ } SEXP R_rho_inf(SEXP cc, SEXP ipsi); void R_lmrob_S(double *X, double *y, int *n, int *P, int *nRes, double *scale, double *beta_s, double *C, int *iipsi, double *bb, int *best_r, int *Groups, int *N_group, int *K_s, int *max_k, int *max_it_scale, double *rel_tol, double *inv_tol, double *scale_tol, // ^^^^^^^^^ = refine.tol in R int* converged, int *trace_lev, int *mts, int *ss, int *cutoff); void R_lmrob_M_S(double *X1, double *X2, double *y, double *res, int *n, int *p1, int *p2, int *nRes, int *max_it_scale, double *scale, double *b1, double *b2, double *rho_c, int *ipsi, double *bb, int *K_m_s, int *max_k, double *rel_tol, double *inv_tol, double *scale_tol, int *converged, int *trace_lev, int *orthogonalize, int *subsample, int *descent, int *mts, int *ss); void R_lmrob_MM(double *X, double *y, int *n, int *P, double *beta_initial, double *scale, double *beta_m, double *resid, int *max_it, double *rho_c, int *ipsi, double *loss, double *rel_tol, int *converged, int *trace_lev, int *mts, int *ss); void R_subsample(const double *x, const double *y, int *n, int *m, double *beta, int *ind_space, int *idc, int *idr, double *lu, double *v, int *p, double *_Dr, double *_Dc, int *_rowequ, int *_colequ, int *status, int *sample, int *mts, int *ss, double *tol_inv, int *solve); SEXP R_psifun(SEXP x_, SEXP c_, SEXP ipsi_, SEXP deriv_); SEXP R_chifun(SEXP x_, SEXP c_, SEXP ipsi_, SEXP deriv_); SEXP R_wgtfun(SEXP x_, SEXP c_, SEXP ipsi_); double rho(double x, const double c[], int ipsi); double psi(double x, const double c[], int ipsi); double psip(double x, const double c[], int ipsi);// psi' double psi2(double x, const double c[], int ipsi);// psi'' double wgt(double x, const double c[], int ipsi); double rho_inf (const double c[], int ipsi); // == \rho(\infty) double normcnst(const double c[], int ipsi); // == 1 / \rho(\infty) == 1 / rho_inf() void R_find_D_scale(double *rr, double *kkappa, double *ttau, int *llength, double *sscale, double *cc, int *iipsi, int *ttype, double *rel_tol, int *max_k, int *converged); void R_calc_fitted(double *XX, double *bbeta, double *RR, int *nn, int *pp, 
int *nnrep, int *nnproc, int *nnerr); // ------- ./rob-utils.c --------------- SEXP R_wgt_flex(SEXP x_, SEXP c_, SEXP h_); // ------- ./rowMedians.c --------------- SEXP R_rowMedians(SEXP x, SEXP naRm, SEXP hasNA, SEXP byRow, SEXP keepNms); /* ------- ./rffastmcd.f ------------ */ void F77_NAME(rffastmcd)( double *dat, int *n, int *nvar, int *nhalff, int *krep, // 5 int *nmini, int *kmini, // 7 double *initcov, double *initmean, int *inbest, double *det, // 11 int *weight, int *fit, double *coeff, int *kount, double *adcov, // 16 int *temp, int *index1, int *index2, int *indexx, double *nmahad,// 21 double *ndist, double *am, double *am2, // 24 double *slutn, double *med, double *mad, double *sd, // 28 double *means, double *bmeans, double *w, double *fv1, // 32 double *fv2, double *rec, double *sscp1, double *cova1, // 36 double *corr1, double *cinv1, double *cova2, // 39 double *cinv2, double *z__, double *cstock, double *mstock, // 43 double *c1stock, double *m1stock, double *dath, // 46 double *cutoff, double *chimed, int *i_trace); // 49 args /* ------- ./rfltsreg.f ------------ */ void F77_NAME(rfltsreg)( double *dat, int *n, int *nvar, int *nhalff, int *krep, int *inbest, double *objfct, int *intercept, int *intadjust, int *nvad, double *datt, double *weights, int *temp, int *index1, int *index2, double *aw2, double *aw, double *residu, double *y, double *nmahad, double *ndist, double *am, double *am2, double *slutn, int *jmiss, double *xmed, double *xmad, double *a, double *da, double *h__, double *hvec, double *c__, double *cstock, double *mstock, double *c1stock, double *m1stock, double *dath, double *sd, double *means, double *bmeans, int *i_trace); /* ------- ./rllarsbi.f -------------- */ void F77_NAME(rllarsbi)( double *X, double *Y, int *N, int *NP, int *MDX, int *MDT, double *TOL, int *NIT, int *K, int *KODE, double *SIGMA, double *THETA, double *RS, double *SC1, double *SC2, double *SC3, double *SC4, double *BET0); robustbase/src/init.c0000644000176200001440000000772014052244671014334 0ustar liggesusers #include #include "robustbase.h" #define CDEF(name) {#name, (DL_FUNC) &name, sizeof(name ## _typ)/sizeof(name ## _typ[0]), name ##_typ} #define CALLDEF(name, n) {#name, (DL_FUNC) &name, n} static R_NativePrimitiveArgType Qn0_typ[] = { // ./qn_sn.c REALSXP, INTSXP, REALSXP, INTSXP, REALSXP }; static R_NativePrimitiveArgType Sn0_typ[] = { REALSXP, INTSXP, INTSXP, REALSXP, REALSXP }; static R_NativePrimitiveArgType mc_C_typ[] = { REALSXP, INTSXP, REALSXP, INTSXP, REALSXP, LGLSXP }; static R_NativePrimitiveArgType wgt_himed_i_typ[] = { REALSXP, INTSXP, INTSXP, REALSXP }; static R_NativePrimitiveArgType wgt_himed_typ[] = { REALSXP, INTSXP, REALSXP, REALSXP }; static R_NativePrimitiveArgType R_lmrob_S_typ[] = { REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, /* rrhoc */ REALSXP, INTSXP, REALSXP, /* best_r */ INTSXP, INTSXP, INTSXP, /* K_s */ INTSXP, INTSXP, INTSXP, /* rel_tol*/ REALSXP, REALSXP, REALSXP, /* converged */ LGLSXP, INTSXP, INTSXP, INTSXP, INTSXP }; static R_NativePrimitiveArgType R_lmrob_MM_typ[] = { REALSXP, REALSXP, INTSXP, INTSXP, /* beta_initial */ REALSXP, REALSXP, /* beta_m */ REALSXP, REALSXP, /* max_it */ INTSXP, REALSXP, INTSXP, /* loss */ REALSXP, REALSXP, LGLSXP, INTSXP, INTSXP, INTSXP }; static R_NativePrimitiveArgType R_find_D_scale_typ[] = { REALSXP, REALSXP, REALSXP, INTSXP, REALSXP, /* c */ REALSXP, INTSXP, INTSXP, REALSXP, /* max_k */ INTSXP, LGLSXP }; static R_NativePrimitiveArgType R_calc_fitted_typ[] = { REALSXP, REALSXP, REALSXP, 
INTSXP, INTSXP, INTSXP, INTSXP, INTSXP }; static R_NativePrimitiveArgType R_lmrob_M_S_typ[] = { REALSXP, REALSXP, REALSXP, REALSXP, /* nn */ INTSXP, INTSXP, INTSXP, INTSXP, INTSXP, /* scale */ REALSXP, REALSXP, REALSXP, /* rho_c */ REALSXP, INTSXP, REALSXP, /* K_m_s */ INTSXP, INTSXP, /* rel_tol */ REALSXP, REALSXP, REALSXP, /* converged */ LGLSXP, INTSXP, /* orthogonalize */ LGLSXP, LGLSXP, LGLSXP, INTSXP, INTSXP }; static R_NativePrimitiveArgType R_subsample_typ[] = { REALSXP, REALSXP, INTSXP, INTSXP, REALSXP, INTSXP, INTSXP, INTSXP, REALSXP, REALSXP, INTSXP, REALSXP, REALSXP, INTSXP, INTSXP, INTSXP, LGLSXP, INTSXP, INTSXP, REALSXP, LGLSXP }; static const R_CMethodDef CEntries[] = { CDEF(Qn0), CDEF(Sn0), CDEF(mc_C), CDEF(wgt_himed_i), CDEF(wgt_himed), CDEF(R_lmrob_S), CDEF(R_lmrob_MM), CDEF(R_find_D_scale), CDEF(R_calc_fitted), CDEF(R_lmrob_M_S), CDEF(R_subsample), {NULL, NULL, 0} }; static R_CallMethodDef CallEntries[] = { CALLDEF(R_rho_inf, 2), // -> lmrob.c CALLDEF(R_psifun, 4), CALLDEF(R_chifun, 4), CALLDEF(R_wgtfun, 3), CALLDEF(R_wgt_flex, 3), // -> rob-utils.c CALLDEF(R_rowMedians, 5),// -> rowMedians.c [Biobase also has rowQ for quantiles] {NULL, NULL, 0} }; static R_FortranMethodDef FortEntries[] = { {"rffastmcd", (DL_FUNC) &F77_SUB(rffastmcd), 49},/* ./rffastmcd.f */ {"rfltsreg", (DL_FUNC) &F77_SUB(rfltsreg), 41}, /* ./rfltsreg.f */ {"rllarsbi", (DL_FUNC) &F77_SUB(rllarsbi), 18}, /* ./rllarsbi.f */ {NULL, NULL, 0} }; void R_init_robustbase(DllInfo *dll) { R_registerRoutines(dll, CEntries, CallEntries, FortEntries, NULL); R_useDynamicSymbols(dll, FALSE); R_RegisterCCallable("robustbase", "R_psifun", (DL_FUNC) &R_psifun); R_RegisterCCallable("robustbase", "R_chifun", (DL_FUNC) &R_chifun); R_RegisterCCallable("robustbase", "R_wgtfun", (DL_FUNC) &R_wgtfun); R_RegisterCCallable("robustbase", "rho", (DL_FUNC) &rho); R_RegisterCCallable("robustbase", "psi", (DL_FUNC) &psi); R_RegisterCCallable("robustbase", "psip", (DL_FUNC) &psip); R_RegisterCCallable("robustbase", "psi2", (DL_FUNC) &psi2); R_RegisterCCallable("robustbase", "wgt", (DL_FUNC) &wgt); R_RegisterCCallable("robustbase", "rho_inf", (DL_FUNC) &rho_inf); R_RegisterCCallable("robustbase", "normcnst", (DL_FUNC) &normcnst); } robustbase/src/rf-common.f0000644000176200001440000002356012432425613015266 0ustar liggesusersc c-- Routines common to c-- fastLTS ( ./rfltsreg.f ) and c-- fastMCD ( ./rffastmcd.f ) c c subroutine rfrangen(n, nsel, index) c c Randomly draws nsel cases out of n cases. c Here, index is the index set. c implicit none integer n, nsel, index(nsel) c unifrnd() == R C API's unif_rand() --> see ./R-rng4ftn.c double precision unifrnd integer i,j, num c do i=1,nsel cOLD 10 num=int(uniran(seed)*n)+1 10 num=int(unifrnd()*n)+1 C if(num .gt. n) then C call intpr('** rfrangen(): num > n; num=', -1, num, 1) C num=n C endif if(i.gt.1) then do j=1,i-1 if(index(j).eq.num) goto 10 end do endif index(i)=num end do return end c --------------------------------------------------------- cOLD function uniran(seed) cOLD cc cOLD cc Draws a random number from the uniform distribution on [0,1]. cOLD cc cOLD real uniran cOLD integer seed cOLD integer quot cOLD cc cOLD seed=seed*5761+999 cOLD quot=seed/65536 cOLD seed=seed-quot*65536 cOLD uniran=float(seed)/65536.D0 cOLD return cOLD end c --------------------------------------------------------- subroutine rfgenpn(n,nsel,index) cc cc Constructs all subsets of nsel cases out of n cases. 
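cc (The routine is called once per trial and advances index(1..nsel) to the
cc next combination in lexicographic order: the rightmost position k whose
cc value is still below its maximum n-(nsel-k) is incremented, and the
cc positions k+1..nsel are reset to the consecutive values following it.
cc The callers, e.g. rfltsreg() above, initialize index = (1,...,nsel-1,nsel-1)
cc so that the first call yields the combination (1,...,nsel).)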
cc implicit none integer n,nsel,index(nsel) cc integer k,i k=nsel index(k)=index(k)+1 c while 10 if(k.eq.1 .or. index(k).le.(n-(nsel-k))) goto 100 k=k-1 index(k)=index(k)+1 do i=k+1,nsel index(i)=index(i-1)+1 end do goto 10 c end{while} 100 return end c --------------------------------------------------------- subroutine rfshsort(a,n) cc cc Sorts the array a of length n. cc implicit none integer n double precision a(n) c double precision t integer gap, i,j, nextj gap=n c --- repeat 100 gap=gap/2 if(gap.eq.0) goto 200 do 180 i=1,n-gap j=i 120 if(j.lt.1) goto 180 nextj=j+gap if(a(j).gt.a(nextj)) then t=a(j) a(j)=a(nextj) a(nextj)=t else j=0 endif j=j-gap goto 120 180 continue goto 100 c ---- --- end repeat 200 return end c --------------------------------------------------------- subroutine rfishsort(a,n) cc cc Sorts the integer array a of length n. cc implicit none integer n, a(n) c integer t, gap, i,j, nextj gap=n c --- repeat 100 gap=gap/2 if(gap.eq.0) goto 200 do 180 i=1,n-gap j=i 120 if(j.lt.1) goto 180 nextj=j+gap if(a(j).gt.a(nextj)) then t=a(j) a(j)=a(nextj) a(nextj)=t else j=0 endif j=j-gap goto 120 180 continue goto 100 c ---- --- end repeat 200 return end c --------------------------------------------------------- integer function replow(k) cc cc Find out which combinations of n and p are cc small enough in order to perform exaustive search cc Returns the maximal n for a given p, for which cc exhaustive search is to be done cc cc k is the number of variables (p) cc implicit none integer k c integer irep(6) data irep/500,50,22,17,15,14/ c if(k .le. 6) then replow = irep(k) else replow = 0 endif return end c --------------------------------------------------------- integer function rfncomb(k,n) cc cc Computes the number of combinations of k out of n. cc (To avoid integer overflow during the computation, cc ratios of reals are multiplied sequentially.) cc For comb > 1E+009 the resulting 'comb' may be too large cc to be put in the integer 'rfncomb', but the main program cc only calls this function for small enough n and k. cc implicit none integer k,n c double precision comb,fact integer j c comb=dble(1.0) do j=1,k fact=(dble(n-j+1.0))/(dble(k-j+1.0)) comb=comb*fact end do c Should give error now instead of integer overflow! c Don't know how to get .Machine$integer.max in Fortran, portably if(comb .gt. 2147483647) then comb=2147483647. call + dblepr('** too many combinations; using max.integer instead:', + -1,comb,1) endif rfncomb=int(comb+0.5D0) return end c --------------------------------------------------------- subroutine rfcovcopy(a,b,n1,n2) cc cc Copies matrix a to matrix b. cc double precision a(n1,n2) double precision b(n1,n2) c do i=1,n1 do j=1,n2 b(i,j)=a(i,j) end do end do return end c --------------------------------------------------------- double precision function rffindq(aw, ncas, k, index) c c Finds the k-th order statistic of the array aw[1..ncas], c sorting the array aw[.] until aw[k] is sure to contain the k-th value c c MM{FIXME}: "rather" use R's C API rPsort (double* X, int N, int K) implicit none integer ncas,k,index(ncas) double precision aw(ncas) c double precision ax,wa integer i,j,l,lr,jnc c do j=1,ncas index(j)=j end do c lower (= l) and upper ( =lr ) bounds: l=1 lr=ncas c--- while(l < lr) 20 if(l .lt. lr) then ax=aw(k) jnc=l j=lr c--- while(jnc < j) 30 if(jnc .le. j) then 40 if(aw(jnc).ge.ax) goto 50 jnc=jnc+1 goto 40 50 if(aw(j).le.ax) goto 60 j=j-1 goto 50 60 if(jnc .le. j) then ! 
swap jnc <--> j i=index(jnc) index(jnc)=index(j) index(j)=i wa=aw(jnc) aw(jnc)=aw(j) aw(j)=wa jnc=jnc+1 j=j-1 endif goto 30 end if if(j.lt.k) l=jnc if(k.lt.jnc) lr=j goto 20 end if rffindq=aw(k) return end c --------------------------------------------------------- subroutine rfrdraw(a,n,ntot,mini,ngroup,kmini) cc cc Draws ngroup nonoverlapping subdatasets out of a dataset of size n, cc such that the selected case numbers are uniformly distributed from 1 to n. cc implicit none integer n, ntot, kmini, a(2,ntot), mini(kmini), ngroup c unifrnd() == R C API's unif_rand() --> see ./R-rng4ftn.c double precision unifrnd c integer jndex, nrand, k,m,i,j cc jndex=0 do k=1,ngroup do 20 m=1,mini(k) cOLD nrand=int(uniran(seed)*(n-jndex))+1 nrand=int(unifrnd()*(n-jndex))+1 C if(nrand .gt. n-jndex) then C call intpr( C 1 '** rfrdraw(): need to correct nrand > n-jndex; nrand=', C 2 -1, nrand, 1) C nrand=n-jndex C endif jndex=jndex+1 if(jndex.eq.1) then a(1,jndex)=nrand a(2,jndex)=k else a(1,jndex)=nrand+jndex-1 a(2,jndex)=k do i=1,jndex-1 if(a(1,i).gt.nrand+i-1) then do j=jndex,i+1,-1 a(1,j)=a(1,j-1) a(2,j)=a(2,j-1) end do a(1,i)=nrand+i-1 a(2,i)=k goto 20 c ------- break endif end do endif 20 continue end do return end c --------------------------------------------------------- logical function rfodd(n) rfodd=.true. if(2*(n/2).eq.n) rfodd=.false. return end c --------------------------------------------------------- c unused function rfnbreak(nhalf,n,nvar) c unused cc c unused cc Computes the breakdown value - in percent! - of the MCD estimator c unused cc c unused implicit none c unused integer rfnbreak, nhalf, n, nvar c unused c unused if (nhalf.le.(n+nvar+1)/2) then c unused rfnbreak=(nhalf-nvar)*100/n c unused else c unused rfnbreak=(n-nhalf+1)*100/n c unused endif c unused return c unused end c --------------------------------------------------------- subroutine rfmcduni(w,ncas,jqu,slutn,bstd,aw,aw2,factor,len) cc cc rfmcduni : calculates the MCD in the univariate case. 
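## Aside (illustration only, not part of the sources): before the Fortran details of
## rfmcduni(), here is a naive R sketch of what the univariate MCD amounts to -- among
## all windows of h consecutive order statistics, take the one with the smallest sum of
## squared deviations from its own mean; its mean is the location and sqrt(SS/h) the
## (uncorrected) scale.  The data x and h are made-up values, and the consistency
## factor is taken as 1:
x <- c(2.1, 1.9, 2.0, 2.2, 8.5, 2.3); h <- 4          ## illustration data
xs <- sort(x); n <- length(xs)
ss <- vapply(1:(n - h + 1), function(j) {             ## SS of each window of h order stats
        w <- xs[j:(j + h - 1)]; sum((w - mean(w))^2) }, numeric(1))
j <- which.min(ss)                                    ## window with smallest 1-D "determinant"
c(center = mean(xs[j:(j + h - 1)]), scale = sqrt(ss[j] / h))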
cc w contains the ordered observations cc c This version returns the index (jint) in 'len' c which is used in rfltreg.f implicit double precision (a-h,o-z), integer(i-n) integer ncas, jqu, len double precision w(ncas), aw(ncas), aw2(ncas) double precision slutn(len) cc sq=0.D0 sqmin=0.D0 ndup=1 do j=1,ncas-jqu+1 slutn(j)=0.D0 end do do jint=1,ncas-jqu+1 aw(jint)=0.D0 do j=1,jqu aw(jint)=aw(jint)+w(j+jint-1) if (jint.eq.1) sq=sq+w(j)*w(j) end do aw2(jint)=aw(jint)*aw(jint)/jqu if (jint.eq.1) then sq=sq-aw2(jint) sqmin=sq slutn(ndup)=aw(jint) len=jint else sq=sq - w(jint-1)*w(jint-1) + w(jint+jqu-1)*w(jint+jqu-1) * - aw2(jint) + aw2(jint-1) if(sq.lt.sqmin) then ndup=1 sqmin=sq slutn(ndup)=aw(jint) len=jint else if(sq.eq.sqmin) then ndup=ndup+1 slutn(ndup)=aw(jint) endif endif endif end do slutn(1)=slutn(int((ndup+1)/2))/jqu bstd=factor*sqrt(sqmin/jqu) return end c --------------------------------------------------------- robustbase/src/templates-types_undef.h0000644000176200001440000000035012441664610017706 0ustar liggesusers#undef CONCAT #undef CONCAT_MACROS #undef METHOD_NAME #undef C_METHOD_NAME #undef X_C_TYPE #undef X_IN_C #undef X_ISNAN #undef ANS_SXP #undef ANS_NA #undef ANS_C_TYPE #undef ANS_IN_C #undef X_TYPE #undef ANS_TYPE #undef MARGIN robustbase/src/R-rng4ftn.c0000644000176200001440000000033610441335044015137 0ustar liggesusers#include void F77_SUB(rndstart)(void) { GetRNGstate(); } void F77_SUB(rndend)(void) { PutRNGstate(); } double F77_SUB(normrnd)(void) { return norm_rand(); } double F77_SUB(unifrnd)(void) { return unif_rand(); } robustbase/src/monitor.c0000644000176200001440000000472513635376552015074 0ustar liggesusers// Flexible printing of informative messages from Fortran // ----------------- ------------ // other MM versions: ~/R/Pkgs/cobs99/src/monitor.c ~/R/Pkgs/lokern/src/monitor.c #include /* called for trace >= 2 : ----------------------------------------------- */ #ifdef _new_gfortran__fixme # define F_Logical int_least32_t * #else # define F_Logical int * #endif // but 'int *' gives warnings with LTO ? void F77_SUB(println)() { Rprintf("\n"); } void F77_SUB(prallc)(int *nrep) { Rprintf("will use *all* combinations: %d\n", *nrep); } void F77_SUB(pr1mcd)(int *i_trace, int *n, int *nvar, int *nhallf, int *krep, int *nmini, int *kmini) { Rprintf("rffastmcd(n=%d, nvar=%d, nhallf=%d, krep=%d, nmini=%d, kmini=%d, i_trace=%d)\n", *n, *nvar, *nhallf, *krep, *nmini, *kmini, *i_trace); } void F77_SUB(pr2mcd)(F_Logical part, F_Logical all, int *kstep, int *ngroup, int *minigr, int *nhalf, int *nrep) { Rprintf("pr[2]: (part=%d, all=%d); (kstep=%d, ngroup=%d, minigr=%d, nhalf=%d, nrep=%d)\n", *part, *all, *kstep, *ngroup, *minigr, *nhalf, *nrep); } void F77_SUB(pr3mcd)(F_Logical part, F_Logical fine, F_Logical final, int *nrep, int *nn, int *nsel, int *nhalf, int *kstep, int *nmini, int *kmini) { char* phase_kind = (*part) ? ((*fine && !*final) ? "fine (2 of 3)" : ((*final) ? "final (3 of 3)" : "first (of 3)")) : ((*final) ? 
"final" : "one"); Rprintf(" Main loop, phase[%s]:\n (nrep=%4d, nn=%4d, nsel=%4d, nhalf=%4d, kstep=%d, nmini=%d, kmini=%d)\n", phase_kind, *nrep, *nn, *nsel, *nhalf, *kstep, *nmini, *kmini); } void F77_SUB(prp1mcd)(int *n, int *ngroup, int *minigr, int *nhalf, int *nrep, int mini[]) { // int mini[*kmini]; Rprintf(" Partitioning n=%d into at most kmini groups: ngroup=%d, minigr=%d, nhalf=%d, nrep=%d;" "\n groups are of sizes (", *n, *ngroup, *minigr, *nhalf, *nrep); for(int j=0; j < *ngroup; j++) Rprintf(" %d", mini[j]); Rprintf(")\n"); } void F77_SUB(pr9mcd)(int *ntot) { Rprintf(" -- finishing: total times = %d\n", *ntot); } /* called for trace >= 3 : ----------------------------------------------- */ void F77_SUB(prgrmcd)(int *ii, int *nn, int *i_trace) { Rprintf(" group ii = %d (nn = %d)%s\n", *ii, *nn, (*i_trace >= 4) ? ": i=1..nrep loop: " : ""); } void F77_SUB(pr4mcd)(int *i) { Rprintf(" i = %d "); } void F77_SUB(pr5mcd)(int *step, int *ntot) { Rprintf("(step %d, tot=%d)", *step, *ntot); } robustbase/src/Makevars0000644000176200001440000000021710477324431014713 0ustar liggesusers## Dear Emacs, make me -*- Makefile -*- ## we use the BLAS and now also the LAPACK library: PKG_LIBS= $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) robustbase/src/eigen.f0000644000176200001440000005117112432357143014461 0ustar liggesusersc--- EISPACK Eigen Value Computation --- c--- ================================ --- c--- This has been /src/appl/eigen.f from the 1990's upto 2013 c--- when it has become deprecated and unused in R's own sources ---------- c--- c--- "TODO": Use LAPACK's eigen routines instead ------------------------ c--- ====== -------------------------------------------------------------- c DOUBLE PRECISION FUNCTION EPSLON (X) DOUBLE PRECISION X C C ESTIMATE UNIT ROUNDOFF IN QUANTITIES OF SIZE X. C DOUBLE PRECISION A,B,C,EPS C C THIS PROGRAM SHOULD FUNCTION PROPERLY ON ALL SYSTEMS C SATISFYING THE FOLLOWING TWO ASSUMPTIONS, C 1. THE BASE USED IN REPRESENTING FLOATING POINT C NUMBERS IS NOT A POWER OF THREE. C 2. THE QUANTITY A IN STATEMENT 10 IS REPRESENTED TO C THE ACCURACY USED IN FLOATING POINT VARIABLES C THAT ARE STORED IN MEMORY. C THE STATEMENT NUMBER 10 AND THE GO TO 10 ARE INTENDED TO C FORCE OPTIMIZING COMPILERS TO GENERATE CODE SATISFYING C ASSUMPTION 2. C UNDER THESE ASSUMPTIONS, IT SHOULD BE TRUE THAT, C A IS NOT EXACTLY EQUAL TO FOUR-THIRDS, C B HAS A ZERO FOR ITS LAST BIT OR DIGIT, C C IS NOT EXACTLY EQUAL TO ONE, C EPS MEASURES THE SEPARATION OF 1.0 FROM C THE NEXT LARGER FLOATING POINT NUMBER. C THE DEVELOPERS OF EISPACK WOULD APPRECIATE BEING INFORMED C ABOUT ANY SYSTEMS WHERE THESE ASSUMPTIONS DO NOT HOLD. C C THIS VERSION DATED 4/6/83. C A = 4.0D0/3.0D0 10 B = A - 1.0D0 C = B + B + B EPS = DABS(C-1.0D0) IF (EPS .EQ. 0.0D0) GO TO 10 EPSLON = EPS*DABS(X) RETURN END DOUBLE PRECISION FUNCTION PYTHAG(A,B) DOUBLE PRECISION A,B C C FINDS DSQRT(A**2+B**2) WITHOUT OVERFLOW OR DESTRUCTIVE UNDERFLOW C DOUBLE PRECISION P,R,S,T,U P = DMAX1(DABS(A),DABS(B)) IF (P .EQ. 0.0D0) GO TO 20 R = (DMIN1(DABS(A),DABS(B))/P)**2 10 CONTINUE T = 4.0D0 + R IF (T .EQ. 4.0D0) GO TO 20 S = R/T U = 1.0D0 + 2.0D0*S P = U*P R = (S/U)**2 * R GO TO 10 20 PYTHAG = P RETURN END SUBROUTINE RS(NM,N,A,W,MATZ,Z,FV1,FV2,IERR) C INTEGER N,NM,IERR,MATZ DOUBLE PRECISION A(NM,N),W(N),Z(NM,N),FV1(N),FV2(N) C C THIS SUBROUTINE CALLS THE RECOMMENDED SEQUENCE OF C SUBROUTINES FROM THE EIGENSYSTEM SUBROUTINE PACKAGE (EISPACK) C TO FIND THE EIGENVALUES AND EIGENVECTORS (IF DESIRED) C OF A REAL SYMMETRIC MATRIX. 
C C ON INPUT C C NM MUST BE SET TO THE ROW DIMENSION OF THE TWO-DIMENSIONAL C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM C DIMENSION STATEMENT. C C N IS THE ORDER OF THE MATRIX A. C C A CONTAINS THE REAL SYMMETRIC MATRIX. C C MATZ IS AN INTEGER VARIABLE SET EQUAL TO ZERO IF C ONLY EIGENVALUES ARE DESIRED. OTHERWISE IT IS SET TO C ANY NON-ZERO INTEGER FOR BOTH EIGENVALUES AND EIGENVECTORS. C C ON OUTPUT C C W CONTAINS THE EIGENVALUES IN ASCENDING ORDER. C C Z CONTAINS THE EIGENVECTORS IF MATZ IS NOT ZERO. C C IERR IS AN INTEGER OUTPUT VARIABLE SET EQUAL TO AN ERROR C COMPLETION CODE DESCRIBED IN THE DOCUMENTATION FOR TQLRAT C AND TQL2. THE NORMAL COMPLETION CODE IS ZERO. C C FV1 AND FV2 ARE TEMPORARY STORAGE ARRAYS. C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ C IF (N .LE. NM) GO TO 10 IERR = 10 * N GO TO 50 C 10 IF (MATZ .NE. 0) GO TO 20 C .......... FIND EIGENVALUES ONLY .......... CALL TRED1(NM,N,A,W,FV1,FV2) CALL TQLRAT(N,W,FV2,IERR) GO TO 50 C .......... FIND BOTH EIGENVALUES AND EIGENVECTORS .......... 20 CALL TRED2(NM,N,A,W,FV1,Z) CALL TQL2(NM,N,W,FV1,Z,IERR) 50 RETURN END SUBROUTINE TQL2(NM,N,D,E,Z,IERR) C INTEGER I,J,K,L,M,N,II,L1,L2,NM,MML,IERR DOUBLE PRECISION D(N),E(N),Z(NM,N) DOUBLE PRECISION C,C2,C3,DL1,EL1,F,G,H,P,R,S,S2,TST1,TST2,PYTHAG C C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQL2, C NUM. MATH. 11, 293-306(1968) BY BOWDLER, MARTIN, REINSCH, AND C WILKINSON. C HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 227-240(1971). C C THIS SUBROUTINE FINDS THE EIGENVALUES AND EIGENVECTORS C OF A SYMMETRIC TRIDIAGONAL MATRIX BY THE QL METHOD. C THE EIGENVECTORS OF A FULL SYMMETRIC MATRIX CAN ALSO C BE FOUND IF TRED2 HAS BEEN USED TO REDUCE THIS C FULL MATRIX TO TRIDIAGONAL FORM. C C ON INPUT C C NM MUST BE SET TO THE ROW DIMENSION OF TWO-DIMENSIONAL C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM C DIMENSION STATEMENT. C C N IS THE ORDER OF THE MATRIX. C C D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. C C E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE INPUT MATRIX C IN ITS LAST N-1 POSITIONS. E(1) IS ARBITRARY. C C Z CONTAINS THE TRANSFORMATION MATRIX PRODUCED IN THE C REDUCTION BY TRED2, IF PERFORMED. IF THE EIGENVECTORS C OF THE TRIDIAGONAL MATRIX ARE DESIRED, Z MUST CONTAIN C THE IDENTITY MATRIX. C C ON OUTPUT C C D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN C ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT BUT C UNORDERED FOR INDICES 1,2,...,IERR-1. C C E HAS BEEN DESTROYED. C C Z CONTAINS ORTHONORMAL EIGENVECTORS OF THE SYMMETRIC C TRIDIAGONAL (OR FULL) MATRIX. IF AN ERROR EXIT IS MADE, C Z CONTAINS THE EIGENVECTORS ASSOCIATED WITH THE STORED C EIGENVALUES. C C IERR IS SET TO C ZERO FOR NORMAL RETURN, C J IF THE J-TH EIGENVALUE HAS NOT BEEN C DETERMINED AFTER 30 ITERATIONS. C C CALLS PYTHAG FOR DSQRT(A*A + B*B) . C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ c c unnecessary initialization of C3 and S2 to keep g77 -Wall happy c C3 = 0.0D0 S2 = 0.0D0 C IERR = 0 IF (N .EQ. 1) GO TO 1001 C do I = 2, N E(I-1) = E(I) end do C F = 0.0D0 TST1 = 0.0D0 E(N) = 0.0D0 C DO 240 L = 1, N J = 0 H = DABS(D(L)) + DABS(E(L)) IF (TST1 .LT. H) TST1 = H C .......... 
LOOK FOR SMALL SUB-DIAGONAL ELEMENT .......... DO 110 M = L, N TST2 = TST1 + DABS(E(M)) IF (TST2 .EQ. TST1) GO TO 120 C .......... E(N) IS ALWAYS ZERO, SO THERE IS NO EXIT C THROUGH THE BOTTOM OF THE LOOP .......... 110 CONTINUE C 120 IF (M .EQ. L) GO TO 220 130 IF (J .EQ. 30) GO TO 1000 J = J + 1 C .......... FORM SHIFT .......... L1 = L + 1 L2 = L1 + 1 G = D(L) P = (D(L1) - G) / (2.0D0 * E(L)) R = PYTHAG(P,1.0D0) D(L) = E(L) / (P + DSIGN(R,P)) D(L1) = E(L) * (P + DSIGN(R,P)) DL1 = D(L1) H = G - D(L) IF (L2 .GT. N) GO TO 145 C DO I = L2, N D(I) = D(I) - H end do C 145 F = F + H C .......... QL TRANSFORMATION .......... P = D(M) C = 1.0D0 C2 = C EL1 = E(L1) S = 0.0D0 MML = M - L C .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... DO 200 II = 1, MML C3 = C2 C2 = C S2 = S I = M - II G = C * E(I) H = C * P R = PYTHAG(P,E(I)) E(I+1) = S * R S = E(I) / R C = P / R P = C * D(I) - S * G D(I+1) = H + S * (C * G + S * D(I)) C .......... FORM VECTOR .......... DO 180 K = 1, N H = Z(K,I+1) Z(K,I+1) = S * Z(K,I) + C * H Z(K,I) = C * Z(K,I) - S * H 180 CONTINUE C 200 CONTINUE C P = -S * S2 * C3 * EL1 * E(L) / DL1 E(L) = S * P D(L) = C * P TST2 = TST1 + DABS(E(L)) IF (TST2 .GT. TST1) GO TO 130 220 D(L) = D(L) + F 240 CONTINUE C .......... ORDER EIGENVALUES AND EIGENVECTORS .......... DO 300 II = 2, N I = II - 1 K = I P = D(I) C DO 260 J = II, N IF (D(J) .GE. P) GO TO 260 K = J P = D(J) 260 CONTINUE C IF (K .EQ. I) GO TO 300 D(K) = D(I) D(I) = P C DO 280 J = 1, N P = Z(J,I) Z(J,I) = Z(J,K) Z(J,K) = P 280 CONTINUE C 300 CONTINUE C GO TO 1001 C .......... SET ERROR -- NO CONVERGENCE TO AN C EIGENVALUE AFTER 30 ITERATIONS .......... 1000 IERR = L 1001 RETURN END SUBROUTINE TQLRAT(N,D,E2,IERR) C INTEGER I,J,L,M,N,II,L1,MML,IERR DOUBLE PRECISION D(N),E2(N) DOUBLE PRECISION B,C,F,G,H,P,R,S,T,EPSLON,PYTHAG C C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TQLRAT, C ALGORITHM 464, COMM. ACM 16, 689(1973) BY REINSCH. C C THIS SUBROUTINE FINDS THE EIGENVALUES OF A SYMMETRIC C TRIDIAGONAL MATRIX BY THE RATIONAL QL METHOD. C C ON INPUT C C N IS THE ORDER OF THE MATRIX. C C D CONTAINS THE DIAGONAL ELEMENTS OF THE INPUT MATRIX. C C E2 CONTAINS THE SQUARES OF THE SUBDIAGONAL ELEMENTS OF THE C INPUT MATRIX IN ITS LAST N-1 POSITIONS. E2(1) IS ARBITRARY. C C ON OUTPUT C C D CONTAINS THE EIGENVALUES IN ASCENDING ORDER. IF AN C ERROR EXIT IS MADE, THE EIGENVALUES ARE CORRECT AND C ORDERED FOR INDICES 1,2,...IERR-1, BUT MAY NOT BE C THE SMALLEST EIGENVALUES. C C E2 HAS BEEN DESTROYED. C C IERR IS SET TO C ZERO FOR NORMAL RETURN, C J IF THE J-TH EIGENVALUE HAS NOT BEEN C DETERMINED AFTER 30 ITERATIONS. C C CALLS PYTHAG FOR DSQRT(A*A + B*B) . C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ c c unnecessary initialization of B and C to keep g77 -Wall happy c B = 0.0D0 C = 0.0D0 C IERR = 0 IF (N .EQ. 1) GO TO 1001 C DO I = 2, N E2(I-1) = E2(I) end do C F = 0.0D0 T = 0.0D0 E2(N) = 0.0D0 C DO 290 L = 1, N J = 0 H = DABS(D(L)) + DSQRT(E2(L)) IF (T .GT. H) GO TO 105 T = H B = EPSLON(T) C = B * B C .......... LOOK FOR SMALL SQUARED SUB-DIAGONAL ELEMENT .......... 105 DO 110 M = L, N IF (E2(M) .LE. C) GO TO 120 C .......... E2(N) IS ALWAYS ZERO, SO THERE IS NO EXIT C THROUGH THE BOTTOM OF THE LOOP .......... 110 CONTINUE C 120 IF (M .EQ. L) GO TO 210 130 IF (J .EQ. 30) GO TO 1000 J = J + 1 C .......... FORM SHIFT .......... 
L1 = L + 1 S = DSQRT(E2(L)) G = D(L) P = (D(L1) - G) / (2.0D0 * S) R = PYTHAG(P,1.0D0) D(L) = S / (P + DSIGN(R,P)) H = G - D(L) C DO I = L1, N D(I) = D(I) - H end do C F = F + H C .......... RATIONAL QL TRANSFORMATION .......... G = D(M) IF (G .EQ. 0.0D0) G = B H = G S = 0.0D0 MML = M - L C .......... FOR I=M-1 STEP -1 UNTIL L DO -- .......... DO 200 II = 1, MML I = M - II P = G * H R = P + E2(I) E2(I+1) = S * R S = E2(I) / R D(I+1) = H + S * (H + D(I)) G = D(I) - E2(I) / G IF (G .EQ. 0.0D0) G = B H = G * P / R 200 CONTINUE C E2(L) = S * G D(L) = H C .......... GUARD AGAINST UNDERFLOW IN CONVERGENCE TEST .......... IF (H .EQ. 0.0D0) GO TO 210 IF (DABS(E2(L)) .LE. DABS(C/H)) GO TO 210 E2(L) = H * E2(L) IF (E2(L) .NE. 0.0D0) GO TO 130 210 P = D(L) + F C .......... ORDER EIGENVALUES .......... IF (L .EQ. 1) GO TO 250 C .......... FOR I=L STEP -1 UNTIL 2 DO -- .......... DO 230 II = 2, L I = L + 2 - II IF (P .GE. D(I-1)) GO TO 270 D(I) = D(I-1) 230 CONTINUE C 250 I = 1 270 D(I) = P 290 CONTINUE C GO TO 1001 C .......... SET ERROR -- NO CONVERGENCE TO AN C EIGENVALUE AFTER 30 ITERATIONS .......... 1000 IERR = L 1001 RETURN END SUBROUTINE TRED1(NM,N,A,D,E,E2) C INTEGER I,J,K,L,N,II,NM,JP1 DOUBLE PRECISION A(NM,N),D(N),E(N),E2(N) DOUBLE PRECISION F,G,H,SCALE C C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TRED1, C NUM. MATH. 11, 181-195(1968) BY MARTIN, REINSCH, AND WILKINSON. C HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 212-226(1971). C C THIS SUBROUTINE REDUCES A REAL SYMMETRIC MATRIX C TO A SYMMETRIC TRIDIAGONAL MATRIX USING C ORTHOGONAL SIMILARITY TRANSFORMATIONS. C C ON INPUT C C NM MUST BE SET TO THE ROW DIMENSION OF TWO-DIMENSIONAL C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM C DIMENSION STATEMENT. C C N IS THE ORDER OF THE MATRIX. C C A CONTAINS THE REAL SYMMETRIC INPUT MATRIX. ONLY THE C LOWER TRIANGLE OF THE MATRIX NEED BE SUPPLIED. C C ON OUTPUT C C A CONTAINS INFORMATION ABOUT THE ORTHOGONAL TRANS- C FORMATIONS USED IN THE REDUCTION IN ITS STRICT LOWER C TRIANGLE. THE FULL UPPER TRIANGLE OF A IS UNALTERED. C C D CONTAINS THE DIAGONAL ELEMENTS OF THE TRIDIAGONAL MATRIX. C C E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE TRIDIAGONAL C MATRIX IN ITS LAST N-1 POSITIONS. E(1) IS SET TO ZERO. C C E2 CONTAINS THE SQUARES OF THE CORRESPONDING ELEMENTS OF E. C E2 MAY COINCIDE WITH E IF THE SQUARES ARE NOT NEEDED. C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ C DO 100 I = 1, N D(I) = A(N,I) A(N,I) = A(I,I) 100 CONTINUE C .......... FOR I=N STEP -1 UNTIL 1 DO -- .......... DO 300 II = 1, N I = N + 1 - II L = I - 1 H = 0.0D0 SCALE = 0.0D0 IF (L .LT. 1) GO TO 130 C .......... SCALE ROW (ALGOL TOL THEN NOT NEEDED) .......... DO K = 1, L SCALE = SCALE + DABS(D(K)) end do C IF (SCALE .NE. 0.0D0) GO TO 140 C DO 125 J = 1, L D(J) = A(L,J) A(L,J) = A(I,J) A(I,J) = 0.0D0 125 CONTINUE C 130 E(I) = 0.0D0 E2(I) = 0.0D0 GO TO 300 C 140 continue DO K = 1, L D(K) = D(K) / SCALE H = H + D(K) * D(K) end do C E2(I) = SCALE * SCALE * H F = D(L) G = -DSIGN(DSQRT(H),F) E(I) = SCALE * G H = H - F * G D(L) = F - G IF (L .gt. 1) then C .......... FORM A*U .......... DO J = 1, L E(J) = 0.0D0 end do C DO 240 J = 1, L F = D(J) G = E(J) + A(J,J) * F JP1 = J + 1 IF (L .ge. JP1) then DO K = JP1, L G = G + A(K,J) * D(K) E(K) = E(K) + A(K,J) * F end do end if E(J) = G 240 CONTINUE C .......... FORM P .......... 
F = 0.0D0 C DO J = 1, L E(J) = E(J) / H F = F + E(J) * D(J) end do C H = F / (H + H) C .......... FORM Q .......... DO J = 1, L E(J) = E(J) - H * D(J) end do C .......... FORM REDUCED A .......... DO J = 1, L F = D(J) G = E(J) DO K = J, L A(K,J) = A(K,J) - F * E(K) - G * D(K) end do end do end if c 285 DO J = 1, L F = D(J) D(J) = A(L,J) A(L,J) = A(I,J) A(I,J) = F * SCALE end do C 300 CONTINUE RETURN END SUBROUTINE TRED2(NM,N,A,D,E,Z) C INTEGER I,J,K,L,N,II,NM,JP1 DOUBLE PRECISION A(NM,N),D(N),E(N),Z(NM,N) DOUBLE PRECISION F,G,H,HH,SCALE C C THIS SUBROUTINE IS A TRANSLATION OF THE ALGOL PROCEDURE TRED2, C NUM. MATH. 11, 181-195(1968) BY MARTIN, REINSCH, AND WILKINSON. C HANDBOOK FOR AUTO. COMP., VOL.II-LINEAR ALGEBRA, 212-226(1971). C C THIS SUBROUTINE REDUCES A REAL SYMMETRIC MATRIX TO A C SYMMETRIC TRIDIAGONAL MATRIX USING AND ACCUMULATING C ORTHOGONAL SIMILARITY TRANSFORMATIONS. C C ON INPUT C C NM MUST BE SET TO THE ROW DIMENSION OF TWO-DIMENSIONAL C ARRAY PARAMETERS AS DECLARED IN THE CALLING PROGRAM C DIMENSION STATEMENT. C C N IS THE ORDER OF THE MATRIX. C C A CONTAINS THE REAL SYMMETRIC INPUT MATRIX. ONLY THE C LOWER TRIANGLE OF THE MATRIX NEED BE SUPPLIED. C C ON OUTPUT C C D CONTAINS THE DIAGONAL ELEMENTS OF THE TRIDIAGONAL MATRIX. C C E CONTAINS THE SUBDIAGONAL ELEMENTS OF THE TRIDIAGONAL C MATRIX IN ITS LAST N-1 POSITIONS. E(1) IS SET TO ZERO. C C Z CONTAINS THE ORTHOGONAL TRANSFORMATION MATRIX C PRODUCED IN THE REDUCTION. C C A AND Z MAY COINCIDE. IF DISTINCT, A IS UNALTERED. C C QUESTIONS AND COMMENTS SHOULD BE DIRECTED TO BURTON S. GARBOW, C MATHEMATICS AND COMPUTER SCIENCE DIV, ARGONNE NATIONAL LABORATORY C C THIS VERSION DATED AUGUST 1983. C C ------------------------------------------------------------------ C DO I = 1, N DO J = I, N Z(J,I) = A(J,I) end do D(I) = A(N,I) end do C IF (N .EQ. 1) GO TO 510 C .......... FOR I=N STEP -1 UNTIL 2 DO -- .......... DO 300 II = 2, N I = N + 2 - II L = I - 1 H = 0.0D0 SCALE = 0.0D0 IF (L .LT. 2) GO TO 130 C .......... SCALE ROW (ALGOL TOL THEN NOT NEEDED) .......... DO K = 1, L SCALE = SCALE + DABS(D(K)) end do C IF (SCALE .NE. 0.0D0) GO TO 140 130 E(I) = D(L) C DO J = 1, L D(J) = Z(L,J) Z(I,J) = 0.0D0 Z(J,I) = 0.0D0 end do C GO TO 290 C 140 CONTINUE DO K = 1, L D(K) = D(K) / SCALE H = H + D(K) * D(K) end do C F = D(L) G = -DSIGN(DSQRT(H),F) E(I) = SCALE * G H = H - F * G D(L) = F - G C .......... FORM A*U .......... DO J = 1, L E(J) = 0.0D0 end do C DO 240 J = 1, L F = D(J) Z(J,I) = F G = E(J) + Z(J,J) * F JP1 = J + 1 IF (L .ge. JP1) then do K = JP1, L G = G + Z(K,J) * D(K) E(K) = E(K) + Z(K,J) * F end do end if E(J) = G 240 CONTINUE C .......... FORM P .......... F = 0.0D0 C do J = 1, L E(J) = E(J) / H F = F + E(J) * D(J) end do C HH = F / (H + H) C .......... FORM Q .......... do J = 1, L E(J) = E(J) - HH * D(J) end do C .......... FORM REDUCED A .......... DO J = 1, L F = D(J) G = E(J) C do K = J, L Z(K,J) = Z(K,J) - F * E(K) - G * D(K) end do C D(J) = Z(L,J) Z(I,J) = 0.0D0 end do C 290 D(I) = H 300 CONTINUE C .......... ACCUMULATION OF TRANSFORMATION MATRICES .......... do I = 2, N L = I - 1 Z(N,L) = Z(L,L) Z(L,L) = 1.0D0 H = D(I) IF (H .EQ. 
0.0D0) GO TO 380 C do K = 1, L D(K) = Z(K,I) / H end do C DO J = 1, L G = 0.0D0 C do K = 1, L G = G + Z(K,I) * Z(K,J) end do C do K = 1, L Z(K,J) = Z(K,J) - G * D(K) end do end do 380 continue do K = 1, L Z(K,I) = 0.0D0 end do end do C 510 CONTINUE DO I = 1, N D(I) = Z(N,I) Z(N,I) = 0.0D0 end do Z(N,N) = 1.0D0 E(1) = 0.0D0 RETURN END robustbase/src/wgt_himed.c0000644000176200001440000000442713774372203015344 0ustar liggesusers/* * R : A Computer Language for Statistical Data Analysis * Copyright (C) 2006--2007 the R Development Core Team * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Used to be part of ./qn_sn.c Note by MM: We have explicit permission from P.Rousseeuw to licence it under the GNU Public Licence. See also ../inst/Copyrights */ #include /* ^^^^^^^^^^ is supposedly more common and standard than * #include * or #include */ /* --> int64_t ; if people don't have the above, they can forget about it.. */ #include /* -> and much more */ #include "robustbase.h" // whimed() and whimed_i() function called from C : in ./mc.c , ./qn_sn.c : #define _i_whimed_ #include "wgt_himed_templ.h" #define _d_whimed_ #include "wgt_himed_templ.h" /* Interface routines to be called via .C() : */ void wgt_himed_i(double *x, Sint *n, Sint *iw, double *res) { double *a_srt, *acand; int *iw_cand, nn = (int)*n; char *vmax; vmax = vmaxget(); acand = (double *)R_alloc(nn, sizeof(double)); a_srt = (double *)R_alloc(nn, sizeof(double)); iw_cand= (int *) R_alloc(nn, sizeof(int)); *res = whimed_i(x, (int *)iw, nn, acand, a_srt, iw_cand); vmaxset(vmax); } void wgt_himed(double *x, Sint *n, double *w, double *res) { double *a_srt, *a_cand, *w_cand; int nn = (int)*n; char *vmax; vmax = vmaxget(); a_cand = (double *) R_alloc(nn, sizeof(double)); a_srt = (double *) R_alloc(nn, sizeof(double)); w_cand = (double *) R_alloc(nn, sizeof(double)); *res = whimed(x, w, nn, a_cand, a_srt, w_cand); vmaxset(vmax); } robustbase/src/wgt_himed_templ.h0000644000176200001440000000540413774372203016546 0ustar liggesusers/*------ Definition of a template for whimed(_i) : * * -------- ~~~~~~ * i.e., included several times from ./wgt_himed.c * ~~~~~~~~~~~~~ */ #ifdef _d_whimed_ # define _WHIMED_ whimed # define _WGT_TYPE_ double # define _WGT_SUM_TYPE_ double # undef _d_whimed_ #elif defined (_i_whimed_) # define _WHIMED_ whimed_i # define _WGT_TYPE_ int # define _WGT_SUM_TYPE_ int64_t # undef _i_whimed_ #else # error "must define correct whimed_ macro !" #endif double _WHIMED_(double *a, _WGT_TYPE_ *w, int n, double* a_cand, double *a_srt, _WGT_TYPE_* w_cand) { /* Algorithm to compute the weighted high median in O(n) time. The whimed is defined as the smallest a[j] such that the sum of the weights of all a[i] <= a[j] is strictly greater than half of the total weight. 
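## Aside (illustration only): the definition just stated can be checked against a
## brute-force R version -- naive and O(n log n) because of the sorting, whereas the
## C code here is O(n).  whimed_naive is a hypothetical name:
whimed_naive <- function(a, w) {             ## smallest a[j] with sum(w[a <= a[j]]) > sum(w)/2
  o <- order(a); a <- a[o]; w <- w[o]
  a[which(cumsum(w) > sum(w) / 2)[1]]
}
whimed_naive(c(2, 5, 1, 4), c(1, 1, 3, 1))   ## 2: weights of {a <= 1} sum to 3 (not > 3),
                                             ##    weights of {a <= 2} sum to 4 > 6/2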
Arguments: a: double array containing the observations n: number of observations w: array of (int/double) weights of the observations. */ int i; /* sum of weights: `int' do overflow when n ~>= 1e5 */ _WGT_SUM_TYPE_ wleft, wmid, wright, w_tot, wrest; double trial; w_tot = wrest = 0; for (i = 0; i < n; ++i) w_tot += w[i]; #ifdef DEBUG_whimed REprintf("wgt_himed(a[], w[], n) -- on entry: n=%d, w_tot=%g\n", n, (double)w_tot); #endif if(n == 0) return NA_REAL; /* REPEAT : */ do { int n2 = n/2;/* =^= n/2 +1 with 0-indexing */ for (i = 0; i < n; ++i) a_srt[i] = a[i]; rPsort(a_srt, n, n2); trial = a_srt[n2]; wleft = 0; wmid = 0; wright= 0; for (i = 0; i < n; ++i) { if (a[i] < trial) wleft += w[i]; else if (a[i] > trial) wright += w[i]; else wmid += w[i]; } /* wleft = sum_{i; a[i] < trial} w[i] * wmid = sum_{i; a[i] == trial} w[i] at least one 'i' since trial is one a[]! * wright= sum_{i; a[i] > trial} w[i] */ #ifdef DEBUG_whimed REprintf(" trial=%-g; w(left|mid|right) = (%g,%g,%g); ", trial, (double)wleft, (double)wmid, (double)wright); #endif int kcand = 0; if (2 * (wrest + wleft) > w_tot) { for (i = 0; i < n; ++i) { if (a[i] < trial) { a_cand[kcand] = a[i]; w_cand[kcand] = w[i]; ++kcand; } } } else if (2 * (wrest + wleft + wmid) <= w_tot) { for (i = 0; i < n; ++i) { if (a[i] > trial) { a_cand[kcand] = a[i]; w_cand[kcand] = w[i]; ++kcand; } } wrest += wleft + wmid; #ifdef DEBUG_whimed REprintf(" new wrest = %g; ", (double)wrest); #endif } else { #ifdef DEBUG_whimed REprintf(" -> found! return trial\n"); #endif return trial; /*==========*/ } n = kcand; #ifdef DEBUG_whimed REprintf(" ... and try again with n:= kcand=%d\n", n); #endif for (i = 0; i < n; ++i) { a[i] = a_cand[i]; w[i] = w_cand[i]; } } while(1); } /* _WHIMED_ */ #undef _WHIMED_ #undef _WGT_TYPE_ #undef _WGT_SUM_TYPE_ robustbase/src/mc.c0000644000176200001440000002626313713564014013772 0ustar liggesusers/* Algorithm for the skewness estimator medcouple (MC) -------------------------------------------------- ( originally matlabmc.c and also mc/mcrsoft/spmc.c ) */ #include #include #include // -> int64_t #include /* -> fmax2(.,.) */ #include /* Interface routines to be called via .C() and those from API : */ #include "robustbase.h" /* including whimed_i(a,iw,n): the weighted high median of an array a of length n, using the positive integer weights iw[]. * which is in ./wgt_himed.c_templ * ~~~~~~~~~~~~~~~~~ */ /* Includes the auxiliary function h_kern(a,b, ai,bi,ab, eps): the values h(a,b) needed to compute the mc */ static double h_kern(double a, double b, int ai, int bi, int ab, double eps, Rboolean do_scale); // Called via .C() : void mc_C(double *z, int *in, double *eps, int *iter, double *out, int *scale) { *out = mc_C_d(z, *in, eps, iter, *scale); return; } /* MM: The tolerance 'eps1' and 'eps2' can now be passed from R; * the original code had only one 'eps' for both and hardcoded * eps = 0.0000000000001; (== 1e-13 ) * * MK: eps1: for (relative) "equality" checks * eps2: used to check for over- and underflow, respectively * therefore I suggest eps1 = DBL_EPS and eps2 = DBL_MIN */ double mc_C_d(const double z[], int n, const double eps[], int *iter, int scale) { /* NOTE: eps = c(eps1, eps2) iter := c(maxit, trace.lev) as input = c(it, converged) as output */ int trace_lev = iter[1], it = 0; Rboolean converged = TRUE, do_scale = (Rboolean) scale; double medc; // "the" result static const double Large = DBL_MAX / 4.; if (n < 3) { medc = 0.; goto Finish; } /* copy data before sort()ing in place, also reflecting it -- dealing with +-Inf. 
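## Aside (illustration only, not the package's code): it helps to keep the target
## quantity in mind -- the medcouple is the median of the kernel h over all pairs with
## x_i <= median(x) <= x_j, as in Brys, Hubert and Struyf.  A naive O(n^2) R sketch,
## with mc_naive a hypothetical name; general ties with the median would need the sign
## rule implemented in h_kern() further below:
mc_naive <- function(x) {
  m  <- median(x)
  xp <- x[x >= m]; xm <- x[x <= m]
  h  <- outer(xp, xm, function(a, b) ((a - m) - (m - b)) / (a - b))
  h[is.nan(h)] <- 0                    ## the single (m, m) pair when n is odd
  median(h)
}
mc_naive(c(1, 2, 4, 5))                ## 0 for a symmetric sample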
NOTE: x[0] "empty" so we can use 1-indexing below */ double *x = (double *) R_alloc(n+1, sizeof(double)); x[0] = 0; for (int i = 0; i < n; i++) { double zi = z[i]; x[i+1] = - ((zi == R_PosInf) ? Large : (zi == R_NegInf ? -Large : zi)); } R_rsort(&x[1], n); /* full sort */ double xmed; // := median( x[1:n] ) = - median( z[0:(n-1)] ): if (n%2) { // n odd xmed = x[(n/2)+1]; } else { // n even int ind = (n/2); xmed = (x[ind] + x[ind+1])/2; } double x_eps = eps[0] * (do_scale ? eps[0] + fabs(xmed) : fabs(xmed)); if (fabs(x[1] - xmed) <= x_eps) { medc = -1.; goto Finish; } else if (fabs(x[n] - xmed) <= x_eps) { medc = 1.; goto Finish; } /* else : median is not at the border ------------------- */ if(trace_lev) Rprintf("mc_C_d(z[1:%d], trace_lev=%d, scale=%s): Median = %g (not at the border)\n", n, trace_lev, do_scale ? "T" : "F", -xmed); int i,j; /* center x[] wrt median --> such that then median( x[1:n] ) == 0 */ for (i = 1; i <= n; i++) x[i] -= xmed; if(do_scale) { /* MM: ==> This scaling is extremely outlier-dependent -- it *kills* equivariance when e.g. x[n] --> very large. e.g., below '(eps[0] + fabs(xmed))' depends on rescaling Should *NOT* be needed if everything else is *relative* instead of absolute Consider replacing 1) eps[0] * (eps[0] + fabs(xmed)) with eps[0]*fabs(xmed) 2) x[j] > x_eps with x[j] >= x_eps (>= : for 0) */ /* Now scale to inside [-0.5, 0.5] and flip sign such that afterwards * x[1] >= x[2] >= ... >= x[n] */ double xden = -2 * fmax2(-x[1], x[n]); for (i = 1; i <= n; i++) x[i] /= xden; xmed /= xden; x_eps = eps[0] * (eps[0] + fabs(xmed)); if(trace_lev >= 2) Rprintf(" x[] has been rescaled (* 1/s) with s = %g\n", -xden); } else { // no re-scaling; still flipping signs : for (i = 1; i <= n; i++) x[i] *= -1.; } j = 1; while (j <= n && x[j] >= x_eps) { /* test relative to xmed */ /* x1[j] = x[j]; */ j++; } if(trace_lev >= 2) Rprintf(" x1[] := {x | x_j >= x_eps = %g} has %d (='j-1') entries\n", x_eps, j-1); i = 1; double *x2 = x+j-1; /* pointer -- corresponding to x2[i] = x[j]; */ while (j <= n && x[j] >= -x_eps) { /* test relative to xmed */ /* x1[j] = x[j]; */ /* x2[i] = x[j]; */ j++; i++; } /* now x1[] := {x | x_j > -eps} also includes the median (0) */ if(trace_lev >= 2) Rprintf("'median-x' {x | -eps < x_i <= eps} has %d (= 'k') entries\n", i-1); int h1 = j-1, /* == size of x1[] == the sum of those two sizes above */ /* conceptually, x2[] := {x | x_j <= eps} (which includes the median 0) */ h2 = i + (n-j);// == size of x2[] == maximal size of whimed() arrays if(trace_lev) Rprintf(" now allocating 2+5 work arrays of size (1+) h2=%d each:\n", h2); /* work arrays for whimed_i() : allocate *once* only !! 
*/ double *acand = (double *) R_alloc(h2, sizeof(double)), *a_srt = (double *) R_alloc(h2, sizeof(double)); int *iw_cand= (int *) R_alloc(h2, sizeof(int)), /* work arrays for the fast-median-of-table algorithm: * currently still with 1-indexing */ *left = (int *) R_alloc((h2+1), sizeof(int)), *right = (int *) R_alloc((h2+1), sizeof(int)), *p = (int *) R_alloc((h2+1), sizeof(int)), *q = (int *) R_alloc((h2+1), sizeof(int)); for (i = 1; i <= h2; i++) { left [i] = 1; right[i] = h1; } int64_t nr = ((int64_t) h1) * ((int64_t) h2), // <-- careful to *NOT* overflow knew = nr/2 +1; if(trace_lev >= 2) Rprintf(" (h1,h2, nr, knew) = (%d,%d, %.0f, %.0f)\n", h1,h2, (double)nr, (double)knew); double trial = -2./* -Wall */; double *work= (double *) R_alloc(n, sizeof(double)); int *iwt = (int *) R_alloc(n, sizeof(int)); Rboolean IsFound = FALSE; int nl = 0, neq = 0; /* MK: 'neq' counts the number of observations in the * inside the tolerance range, i.e., where left > right + 1, * since we would miss those when just using 'nl-nr'. * This is to prevent index overflow in work[] later on. * left might be larger than right + 1 since we are only * testing with accuracy eps_trial and therefore there might * be more than one observation in the `tolerance range` * between < and <=. */ while (!IsFound && (nr-nl+neq > n) && it < iter[0]) { int64_t sum_p, sum_q; it++; j = 0; for (i = 1; i <= h2; i++) if (left[i] <= right[i]) { iwt[j] = right[i] - left[i]+1; int k = left[i] + (iwt[j]/2); work[j] = h_kern(x[k], x2[i], k, i, h1+1, eps[1], do_scale); j++; } if(trace_lev >= 4) { Rprintf(" before whimed(): work and iwt, each [0:(%d-1)]:\n", j); if(j >= 100) { for(i=0; i < 90; i++) Rprintf(" %8g", work[i]); Rprintf("\n ... "); for(i=j-4; i < j; i++) Rprintf(" %8g", work[i]); Rprintf("\n"); for(i=0; i < 90; i++) Rprintf(" %8d", iwt [i]); Rprintf("\n ... "); for(i=j-4; i < j; i++) Rprintf(" %8d", iwt [i]); Rprintf("\n"); } else { // j <= 99 for(i=0; i < j; i++) Rprintf(" %8g", work[i]); Rprintf("\n"); for(i=0; i < j; i++) Rprintf(" %8d", iwt [i]); Rprintf("\n"); } } trial = whimed_i(work, iwt, j, acand, a_srt, iw_cand); double eps_trial = eps[0] * (do_scale ? eps[0] + fabs(trial) : fabs(trial)); if(trace_lev >= 3) Rprintf("%2s it=%2d, whimed(*, n=%6d)=%11.5g ", " ", it, j, trial); j = 1; for (i = h2; i >= 1; i--) { while (j <= h1 && h_kern(x[j],x2[i],j,i,h1+1,eps[1], do_scale) - trial > eps_trial) { // while (j <= h1 && h_kern(x[j],x2[i],j,i,h1+1,eps[1], do_scale) > trial) { if (trace_lev >= 5) Rprintf("\nj=%3d, i=%3d, x[j]=%g, x2[i]=%g, h=%g", j, i, x[j], x2[i], h_kern(x[j],x2[i],j,i,h1+1,eps[1], do_scale)); j++; } /* for(; j <= h1; j++) { */ /* register double h = h_kern(x[j],x2[i],j,i,h1+1,eps[1], do_scale); */ /* if(h > trial) break; */ /* } */ p[i] = j-1; } j = h1; for (i = 1, sum_p=0, sum_q=0; i <= h2; i++) { while (j >= 1 && trial - h_kern(x[j],x2[i],j,i,h1+1,eps[1], do_scale) > eps_trial) // while (j >= 1 && h_kern(x[j],x2[i],j,i,h1+1,eps[1], do_scale) < trial) j--; q[i] = j+1; sum_p += p[i]; sum_q += j;/* = q[i]-1 */ } if(trace_lev >= 3) { if (trace_lev == 3) Rprintf("sum_(p,q)= (%.0f,%.0f)", (double)sum_p, (double)sum_q); else { /* trace_lev >= 4 */ Rboolean lrg = h2 >= 100; int i_m = lrg ? 
95 : h2; Rprintf("\n%3s p[1:%d]:", "", h2); for(i = 1; i <= i_m; i++) Rprintf(" %2d", p[i]); if(lrg) Rprintf(" ..."); Rprintf(" sum=%4.0f\n%3s q[1:%d]:", (double)sum_p, "", h2); for(i = 1; i <= i_m; i++) Rprintf(" %2d", q[i]); if(lrg) Rprintf(" ..."); Rprintf(" sum=%4.0f\n", (double)sum_q); } } if (knew <= sum_p) { if(trace_lev >= 3) Rprintf("; sum_p >= kn\n"); for (i = 1, neq = 0; i <= h2; i++) { right[i] = p[i]; if (left[i] > right[i]+1) neq += left[i]-right[i]-1; } nr = sum_p; } else { /* knew > sum_p */ IsFound = (knew <= sum_q); /* i.e. sum_p < knew <= sum_q */; if(trace_lev >= 3) Rprintf("; s_p < kn ?<=? s_q: %s\n", IsFound ? "TRUE": "no"); if(IsFound) { medc = trial; } else { /* knew > sum_q */ for (i = 1; i <= h2; i++) { left[i] = q[i]; if (left[i] > right[i]+1) neq += left[i]-right[i]-1; } nl = sum_q; } } R_CheckUserInterrupt(); } /* end while loop */ converged = IsFound || (nr-nl+neq <= n); if(!converged) { warning("maximal number of iterations (%d =? %d) reached prematurely\n", iter[0], it); /* still: */ medc = trial; } if (converged && !IsFound) { /* e.g., for mc(1:4) : */ j = 0; for (i = 1; i <= h2; i++) { if (left[i] <= right[i]) { for (int k = left[i]; k <= right[i]; k++) { work[j] = -h_kern(x[k],x2[i],k,i,h1+1,eps[1], do_scale); j++; } } } if(trace_lev) Rprintf(" not found [it=%d, (nr,nl) = (%d,%d)]," " -> (knew-nl, j) = (%d,%d)\n", it, nr, nl, knew-nl, j); /* using rPsort(work, n,k), since we don't need work[] anymore:*/ rPsort(work, /* n = */ j, /* k = */ knew-nl-1); medc = - work[knew-nl-1]; } if(trace_lev >= 2) Rprintf(converged ? "converged in %d iterations\n" : "not converged in %d (maxit) iterations; try enlarging eps1, eps2 !?\n", it); Finish: iter[0] = it; /* to return */ iter[1] = converged; return medc; } /* end{ mc_C_d } */ /* h_kern() -- was called calwork() in original rmc.c code and did if (fabs(a-b) < 2.0*eps) { if (ai+bi == ab) { return 0; } else { return (ai+bi < ab) ? 1 : -1 ; } } else { return (a+b)/(a-b); } */ static double h_kern(double a, double b, int ai, int bi, int ab, double eps, Rboolean do_scale) { // eps := 'eps2' in R's mc() /* if (fabs(a-b) <= DBL_MIN) */ /* check for zero division and positive b */ // MK added a check '|| b > 0' ("or positive b"), but said "_and_ positive b" (r221) /* if (fabs(a-b) < 2.0*eps || b > 0) */ // MM: don't see why (but it seems needed); the check should be *relative* to |a+b| if (b > 0 || fabs(a-b) <= eps*(do_scale ? 2. : fabs(a+b))) // '<=' since RHS maybe 0 return sign((double)(ab - (ai+bi))); /* else */ return (a+b)/(a-b); } /* Local variables section * Local variables: * mode: c * kept-old-versions: 12 * kept-new-versions: 20 * End: */ robustbase/src/rffastmcd.f0000644000176200001440000017410313635407354015352 0ustar liggesuserscc -*- mode: fortran; kept-new-versions: 25; kept-old-versions: 20 -*- cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc cc rrcov : Scalable Robust Estimators with High Breakdown Point cc cc This program is free software; you can redistribute it and/or modify cc it under the terms of the GNU General Public License as published by cc the Free Software Foundation; either version 2 of the License, or cc (at your option) any later version. cc cc This program is distributed in the hope that it will be useful, cc but WITHOUT ANY WARRANTY; without even the implied warranty of cc MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the cc GNU General Public License for more details. 
cc cc You should have received a copy of the GNU General Public License cc along with this program; if not, write to the Free Software cc Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA cc cc I would like to thank Peter Rousseeuw and Katrien van Driessen for cc providing the initial code of this function. cc cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc cc cc Computes the MCD estimator of multivariate location and scatter. cc This estimator is given by the subset of h observations for which cc the determinant of their covariance matrix is minimal. The MCD cc location estimate is then the mean of those h points, and the MCD cc scatter estimate is their covariance matrix. This value of h may be cc chosen by the user; its default value is roughly n/2. cc cc The MCD estimator was first introduced in: cc cc Rousseeuw, P.J. (1984), "Least Median of Squares Regression," cc Journal of the American Statistical Association, Vol. 79, cc pp. 871-881. [See page 877.] cc cc The MCD is a robust estimator in the sense that the estimates are cc not unduly influenced by outliers in the data, even if there cc are many outliers. Its robustness was proved in: cc cc Rousseeuw, P.J. (1985), "Multivariate Estimation with High cc Breakdown Point," in Mathematical Statistics and Applications, cc edited by W. Grossmann, G. Pflug, I. Vincze, and W. Wertz. cc Dordrecht: Reidel Publishing Company, pp. 283-297. cc cc Rousseeuw, P.J. and Leroy, A.M. (1987), Robust Regression and cc Outlier Detection, Wiley-Interscience, New York. [Chapter 7] cc cc The program also computes the distance of each observation cc from the center (location) of the data, relative to the shape cc (scatter) of the data: cc cc * Using the classical estimates yields the Mahalanobis distance cc MD(i). Often, outlying points fail to have a large Mahalanobis cc distance because of the masking effect. cc cc * Using the MCD estimates yields a robust distance RD(i). cc These distances allow us to easily identify the outliers. cc cc For applications of robust distances in a regression context see: cc cc Rousseeuw, P.J. and van Zomeren, B.C. (1990), "Unmasking cc Multivariate Outliers and Leverage Points," Journal of the cc American Statistical Association, Vol. 85, 633-639. cc cc There also a diagnostic plot is given to distinguish between cc regular observations, vertical outliers, good leverage points, cc and bad leverage points. cc cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc cc cc The new FAST_MCD algorithm introduced here is due to cc cc Rousseeuw, P.J. and Van Driessen, K. (1997), "A Fast cc Algorithm for the Minimum Covariance Determinant cc Estimator," in preparation. cc cc The algorithm works as follows: cc cc The dataset contains n cases, and nvar variables are used. cc Let n_0 := 2 * nmini (== 600 by default). cc When n < n_0, the algorithm will analyze the dataset as a whole, cc when n >= n_0, the algorithm will use several subdatasets. cc cc 1. n < n_0 : When the dataset is analyzed as a whole, a trial cc subsample of nvar+1 cases is taken, of which the mean and cc covariance matrix is calculated. The h cases with smallest cc relative distances are used to calculate the next mean and cc covariance matrix, and this cycle is repeated k1 times. cc [For small n we can consider all subsets of nvar+1 out of n, else cc the algorithm draws 500 random subsets.] 
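cc   Aside (illustration only): a rough R sketch of one such concentration cycle
cc   ("C-step") as just described -- refit mean and covariance on the current subset,
cc   then keep the h cases with the smallest Mahalanobis distances.  The names c_step,
cc   X, h are illustrative, not the Fortran variables, and singular subsets are not
cc   handled here:
c_step <- function(X, subset, h) {
  mu <- colMeans(X[subset, , drop = FALSE])
  S  <- cov(X[subset, , drop = FALSE])
  d2 <- mahalanobis(X, center = mu, cov = S)
  order(d2)[seq_len(h)]                ## indices of the h most concentrated cases
}
## starting from one random (p+1)-subset and cycling k1 times:
##   sub <- sample(nrow(X), ncol(X) + 1)
##   for (it in 1:k1) sub <- c_step(X, sub, h)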
cc Afterwards, the best 10 solutions (covariance matrices and cc corresponding means) are used as starting values for the final cc iterations. These iterations stop when two subsequent determinants cc become equal. (At most k3 iteration steps are taken.) cc The solution with smallest determinant is retained. cc cc 2. n > n_0 --- more than n_0 = 2*nmini cases: The algorithm cc does part of the calculations on (at most) kmini nonoverlapping cc subdatasets, of (roughly) nmini cases. cc cc Stage 1: For each trial subsample in each subdataset, cc k1 iterations are carried out in that subdataset. cc For each subdataset, the 10 best solutions are stored. cc cc Stage 2 considers the union of the subdatasets, called the cc merged set. (If n is large, the merged set is a proper subset of cc the entire dataset.) In this merged set, each of the 'best cc solutions' of stage 1 are used as starting values for k2 cc iterations. Also here, the 10 best solutions are stored. cc cc Stage 3 depends on n, the total number of cases in the cc dataset. If n <= 5000, all 10 preliminary solutions are iterated cc k3 times. If n > 5000, only the best preliminary cc solution is iterated, and the number of iterations decreases to 1 cc according to n*nvar. (If n*nvar <= 100,000 we iterate k3 times, cc whereas for n*nvar > 1,000,000 we take only one iteration step.) cc cc An important advantage of the algorithm FAST_MCD is that it allows cc for exact fit situations, where more than h observations lie on cc a hyperplane. Then the program still yields the MCD location and cc scatter matrix, the latter being singular (as it should be), as cc well as the equation of the hyperplane. cc cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc subroutine rffastmcd(dat, n,nvar, nhalff, krep, nmini,kmini, ! 7 * initcov, initmean, inbest, det, ! 11 * weight, fit, coeff, kount, adcov, ! 16 * temp, index1, index2, indexx, nmahad, ndist, am, am2, ! 24 * slutn, med, mad, sd, means, bmeans, w, fv1, fv2, ! 33 * rec, sscp1, cova1, corr1, cinv1, cova2, cinv2, z, ! 41 * cstock, mstock, c1stock, m1stock, dath, ! 46 * cutoff, chimed, i_trace) ! 49 args cc VT::10.10.2005 - a DATA operator was used for computing the cc median and the 0.975 quantile of the chisq distribution cc with nvar degrees of freedom. Since now we have no cc restriction on the number of variables, these will be cc passed as parameters - cutoff and chimed implicit none cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc c c ALGORITHM PARAMETERS: c c The number of iteration steps in stages 1,2 and 3 can be changed c by adapting the parameters k1, k2, and k3. integer k1,k2,k3, int_max parameter (k1 = 2 ) parameter (k2 = 2 ) parameter (k3 = 100) c int_max: easily recognized, slightly smaller than 2147483647 = .Machine$integer.max parameter (int_max = 2146666666) c Arguments integer n,nvar ! (n, p) integer nhalff ! == quan := h(alpha) >= n/2 "n half" integer krep ! krep == nsamp c krep := the total number of trial subsamples c to be drawn when n exceeds 2*nmini; c krep = 0 :<==> "exact" <==> all possible subsamples c was hardcoded krep := 500; now an *argument* integer kmini ! the maximal number of subdatasets and integer nmini ! 
their minimal size double precision dat(n,nvar) double precision initcov(nvar*nvar), initmean(nvar) integer inbest(nhalff) double precision det integer weight(n), fit double precision coeff(kmini,nvar) integer kount double precision adcov(nvar*nvar) integer temp(n) integer index1(n), index2(n), indexx(n) double precision nmahad(n), ndist(n) double precision am(n), am2(n), slutn(n) double precision med(nvar), mad(nvar), sd(nvar), means(nvar), * bmeans(nvar), w(nvar), fv1(nvar), fv2(nvar) double precision rec(nvar+1), * sscp1((nvar+1)*(nvar+1)), corr1(nvar*nvar), * cova1(nvar*nvar), cinv1(nvar*nvar), * cova2(nvar*nvar), cinv2(nvar*nvar), * z(nvar*nvar) double precision cstock(10,nvar*nvar), mstock(10,nvar), * c1stock(10*kmini, nvar*nvar), * m1stock(10*kmini, nvar*nvar), * dath(nmini*kmini, nvar) double precision cutoff, chimed integer i_trace integer l2i c Functions from ./rf-common.f : integer replow integer rfncomb double precision rffindq c ------------------------------------------------------------------ c Variables integer i,ii,iii, ix, j,jj,jjj, k,kk,kkk,kstep, * l,lll, m,mm,minigr, * nn, ngroup,nhalf,nrep,nsel, nv_2 double precision bstd, deti,detimin1,dist,dist2, eps, * object, qorder, t c km10, nmaxi: now *variable* as nmini integer km10, nmaxi, * ierr,matz,pnsel, tottimes, step, * flag(10*kmini), mini(kmini), * subdat(2, nmini*kmini) double precision mcdndex(10,2,kmini) c subndex: vector of indices; c length(subndex) = maximal value of n_j := mini(j) {j in 1:ngroup} below; n0 := nmini c mini(j) = n1 or n1+1, where n0 <= n1 < n_max := max_j n_j <= n1+1 <= 1+ (3 n0 - 1)/2 = (3 n0 + 1)/2 c ==> see vignette ../vignettes/fastMcd-kmini.Rnw integer subndex((3*nmini + 1)/ 2) double precision med1,med2, percen, pivot,rfmahad,medi2 logical all,part,fine,final,class c -Wall (false alarm): all = .true. part= .false. c Consistency correction now happens in R if(i_trace .ge. 2) then call pr1mcd(i_trace, n, nvar, nhalff, krep, nmini, kmini) endif call rndstart C -------- == GetRNGstate() in C nrep = krep kstep = k1 medi2 = 0 cc From here on, the sample size n is known. cc Some initializations can be made. First of all, h (= the number of cc observations on which the MCD is based) is given by the integer variable cc nhalff. cc If nhalff equals n, the MCD is the classical covariance matrix. cc The logical value class indicates this situation. cc The variable jbreak is the breakdown point of the MCD estimator cc based on nhalff observations, whereas jdefaul = (n+nvar+1)/2 cc would be the optimal value of nhalff, with maximal breakdown point. cc The variable percen is the corresponding percentage (MM: rather "fraction"). cc c unused jbreak=rfnbreak(nhalff,n,nvar) percen = dble(nhalff) / n ! the fraction, also called 'alpha' if(nvar.lt.5) then eps=1.0D-12 else if(nvar.ge.5.and.nvar.le.8) then eps=1.0D-14 else eps=1.0D-16 endif endif class = (nhalff .ge. n) if(class) goto 9500 ! compute *only* the classical estimate if(nvar.eq.1) then do jj=1,n ndist(jj)=dat(jj,1) end do call rfshsort(ndist,n) cc. consistency correction now happens in R code cc. nquant=min(int(real(((nhalff*1.D0/n)-0.5D0)*40))+1,11) cc. factor=faclts(nquant) cc. 
call rfmcduni(ndist,n,nhalff,slutn,bstd,am,am2, factor, call rfmcduni(ndist,n,nhalff,slutn,bstd,am,am2, 1.d0, * n-nhalff+1) initmean(1)=slutn(1) adcov(1)=bstd initcov(1)=bstd goto 9999 endif cc p >= 2 in the following cc ------ c These are "constants" given the arguments: nmaxi = nmini*kmini km10 = 10*kmini nv_2 = nvar*nvar cc Some initializations: cc matz = auxiliary variable for the subroutine rs, indicating whether cc or not eigenvectors are calculated cc nsel = number of variables + 1 cc ngroup = number of subdatasets, is in {1,2,.., kmini} cc part = logical value, true if the dataset is split up cc fine = logical value, becomes true when the subsets are merged cc final = logical value, to indicate the final stage of the algorithm cc all = logical value, true if all (p+1)-subsets out n of should be drawn; cc always true for (very) small n, but also when krep=0 (special value) cc subdat = matrix with a first row containing indices of observations cc and a second row indicating the corresponding subdataset cc matz=1 nsel=nvar+1 ngroup=1 fine=.false. final=.false. do i=1,nmaxi subdat(1,i)=int_max subdat(2,i)=int_max end do cc Determine whether the dataset needs to be divided into subdatasets cc or can be treated as a whole. The subroutine rfrdraw constructs cc nonoverlapping subdatasets, with uniform distribution of the case numbers. cc For small n, the number of trial subsamples is determined. c part := Shall we partition the data into sub-datasets / "groups"? part = (krep.gt.0 .and. n .ge. (2*nmini)) all = .not. part if(part) then do i=1,kmini mini(i)=0 end do kstep=k1 ngroup = n / nmini ! =: k = n % nmini (integer division) if(ngroup .lt. kmini) then c we distribute n evenly into ngroup subdatasets, of size mm = n / ngroup ! =: n_0 = n % k ==> rest r = n - k*N = n-k*n_0 c The rest r in {0,..,k-1} gives one extra obs. in the last r groups, i.e., c group numbers j > jj := k - r : ii = n - ngroup*mm ! =: r jj = ngroup - ii ! = k - r do j = 1,jj mini(j) = mm end do do j = jj+1,ngroup mini(j) = mm +1 end do minigr = ngroup*mm + ii else ! ngroup = k := floor(n/nmini) >= kmini =: k_0 : ngroup = kmini do j=1,kmini mini(j)=nmini end do minigr = kmini*nmini end if nhalf = int(mini(1)*percen) nrep = krep / ngroup ! integer division if(i_trace .ge. 2) + call prp1mcd (n,ngroup,minigr,nhalf,nrep, mini) call rfrdraw(subdat,n,minigr,mini,ngroup,kmini) else c "not part" : not partitioning; either krep == 0 or n <= 2*nmini-1 ( = 599 by default) minigr=n nhalf=nhalff kstep=k1 if(krep.eq.0 .or. n.le.replow(nsel)) then c use all combinations; happens iff nsel = nvar+1 = p+1 <= 6 nrep = rfncomb(nsel,n) else nrep=krep all = .false. endif endif c seed=iseed c above: prp1mcd (n,ngroup, minigr, nhalf,nrep, mini) if(i_trace .ge. 2) 1 call pr2mcd(l2i(part), l2i(all), 2 kstep, ngroup, minigr, nhalf, nrep) cc cc Some more initializations: cc m1stock = matrix containing the means of the ngroup*10 best estimates cc obtained in the subdatasets. cc c1stock = matrix containing the covariance matrices of the ngroup*10 cc best estimates obtained in the subdatasets. cc mstock = matrix containing the means of the ten best estimates cc obtained after merging the subdatasets and iterating from cc their best estimates. cc cstock = matrix containing the covariance matrices of the ten best cc estimates obtained after merging the subdatasets cc and iterating from their best estimates. 
cc means = mean vector cc bmeans = initial MCD location estimate cc sd = standard deviation vector cc nmahad = vector of mahalanobis distances cc ndist = vector of general (possibly robust) distances cc inbest = best solution vector cc index1 = index vector of subsample observations cc index2 = index vector of ordered mahalanobis distances cc indexx = temporary index vector, parallel to index1, used when cc generating all possible subsamples cc temp = auxiliary vector cc flag = vector with components indicating the occurrence of a cc singular intermediate MCD estimate. cc do j=1,nvar do k=1,10 mstock(k,j)=1234567.D0 do kk=1,kmini m1stock((kk-1)*10+k,j)=1234567.D0 end do do i=1,nvar do kk=1,kmini c1stock((kk-1)*10+k,(j-1)*nvar+i)=1234567.D0 end do cstock(k,(j-1)*nvar+i)=1234567.D0 end do end do means(j)=0.D0 bmeans(j)=0.D0 sd(j)=0.D0 end do do j=1,n nmahad(j)=0.D0 ndist(j)=0.D0 index1(j)=int_max index2(j)=int_max indexx(j)=int_max temp(j)=int_max end do do j=1,km10 flag(j)=1 end do 9500 continue c==== ********* Compute the classical estimates ************** c call rfcovinit(sscp1,nvar+1,nvar+1) do i=1,n do j=1,nvar rec(j)=dat(i,j) end do call rfadmit(rec,nvar,sscp1) end do call rfcovar(n,nvar,sscp1,cova1,means,sd) do j=1,nvar if(sd(j).eq.0.D0) goto 5001 end do call rfcovcopy(cova1,cinv1,nvar,nvar) det= 0. do j=1,nvar pivot=cinv1((j-1)*nvar+j) det=det + log(pivot) if(pivot.lt.eps) goto 5001 call rfcovsweep(cinv1,nvar,j) end do call rfcorrel(nvar,cova1,corr1,sd) c if just classical estimate, we are done if(class) goto 9999 goto 5002 c singularity '1' (exact fit := 1) : 5001 continue call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,n,nvar,means) call rfexact(kount,n,ndist, nvar, * sscp1,rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do j=1,nvar coeff(1,j)=z(j) end do fit=1 goto 9999 5002 continue cc cc Compute and store classical Mahalanobis distances. cc do j=1,n do i=1,nvar rec(i)=dat(j,i) end do nmahad(j)=rfmahad(rec,nvar,means,cinv1) end do cc ******* Compute the MCD estimates ************** ---------------------------- cc Main loop: inspects the subsamples. cc Every time the sscp of the subsample is placed in sscp1, cc its covariance matrix in cova1, and its inverse in cinv1 . cc The minimum covariance determinant matrix is placed in cova2, cc and its inverse in cinv2. cc The robust distances are placed in ndist. cc c tottimes := counting the total number of iteration steps in the main loop cc cc The algorithm returns here twice when the dataset is divided cc at the beginning of the program. According to the situation, cc new initializations are made. c fine == TRUE : <==> We are in the second stage, where the subdatasets are merged, c final == TRUE : <==> We are in the last stage, when the whole dataset is considered c In the last stage, the number of iterations 'nrep' c is determined according to the total number of observations and the dimension. tottimes=0 5555 object=10.D25 if(.not. part .or. final) then nn=n else if (fine) then !-> part & fine & .not. final nn=minigr else !-> part - "phase 1" (.not. fine & .not. final) nn=-1 endif if(i_trace .ge. 2) ! " Main loop, phase[%s]: ... " 1 call pr3mcd(l2i(part), l2i(fine), l2i(final), 2 nrep, nn, nsel, nhalf, kstep, nmini, kmini) if(fine .or.(.not.part.and.final)) then nrep = 10 c ---- == hardcoded nsel = nhalf kstep = k2 if (final) then ! 
"final": stage 3 -- nhalf=nhalff ngroup=1 c ksteps := k3 (= 100) unless n*p is "large" where c ksteps jumps down to at most 10 <<- "discontinuous!" FIXME if (n*nvar .le.100000) then kstep=k3 ! = 100 ("hardcoded default") else if (n*nvar .gt.100000 .and. n*nvar .le.200000) then kstep=10 else if (n*nvar .gt.200000 .and. n*nvar .le.300000) then kstep=9 else if (n*nvar .gt.300000 .and. n*nvar .le.400000) then kstep=8 else if (n*nvar .gt.400000 .and. n*nvar .le.500000) then kstep=7 else if (n*nvar .gt.500000 .and. n*nvar .le.600000) then kstep=6 else if (n*nvar .gt.600000 .and. n*nvar .le.700000) then kstep=5 else if (n*nvar .gt.700000 .and. n*nvar .le.800000) then kstep=4 else if (n*nvar .gt.800000 .and. n*nvar .le.900000) then kstep=3 else if (n*nvar .gt.900000 .and. n*nvar .le.1000000) then kstep=2 else ! n*p > 1e6 kstep=1 endif if (n.gt.5000) then nrep=1 endif else nhalf=int(minigr*percen) endif endif do i=1,nsel-1 index1(i)=i indexx(i)=i end do index1(nsel)=nsel-1 indexx(nsel)=nsel-1 cc cc Initialization of the matrices to store partial results. For the cc first stage of the algorithm, the currently best covariance matrices and cc means are stored in the matrices c1stock and m1stock initialized earlier. cc The corresponding objective values and the number of the trial subset cc are stored in the matrix mcdndex. cc For the second stage of the algorithm or for small datasets, only the cc currently best objective values are stored in the same matrix mcdndex cc and the corresponding covariance matrices and mean vectors are stored in cc the matrices cstock and mstock initialized earlier. cc if(.not. final) then do i=1,10 do j=1,ngroup mcdndex(i,1,j)=10.D25 mcdndex(i,2,j)=10.D25 end do end do endif if(.not.fine .and. .not.final) then !-- first phase do j=1,nvar do i=1,n am (i)=dat(i,j) am2(i)=dat(i,j) end do if(2*n/2 .eq. n) then med1=rffindq(am, n, n/2, index2) med2=rffindq(am2,n,(n+2)/2,index2) med(j)=(med1+med2)/2 else med(j)=rffindq(am,n,(n+1)/2,index2) endif do i=1,n ndist(i)=dabs(dat(i,j)-med(j)) end do mad(j)=rffindq(ndist,n,nhalff,index2) if(mad(j)-0.D0 .lt. eps) then do k=1,j-1 do i=1,n dat(i,k)=dat(i,k)*mad(k)+med(k) end do end do call rfcovinit(sscp1,nvar+1,nvar+1) do k=1,nsel do m=1,nvar rec(m)=dat(index2(k),m) end do call rfadmit(rec,nvar,sscp1) end do call rfcovar(nsel,nvar,sscp1,cova1,means,sd) call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) C VT::15.11.2014, fixing array overrun, found by MM C The following code expects that z (the plane coefficients) C are all zeros with 1 in the position of the variable with MAD=0 C If not, tries to find it. C if(.FALSE.) then if(z(j).ne.1) then do kk=1,nvar if(z(kk*nvar+j).eq.1) then do l=1,nvar z(l)=z(kk*nvar+l) end do goto 76 ! break endif end do endif 76 continue else C Instead of this, we set all coefficients to 0 and the one of C variable j to 1. The exactfit code will be set 3 and will be C handled respectively by the R code. do kk=1,nvar z(kk) = 0 end do z(j) = 1 end if call rfdis(dat,z,ndist,n,nvar,n,nvar,means) call rfexact(kount,n,ndist, nvar, * sscp1,rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do jjj=1,nvar coeff(1,jjj)=z(jjj) end do fit=3 goto 9999 endif do i=1,n dat(i,j)=(dat(i,j)-med(j))/mad(j) end do end do endif cc cc The matrix dath contains the observations to be used in the cc algorithm. 
In the first stage of the split-up procedure dath contains cc nmini objects, corresponding to the original observations, with the index cc of the processed group in the array subdat. For the second stage, the cc data points of all the subdatasets are merged in dath. cc The variable kount indicates the occurrence of a singular subsample leading cc to the corresponding plane. In some situations the variable kount counts cc the number of observations on that plane. cc if (fine .and. .not. final) then do j=1,minigr do k=1,nvar dath(j,k)=dat(subdat(1,j),k) end do end do endif kount=0 c---- For-Loop over groups - - - - - - - - - - - - - - - - - - - - - do 1111 ii= 1,ngroup if(.not.fine) kount=0 if(part .and. .not. fine) then nn=mini(ii) kk=0 do j=1,minigr if(subdat(2,j).eq.ii) then kk=kk+1 subndex(kk)=subdat(1,j) endif end do do j=1,mini(ii) do k=1,nvar dath(j,k)=dat(subndex(j),k) end do end do endif if(i_trace .ge. 3) call prgrmcd(ii, nn, i_trace) do i=1,nn index2(i)=i end do cc The number of trial subsamples is represented by nrep, which depends cc on the data situation. cc When all (p+1)-subsets out of n can be drawn, the subroutine rfgenpn cc is used. Otherwise, random subsamples are drawn by the routine cc rfrangen. The trial subsamples are put in the array index1. The cc same thing happens for large datasets, except that the number of cc observations is nmini instead of n. cc cc When a trial subsample is singular, the algorithm counts the number of cc observations that lie on the hyperplane corresponding to this sample. cc If, for small datasets, this number is larger than nhalff, the program cc stops (exact fit) and gives the mean and the covariance matrix cc of the observations on the hyperplane, together with the equation cc of the hyperplane. cc For large datasets, the algorithm first checks whether there are more cc than nhalff observations on the hyperplane. If this is the case, the cc program stops for the same reason of exact fit and gives the covariance cc matrix and mean of the observations on the hyperplane. If not, the cc algorithm counts the number of observations that lie on the hyperplane. cc When this number is smaller than the current nhalf in the subdataset, these cc observations are extended to nhalf observations by adding those cc observations that have smallest orthogonal distances to the hyperplane cc and the algorithm continues. cc When larger, the coefficients of the hyperplane are stored in the matrix cc m1stock for use as starting value in the next stage, and the flag of this cc estimate gets the value zero. cc cc In the second stage of the algorithm, when the subdatasets are merged, cc the array index2 contains the indices of the observations cc corresponding to the nhalf observations with minimal relative distances cc with respect to the best estimates of the first stage. cc When the estimate of the first stage is a hyperplane, the algorithm cc investigates whether there are more than the current nhalf observations of cc the merged subdataset on that hyperplane. If so, the coefficients of the cc hyperplane are again stored, now in the matrix mstock, for the final cc stage of the algorithm. cc If not, the observations on the hyperplane are extended to nhalf cc observations by adding the observations in the merged dataset with cc smallest orthogonal distances to that hyperplane. cc For small datasets or for larger datasets with n <= nmaxi := nmini*kmini, cc the algorithm already stops when one solution becomes singular, cc since we then have an exact fit. 
cc cc In the third stage, the covariance matrices and means of the best cc solutions of the second stage are used as starting values. cc Again, when a solution becomes singular, the subroutine 'exact' cc determines the hyperplane through at least nhalff observations and stops cc because of the exact fit. cc cc When the program stops because of an exact fit, the covariance matrix and cc mean of the observations on the hyperplane will always be given. cc C VT::27.10.2014 - an issue with nsamp="exact" fixed: do ix=1,n indexx(ix)=index1(ix) end do do 1000 i=1,nrep pnsel=nsel tottimes=tottimes+1 if(i_trace .ge. 4) call pr4mcd(i) call rchkusr() ! <- allow user interrupt deti= -1.d300 detimin1=deti step=0 call rfcovinit(sscp1,nvar+1,nvar+1) if((part.and..not.fine).or.(.not.part.and..not.final)) then if(part) then call rfrangen(mini(ii),nsel,index1) else if(all) then call rfgenpn(n,nsel,indexx) do ix=1,n index1(ix)=indexx(ix) end do else call rfrangen(n,nsel,index1) endif endif cc cc The covariance matrix and mean of the initial subsamples are cc calculated with the subroutine covar and represented by cc the variables cova1 and means. cc cc In the following stages of the algorithm, the covariance matrices and means cc used as starting values are already stored in the matrices c1stock cc and m1stock (for the second stage), and in the matrices cstock and mstock cc (for the third stage). cc cc The inverse cinv1 of the covariance matrix is calculated by the cc subroutine rfcovsweep, together with its determinant det. c c Repeat 9550 call rfcovinit(sscp1,nvar+1,nvar+1) if(.not.fine.and.part) then do j=1,pnsel do m=1,nvar rec(m)=dath(index1(j),m) end do call rfadmit(rec,nvar,sscp1) end do call rfcovar(pnsel,nvar,sscp1,cova1,means,sd) endif if(.not.part.and..not.final) then do j=1,pnsel do m=1,nvar rec(m)=dat(index1(j),m) end do call rfadmit(rec,nvar,sscp1) end do call rfcovar(pnsel,nvar,sscp1,cova1,means,sd) endif if (final) then if(mstock(i,1) .ne. 1234567.D0) then do jj=1,nvar means(jj)=mstock(i,jj) do kk=1,nvar cova1((jj-1)*nvar+kk)=cstock(i,(jj-1)*nvar+kk) end do end do else goto 1111 endif if(flag(i).eq.0) then qorder=1.D0 do jjj=1,nvar z(jjj)=coeff(1,jjj) end do call rfdis(dat,z,ndist,n,nvar,nn,nvar, means) dist2=rffindq(ndist,nn,nhalf,index2) goto 9555 endif endif if (fine .and. .not.final) then if(m1stock((ii-1)*10+i,1) .ne. 1234567.D0) then do jj=1,nvar means(jj)=m1stock((ii-1)*10+i,jj) do kk=1,nvar cova1((jj-1)*nvar+kk)=c1stock((ii-1)*10+i, * (jj-1)*nvar+kk) end do end do else goto 1111 endif if(flag((ii-1)*10+i).eq.0) then qorder=1.D0 do jjj=1,nvar z(jjj)=coeff(ii,jjj) end do call rfdis(dath,z,ndist,nmaxi,nvar,nn,nvar, means) call rfshsort(ndist,nn) qorder=ndist(nhalf) if(dabs(qorder-0.D0).lt.10.D-8 .and. kount.eq.0 * .and. n.gt.nmaxi) then kount=nhalf do kkk=nhalf+1,nn if(dabs(ndist(kkk)-0.D0).lt.10.D-8) then kount=kount+1 endif end do flag(1)=0 do kkk=1,nvar coeff(1,kkk)=z(kkk) end do call rfstore2(nvar,cstock,mstock,nv_2, * kmini,cova1,means,i,mcdndex,kount) kount=1 goto 1000 else if(dabs(qorder-0.D0).lt.10.D-8 .and. * kount.ne.0 .and. n.gt.nmaxi) then goto 1000 else flag(1)=1 dist2=rffindq(ndist,nn,nhalf,index2) goto 9555 endif endif endif call rfcovcopy(cova1,cinv1,nvar,nvar) det=0. 
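c     Sweep loop over the nvar pivots: cinv1 (a copy of cova1) is
c     inverted in place by rfcovsweep(), and det accumulates the sum
c     of log(pivot), i.e., the log-determinant.  A pivot below eps
c     signals a (near-)singular covariance matrix and triggers the
c     exact-fit / hyperplane handling below.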
do 200 j=1,nvar pivot=cinv1((j-1)*nvar+j) det=det + log(pivot) if(pivot.lt.eps) then call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) qorder=1.D0 if(.not.part.or.final) then call rfdis(dat,z,ndist,n,nvar,nn,nvar,means) else call rfdis(dath,z,ndist,nmaxi,nvar,nn,nvar,means) endif call rfshsort(ndist,nn) qorder=ndist(nhalf) if(dabs(qorder-0.D0).lt. 10.D-8 .and. .not.part) then call transfo(cova1,means,dat,med,mad,nvar,n) call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,nn,nvar,means) call rfexact(kount,n,ndist, nvar, * sscp1,rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do jjj=1,nvar coeff(1,jjj)=z(jjj) end do fit=2 goto 9999 else if(dabs(qorder-0.D0).lt. 10.D-8 .and. part .and. * kount.eq.0) then call rfdis(dat,z,ndist,n,nvar,n,nvar, means) call rfshsort(ndist,n) if(dabs(ndist(nhalff)-0.D0).lt.10.D-8) then call transfo(cova1,means,dat,med,mad,nvar,n) call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,nn,nvar,means) call rfexact(kount,n,ndist, nvar,sscp1, * rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do jjj=1,nvar coeff(1,jjj)=z(jjj) end do fit=2 goto 9999 endif call rfdis(dath,z,ndist,nmaxi,nvar,nn,nvar, means) call rfshsort(ndist,nn) kount=nhalf do kkk=nhalf+1,nn if(dabs(ndist(kkk)-0.D0) .lt. 10.D-8) then kount=kount+1 endif end do flag((ii-1)*10+1)=0 do kkk=1,nvar coeff(ii,kkk)=z(kkk) end do call rfstore1(nvar,c1stock,m1stock,nv_2, * kmini,cova1,means,i,km10,ii,mcdndex, kount) kount=1 goto 1000 else if(dabs(qorder-0.D0).lt. 10.D-8 .and. part .and. * kount.ne.0) then goto 1000 else C C VT::27.10.2014 - an issue with nsamp="exact" fixed: C C Add one more observation and return to recompute the C covariance. In case of complete enumeration, when all C p+1 subsamples are generated, the array 'index1' must C be preserved around label 9550). C if(i_trace .ge. 2) * call intpr('Singularity -> extended subsample: ', * -1,index1,nsel) call rfishsort(index1,pnsel) call prdraw(index1,pnsel, nn) pnsel=pnsel+1 goto 9550 c --------- until endif endif call rfcovsweep(cinv1,nvar,j) 200 continue cc cc Mahalanobis distances are computed with the subroutine rfmahad cc and stored in the array ndist. cc The k-th order statistic of the mahalanobis distances is stored cc in dist2. The array index2 containes the indices of the cc corresponding observations. cc do j=1,nn if(.not.part.or.final) then do mm=1,nvar rec(mm)=dat(j,mm) end do else do mm=1,nvar rec(mm)=dath(j,mm) end do endif t=rfmahad(rec,nvar,means,cinv1) ndist(j)=t end do dist2=rffindq(ndist,nn,nhalf,index2) cc cc The variable kstep represents the number of iterations of the current stage (1,2, or 3), cc i.e., the situation of the program, kstep = k1, k2, or k3. Within each cc iteration the mean and covariance matrix of nhalf observations are cc calculated. The nhalf smallest corresponding mahalanobis distances cc determine the subset for the next iteration. cc The best subset for the whole data is stored in the array inbest. cc The iteration stops when two subsequent determinants become equal. cc 9555 do 400 step=1,kstep tottimes=tottimes+1 if(i_trace .ge. 4) call pr5mcd(step, tottimes) call rchkusr() ! 
<- allow user interrupt call rfcovinit(sscp1,nvar+1,nvar+1) do j=1,nhalf temp(j)=index2(j) end do call rfishsort(temp,nhalf) do j=1,nhalf if(.not.part.or.final) then do mm=1,nvar rec(mm)=dat(temp(j),mm) end do else do mm=1,nvar rec(mm)=dath(temp(j),mm) end do endif call rfadmit(rec,nvar,sscp1) end do call rfcovar(nhalf,nvar,sscp1,cova1,means,sd) call rfcovcopy(cova1,cinv1,nvar,nvar) det= 0. do 600 j=1,nvar pivot=cinv1((j-1)*nvar+j) det=det + log(pivot) if(pivot.lt.eps) then if(final .or. .not.part .or. * (fine.and. .not.final .and. n .le. nmaxi)) then call transfo(cova1,means,dat,med,mad,nvar,n) call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) if(final.or..not.part) then call rfdis(dath,z,ndist,n, nvar,nn, * nvar,means) else call rfdis(dath,z,ndist,nmaxi,nvar,nn, * nvar,means) endif call rfexact(kount,n,ndist,nvar,sscp1, * rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do jjj=1,nvar coeff(1,jjj)=z(jjj) end do fit=2 goto 9999 endif if(part.and..not.fine.and.kount.eq.0) then call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,n,nvar, means) call rfshsort(ndist,n) if(dabs(ndist(nhalff)-0.D0).lt.10.D-8) then call transfo(cova1,means,dat,med,mad,nvar,n) call rs(nvar,nvar,cova1,w,matz,z, * fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,n,nvar,means) call rfexact(kount,n,ndist,nvar,sscp1, * rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do jjj=1,nvar coeff(1,jjj)=z(jjj) end do fit=2 goto 9999 endif call rfdis(dath,z,ndist,nmaxi,nvar,nn, * nvar,means) call rfshsort(ndist,nn) kount=nhalf do,kkk=nhalf+1,nn if(dabs(ndist(kkk)-0.D0).lt.10.D-8) then kount=kount+1 endif end do flag((ii-1)*10+1)=0 do kkk=1,nvar coeff(ii,kkk)=z(kkk) end do call rfstore1(nvar,c1stock,m1stock,nv_2, * kmini,cova1,means,i,km10,ii,mcdndex, kount) kount=1 goto 1000 else if(part.and..not.fine.and.kount.ne.0) then goto 1000 endif endif if(fine.and..not.final.and.kount.eq.0) then call rs(nvar,nvar,cova1,w,matz,z,fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,n,nvar, means) call rfshsort(ndist,n) if(dabs(ndist(nhalff)-0.D0).lt.10.D-8) then call transfo(cova1,means,dat,med,mad,nvar,n) call rs(nvar,nvar,cova1,w,matz,z, * fv1,fv2,ierr) call rfdis(dat,z,ndist,n,nvar,n,nvar,means) call rfexact(kount,n,ndist,nvar,sscp1, * rec,dat, cova1,means,sd,weight) call rfcovcopy(cova1,initcov,nvar,nvar) call rfcovcopy(means,initmean,nvar,1) do jjj=1,nvar coeff(1,jjj)=z(jjj) end do fit=2 goto 9999 endif call rfdis(dath,z,ndist,nmaxi,nvar,nn, * nvar,means) call rfshsort(ndist,nn) kount=nhalf do kkk=nhalf+1,nn if(dabs(ndist(kkk)-0.D0).lt.10.D-8) then kount=kount+1 endif end do flag(1)=0 do kkk=1,nvar coeff(1,kkk)=z(kkk) end do call rfstore2(nvar,cstock,mstock,nv_2, * kmini,cova1,means,i,mcdndex,kount) kount=1 goto 1000 else if(fine.and..not.final.and.kount.ne.0) then goto 1000 endif endif endif call rfcovsweep(cinv1,nvar,j) 600 continue if(step.ge.2 .and. det.eq.detimin1) then goto 5000 endif detimin1=deti deti=det do j=1,nn if(.not.part.or.final) then do mm=1,nvar rec(mm)=dat(j,mm) end do else do mm=1,nvar rec(mm)=dath(j,mm) end do endif t=rfmahad(rec,nvar,means,cinv1) ndist(j)=t end do dist2=rffindq(ndist,nn,nhalf,index2) dist=dsqrt(dist2) if(final .and. ((i.eq.1 .and. step.eq.1 .and. .not.fine) * .or. det .lt. 
object)) then medi2=rffindq(ndist,nn,int(n/2),index1) object=det do jjj=1,nhalf inbest(jjj)=index2(jjj) end do call rfcovcopy(cova1,cova2,nvar,nvar) call rfcovcopy(cinv1,cinv2,nvar,nvar) call rfcovcopy(means,bmeans,nvar,1) endif 400 continue if(i_trace .ge. 4) call println() cc After each iteration, it has to be checked whether the new solution cc is better than some previous one and therefore needs to be stored. This cc isn't necessary in the third stage of the algorithm, where only the best cc solution is kept. 5000 if(.not. final) then if(part .and. .not. fine) then iii=ii else iii=1 endif c At the end of the algorithm, only the ten c best solutions need to be stored. cc For each data group : cc If the objective function is lower than the largest value in the cc matrix mcdndex : cc A distinction is made between different stages of the algorithm: cc * At the first stage of the split-up situation: cc -If the new objective value did not yet occur in mcdndex cc its value and corresponding covariance matrix and mean are cc stored at the right place in the matrices mcdndex, c1stock and cc m1stock, and other values are shifted to their new position cc in these arrays. cc -If the new objective value already occurs in mcdndex, a cc comparison is made between the new mean vector and covariance matrix cc and those estimates with the same determinant. cc When for an equal determinant, the mean vector or covariance matrix cc do not correspond, both of them are kept in the matrices mcdndex cc and nbest. cc * In the second stage of the algorithm, the covariances and means cc are stored : cc - If the new objective value did not yet occur cc in the matrix mcdndex, it is inserted by shifting the greater cc determinants upwards and doing the same in the arrays mstock cc and cstock. cc - If the new objective value already occurs in the array mcdndex, cc it is compared with all solutions with the same determinant. cc In the case of an equality, the means and covariances cc are compared to determine whether or not to insert the cc new solution. cc Otherwise nothing happens. When a singularity occurs, cc the determinant in the matrix mcdndex is zero and the cc corresponding flag is zero too, so the search in the arrays mcdndex, cc m1stock, c1stock, mstock and cstock is done on the rows with flag one. cc if( flag((iii-1)*10+1).eq.1) then lll=1 else lll=2 endif do j=lll,10 if (det .le. mcdndex(j,2,iii)) then if(det.ne.mcdndex(j,2,iii)) then if(.not.fine.and.part) goto 203 goto 205 else do kkk=j,10 if(det.eq.mcdndex(kkk,2,iii)) then do jjj=1,nvar if(part.and..not.fine) then if(means(jjj) .ne. * m1stock((iii-1)*10+ kkk,jjj)) * goto 203 else if(means(jjj).ne.mstock(kkk,jjj)) * goto 205 endif end do do jjj=1,nvar*nvar if(part.and..not.fine) then if(cova1(jjj) .ne. * c1stock((iii-1)*10+ kkk,jjj)) * goto 203 else if(cova1(jjj).ne.cstock(kkk,jjj)) * goto 205 endif end do endif end do ! 
kkk endif goto 1000 c--- 203 do k=10,j+1,-1 do kk=1,nvar*nvar c1stock((iii-1)*10+k,kk)= * c1stock((iii-1)*10+k-1,kk) end do do kk=1,nvar m1stock((iii-1)*10+k,kk)= * m1stock((iii-1)*10+k-1,kk) end do mcdndex(k,1,iii)=mcdndex(k-1,1,iii) mcdndex(k,2,iii)=mcdndex(k-1,2,iii) end do do kk=1,nvar do kkk=1,nvar c1stock((iii-1)*10+j,(kk-1)*nvar+kkk)= * cova1((kk-1)*nvar+kkk) m1stock((iii-1)*10+j,kk)=means(kk) end do end do mcdndex(j,1,iii)=i mcdndex(j,2,iii)=det goto 1000 c--- 205 do k=10,j+1,-1 do kk=1,nvar*nvar cstock(k,kk)= cstock(k-1,kk) end do do kk=1,nvar mstock(k,kk)= mstock(k-1,kk) end do mcdndex(k,1,iii)=mcdndex(k-1,1,iii) mcdndex(k,2,iii)=mcdndex(k-1,2,iii) end do do kk=1,nvar do kkk=1,nvar cstock(j,(kk-1)*nvar+kkk)= * cova1((kk-1)*nvar+kkk) mstock(j,kk)=means(kk) end do end do mcdndex(j,1,iii)=i mcdndex(j,2,iii)=det goto 1000 endif end do ! j endif c (not final) 1000 continue !end{ i = 1..nrep } 1111 continue c---- - - - - - end [ For (ii = 1 .. ngroup) ] - - - - - - - - - cc Determine whether the algorithm needs to be run again or not. cc if(part .and. .not. fine) then fine= .true. goto 5555 else if(.not. final .and. ((part.and.fine).or. .not.part)) then final= .true. goto 5555 endif cc******** end { Main Loop } ************** -------------------------------- c MM: 'temp' is thrown away in calling R code: c do j=1,nhalf c temp(j)=inbest(j) c end do c call rfishsort(temp,nhalf) do j=1,nvar means(j)=bmeans(j)*mad(j)+med(j) end do call rfcovcopy(means,initmean,nvar,1) do i=1,nvar do j=1,nvar cova1((i-1)*nvar+j)=cova2((i-1)*nvar+j)*mad(i)*mad(j) end do end do call rfcovcopy(cova1,initcov,nvar,nvar) det=object do j=1,nvar det=det + 2*log(mad(j)) end do cc VT::chimed is passed now as a parameter cc call rfcovmult(cova1,nvar,nvar,medi2/chimed(nvar)) cc call rfcovmult(cova2,nvar,nvar,medi2/chimed(nvar)) cc call rfcovmult(cinv2,nvar,nvar,1.D0/(medi2/chimed(nvar))) medi2 = medi2/chimed call rfcovmult(cova1, nvar,nvar, medi2) call rfcovmult(cova2, nvar,nvar, medi2) call rfcovmult(cinv2, nvar,nvar, 1.D0/medi2) call rfcovcopy(cova1, adcov,nvar,nvar) cc cc The MCD location is in bmeans. cc The MCD scatter matrix is in cova2, cc and its inverse in cinv2. cc cc For every observation we compute its MCD distance cc and compare it to a cutoff value. cc call rfcovinit(sscp1,nvar+1,nvar+1) do i=1,n do mm=1,nvar rec(mm)=dat(i,mm) end do dist2=rfmahad(rec,nvar,bmeans,cinv2) if(dist2.le.cutoff) then weight(i)=1 else weight(i)=0 endif end do call transfo(cova2,bmeans,dat,med,mad,nvar,n) goto 9999 cc ****************************************************************** 9999 continue if(i_trace .ge. 2) call pr9mcd(tottimes) call rndend C ------ == PutRNGstate() in C return end ccccc end {rffastmcd} ccccc ccccc c --- Auxiliary just to pass Fortran 'logical' as 'integer' to C; int(.) does *not* work integer function l2i(logi) implicit none logical logi if(logi) then l2i = 1 else l2i = 0 endif return end ccccc ccccc subroutine rfexact(kount,nn,ndist, nvar,sscp1, * rec,dat, cova1,means,sd,weight) cc cc Determines how many objects lie on the hyperplane with equation cc z(1,1)*(x_i1 - means_1)+ ... + z(p,1)* (x_ip - means_p) = 0 cc and computes their mean and their covariance matrix. 
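cc An observation is counted as lying on the hyperplane when the
cc absolute distance stored in ndist is below the hard-coded
cc tolerance 1.0d-7 (written as 10.D-8 below); such observations
cc get weight 1, all others weight 0.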
cc double precision ndist(nn) double precision sscp1(nvar+1,nvar+1) double precision rec(nvar+1) double precision dat(nn,nvar) double precision cova1(nvar,nvar) double precision means(nvar), sd(nvar) integer weight(nn) call rfcovinit(sscp1,nvar+1,nvar+1) kount=0 do kk=1,nn if(dabs(ndist(kk)-0.D0).lt.10.D-8) then kount=kount+1 weight(kk)=1 do j=1,nvar rec(j)=dat(kk,j) end do call rfadmit(rec,nvar,sscp1) else weight(kk)=0 endif end do call rfcovar(kount,nvar,sscp1,cova1,means,sd) return end ccccc ccccc subroutine transfo(cova,means,dat,med,mad,nvar,n) cc implicit none integer n, nvar double precision dat(n,nvar), cova(nvar,nvar) double precision means(nvar), med(nvar), mad(nvar) integer i,j,k do j=1,nvar means(j)=means(j)*mad(j)+med(j) do k=1,nvar cova(j,k)=cova(j,k)*mad(j)*mad(k) end do do i=1,n dat(i,j)=dat(i,j)*mad(j)+med(j) end do end do return end ccccc ccccc subroutine rfcovmult(a,n1,n2,fac) cc cc Multiplies the matrix a by the real factor fac. cc double precision a(n1,n2) double precision fac cc do i=1,n1 do j=1,n2 a(i,j)=a(i,j)*fac end do end do return end ccccc ccccc subroutine rfadmit(rec,nvar,sscp) cc cc Updates the sscp matrix with the additional case rec. cc double precision rec(nvar) double precision sscp(nvar+1,nvar+1) cc sscp(1,1)=sscp(1,1)+1.D0 do j=1,nvar sscp(1,j+1)=sscp(1,j+1)+rec(j) sscp(j+1,1)=sscp(1,j+1) end do do i=1,nvar do j=1,nvar sscp(i+1,j+1)=sscp(i+1,j+1)+rec(i)*rec(j) end do end do return end ccccc ccccc subroutine rfcovar(n,nvar, sscp,cova, means,sd) cc cc Computes the classical mean and covariance matrix. cc implicit none integer n,nvar, i,j double precision sscp(nvar+1,nvar+1), cova(nvar,nvar) double precision means(nvar), sd(nvar), f do i=1,nvar means(i)=sscp(1,i+1) sd(i)=sscp(i+1,i+1) f=(sd(i)-means(i)*means(i)/n)/(n-1) if(f.gt.0.D0) then sd(i)=dsqrt(f) else sd(i)=0.D0 endif means(i)=means(i)/n end do do i=1,nvar do j=1,nvar cova(i,j)=sscp(i+1,j+1) end do end do do i=1,nvar do j=1,nvar cova(i,j)=cova(i,j)-n*means(i)*means(j) cova(i,j)=cova(i,j)/(n-1) end do end do return end ccccc ccccc subroutine rfcorrel(nvar,a,b,sd) cc cc Transforms the scatter matrix a to the correlation matrix b: <==> R's cov2cor(.) cc implicit none integer nvar double precision a(nvar,nvar), b(nvar,nvar), sd(nvar) integer j,i do j=1,nvar sd(j)=1/sqrt(a(j,j)) end do do i=1,nvar do j=1,nvar if(i.eq.j) then b(i,j)=1.0 else b(i,j)=a(i,j)*sd(i)*sd(j) endif end do end do return end subroutine prdraw(a,pnsel, nn) implicit none integer nn, a(nn), pnsel c double precision unifrnd integer jndex, nrand, i,j jndex=pnsel c OLD nrand=int(uniran(seed)*(nn-jndex))+1 nrand=int(unifrnd() * (nn-jndex))+1 C if(nrand .gt. nn-jndex) then C call intpr( C 1 '** prdraw(): correcting nrand > nn-jndex; nrand=', C 2 -1, nrand, 1) C nrand=nn-jndex C endif jndex=jndex+1 a(jndex)=nrand+jndex-1 do i=1,jndex-1 if(a(i).gt.nrand+i-1) then do j=jndex,i+1,-1 a(j)=a(j-1) end do a(i)=nrand+i-1 goto 10 c ------- break endif end do 10 continue return end ccccc ccccc double precision function rfmahad(rec,nvar,means,sigma) cc cc Computes a Mahalanobis-type distance. cc double precision rec(nvar), means(nvar), sigma(nvar,nvar), t t = 0. do j=1,nvar do k=1,nvar t = t + (rec(j)-means(j))*(rec(k)-means(k))*sigma(j,k) end do end do rfmahad=t return end ccccc ccccc subroutine rfdis(da,z,ndist,nm,nv,nn,nvar, means) cc cc Computes the distance between the objects of da and a hyperplane with cc equation z(1,1)*(x_i1 - means_1) + ... 
+ z(p,1)*(x_ip - means_p) = 0 cc double precision da(nm,nv) double precision z(nvar,nvar) double precision ndist(nn) double precision means(nvar) do i=1,nn ndist(i)=0 do j=1,nvar ndist(i)=z(j,1)*(da(i,j)-means(j))+ndist(i) end do ndist(i)=dabs(ndist(i)) end do return end ccccc ccccc subroutine rfstore2(nvar,cstock,mstock,nv_2, * kmini,cova1,means,i,mcdndex,kount) cc cc Stores the coefficients of a hyperplane cc z(1,1)*(x_i1 - means_1) + ... + z(p,1)*(x_ip - means_p) = 0 cc into the first row of the matrix mstock, and shifts the other cc elements of the arrays mstock and cstock. cc double precision cstock(10, nv_2), mstock(10, nvar) double precision mcdndex(10, 2, kmini) double precision cova1(nvar,nvar), means(nvar) do k=10,2,-1 do kk=1,nvar*nvar cstock(k,kk)= cstock(k-1,kk) end do do kk=1,nvar mstock(k,kk)= mstock(k-1,kk) end do mcdndex(k,1,1)=mcdndex(k-1,1,1) mcdndex(k,2,1)=mcdndex(k-1,2,1) end do do kk=1,nvar mstock(1,kk)=means(kk) do jj=1,nvar cstock(1,(kk-1)*nvar+jj)=cova1(kk,jj) end do end do mcdndex(1,1,1)=i mcdndex(1,2,1)=kount return end ccccc ccccc subroutine rfstore1(nvar,c1stock,m1stock,nv_2, * kmini,cova1,means,i,km10,ii,mcdndex,kount) double precision c1stock(km10, nv_2), m1stock(km10, nvar) double precision mcdndex(10,2,kmini) double precision cova1(nvar,nvar), means(nvar) do k=10,2,-1 do kk=1,nvar*nvar c1stock((ii-1)*10+k,kk)= * c1stock((ii-1)*10+k-1,kk) end do do kk=1,nvar m1stock((ii-1)*10+k,kk)= * m1stock((ii-1)*10+k-1,kk) end do mcdndex(k,1,ii)=mcdndex(k-1,1,ii) mcdndex(k,2,ii)=mcdndex(k-1,2,ii) end do do kk=1,nvar m1stock((ii-1)*10+1,kk)=means(kk) do jj=1,nvar c1stock((ii-1)*10+1,(kk-1)*nvar+jj)= * cova1(kk,jj) end do end do mcdndex(1,1,ii)=i mcdndex(1,2,ii)=kount return end CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC ccccc ccccc subroutine rfcovinit(a,n1,n2) cc cc Initializes the matrix a by filling it with zeroes. cc double precision a(n1,n2) cc do i=1,n1 do j=1,n2 a(i,j)=0.D0 end do end do return end ccccc ccccc subroutine rfcovsweep(a,nvar,k) cc double precision a(nvar,nvar) double precision b, d cc d=a(k,k) do j=1,nvar a(k,j)=a(k,j)/d end do do i=1,nvar if(i.ne.k) then b=a(i,k) do j=1,nvar a(i,j)=a(i,j)-b*a(k,j) end do a(i,k) = -b/d endif end do a(k,k)=1/d return end ccccc robustbase/src/rowMedians.c0000644000176200001440000000774614123440375015507 0ustar liggesusers/*************************************************************************** Authors: Adopted from rowQuantiles.c by R. Gentleman. 
Copyright Henrik Bengtsson, 2007; Martin Maechler, 2014-2021; History --> EOF **************************************************************************/ #include // was #include #include "robustbase.h" // Public methods: SEXP rowMedians_Real (SEXP x, int nrow, int ncol, int narm, int hasna, int byrow); SEXP rowMedians_Integer(SEXP x, int nrow, int ncol, int narm, int hasna, int byrow); void C_rowMedians_Real (double* x, double* res, int nrow, int ncol, int narm, int hasna, int byrow); void C_rowMedians_Integer(int* x, double* res, int nrow, int ncol, int narm, int hasna, int byrow); /* TEMPLATE rowMedians_(...): - SEXP rowMedians_Real(...); - SEXP rowMedians_Integer(...); */ #define METHOD rowMedians #define X_TYPE 'i' #include "rowMedians_TYPE-template.h" #define X_TYPE 'r' #include "rowMedians_TYPE-template.h" #undef METHOD /* TODO: implement: hasNA in {NA,TRUE,FALSE}; and = NA <==> code should *check* R code {for error message}: ../R/comedian.R */ SEXP R_rowMedians(SEXP x, SEXP naRm, SEXP hasNA, SEXP byRow, SEXP keepNms) { // Argument checking and "C type coercion": if (!isMatrix(x)) error(_("Argument 'x' must be a matrix.")); int narm = asLogical(naRm); // error if it ain't if (narm != TRUE && narm != FALSE) error(_("Argument 'naRm' must be either TRUE or FALSE.")); int hasna = asLogical(hasNA); // error if it ain't if (hasna == NA_INTEGER) hasna = TRUE;// <- for now; TODO ? become smarter and check int byrow = INTEGER(byRow)[0]; int keepnms = asLogical(keepNms); /* Get dimensions of 'x'. */ SEXP ans = PROTECT(getAttrib(x, R_DimSymbol)); int nrow, ncol; if (byrow) { // rowMedians nrow = INTEGER(ans)[0]; ncol = INTEGER(ans)[1]; } else { // colMedians nrow = INTEGER(ans)[1]; ncol = INTEGER(ans)[0]; } UNPROTECT(1); // and reprotect : if (isReal(x)) { ans = PROTECT(rowMedians_Real (x, nrow, ncol, narm, hasna, byrow)); } else if (isInteger(x)) { ans = PROTECT(rowMedians_Integer(x, nrow, ncol, narm, hasna, byrow)); } else { error(_("Argument 'x' must be numeric (integer or double).")); } if(keepnms) { SEXP xDnms = getAttrib(x, R_DimNamesSymbol); if(xDnms != R_NilValue) { PROTECT(xDnms); setAttrib(ans, R_NamesSymbol, duplicate(VECTOR_ELT(xDnms, byrow ? 0 : 1))); UNPROTECT(1); } } UNPROTECT(1); return(ans); } /* R_rowMedians() */ /*************************************************************************** HISTORY: 2014-12-09 [M.Maechler] o Copied to 'robustbase' CRAN package - to replace many apply(*., 2, median) NB: 'Biobase' also contains rowQ = general row/col Quantiles o argument checking all in C o add 'keepNms' argument {and do keep names by default!} 2013-01-13 [HB] o Added argument 'byRow' to rowMedians() and dropped colMedians(). o Using internal arguments 'by_row' instead of 'by_column'. 2011-12-11 [HB] o BUG FIX: rowMediansReal(..., na.rm=TRUE) did not handle NaN:s, only NA:s. Note that NaN:s does not exist for integers. 2011-10-12 [HJ] o Added colMedians(). o Now rowMediansInteger/Real() can operate also by columns, cf. argument 'by_column'. 2007-08-14 [HB] o Added checks for user interrupts every 1000 line. o Added argument 'hasNA' to rowMedians(). 2005-12-07 [HB] o BUG FIX: When calculating the median of an even number (non-NA) values, the length of the second sort was one element too short, which made the method to freeze, i.e. rPsort(rowData, qq, qq) is now (...qq+1, qq). 2005-11-24 [HB] o By implementing a special version for integers, there is no need to coerce to double in R, which would take up twice the amount of memory. o rowMedians() now handles NAs too. 
o Adopted from rowQuantiles.c in Biobase of Bioconductor. **************************************************************************/ robustbase/src/rllarsbi.f0000644000176200001440000002301212401436721015171 0ustar liggesusersc--- For lmrob.lar() in ../R/lmrob.M.S.R c--- ~~~~~~~~~~~ C======================================================================= SUBROUTINE rlSTORm2(Y,N,J,YJ) C....................................................................... IMPLICIT DOUBLE PRECISION (A-H,O-Z) DIMENSION Y(N) C----------------------------------------------------------------------- C rlSTORm2 SEARCHES THE J-TH VALUE IN ORDER OF MAGNITUDE IN C A VECTOR OF LENGTH N. C----------------------------------------------------------------------- C--- copied from robust package: src/lmrobmm.f ------------------------- L=1 LR=N 20 IF (L.GE.LR) GOTO 90 AX=Y(J) JNC=L JJ=LR 30 IF(JNC.GT.JJ) GOTO 80 40 IF (Y(JNC).GE.AX) GOTO 50 JNC=JNC+1 GOTO 40 50 IF(Y(JJ).LE.AX) GOTO 60 JJ=JJ-1 GOTO 50 60 IF(JNC.GT.JJ) GOTO 70 WA=Y(JNC) Y(JNC)=Y(JJ) Y(JJ)=WA JNC=JNC+1 JJ=JJ-1 70 GOTO 30 80 IF(JJ.LT.J) L=JNC IF(J.LT.JNC) LR=JJ GOTO 20 90 YJ=Y(J) RETURN END C======================================================================= SUBROUTINE rlCOLbi(V1,V2,MLT,M,IOUT) C....................................................................... DOUBLE PRECISION V1(M),V2(M),MLT C----------------------------------------------------------------------- C AUXILIARY ROUTINE FOR rlLARSbi C----------------------------------------------------------------------- C--- copied from robust package: src/lmrobbi.f ------------------------- DO 220 I=1,M IF (I .EQ. IOUT) GOTO 220 V1(I)=V1(I)-V2(I)*MLT 220 CONTINUE RETURN END C======================================================================= SUBROUTINE rlICHGbi(A,B) C....................................................................... C AUXILIARY ROUTINE FOR rlLARSbi C----------------------------------------------------------------------- C--- copied from robust package: src/lmrobbi.f ------------------------- DOUBLE PRECISION A,B,C C=A A=B B=C RETURN END C======================================================================= SUBROUTINE rlLARSbi(X,Y,N,NP,MDX,MDT,TOL,NIT,K, + KODE,SIGMA,THETA,RS,SC1,SC2,SC3,SC4,BET0) C....................................................................... IMPLICIT DOUBLE PRECISION (A-H,O-Z) DIMENSION X(MDX,NP),Y(N),THETA(MDT),RS(N),SC1(N),SC2(NP), + SC3(NP),SC4(NP) INTEGER OUT LOGICAL STAGE,TEST DATA ZERO,TWO,EPS,BIG/0.D0,2.D0,1.0D-10,3.401D38/ cMM would think rather this: double precision --- but it breaks our checks C DATA ZERO,TWO,EPS,BIG/0.D0,2.D0,2.22D-16,1.796D308/ C----------------------------------------------------------------------- C LEAST ABSOLUTE RESIDUALS -- aka L_1 - Regression C --> Result in THETA[1:NP] C----------------------------------------------------------------------- C--- copied from robust package: src/lmrobbi.f ------------------------- DO J=1,NP SC4(J)=DBLE(J) SC2(J)=ZERO end do SUM=ZERO DO I=1,N SC1(I)=DBLE(NP+I) THETA(I)=Y(I) IF (Y(I) .lt. ZERO) then DO J=1,NP X(I,J)=-X(I,J) end do THETA(I)=-THETA(I) SC1(I)=-SC1(I) endif SUM=SUM+THETA(I) end do C----------------------------------------------------------------------- C COMPUTE THE MARGINAL COSTS. C----------------------------------------------------------------------- SUMIN=SUM DO J=1,NP SUM=ZERO DO I=1,N SUM=SUM+X(I,J) end do SC3(J)=SUM end do C----------------------------------------------------------------------- C STAGE I. DETERMINE THE VECTOR TO ENTER THE BASIS. 
C----------------------------------------------------------------------- TEST=.FALSE. ! -Wall STAGE=.TRUE. KOUNT=0 KR=1 KL=1 IN=1 ! -Wall c-- ---------------- LOOP (Stage I) ------------------------------------ 70 VMAX=-1.D0 DNP=DBLE(NP) DO J=KR,NP IF (DABS(SC4(J)) .GT. DNP) cycle ! = continue D=DABS(SC3(J)) IF (D-VMAX .LE. ZERO) cycle IF (D-VMAX .LE. EPS) cycle VMAX=D IN=J end do IF (SC3(IN) .lt. ZERO) then ! swap signs do I=1,N X(I,IN)=-X(I,IN) end do SC3(IN)=-SC3(IN) SC4(IN)=-SC4(IN) endif C----------------------------------------------------------------------- C DETERMINE THE VECTOR TO LEAVE THE BASIS. C----------------------------------------------------------------------- cvvv ------------ 2nd-level loop --------------------------------- 100 K=0 DO I=KL,N D=X(I,IN) IF (D .LE. TOL) cycle K=K+1 Y(K)=THETA(I)/D RS(K)=DBLE(I) TEST=.TRUE. end do C--- -------------- 3rd-level loop ------------------ 120 IF (K .le. 0) then TEST=.FALSE. ! and GOTO 150 else ! 130 VMIN=BIG DO I=1,K IF (Y(I)-VMIN .GE. ZERO) cycle IF (VMIN-Y(I) .LE. EPS) cycle J=I VMIN=Y(I) OUT=INT(RS(I)) end do Y(J)=Y(K) RS(J)=RS(K) K=K-1 endif C----------------------------------------------------------------------- C CHECK FOR LINEAR DEPENDENCE IN STAGE I. C----------------------------------------------------------------------- c 150 IF (.not.TEST .and. STAGE) then DO I=1,N CALL rlICHGbi(X(I,KR),X(I,IN)) end do CALL rlICHGbi(SC3(KR),SC3(IN)) CALL rlICHGbi(SC4(KR),SC4(IN)) KR=KR+1 c GOTO 260 else c 170 IF (.not. TEST) then KODE=2 GOTO 350 endif c 180 PIVOT=X(OUT,IN) IF (SC3(IN)-PIVOT-PIVOT .gt. TOL) then ! not converged DO J=KR,NP D=X(OUT,J) SC3(J)=SC3(J)-D-D X(OUT,J)=-D end do D=THETA(OUT) SUMIN=SUMIN-D-D THETA(OUT)=-D SC1(OUT)=-SC1(OUT) GOTO 120 c -----------end{ 3rd-level loop } ----------------- endif C----------------------------------------------------------------------- C 200 PIVOT ON X(OUT,IN). C----------------------------------------------------------------------- DO J=KR,NP IF (J.EQ.IN) cycle ! = continue X(OUT,J)=X(OUT,J)/PIVOT end do THETA(OUT)=THETA(OUT)/PIVOT DO J=KR,NP IF (J .EQ. IN) cycle D=X(OUT,J) SC3(J)=SC3(J)-D*SC3(IN) CALL rlCOLbi(X(1,J),X(1,IN),D,N,OUT) end do SUMIN=SUMIN-SC3(IN)*THETA(OUT) DO I=1,N IF (I .EQ. OUT) cycle D=X(I,IN) THETA(I)=THETA(I)-D*THETA(OUT) X(I,IN)=-D/PIVOT end do SC3(IN)=-SC3(IN)/PIVOT X(OUT,IN)=1.D0/PIVOT CALL rlICHGbi(SC1(OUT),SC4(IN)) KOUNT=KOUNT+1 IF (.NOT. STAGE) GOTO 270 C----------------------------------------------------------------------- C INTERCHANGE ROWS IN STAGE I. C----------------------------------------------------------------------- KL=KL+1 DO J=KR,NP CALL rlICHGbi(X(OUT,J),X(KOUNT,J)) enddo CALL rlICHGbi(THETA(OUT),THETA(KOUNT)) CALL rlICHGbi(SC1(OUT),SC1(KOUNT)) endif IF (KOUNT+KR .NE. NP+1) GOTO 70 c ======= C----------------------------------------------------------------------- C STAGE II. DETERMINE THE VECTOR TO ENTER THE BASIS. C----------------------------------------------------------------------- STAGE=.FALSE. cvvv 270 VMAX=-BIG DO J=KR,NP D=SC3(J) IF (D .lt. ZERO) then IF (D+TWO .GT. ZERO) cycle D=-D-TWO endif IF (D-VMAX .LE. ZERO) cycle IF (D-VMAX .LE. EPS) cycle VMAX=D IN=J end do IF (VMAX .gt. TOL) then ! not converged IF (SC3(IN) .le. ZERO) then DO I=1,N X(I,IN)=-X(I,IN) end do SC3(IN)=-SC3(IN)-2.D0 SC4(IN)=-SC4(IN) endif GOTO 100 c ======== endif C----------------------------------------------------------------------- C 310 PREPARE OUTPUT C----------------------------------------------------------------------- L=KL-1 DO I=1,N RS(I)=ZERO IF (I .GT. 
L .OR. THETA(I) .GE. ZERO) cycle do J=KR,NP X(I,J)=-X(I,J) end do THETA(I)=-THETA(I) SC1(I)=-SC1(I) end do KODE=0 IF (KR .eq. 1) then ! first time only do J=1,NP D=DABS(SC3(J)) IF (D .LE. TOL .OR. TWO-D .LE. TOL) GOTO 350 end do KODE=1 endif c--- 350 DO I=1,N K=INT(SC1(I)) D=THETA(I) IF (K .le. 0) then K=-K D=-D endif IF (I .lt. KL) then SC2(K)=D else K=K-NP RS(K)=D endif end do K=NP+1-KR SUM=ZERO DO I=KL,N SUM=SUM+THETA(I) end do SUMIN=SUM NIT=KOUNT DO J=1,NP THETA(J)=SC2(J) end do DO I=1,N Y(I)=DABS(RS(I)) end do N2=N/2+1 CALL RLSTORM2(Y,N,N2,SIGMA) SIGMA=SIGMA/BET0 RETURN END robustbase/vignettes/0000755000176200001440000000000014124272534014437 5ustar liggesusersrobustbase/vignettes/lmrob_simulation.Rnw0000644000176200001440000016447413774561564020544 0ustar liggesusers\documentclass[11pt, a4paper]{article} \usepackage[a4paper, text={16cm,25cm}]{geometry} %\VignetteIndexEntry{Simulations for Robust Regression Inference in Small Samples} %\VignettePackage{robustbase} %\VignetteDepends{xtable,ggplot2,GGally,RColorBrewer,grid,reshape2} \usepackage{amsmath} \usepackage{natbib} \usepackage[utf8]{inputenc} \newcommand{\makeright}[2]{\ifx#1\left\right#2\else#1#2\fi} \newcommand{\Norm}[2][\left]{\mathcal N #1( #2 \makeright{#1}{)}} \newcommand{\norm}[1] {\| #1 \|} \newcommand{\bld}[1]{\boldsymbol{#1}} % shortcut for bold symbol \newcommand{\T}[1] {\texttt{#1}} \DeclareMathOperator{\wgt}{w} \DeclareMathOperator{\var}{var} \DeclareMathOperator{\diag}{diag} \DeclareMathOperator{\median}{median} \DeclareMathOperator{\mad}{mad} \DeclareMathOperator{\Erw}{\mathbf{E}} \SweaveOpts{prefix.string=plot, eps = FALSE, pdf = TRUE, strip.white=true} \SweaveOpts{width=6, height=4} \usepackage[noae]{Sweave} \begin{document} \setkeys{Gin}{width=\textwidth} \setlength{\abovecaptionskip}{-5pt} <>= ## set options options(width=60, warn=1) # see warnings where they happen (should eliminate) ## number of workers to start if(FALSE) {## good for pkg developers options(cores= max(1, parallel::detectCores() - 2)) } else { ## CRAN allows maximum of 2: options(cores= min(2, parallel::detectCores())) } ## Number of Repetitions: N <- 1000 ## get path (= ../inst/doc/ in source pkg) robustDoc <- system.file('doc', package='robustbase') robustDta <- robustDoc ## initialize (packages, data, ...): source(file.path(robustDoc, 'simulation.init.R')) # 'xtable' ## set the amount of trimming used in calculation of average results trim <- 0.1 <>= ## load required packages for graphics stopifnot(require(ggplot2), require(GGally),# for ggpairs() which replaces ggplot2::plotmatrix() require(grid), require(reshape2)) source(file.path(robustDoc, 'graphics.functions.R')) `%||%` <- function (x, orElse) if (!is.null(x)) x else orElse ## set ggplot theme theme <- theme_bw(base_size = 10) theme$legend.key.size <- unit(1, "lines")# was 0.9 in pre-v.3 ggplot2 theme$plot.margin <- unit(c(1/2, 1/8, 1/8, 1/8), "lines")# was (1/2, 0,0,0) theme_set(theme) ## old and new ggplot2: stopifnot(is.list(theme_G <- theme$panel.grid.major %||% theme$panel.grid)) ## set default sizes for lines and points update_geom_defaults("point", list(size = 4/3)) update_geom_defaults("line", list(size = 1/4)) update_geom_defaults("hline", list(size = 1/4)) update_geom_defaults("smooth", list(size = 1/4)) ## alpha value for plots with many points alpha.error <- 0.3 alpha.n <- 0.4 ## set truncation limits used by f.truncate() & g.truncate.*: trunc <- c(0.02, 0.14) trunc.plot <- c(0.0185, 0.155) f.truncate <- function(x, up = trunc.plot[2], low = trunc.plot[1]) { x[x > up] <- 
up x[x < low] <- low x } g.truncate.lines <- geom_hline(yintercept = trunc, color = theme$panel.border$colour) g.truncate.line <- geom_hline(yintercept = trunc[2], color = theme$panel.border$colour) g.truncate.areas <- annotate("rect", xmin=rep(-Inf,2), xmax=rep(Inf,2), ymin=c(0,Inf), ymax=trunc, fill = theme_G$colour) g.truncate.area <- annotate("rect", xmin=-Inf, xmax=Inf, ymin=trunc[2], ymax=Inf, fill = theme_G$colour) legend.mod <- list(`SMD.Wtau` = quote('SMD.W'~tau), `SMDM.Wtau` = quote('SMDM.W'~tau), `MM.Avar1` = quote('MM.'~Avar[1]), `MMqT` = quote('MM'~~q[T]), `MMqT.Wssc` = quote('MM'~~q[T]*'.Wssc'), `MMqE` = quote('MM'~~q[E]), `MMqE.Wssc` = quote('MM'~~q[E]*'.Wssc'), `sigma_S` = quote(hat(sigma)[S]), `sigma_D` = quote(hat(sigma)[D]), `sigma_S*qE` = quote(q[E]*hat(sigma)[S]), `sigma_S*qT` = quote(q[T]*hat(sigma)[S]), `sigma_robust` = quote(hat(sigma)[robust]), `sigma_OLS` = quote(hat(sigma)[OLS]), `t1` = quote(t[1]), `t3` = quote(t[3]), `t5` = quote(t[5]), `cskt(Inf,2)` = quote(cskt(infinity,2)) ) @% end{graphics-setup} \title{Simulations for Sharpening Wald-type Inference in Robust Regression for Small Samples} \author{Manuel Koller} \maketitle \tableofcontents \section{Introduction} In this vignette, we recreate the simulation study of \citet{KS2011}. This vignette is supposed to complement the results presented in the above cited reference and render its results reproducible. Another goal is to provide simulation functions, that, with small changes, could also be used for other simulation studies. Additionally, in Section~\ref{sec:maximum-asymptotic-bias}, we calculate the maximum asymptotic bias curves of the $\psi$-functions used in the simulation. \section{Setting} The simulation setting used here is similar to the one in \citet{maronna2009correcting}. We simulate $N = \Sexpr{N}$ repetitions. To repeat the simulation, we recommend using a small value of $N$ here, since for large $n$ and $p$, computing all the replicates will take days. \subsection{Methods} We compare the methods \begin{itemize} \item MM, SMD, SMDM as described in \citet{KS2011}. These methods are available in the package \T{robustbase} (\T{lmrob}). \item MM as implemented in the package \T{robust} (\T{lmRob}). This method will be denoted as \emph{MMrobust} later on. \item MM using S-scale correction by $q_{\rm T}$ and $q_{\rm E}$ as proposed by \citet{maronna2009correcting}. $q_{\rm T}$ and $q_{\rm E}$ are defined as follows. \begin{equation*} q_{\rm E} = \frac{1}{1 - (1.29 - 6.02/n)p/n}, \end{equation*} \begin{equation*} \hat q_{\rm T} = 1 + \frac{p}{2n}\frac{\hat a}{\hat b\hat c}, \end{equation*} where \begin{equation*} \hat a = \frac{1}{n}\sum_{i=1}^n \psi\left(\frac{r_i}{\hat\sigma_{\rm S}}\right)^2, \hat b = \frac{1}{n} \sum_{i=1}^n\psi'\left(\frac{r_i}{\hat\sigma_{\rm S}}\right),%' \hat c = \frac{1}{n}\sum_{i=1}^n \psi\left(\frac{r_i}{\hat\sigma_{\rm S}}\right) \frac{r_i}{\hat\sigma_{\rm S}}, \end{equation*} with $\psi = \rho'$,%' $n$ the number of observations, $p$ the number of predictor variables, $\hat\sigma_{\rm S}$ is the S-scale estimate and $r_i$ is the residual of the $i$-th observation. When using $q_{\rm E}$ it is necessary to adjust the tuning constants of $\chi$ to account for the dependence of $\kappa$ on $p$. For $q_{\rm T}$ no change is required. This method is implemented as \T{lmrob.mar()} in the source file \T{estimating.functions.R}. \end{itemize} \subsection{$\psi$-functions} We compare \emph{bisquare}, \emph{optimal}, \emph{lqq} and \emph{Hampel} $\psi$-functions. 
They are illustrated in Fig.~\ref{fig:psi.functions}. The tuning constants used in the simulation are compiled in Table~\ref{tab:psi-functions}. Note that the \emph{Hampel} $\psi$-function is tuned to have a downward slope of $-1/3$ instead of the originally proposed $-1/2$. This was set to allow for a comparison to an even slower descending $\psi$-function. %% generate table of tuning constants used for \psi functions \begin{table}[ht] \begin{center} <>= ## get list of psi functions lst <- lapply(estlist$procedures, function(x) { if (is.null(x$args)) return(list(NULL, NULL, NULL)) if (!is.null(x$args$weight)) return(list(x$args$weight[2], round(f.psi2c.chi(x$args$weight[1]),3), round(f.eff2c.psi(x$args$efficiency, x$args$weight[2]),3))) return(list(x$args$psi, round(if (is.null(x$args$tuning.chi)) lmrob.control(psi=x$args$psi)$tuning.chi else x$args$tuning.chi,3), round(if (is.null(x$args$tuning.psi)) lmrob.control(psi=x$args$psi)$tuning.psi else x$args$tuning.psi,3))) }) lst <- unique(lst) ## because of rounding, down from 21 to 5 ! lst <- lst[sapply(lst, function(x) !is.null(x[[1]]))] # 5 --> 4 ## convert to table tbl <- do.call(rbind, lst) tbl[,2:3] <- apply(tbl[,2:3], 1:2, function(x) { gsub('\\$NA\\$', '\\\\texttt{NA}', paste('$', unlist(x), collapse=', ', '$', sep='')) }) tbl[,1] <- paste('\\texttt{', tbl[,1], '}', sep='') colnames(tbl) <- paste('\\texttt{', c('psi', 'tuning.chi', 'tuning.psi'), '}', sep='') require("xtable") # need also print() method: print(xtable(tbl), sanitize.text.function=identity, include.rownames = FALSE, floating=FALSE) @ %def \vspace{15pt} \caption{Tuning constants of $\psi$-functions used in the simulation.} \label{tab:psi-functions} \end{center} \end{table} \begin{figure} \begin{center} <>= d.x_psi <- function(x, psi) { cc <- lmrob.control(psi = psi)$tuning.psi data.frame(x=x, value=Mpsi(x, cc, psi), psi = psi) } x <- seq(0, 10, length.out = 1000) tmp <- rbind(d.x_psi(x, 'optimal'), d.x_psi(x, 'bisquare'), d.x_psi(x, 'lqq'), d.x_psi(x, 'hampel')) print( ggplot(tmp, aes(x, value, color = psi)) + geom_line(lwd=1.25) + ylab(quote(psi(x))) + scale_color_discrete(name = quote(psi ~ '-function'))) @ \end{center} \caption{$\psi$-functions used in the simulation.} \label{fig:psi.functions} \end{figure} \subsection{Designs} Two types of designs are used in the simulation: fixed and random designs. One design with $n=20$ observations, $p=1+3$ predictors and strong leverage points. This design also includes an intercept column. It is shown in Fig.~\ref{fig:design-predict}. The other designs are random, i.e., regenerated for every repetition, and the models are fitted without an intercept. We use the same distribution to generate the designs as for the errors. The number of observations simulated are $n = 25, 50, 100, 400$ and the ratio to the number of parameters are $p/n = 1/20, 1/10, 1/5, 1/3, 1/2$. We round $p$ to the nearest smaller integer if necessary. The random datasets are generated using the following code. 
<>= f.gen <- function(n, p, rep, err) { ## get function name and parameters lerrfun <- f.errname(err$err) lerrpar <- err$args ## generate random predictors ret <- replicate(rep, matrix(do.call(lerrfun, c(n = n*p, lerrpar)), n, p), simplify=FALSE) attr(ret[[1]], 'gen') <- f.gen ret } ratios <- c(1/20, 1/10, 1/5, 1/3, 1/2)## p/n lsit <- expand.grid(n = c(25, 50, 100, 400), p = ratios) lsit <- within(lsit, p <- as.integer(n*p)) .errs.normal.1 <- list(err = 'normal', args = list(mean = 0, sd = 1)) for (i in 1:NROW(lsit)) assign(paste('rand',lsit[i,1],lsit[i,2],sep='_'), f.gen(lsit[i,1], lsit[i,2], rep = 1, err = .errs.normal.1)[[1]]) @ An example design is shown in Fig.~\ref{fig:example.design}. \begin{figure} \begin{center} <>= require(GGally) colnames(rand_25_5) <- paste0("X", 1:5) # workaround new (2014-12) change in GGally ## and the 2016-11-* change needs data frames: df.r_25_5 <- as.data.frame(rand_25_5) try( ## fails with old GGally and new packageVersion("ggplot2") >= "2.2.1.9000" print(ggpairs(df.r_25_5, axisLabels="show", title = "rand_25_5: n=25, p=5")) ) @ \end{center} \caption{Example random design.} \label{fig:example.design} \end{figure} \subsection{Error Distributions} We simulate the following error distributions \begin{itemize} \item standard normal distribution, \item $t_5$, $t_3$, $t_1$, \item centered skewed t with $df = \infty, 5$ and $\gamma = 2$ (denoted by \emph{cskt$(\infty,2)$} and \emph{cskt}$(5,2)$, respectively); as introduced by \citet{fernandez1998bayesian} using the \T{R} package \T{skewt}, \item contaminated normal, $\Norm{0,1}$ contaminated with $10\%$ $\Norm{0, 10}$ (symmetric, \emph{cnorm}$(0.1,0,3.16)$) or $\Norm{4, 1}$ (asymmetric, \emph{cnorm}$(0.1,4,1)$). \end{itemize} \subsection{Covariance Matrix Estimators} For the standard MM estimator, we compare ${\rm Avar}_1$ of \citet{croux03} and the empirical weighted covariance matrix estimate corrected by Huber's small sample correction as described in \citet{HubPR09} (denoted by \emph{Wssc}). The latter is also used for the variation of the MM estimate proposed by \citet{maronna2009correcting}. For the SMD and SMDM variants we use the covariance matrix estimate as described in \citet{KS2011} (\emph{W$\tau$}). The covariance matrix estimate consists of three parts: \begin{equation*} {\rm cov}(\hat\beta) = \sigma^2\gamma\bld V_{\bld X}^{-1}. \end{equation*} The SMD and SMDM methods of \T{lmrob} use the following defaults. \begin{equation} \label{eq:gammatau} \hat\gamma = \frac{\frac{1}{n}\sum_{i=1}^n\tau_i^2 \psi\left(\frac{r_i}{\tau_i\hat\sigma}\right)^2} {\frac{1}{n}\sum_{i=1}^n\psi'\left(\frac{r_i}{\tau_i\hat\sigma}\right)} \end{equation} where $\tau_i$ is the rescaling factor used for the D-scale estimate (see \citet{KS2011}). \noindent\textbf{Remark: } Equation \eqref{eq:gammatau} is a corrected version of $\gamma$. It was changed in \texttt{robustbase} version \texttt{0.91} (April 2014) to ensure that the equation reduces to $1$ in the classical case ($\psi(x) = x$). If the former (incorrect) version is needed for compatibility reasons, it can be obtained by adding the argument \texttt{cov.corrfact = "tauold"}. \begin{equation*} \bld{\widehat V}_{\bld X} = \frac{1}{\frac{1}{n}\sum_{i=1}^n\wgt_{ii}}\bld X^T\bld W\bld X \end{equation*} where $\bld W = \diag\left(\wgt\left(\frac{r_1}{\hat\sigma}\right), \dots, \wgt\left(\frac{r_n}{\hat\sigma}\right)\right)$. The function $\wgt(r) = \psi(r)/r$ produces the robustness weights. \section{Simulation} The main loop of the simulation is fairly simple. 
(This code is only run if there are no aggregate results available.) %% set eval to TRUE for chunks simulation-run and simulation-aggr %% if you really want to run the simulations again. %% (better fail with an error than run for weeks) <>= aggrResultsFile <- file.path(robustDta, "aggr_results.Rdata") <>= if (!file.exists(aggrResultsFile)) { ## load packages required only for simulation stopifnot(require(robust), require(skewt), require(foreach)) if (!is.null(getOption("cores"))) { if (getOption("cores") == 1) registerDoSEQ() ## no not use parallel processing else { stopifnot(require(doParallel)) if (.Platform$OS.type == "windows") { cl <- makeCluster(getOption("cores")) clusterExport(cl, c("N", "robustDoc")) clusterEvalQ(cl, slave <- TRUE) clusterEvalQ(cl, source(file.path(robustDoc, 'simulation.init.R'))) registerDoParallel(cl) } else registerDoParallel() } } else registerDoSEQ() ## no not use parallel processing for (design in c("dd", ls(pattern = 'rand_\\d+_\\d+'))) { print(design) ## set design estlist$design <- get(design) estlist$use.intercept <- !grepl('^rand', design) ## add design.predict: pc estlist$design.predict <- if (is.null(attr(estlist$design, 'gen'))) f.prediction.points(estlist$design) else f.prediction.points(estlist$design, max.pc = 2) filename <- file.path(robustDta, sprintf('r.test.final.%s.Rdata',design)) if (!file.exists(filename)) { ## run print(system.time(r.test <- f.sim(estlist, silent = TRUE))) ## save save(r.test, file=filename) ## delete output rm(r.test) ## run garbage collection gc() } } } @ The variable \T{estlist} is a list containing all the necessary settings required to run the simulation as outlined above. Most of its elements are self-explanatory. <>= str(estlist, 1) @ \T{errs} is a list containing all the error distributions to be simulated. The entry for the standard normal looks as follows. <>= estlist$errs[[1]] @ \T{err} is translated internally to the corresponding random generation or quantile function, e.g., in this case \T{rnorm} or \T{qnorm}. \T{args} is a list containing all the required arguments to call the function. The errors are then generated internally with the following call. <>= set.seed(estlist$seed) errs <- c(sapply(1:nrep, function(x) do.call(fun, c(n = nobs, args)))) @ All required random numbers are generated at once instead of during the simulation. Like this, it is certain, that all the compared methods run on exactly the same data. The entry \T{procedures} follows a similar convention. \T{design.predict} contains the design used for the prediction of observations and calculation of confidence or prediction intervals. The objects returned by the procedures are processed by the functions contained in the \T{estlist\$output} list. <<>>= str(estlist$output[1:3], 2) @ The results are stored in a 4-dimensional array. The dimensions are: repetition number, type of value, procedure id, error id. Using \T{apply} it is very easy and fast to generate summary statistics. The raw results are stored on the hard disk, because typically it takes much longer to execute all the procedures than to calculate the summary statistics. The variables saved take up a lot of space quite quickly, so only the necessary data is stored. These are $\sigma$, $\bld\beta$ as well as the corresponding standard errors. To speed up the simulation routine \T{f.sim}, the simulations are carried out in parallel, as long as this is possible. This is accomplished with the help of the \T{R}-package \T{foreach}. 
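In outline, the pattern is the following (a minimal sketch, not evaluated
here; the chunk name and the trivial \T{sqrt} work unit are illustrative
only, while the registration of the \T{doParallel} backend mirrors the one
used in the simulation chunk below):
<<parallel-sketch, eval=FALSE>>=
## register a parallel backend; getOption("cores") was set in the init chunk
stopifnot(require(foreach), require(doParallel))
registerDoParallel(cores = getOption("cores"))
## foreach() then distributes its iterations over the registered workers
res <- foreach(i = 1:4) %dopar% sqrt(i)
@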
This is most easily done on a machine with multiple processors or cores. The \T{multicore} package provides the methods to do so easily. The worker processes are just forked from the main \T{R} process. After all the methods have been simulated, the simulation output is processed. The code is quite lengthy and thus not displayed here (check the Sweave source file \T{lmrob\_simulation.Rnw}). The residuals, robustness weights, leverages and $\tau$ values have to be recalculated. Using vectorized operations and some specialized \T{C} code, this is quite cheap. The summary statistics generated are discussed in the next section. <>= if (!file.exists(aggrResultsFile)) { files <- list.files(robustDta, pattern = 'r.test.final\\.') res <- foreach(file = files) %dopar% { ## get design, load r.test, initialize other stuff design <- substr(basename(file), 14, nchar(basename(file)) - 6) cat(design, ' ') load(file.path(robustDta, file)) estlist <- attr(r.test, 'estlist') use.intercept <- if (!is.null(estlist$use.intercept)) estlist$use.intercept else TRUE sel <- dimnames(r.test)[[3]] ## [dimnames(r.test)[[3]] != "estname=lm"] n.betas <- paste('beta',1:(NCOL(estlist$design)+use.intercept),sep='_') ## get design lX <- if (use.intercept) as.matrix(cbind(1, get(design))) else as.matrix(get(design)) n <- NROW(lX) p <- NCOL(lX) ## prepare arrays for variable designs and leverages if (is.function(attr(estlist$design, 'gen'))) { lXs <- array(NA, c(n, NCOL(lX), dim(r.test)[c(1, 4)]), list(Obs = NULL, Pred = colnames(lX), Data = NULL, Errstr = dimnames(r.test)[[4]])) } ## generate errors lerrs <- array(NA, c(n, dim(r.test)[c(1,4)]) , list(Obs = NULL, Data = NULL, Errstr = dimnames(r.test)[[4]])) for (i in 1:dim(lerrs)[3]) { lerrstr <- f.list2str(estlist$errs[[i]]) lerr <- f.errs(estlist, estlist$errs[[i]], gen = attr(estlist$design, 'gen'), nobs = n, npar = NCOL(lX)) lerrs[,,lerrstr] <- lerr if (!is.null(attr(lerr, 'designs'))) { ## retrieve generated designs: this returns a list of designs lXs[,,,i] <- unlist(attr(lerr, 'designs')) if (use.intercept) stop('intercept not implemented for random desings') } rm(lerr) } if (is.function(attr(estlist$design, 'gen'))) { ## calculate leverages lXlevs <- apply(lXs, 3:4, .lmrob.hat) } ## calculate fitted values from betas if (!is.function(attr(estlist$design, 'gen'))) { ## fixed design case lfitted <- apply(r.test[,n.betas,sel,,drop=FALSE],c(3:4), function(bhat) { lX %*% t(bhat) } ) } else { ## variable design case lfitted <- array(NA, n*prod(dim(r.test)[c(1,4)])*length(sel)) lfitted <- .C('R_calc_fitted', as.double(lXs), ## designs as.double(r.test[,n.betas,sel,,drop=FALSE]), ## betas as.double(lfitted), ## result as.integer(n), ## n as.integer(p), ## p as.integer(dim(r.test)[1]), ## nrep as.integer(length(sel)), ## n procstr as.integer(dim(r.test)[4]), ## n errstr DUP=FALSE, NAOK=TRUE, PACKAGE="robustbase")[[3]] } tdim <- dim(lfitted) <- c(n, dim(r.test)[1], length(sel),dim(r.test)[4]) lfitted <- aperm(lfitted, c(1,2,4,3)) ## calculate residuals = y - fitted.values lfitted <- as.vector(lerrs) - as.vector(lfitted) dim(lfitted) <- tdim[c(1,2,4,3)] lfitted <- aperm(lfitted, c(1,2,4,3)) dimnames(lfitted) <- c(list(Obs = NULL), dimnames(r.test[,,sel,,drop=FALSE])[c(1,3,4)]) lresids <- lfitted rm(lfitted) ## calculate lm MSE and trim trimmed MSE of betas tf.MSE <- function(lbetas) { lnrm <- rowSums(lbetas^2) c(MSE=mean(lnrm,na.rm=TRUE),MSE.1=mean(lnrm,trim=trim,na.rm=TRUE)) } MSEs <- apply(r.test[,n.betas,,,drop=FALSE],3:4,tf.MSE) li <- 1 ## so we can reconstruct where we are 
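## f.aggregate.results below is applied to each (procedure, error
## distribution) cell of the residual array: it recomputes the psi- and
## tau-based correction terms and returns trimmed summaries of gamma,
## sigma, the efficiency relative to lm, empirical test levels and
## powers, and the empirical coverage probabilities at the prediction
## points.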
lres <- apply(lresids,3:4,f.aggregate.results <- { function(lresid) { ## the counter li tells us, where we are ## we walk dimensions from left to right lcdn <- f.get.current.dimnames(li, dimnames(lresids), 3:4) lr <- r.test[,,lcdn[1],lcdn[2]] ## update counter li <<- li + 1 ## transpose and normalize residuals with sigma lresid <- t(lresid) / lr[,'sigma'] if (lcdn[1] != 'estname=lm') { ## convert procstr to proclst and get control list largs <- f.str2list(lcdn[1])[[1]]$args if (grepl('lm.robust', lcdn[1])) { lctrl <- list() lctrl$psi <- toupper(largs$weight2) lctrl$tuning.psi <- f.eff2c.psi(largs$efficiency, lctrl$psi) lctrl$method <- 'MM' } else { lctrl <- do.call('lmrob.control',largs) } ## calculate correction factors ## A lsp2 <- rowSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi)^2) ## B lspp <- rowSums(lpp <- Mpsi(lresid,lctrl$tuning.psi, lctrl$psi,1)) ## calculate Huber\'s small sample correction factor lK <- 1 + rowSums((lpp - lspp/n)^2)*NCOL(lX)/lspp^2 ## 1/n cancels } else { lK <- lspp <- lsp2 <- NA } ## only calculate tau variants if possible if (grepl('args.method=\\w*(D|T)\\w*\\b', lcdn[1])) { ## SMD or SMDM ## calculate robustness weights lwgts <- Mwgt(lresid, lctrl$tuning.psi, lctrl$psi) ## function to calculate robustified leverages tfun <- if (is.function(attr(estlist$design, 'gen'))) function(i) { if (all(is.na(wi <- lwgts[i,]))) wi else .lmrob.hat(lXs[,,i,lcdn[2]],wi) } else function(i) { if (all(is.na(wi <- lwgts[i,]))) wi else .lmrob.hat(lX, wi) } llev <- sapply(1:dim(r.test)[1], tfun) ## calculate unique leverages lt <- robustbase:::lmrob.tau(list(),h=llev,control=lctrl) ## normalize residuals with tau (transpose lresid) lresid <- t(lresid) / lt ## A lsp2t <- colSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi)^2) ## B lsppt <- colSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi,1)) } else { lsp2t <- lsppt <- NA } ## calculate raw scales based on the errors lproc <- f.str2list(lcdn[1])[[1]] q <- NA M <- NA if (lproc$estname == 'lmrob.mar' && lproc$args$type == 'qE') { ## for lmrob_mar, qE variant lctrl <- lmrob.control(psi = 'bisquare', tuning.chi=uniroot(function(c) robustbase:::lmrob.bp('bisquare', c) - (1-p/n)/2, c(1, 3))$root) se <- apply(lerrs[,,lcdn[2]],2,lmrob.mscale,control=lctrl,p=p) ltmp <- se/lr[,'sigma'] q <- median(ltmp, na.rm = TRUE) M <- mad(ltmp, na.rm = TRUE) } else if (!is.null(lproc$args$method) && lproc$args$method == 'SMD') { ## for D-scales se <- apply(lerrs[,,lcdn[2]],2,lmrob.dscale,control=lctrl, kappa=robustbase:::lmrob.kappa(control=lctrl)) ltmp <- se/lr[,'sigma'] q <- median(ltmp, na.rm = TRUE) M <- mad(ltmp, na.rm = TRUE) } ## calculate empirical correct test value (to yield 5% level) t.val_2 <- t.val_1 <- quantile(abs(lr[,'beta_1']/lr[,'se_1']), 0.95, na.rm = TRUE) if (p > 1) t.val_2 <- quantile(abs(lr[,'beta_2']/lr[,'se_2']), 0.95, na.rm = TRUE) ## return output: summary statistics: c(## gamma AdB2.1 = mean(lsp2/lspp^2,trim=trim,na.rm=TRUE)*n, K2AdB2.1 = mean(lK^2*lsp2/lspp^2,trim=trim,na.rm=TRUE)*n, AdB2t.1 = mean(lsp2t/lsppt^2,trim=trim,na.rm=TRUE)*n, sdAdB2.1 = sd.trim(lsp2/lspp^2*n,trim=trim,na.rm=TRUE), sdK2AdB2.1 = sd.trim(lK^2*lsp2/lspp^2*n,trim=trim,na.rm=TRUE), sdAdB2t.1 = sd.trim(lsp2t/lsppt^2*n,trim=trim,na.rm=TRUE), ## sigma medsigma = median(lr[,'sigma'],na.rm=TRUE), madsigma = mad(lr[,'sigma'],na.rm=TRUE), meansigma.1 = mean(lr[,'sigma'],trim=trim,na.rm=TRUE), sdsigma.1 = sd.trim(lr[,'sigma'],trim=trim,na.rm=TRUE), meanlogsigma = mean(log(lr[,'sigma']),na.rm=TRUE), meanlogsigma.1 = mean(log(lr[,'sigma']),trim=trim,na.rm=TRUE), 
sdlogsigma = sd(log(lr[,'sigma']),na.rm=TRUE), sdlogsigma.1 = sd.trim(log(lr[,'sigma']),trim=trim,na.rm=TRUE), q = q, M = M, ## beta efficiency.1 = MSEs['MSE.1','estname=lm',lcdn[2]] / MSEs['MSE.1',lcdn[1],lcdn[2]], ## t-value: level emplev_1 = mean(abs(lr[,'beta_1']/lr[,'se_1']) > qt(0.975, n - p), na.rm = TRUE), emplev_2 = if (p>1) { mean(abs(lr[,'beta_2']/lr[,'se_2']) > qt(0.975, n - p), na.rm = TRUE) } else NA, ## t-value: power power_1_0.2 = mean(abs(lr[,'beta_1']-0.2)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.2 = if (p>1) { mean(abs(lr[,'beta_2']-0.2)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.4 = mean(abs(lr[,'beta_1']-0.4)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.4 = if (p>1) { mean(abs(lr[,'beta_2']-0.4)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.6 = mean(abs(lr[,'beta_1']-0.6)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.6 = if (p>1) { mean(abs(lr[,'beta_2']-0.6)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.8 = mean(abs(lr[,'beta_1']-0.8)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.8 = if (p>1) { mean(abs(lr[,'beta_2']-0.8)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_1 = mean(abs(lr[,'beta_1']-1)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_1 = if (p>1) { mean(abs(lr[,'beta_2']-1)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, ## coverage probability: calculate empirically ## the evaluation points are constant, but the designs change ## therefore this makes only sense for fixed designs cpr_1 = mean(lr[,'upr_1'] < 0 | lr[,'lwr_1'] > 0, na.rm = TRUE), cpr_2 = mean(lr[,'upr_2'] < 0 | lr[,'lwr_2'] > 0, na.rm = TRUE), cpr_3 = mean(lr[,'upr_3'] < 0 | lr[,'lwr_3'] > 0, na.rm = TRUE), cpr_4 = mean(lr[,'upr_4'] < 0 | lr[,'lwr_4'] > 0, na.rm = TRUE), cpr_5 = if (any(colnames(lr) == 'upr_5')) { mean(lr[,'upr_5'] < 0 | lr[,'lwr_5'] > 0, na.rm = TRUE) } else NA, cpr_6 = if (any(colnames(lr) == 'upr_6')) { mean(lr[,'upr_6'] < 0 | lr[,'lwr_6'] > 0, na.rm = TRUE) } else NA, cpr_7 = if (any(colnames(lr) == 'upr_7')) { mean(lr[,'upr_7'] < 0 | lr[,'lwr_7'] > 0, na.rm = TRUE) } else NA ) }}) ## convert to data.frame lres <- f.a2df.2(lres, split = '___NO___') ## add additional info lres$n <- NROW(lX) lres$p <- NCOL(lX) lres$nmpdn <- with(lres, (n-p)/n) lres$Design <- design ## clean up rm(r.test, lXs, lXlevs, lresids, lerrs) gc() ## return lres lres } save(res, trim, file = aggrResultsFile) ## stop cluster if (exists("cl")) stopCluster(cl) } <>= load(aggrResultsFile) ## this will fail if the file is not found (for a reason) ## set eval to TRUE for chunks simulation-run and simulation-aggr ## if you really want to run the simulations again. 
## (better fail with an error than run for weeks) ## combine list elements to data.frame test.1 <- do.call('rbind', res) test.1 <- within(test.1, { Method[Method == "SM"] <- "MM" Method <- Method[, drop = TRUE] Estimator <- interaction(Method, D.type, drop = TRUE) Estimator <- f.rename.level(Estimator, 'MM.S', 'MM') Estimator <- f.rename.level(Estimator, 'SMD.D', 'SMD') Estimator <- f.rename.level(Estimator, 'SMDM.D', 'SMDM') Estimator <- f.rename.level(Estimator, 'MM.qT', 'MMqT') Estimator <- f.rename.level(Estimator, 'MM.qE', 'MMqE') Estimator <- f.rename.level(Estimator, 'MM.rob', 'MMrobust') Estimator <- f.rename.level(Estimator, 'lsq.lm', 'OLS') Est.Scale <- f.rename.level(Estimator, 'MM', 'sigma_S') Est.Scale <- f.rename.level(Est.Scale, 'MMrobust', 'sigma_robust') Est.Scale <- f.rename.level(Est.Scale, 'MMqE', 'sigma_S*qE') Est.Scale <- f.rename.level(Est.Scale, 'MMqT', 'sigma_S*qT') Est.Scale <- f.rename.level(Est.Scale, 'SMDM', 'sigma_D') Est.Scale <- f.rename.level(Est.Scale, 'SMD', 'sigma_D') Est.Scale <- f.rename.level(Est.Scale, 'OLS', 'sigma_OLS') Psi <- f.rename.level(Psi, 'hampel', 'Hampel') }) ## add interaction of Method and Cov test.1 <- within(test.1, { method.cov <- interaction(Estimator, Cov, drop=TRUE) levels(method.cov) <- sub('\\.+vcov\\.(a?)[wacrv1]*', '\\1', levels(method.cov)) method.cov <- f.rename.level(method.cov, "MMa", "MM.Avar1") method.cov <- f.rename.level(method.cov, "MMrobust.Default", "MMrobust.Wssc") method.cov <- f.rename.level(method.cov, "MM", "MM.Wssc") method.cov <- f.rename.level(method.cov, "SMD", "SMD.Wtau") method.cov <- f.rename.level(method.cov, "SMDM", "SMDM.Wtau") method.cov <- f.rename.level(method.cov, "MMqT", "MMqT.Wssc") method.cov <- f.rename.level(method.cov, "MMqE", "MMqE.Wssc") method.cov <- f.rename.level(method.cov, "OLS.Default", "OLS") ## ratio: the closest 'desired ratios' instead of exact p/n; ## needed in plots only for stat_*(): median over "close" p/n's: ratio <- ratios[apply(abs(as.matrix(1/ratios) %*% t(as.matrix(p / n)) - 1), 2, which.min)] }) ## calculate expected values of psi^2 and psi' test.1$Ep2 <- test.1$Epp <- NA for(Procstr in levels(test.1$Procstr)) { args <- f.str2list(Procstr)[[1]]$args if (is.null(args)) next lctrl <- do.call('lmrob.control',args) test.1$Ep2[test.1$Procstr == Procstr] <- robustbase:::lmrob.E(psi(r)^2, lctrl, use.integrate = TRUE) test.1$Epp[test.1$Procstr == Procstr] <- robustbase:::lmrob.E(psi(r,1), lctrl, use.integrate = TRUE) } ## drop some observations, separate fixed and random designs test.fixed <- droplevels(subset(test.1, n == 20)) ## n = 20 -- fixed design test.1 <- droplevels(subset(test.1, n != 20)) ## n !=20 -- random designs test.lm <- droplevels(subset(test.1, Function == 'lm')) # lm = OLS test.1 <- droplevels(subset(test.1, Function != 'lm')) # Rob := all "robust" test.lm$Psi <- NULL test.lm.2 <- droplevels(subset(test.lm, Error == 'N(0,1)')) # OLS for N(*) test.2 <- droplevels(subset(test.1, Error == 'N(0,1)' & Function != 'lm'))# Rob for N(*) ## subsets test.3 <- droplevels(subset(test.2, Method != 'SMDM'))# Rob, not SMDM for N(*) test.4 <- droplevels(subset(test.1, Method != 'SMDM'))# Rob, not SMDM for all @ \section{Simulation Results} \subsection{Criteria} The simulated methods are compared using the following criteria. \textbf{Scale estimates.} The criteria for scale estimates are all calculated on the log-scale. The bias of the estimators is measured by the $\Sexpr{trim*100}\%$ trimmed mean. To recover a meaningful scale, the results are exponentiated before plotting. 
It is easy to see that this is equivalent to calculating geometric means. Since the methods are all tuned at the central model, ${\mathcal N}(0,1)$, a meaningful comparison of biases can only be made for ${\mathcal N}(0,1)$ distributed errors. The variability of the estimators, on the other hand, can be compared over all simulated error distributions. It is measured by the $\Sexpr{trim*100}\%$ trimmed standard deviation, rescaled by the square root of the number of observations. For completeness, the statistics used to compare scale estimates in \citet{maronna2009correcting} are also calculated. They are defined as
\begin{equation} \label{eq:def.q.and.M}
  q = \median\left(\frac{S(\bld e)}{\hat\sigma_S}\right), \quad
  M = \mad\left(\frac{S(\bld e)}{\hat\sigma_S}\right),
\end{equation}
where $S(\bld e)$ stands for the S-scale estimate evaluated for the actual errors $\bld e$. For the D-scale estimate, the definition is analogous. Since there is no design to correct for, we set $\tau_i = 1\ \forall i$.

\textbf{Coefficients.} The efficiency of estimated regression coefficients $\bld{\hat\beta}$ is characterized by their mean squared error (\emph{MSE}). Since we simulate under $H_0: \bld\beta = 0$, this is determined by the covariance matrix of $\bld{\hat\beta}$. We use $\Erw\left[\norm{\bld{\hat\beta}}_2^2\right] = \sum_{j=1}^p \var(\hat\beta_j)$ as a summary. When comparing to the MSE of the ordinary least squares estimate (\emph{OLS}), this gives the efficiency, which, by the choice of tuning constants of $\psi$, should yield
\begin{equation*}
  \frac{{\rm MSE}(\bld{\hat\beta}_{\rm OLS})}{{\rm MSE}(\bld{\hat\beta})} \approx 0.95
\end{equation*}
for standard normally distributed errors. The simulation mean of $\sum_{j=1}^p \var(\hat\beta_j)$ is calculated with $\Sexpr{trim*100}\%$ trimming. For other error distributions, this ratio should be larger than $1$, since by using robust procedures we expect to gain efficiency relative to the least squares estimate.

$\bld\gamma$\textbf{.} We compare the behavior of the various estimators of $\gamma$ by calculating the trimmed mean and the trimmed standard deviation for standard normally distributed errors.

\textbf{Covariance matrix estimate.} The covariance matrix estimates are compared indirectly via the performance of the resulting test statistics. We compare the empirical level of the hypothesis tests $H_0: \beta_j = 0$ for some $j \in \{1,\dots, p\}$. The power of the tests is compared by testing $H_0: \beta_j = b$ for several values of $b>0$. The formal power of a more liberal test is generally higher. Therefore, in order for this comparison to be meaningful, the critical value for each test statistic was corrected such that all tests have the same simulated level of $5\%$. The simple hypothesis tests give only limited insights. To investigate the effects of other error distributions, e.g., asymmetric error distributions, we compare the confidence intervals for the prediction of some fixed points. Since it was not clear how to assess the quality of prediction intervals, either at the central or at the simulated model, we do not calculate them here. A small number of prediction points is already enough if they are chosen properly. We chose to use seven points lying on the first two principal components, spaced evenly from the center of the design used to the extended range of the design.
The principal components were calculated robustly (using \T{covMcd} of the \T{robustbase} package) and the range was extended by a fraction of $0.5$. An example is shown in Figure~\ref{fig:design-predict}. \subsection{Results} The results are given here as plots (Fig.~\ref{fig:meanscale-1} to Fig.~\ref{fig:cpr}). For a complete discussion of the results, we refer to \citet{KS2011}. The different $\psi$-functions are each plotted in a different facet, except for Fig.~\ref{fig:qscale-all}, Fig.~\ref{fig:Mscale-all} and Fig.~\ref{fig:lqq-level}, where the facets show the results for various error distributions. The plots are augmented with auxiliary lines to ease the comparison of the methods. The lines connect the median values over the values of $n$ for each simulated ratio $p/n$. In many plots the y-axis has been truncated. Points in the grey shaded area represent truncated values using a different scale. \begin{figure} \begin{center} <>= ## ## exp(mean(log(sigma))): this looks almost identical to mean(sigma) print(ggplot(test.3, aes(p/n, exp(meanlogsigma.1), color = Est.Scale)) + stat_summary(aes(x=ratio), # <- "rounded p/n": --> median over "neighborhood" fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Psi) + ylab(quote('geometric ' ~ mean(hat(sigma)))) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(test.3$Est.Scale))) @ \end{center} \caption{Mean of scale estimates for normal errors. The mean is calculated with $\Sexpr{trim*100}\%$ trimming. The lines connect the median values for each simulated ratio $p/n$. Results for random designs only. } \label{fig:meanscale-1} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(test.3, aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + ylab(quote(sd(log(hat(sigma)))*sqrt(n))) + facet_wrap(~ Psi) + geom_point (data=test.lm.2, alpha=alpha.n, aes(color = Est.Scale)) + stat_summary(data=test.lm.2, aes(x=ratio, color = Est.Scale), fun.y=median, geom='line') + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels= lab(test.3 $Est.Scale, test.lm.2$Est.Scale))) @ \end{center} \caption{Variability of the scale estimates for normal errors. The standard deviation is calculated with $\Sexpr{trim*100}\%$ trimming. } \label{fig:sdscale-1} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(test.4, aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) + ylim(with(test.4, range(sdlogsigma.1*sqrt(n)))) + ylab(quote(sd(log(hat(sigma)))*sqrt(n))) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = Error), alpha = alpha.error) + facet_wrap(~ Psi) + ## "FIXME" (?): the next 'test.lm' one give warnings geom_point (data=test.lm, aes(color = Est.Scale), alpha=alpha.n) + ##-> Warning: Removed 108 rows containing missing values (geom_point). stat_summary(data=test.lm, aes(x = ratio, color = Est.Scale), fun.y=median, geom='line') + ##-> Warning: Removed 108 rows containing non-finite values (stat_summary). 
g.scale_shape(labels=lab(test.4$Error)) + scale_colour_discrete("Scale Est.", labels=lab(test.4 $Est.Scale, test.lm$Est.Scale))) @ \end{center} \caption{Variability of the scale estimates for all simulated error distributions.} \label{fig:sdscale-all} \end{figure} \begin{figure} \begin{center} <>= t3est2 <- droplevels(subset(test.3, Estimator %in% c("SMD", "MMqE"))) print(ggplot(t3est2, aes(p/n, q, color = Est.Scale)) + ylab(quote(q)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Psi) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale))) @ \end{center} \caption{$q$ statistic for normal errors. $q$ is defined in \eqref{eq:def.q.and.M}.} \label{fig:qscale-1} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t3est2, aes(p/n, M/q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + g.scale_y_log10_0.05() + facet_wrap(~ Psi) + ylab(quote(M/q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale))) @ \end{center} \caption{$M/q$ statistic for normal errors. $M$ and $q$ are defined in \eqref{eq:def.q.and.M}.} \label{fig:Mscale-1} \end{figure} \begin{figure} \begin{center} <>= t1.bi <- droplevels(subset(test.1, Estimator %in% c("SMD", "MMqE") & Psi == 'bisquare')) print(ggplot(t1.bi, aes(p/n, q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Error) + ## labeller missing! ylab(quote(q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(tmp$Est.Scale)), legend.mod = legend.mod) @ \end{center} \caption{$q$ statistic for \emph{bisquare} $\psi$. } \label{fig:qscale-all} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t1.bi, aes(p/n, M/q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + g.scale_y_log10_0.05() + facet_wrap(~ Error) + ylab(quote(M/q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(tmp$Est.Scale)), legend.mod = legend.mod) @ \end{center} \caption{$M/q$ statistic for \emph{bisquare} $\psi$. } \label{fig:Mscale-all} \end{figure} \clearpage% not nice, but needed against LaTeX Error: Too many unprocessed floats. \begin{figure} \begin{center} <>= print(ggplot(test.2, aes(p/n, efficiency.1, color = Estimator)) + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 0.95) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + facet_wrap(~ Psi) + ylab(quote('efficiency of' ~~ hat(beta))) + g.scale_shape(quote(n)) + scale_colour_discrete(name = "Estimator", labels = lab(test.2$Estimator))) @ \end{center} \caption{Efficiency for normal errors. The efficiency is calculated by comparing to an OLS estimate and averaging with $\Sexpr{trim*100}\%$ trimming. 
} \label{fig:efficiency} \end{figure} \begin{figure} \begin{center} <>= t.1xt1 <- droplevels(subset(test.1, Error != 't1')) print(ggplot(t.1xt1, aes(p/n, efficiency.1, color = Estimator)) + ylab(quote('efficiency of '~hat(beta))) + geom_point(aes(shape = Error), alpha = alpha.error) + geom_hline(yintercept = 0.95) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + g.scale_shape(values=c(16,17,15,3,7,8,9,1,2,4)[-4], labels=lab(t.1xt1$Error)) + facet_wrap(~ Psi) + scale_colour_discrete(name = "Estimator", labels = lab(t.1xt1$Estimator))) @ \end{center} \caption{Efficiency for all simulated error distributions except $t_1$. } \label{fig:efficiency-all} \end{figure} \begin{figure} \begin{center} <>= t.2o. <- droplevels(subset(test.2, !is.na(AdB2t.1))) print(ggplot(t.2o., aes(p/n, AdB2.1/(1-p/n), color = Estimator)) + geom_point(aes(shape=factor(n)), alpha = alpha.n) + geom_point(aes(y=K2AdB2.1/(1-p/n)), alpha = alpha.n) + geom_point(aes(y=AdB2t.1), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + stat_summary(aes(x=ratio, y=K2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype=2) + stat_summary(aes(x=ratio, y=AdB2t.1), fun.y=median, geom='line', linetype=3) + geom_hline(yintercept = 1/0.95) + g.scale_y_log10_1() + scale_shape_discrete(quote(n)) + scale_colour_discrete(name = "Estimator", labels = lab(t.2o.$Estimator)) + ylab(quote(mean(hat(gamma)))) + facet_wrap(~ Psi)) @ \end{center} \caption{Comparing the estimates of $\gamma$. The solid line connects the uncorrected estimate, dotted the $\tau$ corrected estimate and dashed Huber's small sample correction. } \label{fig:AdB2-1} \end{figure} \begin{figure} \begin{center} <>= t.2ok <- droplevels(subset(test.2, !is.na(sdAdB2t.1))) print(ggplot(t.2ok, aes(p/n, sdAdB2.1/(1-p/n), color = Estimator)) + geom_point(aes(shape=factor(n)), alpha = alpha.n) + geom_point(aes(y=sdK2AdB2.1/(1-p/n)), alpha = alpha.n) + geom_point(aes(y=sdAdB2t.1), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + stat_summary(aes(x=ratio, y=sdK2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype= 2) + stat_summary(aes(x=ratio, y=sdAdB2t.1), fun.y=median, geom='line', linetype= 3) + g.scale_y_log10_0.05() + scale_shape_discrete(quote(n)) + scale_colour_discrete(name = "Estimator", labels=lab(t.2ok$Estimator)) + ylab(quote(sd(hat(gamma)))) + facet_wrap(~ Psi)) @ \end{center} \caption{Comparing the estimates of $\gamma$. The solid line connects the uncorrected estimate, dotted the $\tau$ corrected estimate and dashed Huber's small sample correction. } \label{fig:sdAdB2-1} \end{figure} \begin{figure} \begin{center} <>= t.2en0 <- droplevels(subset(test.2, emplev_1 != 0)) print(ggplot(t.2en0, aes(p/n, f.truncate(emplev_1), color = method.cov)) + g.truncate.lines + g.truncate.areas + geom_point(aes(shape = factor(n)), alpha = alpha.n) + scale_shape_discrete(quote(n)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + scale_colour_discrete(name = "Estimator", labels=lab(t.2en0$method.cov)) + ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) + facet_wrap(~ Psi)) @ \end{center} \caption{Empirical levels of test $H_0: \beta_1 = 0$ for normal errors. The y-values are truncated at $\Sexpr{trunc[1]}$ and $\Sexpr{trunc[2]}$. 
} \label{fig:emp-level} \end{figure} \begin{figure} \begin{center} <>= tmp <- droplevels(subset(test.1, Psi == 'lqq' & emplev_1 != 0)) print(ggplot(tmp, aes(p/n, f.truncate(emplev_1), color = method.cov)) + ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) + g.truncate.line + g.truncate.area + geom_point(aes(shape = factor(n)), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + g.scale_shape(quote(n)) + scale_colour_discrete(name = "Estimator", labels=lab(tmp$method.cov)) + facet_wrap(~ Error) , legend.mod = legend.mod ) @ \end{center} \caption{Empirical levels of test $H_0: \beta_1 = 0$ for \emph{lqq} $\psi$-function and different error distributions. } \label{fig:lqq-level} \end{figure} \begin{figure} \begin{center} <>= t2.25 <- droplevels(subset(test.2, n == 25))# <-- fixed n ==> no need for 'ratio' tL2.25 <- droplevels(subset(test.lm.2, n == 25)) scale_col_D2.25 <- scale_colour_discrete(name = "Estimator (Cov. Est.)", labels=lab(t2.25 $method.cov, tL2.25$method.cov)) print(ggplot(t2.25, aes(p/n, power_1_0.2, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.2) )) + geom_point(# aes(shape = Error), alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.2$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-0_2} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_0.4, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.4) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.4$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-0_4} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_0.6, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.6) )) + geom_point(# aes(shape = Error), alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.6$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-0_6} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_0.8, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.8) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.8$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. 
} \label{fig:power-1-0_8} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_1, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 1) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 1$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-1} \end{figure} %\clearpage \begin{figure} \begin{center} %% now (2016-11 GGally) works --- but fails with new 2018-05 ggplot2: <>= pp <- f.prediction.points(dd)[1:7,] ## Worked in older ggplot2 -- now plotmatrix() is gone, to be replaced by GGally::ggpairs): ## tmp <- plotmatrix(pp)$data ## tmp$label <- as.character(1:7) ## print(plotmatrix(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5)) if(FALSE) { tmp <- ggpairs(pp)$data tmp$label <- as.character(1:7) # and now? } ## ggpairs() + geom_text() does *NOT* work {ggpairs has own class} ## print(ggpairs(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5)) try( ## fails with old GGally and new packageVersion("ggplot2") >= "2.2.1.9000" print( ggpairs(dd) )## now (2016-11) fine ) @ \end{center} \caption{Prediction points for fixed design. The black points are the points of the original design. The red digits indicate the numbers and locations of the points where predictions are taken.} \label{fig:design-predict} \end{figure} \begin{figure} \begin{center} <>= n.cprs <- names(test.fixed)[grep('cpr', names(test.fixed))] # test.fixed: n=20 => no 'x=ratio' test.5 <- melt(test.fixed[,c('method.cov', 'Error', 'Psi', n.cprs)]) test.5 <- within(test.5, { Point <- as.numeric(do.call('rbind', strsplit(levels(variable), '_'))[,2])[variable] }) print(ggplot(test.5, aes(Point, f.truncate(value), color = method.cov)) + geom_point(aes(shape = Error), alpha = alpha.error) + g.truncate.line + g.truncate.area + stat_summary(fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + g.scale_shape(labels=lab(test.5$Error)) + scale_colour_discrete(name = "Estimator (Cov. Est.)", labels=lab(test.5$method.cov)) + ylab("empirical level of confidence intervals") + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical coverage probabilities. Results for fixed design. The y-values are truncated at $\Sexpr{trunc[2]}$. } \label{fig:cpr} \end{figure} \clearpage \section{Maximum Asymptotic Bias} \label{sec:maximum-asymptotic-bias} The slower redescending $\psi$-functions come with higher asymptotic bias as illustrated in Fig.~\ref{fig:max-asymptotic-bias}. We calculate the asymptotic bias as in \citet{berrendero2007maximum}. <>= ## Henning (1994) eq 33: g <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) rho <- function(x) Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0) integrate(function(x) rho(((1 + theta^2)/s^2*x)^2)*dchisq(x, 1, mu^2/(1 + theta^2)), -Inf, Inf)$value }) ## Martin et al 1989 Section 3.2: for mu = 0 g.2 <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) lctrl$tuning.psi <- lctrl$tuning.chi robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)}) g.2.MM <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) 
robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)}) ## Henning (1994) eq 30, one parameter case g.3 <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) rho <- function(x) Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0) int.x <- Vectorize(function(y) { integrate(function(x) rho((y - x*theta - mu)/s)*dnorm(x)*dnorm(y),-Inf, Inf)$value }) integrate(int.x,-Inf, Inf)$value }) inv.g1 <- function(value, theta, mu, ...) { g <- if (mu == 0) g.2 else g.3 uniroot(function(s) g(s, theta, mu, ...) - value, c(0.1, 100))$root } inv.g1.MM <- function(value, theta, mu, ...) { g <- if (mu == 0) g.2.MM else g.3.MM ret <- tryCatch(uniroot(function(s) g(s, theta, mu, ...) - value, c(0.01, 100)), error = function(e)e) if (inherits(ret, 'error')) { warning('inv.g1.MM: ', value, ' ', theta, ' ', mu,' -> Error: ', ret$message) NA } else { ret$root } } s.min <- function(epsilon, ...) inv.g1(0.5/(1 - epsilon), 0, 0, ...) s.max <- function(epsilon, ...) inv.g1((0.5-epsilon)/(1-epsilon), 0, 0, ...) BS <- Vectorize(function(epsilon, ...) { sqrt(s.max(epsilon, ...)/s.min(epsilon, ...)^2 - 1) }) l <- Vectorize(function(epsilon, ...) { sigma_be <- s.max(epsilon, ...) sqrt((sigma_be/inv.g1.MM(g.2.MM(sigma_be,0,0,...) + epsilon/(1-epsilon),0,0,...))^2 - 1) }) u <- Vectorize(function(epsilon, ...) { gamma_be <- s.min(epsilon, ...) max(l(epsilon, ...), sqrt((gamma_be/inv.g1.MM(g.2.MM(gamma_be,0,0,...) + epsilon/(1-epsilon),0,0,...))^2 - 1)) }) @ \begin{figure}[h!] \begin{center} <>= asymptMBFile <- file.path(robustDta, 'asymptotic.max.bias.Rdata') if (!file.exists(asymptMBFile)) { x <- seq(0, 0.35, length.out = 100) rmb <- rbind(data.frame(l=l(x, psi = 'hampel'), u=u(x, psi = 'hampel'), psi = 'Hampel'), data.frame(l=l(x, psi = 'lqq'), u=u(x, psi = 'lqq'), psi = 'lqq'), data.frame(l=l(x, psi = 'bisquare'), u=u(x, psi = 'bisquare'), psi = 'bisquare'), data.frame(l=l(x, psi = 'optimal'), u=u(x, psi = 'optimal'), psi = 'optimal')) rmb$x <- x save(rmb, file=asymptMBFile) } else load(asymptMBFile) <>= print(ggplot(rmb, aes(x, l, color=psi)) + geom_line() + geom_line(aes(x, u, color=psi), linetype = 2) + xlab(quote("amount of contamination" ~~ epsilon)) + ylab("maximum asymptotic bias bounds") + coord_cartesian(ylim = c(0,10)) + scale_y_continuous(breaks = 1:10) + scale_colour_hue(quote(psi ~ '-function'))) @ \end{center} \caption{Maximum asymptotic bias bound for the $\psi$-functions used in the simulation. Solid line: lower bound. 
Dashed line: upper bound.} \label{fig:max-asymptotic-bias} \end{figure} \bibliographystyle{chicago} \bibliography{robustbase} \end{document} robustbase/vignettes/fastMcd-kmini.Rnw0000644000176200001440000003113213012615634017612 0ustar liggesusers\documentclass[11pt, a4paper]{article} \usepackage[a4paper, text={16cm,25cm}]{geometry} %\VignetteIndexEntry{covMcd() -- Generalizing the FastMCD} %\VignetteDepends{robustbase} \SweaveOpts{prefix.string=mcd, eps=FALSE, pdf=TRUE, strip.white=true} \SweaveOpts{width=6, height=4.1} \usepackage{amsmath} \usepackage{amsfonts}% \mathbb \usepackage{mathtools}% -> \floor, \ceil \usepackage[utf8]{inputenc} %% The following is partly R's share/texmf/Rd.sty \usepackage{color} \usepackage{hyperref} \definecolor{Blue}{rgb}{0,0,0.8} \definecolor{Red}{rgb}{0.7,0,0} \hypersetup{% hyperindex,% colorlinks={true},% pagebackref,% linktocpage,% plainpages={false},% linkcolor={Blue},% citecolor={Blue},% urlcolor={Red},% pdfstartview={Fit},% pdfview={XYZ null null null}% } \usepackage{natbib} \usepackage[noae]{Sweave} %---------------------------------------------------- \DeclarePairedDelimiter{\ceil}{\lceil}{\rceil} \DeclarePairedDelimiter{\floor}{\lfloor}{\rfloor} \DeclareMathOperator{\sign}{sign} \newcommand{\abs}[1]{\left| #1 \right|} \newtheorem{definition}{Definition} \newcommand{\byDef}{\mathrm{by\ default}} \newcommand{\R}{{\normalfont\textsf{R}}{}} \newcommand{\code}[1]{\texttt{#1}} \newcommand*{\pkg}[1]{\texttt{#1}} \newcommand*{\CRANpkg}[1]{\href{https://CRAN.R-project.org/package=#1}{\pkg{#1}}} %---------------------------------------------------- \begin{document} \setkeys{Gin}{width=0.9\textwidth} \setlength{\abovecaptionskip}{-5pt} \title{covMcd() -- Considerations about Generalizing the FastMCD} \author{Martin M\"achler} \maketitle %\tableofcontents %% %% Pison, G., Van Aelst, S., and Willems, G. (2002) %% Small Sample Corrections for LTS and MCD. %% Metrika % ~/save/papers/robust-diverse/Pison_VanAelst_Willems.pdf %% <>= # set margins for plots options(SweaveHooks=list(fig=function() par(mar=c(3,3,1.4,0.7), mgp=c(1.5, 0.5, 0))), width = 75) @ \section{Introduction} The context is robust multivariate ``location and scatter'' estimation, which corresponds to estimating the first two moments in cases they exist. We assume data and a model \begin{align} \label{eq:data-model} x_i & \in \mathbb{R}^p, \ \ i=1,2,\dots,n \\ x_i & \sim \mathcal{F}(\mu, \Sigma), \ \ \mathrm{i.i.d.};\ \ \mu \in \mathbb{R}^p, \ \ \Sigma \in \mathbb{R}^{p \times p}, \ \textrm{positive definite}, \end{align} where a conceptual null model is the $p$-dimensional normal distribution. One typical assumption is that $\mathcal{F}$ is a mixture with the majority component (``good data'') being $\mathcal{N}_p(\mu, \Sigma)$ and other components modeling ``the outliers''. In other words, we want estimates $\bigl(\hat{\mu}, \hat{\Sigma}\bigr)$ which should be close to the true ``good data'' $(\mu, \Sigma)$ --- and do not say more here. \section{MCD and ``the Fast'' MCD (= \textsc{fastmcd}) Algorithm} The \CRANpkg{robustbase} \R{} package has featured a function \code{covMcd()} since early on (Feb.~2006) and that has been an interface to the Fortran routine provided by the original authors and (partly) described in \citet{RouPvD99}. %% Rousseeuw, P. J. and van Driessen, K. (1999) %% A fast algorithm for the minimum covariance determinant estimator. %% Technometrics {41}, 212--223. 
%% >> ~/save/papers/robust-diverse/Rousseeuw_VanD-FastMCD_1999.pdf
% ------------------------------------------------------------
We briefly describe how the algorithm works, partly building on the documentation provided in the source (R, S, and Fortran) codes:
%% R CMD Rdconv --type=latex ../../man/covMcd.Rd > covMcd.tex
The minimum covariance determinant estimator of location and scatter (MCD) implemented in \code{covMcd()} is similar to \R{} function \code{cov.mcd()} in \CRANpkg{MASS}. The (``theoretical'') MCD looks for the $h = h_\alpha (> 1/2)$ out of $n$ observations whose classical covariance matrix has the lowest possible determinant. In more detail, we will use $h = h_\alpha = h(\alpha,n,p) \approx \alpha \cdot (n+p+1)$, whereas \citet{RouPvD99} mainly use (the default) $\alpha = \frac 1 2$, for which $h = h(1/2, n, p) = \floor[\Big]{\frac{n+p+1}{2}}$. For general $\alpha \ge \frac 1 2$, the \R{} implementation (derived from their original S code) uses $h = h(\alpha,n,p) =$ \code{h.alpha.n(alpha,n,p)} (function in \pkg{robustbase}), which is
\begin{eqnarray} \label{eq:def-h}
  h = h_\alpha = h(\alpha,n,p) := \floor{2 n_2 - n + 2 \alpha (n - n_2)}, \ \mathrm{where\ } n_2 := \floor[\Big]{\frac{n+p+1}{2}}%
  %=  (n+p+1)/2 \ \ (\mathrm{\ where ``/'' denotes \emph{integer} division})
  .
\end{eqnarray}
The fraction $\alpha \ge \frac 1 2$ can be chosen by the user, where $\alpha = \frac 1 2$ is the most robust, and indeed, $h_{1/2} = n_2 = \floor[\Big]{\frac{n+p+1}{2}}$. Even in general, as long as $n \gg p$, $\alpha$ is approximately the \emph{proportion} of the subsample size $h$ in the full sample (size $n$):
\begin{equation} \label{eq:h.approx}
  h \approx \alpha \cdot n \iff \alpha \approx \frac{h}{n},
\end{equation}
<>=
require(robustbase)
n <- c(5, 10, 20, 30, 50, 100, 200, 500)
hmat <- function(alpha, p) cbind(n, h.alpha = h.alpha.n (alpha, n,p),
                                 h. = floor(alpha * (n + p + 1)),
                                 alpha.n = round(alpha * n))
hmat(alpha = 1/2, p = 3)
hmat(alpha = 3/4, p = 4)
@
The breakdown point (for $h > \frac{n}{2}$) then is
\begin{eqnarray} \label{eq:breakdown}
  \epsilon_{*} = \frac{n-h+1}{n},
\end{eqnarray}
which is less than but close to $\frac 1 2$ for $\alpha = \frac 1 2$, and in general, since $h/n \approx \alpha$, the breakdown point is approximately
\begin{eqnarray} \label{eq:eps-approx}
  \epsilon_{*} = \frac{n-h+1}{n} \approx \frac{n-h}{n} = 1 - \frac{h}{n} \approx 1 - \alpha.
\end{eqnarray}
The raw MCD estimate of location, say $\hat{\mu}_0$, is then the average of these $h$ points, whereas the raw MCD estimate of scatter, $\hat{\Sigma}_0$, is their covariance matrix, multiplied by a consistency factor \code{.MCDcons(p, h/n)} and (by default) a finite sample correction factor \code{.MCDcnp2(p, n, alpha)}, to make it consistent at the normal model and unbiased at small samples.
%% Both rescaling factors (consistency and finite sample) are returned in the length-2 vector
%% \code{raw.cnp2}.
In practice, for reasonably sized $n$, $p$ and hence $h$, it is not feasible to search the full space of all $n \choose h$ $h$-subsets of $n$ observations. Rather, the implementation of \code{covMcd} uses the Fast MCD algorithm of \citet{RouPvD99} to approximate the minimum covariance determinant estimator, see Section~\ref{sec:fastMCD}.
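To get a feeling for the size of that search space, the following small illustration (added here; it only uses \code{choose()} and \code{h.alpha.n()} as above) counts the $h$-subsets for a moderate sample size:
<>=
n <- 100 ; p <- 3
(h <- h.alpha.n(1/2, n, p)) # subset size for the default alpha = 1/2
choose(n, h)                # number of h-subsets an exhaustive search would visit
@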
Based on these raw MCD estimates, $\bigl(\hat{\mu}_0, \hat{\Sigma}_0\bigr)$, % (unless argument \code{raw.only} is true), a reweighting step is performed, i.e., \code{V <- cov.wt(x,w)}, where \code{w} are weights determined by ``outlyingness'' with respect to the scaled raw MCD, using the ``Mahalanobis''-like, robust distances $d_i\bigl(\hat{\mu}_0, \hat{\Sigma}_0\bigr)$, see (\ref{eq:Maha}). Again, a consistency factor and %(if \code{use.correction} is true) a finite sample correction factor %(\code{.MCDcnp2.rew(p, n, alpha)}) are applied. The reweighted covariance is typically considerably more efficient than the raw one, see \citet{PisGvAW02}. The two rescaling factors for the reweighted estimates are returned in \code{cnp2}. Details for the computation of the finite sample correction factors can be found in \citet{PisGvAW02}. \section{Fast MCD Algorithm -- General notation}\label{sec:fastMCD} \paragraph{Note:} In the following, apart from the mathematical notation, we also use variable names, e.g., \code{kmini}, used in the Fortran and sometimes \R{} function code, in \R{} package \CRANpkg{robustbase}. Instead of directly searching for $h$-subsets (among ${n \choose h} \approx {n \choose n/2}$) the basic idea is to start with small subsets of size $p+1$, their center $\mu$ and covariance matrix $\Sigma$, and a corresponding $h$-subset of the $h$ observations with smallest (squared) (``Mahalanobis''-like) distances \begin{align} \label{eq:Maha} d_i = d_i(\mu,\Sigma) := (x_i - \mu)' \Sigma^{-1} (x_i - \mu), \ \ i=1,2,\dots,n, \end{align} and then use concentration steps (``C~steps'') to (locally) improve the chosen set by iteratively computing $\mu$, $\Sigma$, new distances $d_i$ and a new set of size $h$ with smallest distances $d_i(\mu,\Sigma)$. Each C~step is proven to decrease the determinant $\det(\Sigma)$ if $\mu$ and $\Sigma$ did change at all. Consequently, convergence to a local minimum is sure, as the number of $h$-subsets is finite. To make the algorithm \emph{fast} for non small sample size $n$ the data set is split into ``groups'' or ``sub-datasets'' as soon as \begin{eqnarray} \label{eq:nmini} n \ge 2 n_0, \ \mathrm{ where}\ \ n_0 := \mathtt{nmini} \ ( = 300, \byDef). \end{eqnarray} i.e., the default cutoff for ``non small'' is at $n = 600$. %% The \emph{number} of such subsets in the original algorithm is maximally 5, and we now use \begin{eqnarray} \label{eq:kmini} k_M = \code{kmini} \ (= 5, \byDef), \end{eqnarray} as upper limit. As above, we assume from now on that $n \ge 2 n_0$, and let \begin{eqnarray} \label{eq:k-def} k := \floor[\Big]{\frac{n}{n_0}} \ge 2 \end{eqnarray} and now distinguish the two cases, \begin{eqnarray} \label{eq:cases} \begin{cases} A. & k < k_M \iff n < k_M \cdot n_0 \\ B. & k \ge k_M \iff n \ge k_M \cdot n_0 \end{cases} \end{eqnarray} \begin{description} \item[In case A] $k$ (\code{= ngroup}) subsets aka ``groups'' or ``sub datasets'' are used, $k \in\{2,3,\dots,k_M-1\}$, of group sizes $n_j$, $j=1,\dots,k$ (see below). Note that case~A may be empty because of $2 \le k < k_M$, namely if $k_M=2$. Hence, in case~A, we have $k_M \ge 3$. \item[in case B] $k_M$ (\code{= ngroup}) groups each of size $n_0$ are built and in the first stage, only a \emph{subset} of $k_M \cdot n_0 \le n$ observations is used. \end{description} In both cases, the disjoint groups (``sub datasets'') are chosen at random from the $n$ observations. 
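As a small numerical illustration (added here; it merely evaluates the definitions (\ref{eq:k-def}) and (\ref{eq:cases}) with the defaults $n_0 = 300$ and $k_M = 5$), the following shows which case applies and how many groups are built for a few sample sizes $n \ge 2 n_0$:
<>=
n0 <- 300 ; kM <- 5                  # defaults:  nmini  and  kmini
n  <- c(600, 1200, 1499, 1500, 5000)
k  <- n %/% n0                       # k = floor(n / n0), see (eq:k-def)
data.frame(n, k, case = ifelse(k < kM, "A", "B"), ngroup = pmin(k, kM))
@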
%% For the group sizes for case~A, $n_j$, $j=1,\dots,k$, we have \begin{align} n_1 = \; & \floor[\Big]{\frac n k} = \floor[\bigg]{\frac{n}{\floor[\big]{\frac{n}{n_0}}}} \ \ (\; \ge n_0 \label{eq:n1})\\ n_j = \; & n_1,\hspace*{2.8em} j = 2,\dots,j_* \\ n_j = \; & n_1 + 1, \ \ \ j = j_* +1,\dots,k, \label{n1-plus-1}\\ & \qquad \mathrm{where}\ \ j_* := k - r \ \in \{1,\dots,k\}, \label{jstar}\\ & \qquad \mathrm{and}\ \ r := n - k n_1 = \label{r-rest} n - k\floor[\big]{\frac n k} \in \{0,1,\dots,k-1\}, \end{align} where the range of $j_*$, $1,\dots,k$ in (\ref{jstar}) is a consequence of the range of the integer division remainder $r \in \{0,1,\dots,k-1\}$ in (\ref{r-rest}). Consequently, (\ref{n1-plus-1}) maybe empty, namely iff $r=0$ ($\iff n = k \cdot n_1$ is a multiple of $k$): $j_* = k$, and all $n_j \equiv n_1$. Considering the range of $n_j$ in case~A, the minimum $n_1 \ge n_0$ in (\ref{eq:n1}) is easy to verify. What is the maximal value of $n_j$ , i.e., an upper bound for $n_{\max} := n_1+1 \ge \max_j n_j$? \ %% %% This is all correct but not useful: %% From (\ref{eq:n1}), $ n/k - 1 < n_1 \le n/k $, and %% from (\ref{eq:k-def}), $n/n_0 - 1 < k \le n/n_0$. %% Putting these two together, we get %% \begin{eqnarray} %% \label{eq:n1-ineq} %% \frac{n^2}{n_0} - 1 \le n/k - 1 < n_1 \le n/k < \frac{n n_0}{n - n_0}, %% \end{eqnarray} %% (the first $\le$ from $\frac{1}{k} \ge \frac{n_0}{n}$; the last $<$ from %% $\frac{1}{k} < \frac 1{n/n_0 -1} = \frac{n_0}{n-n_0}$.) Also, %% from (\ref{eq:k-def}), $n \ge k n_0$ and $n-n_0 \ge (k-1)n_0$ and since we %% are in case~A, $n < n_0 k_M$, which combines to %% \begin{eqnarray} %% \label{eq:nn0} %% \frac{n n_0}{n - n_0} < \frac{(n_0 k_M) n_0}{(k-1)n_0} = \frac{n_0 k_M}{k-1}. %% \end{eqnarray} Consider $n_{1,\max}(k) = \max_{n, \mathrm{given\ } k} n_1 = \max_{n, \mathrm{given\ } k} \floor{\frac n k}$. Given $k$, the maximal $n$ still fulfilling $\floor[\big]{\frac{n}{n_0}} = k$ is $n = (k+1)n_0 - 1$ where $\floor[\big]{\frac{n}{n_0}} = k + \floor[\big]{1 - \frac{1}{n_0}} = k$. Hence, $n_{1,\max}(k) =\floor[\big]{\frac{(k+1)n_0 - 1}{k}} = n_0 + \floor[\big]{\frac{n_0 - 1}{k}}$, and as $k \ge 2$, the maximum is at $k=2$, $\max n_1 = \max_k n_{1,\max}(k) = n_0 + \floor[\big]{\frac{n_0 - 1}{2}} = \floor[\big]{\frac{3 n_0 - 1}{2}}$. Taken together, as $n_j = n_1+1$ is possible, we have \begin{align} \label{eq:nj-range} n_0 \le & n_1 \le \floor[\Big]{\frac{3 n_0 - 1}{2}} \nonumber\\ n_0 \le & n_j \le \floor[\Big]{\frac{3 n_0 + 1}{2}}, \ \ j \ge 2. \end{align} Note that indeed, $\floor[\big]{\frac{3 n_0 + 1}{2}}$ is the length of the auxiliary vector \code{subndex} in the Fortran code. \bibliographystyle{chicago} \bibliography{robustbase} \end{document} robustbase/vignettes/psi_functions.Rnw0000644000176200001440000004152413012615634020015 0ustar liggesusers\documentclass[11pt, a4paper]{article} \usepackage[a4paper, text={16cm,25cm}]{geometry} %\VignetteIndexEntry{Definitions of Psi-Functions Available in Robustbase} %\VignetteDepends{robustbase} \SweaveOpts{prefix.string=psi, eps=FALSE, pdf=TRUE, strip.white=true} \SweaveOpts{width=6, height=4.1, echo=FALSE, fig=TRUE} %% --------------------- !! 
\usepackage{amsmath} \usepackage{amsfonts}% \mathbb \usepackage{natbib} \usepackage[utf8]{inputenc} \newcommand{\abs}[1]{\left| #1 \right|} \DeclareMathOperator{\sign}{sign} \newcommand{\R}{\mathbb{R}} \newcommand{\code}[1]{\texttt{#1}} \newcommand*{\pkg}[1]{\texttt{#1}} \newtheorem{definition}{Definition} %% The following is R's share/texmf/Rd.sty \usepackage{color} \usepackage{hyperref} \definecolor{Blue}{rgb}{0,0,0.8} \definecolor{Red}{rgb}{0.7,0,0} \hypersetup{% hyperindex,% colorlinks={true},% pagebackref,% linktocpage,% plainpages={false},% linkcolor={Blue},% citecolor={Blue},% urlcolor={Red},% pdfstartview={Fit},% pdfview={XYZ null null null}% } <>= # set margins for plots options(SweaveHooks=list(fig=function() par(mar=c(3,3,1.4,0.7), mgp=c(1.5, 0.5, 0)))) ## x axis for plots: x. <- seq(-5, 10, length=1501) require(robustbase) <>= source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) @% = ../inst/xtraR/plot-psiFun.R --> p.psiFun() --> robustbase:::matPlotPsi() {for nice legends; lines ..} \begin{document} \setkeys{Gin}{width=0.9\textwidth} \setlength{\abovecaptionskip}{-5pt} \title{Definitions of $\psi$-Functions Available in Robustbase} \author{Manuel Koller and Martin M\"achler} \maketitle \tableofcontents \section*{Preamble} Unless otherwise stated, the following definitions of functions are given by \citet[p. 31]{MarRMY06}, however our definitions differ sometimes slightly from theirs, as we prefer a different way of \emph{standardizing} the functions. To avoid confusion, we first define $\psi$- and $\rho$-functions. \begin{definition}\label{def.psi} A \emph{$\psi$-function} is a piecewise continuous function $\psi: \R \to \R$ such that \begin{enumerate} \item $\psi$ is odd, i.e., \ $\psi(-x) = -\psi(x) \: \forall x$, \item $\psi(x) \ge 0$ for $x \ge 0$, and $\psi(x) > 0$ for $0 < x < x_r := \sup\{\tilde x : \psi(\tilde x) > 0\}$ \ \ ($x_r > 0$, possibly $x_r = \infty$). \item[3*] Its slope is $1$ at $0$, i.e., $\displaystyle \psi'(0) = 1$. \end{enumerate} Note that `3*' is not strictly required mathematically, but we use it for standardization in those cases where $\psi$ is continuous at 0. Then, it also follows (from 1.) that $\psi(0) = 0$, and we require $\psi(0)=0$ also for the case where $\psi$ is discontinuous in 0, as it is, e.g., for the M-estimator defining the median. \end{definition} \begin{definition} A \emph{$\rho$-function} can be represented by the following % definite integral of a $\psi$-function, \begin{equation}\label{def.rho} \rho(x) = \int_0^x \psi(u) du\;, \end{equation} which entails that $\rho(0) = 0$ and $\rho$ is an even function. \end{definition} A $\psi$-function is called \emph{redescending} if $\psi(x) = 0$ for all $x \ge x_r$ for $x_r < \infty$, and $x_r$ is often called \emph{rejection point}. Corresponding to a redescending $\psi$-function, we define the function $\tilde\rho$, a version of $\rho$ standardized such as to attain maximum value one. Formally, \begin{equation} \label{eq:tilde-rho} \tilde\rho(x) = \rho(x)/\rho(\infty). \end{equation} Note that $\rho(\infty) = \rho(x_r) \equiv \rho(x) \ \forall \abs{x} >= x_r$. $\tilde\rho$ is a $\rho$-function as defined in \citet{MarRMY06} and has been called $\chi$ function in other contexts. For example, in package \pkg{robustbase}, \code{Mchi(x, *)} computes $\tilde\rho(x)$, whereas \code{Mpsi(x, *, deriv=-1)} (``(-1)-st derivative'' is the primitive or antiderivative)) computes $\rho(x)$, both according to the above definitions. 
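For instance, relation (\ref{eq:tilde-rho}) can be checked numerically; the following small sketch (added here) assumes the usual argument order \code{(x, cc, psi)} of \code{Mpsi()}, \code{Mchi()} and \code{MrhoInf()}, and uses the bisquare $\psi$ with tuning constant $k = 1.548$:
<>=
x  <- seq(-4, 4, by = 0.5)
cc <- 1.548  # bisquare tuning constant of the S-estimator (see below)
stopifnot(all.equal(Mchi(x, cc, "bisquare"),                # tilde rho(x)
                    Mpsi(x, cc, "bisquare", deriv = -1) /   # rho(x)
                    MrhoInf(cc, "bisquare")))               # rho(Inf)
@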
\textbf{Note:} An alternative, slightly more general definition of \emph{redescending} would only require $\rho(\infty) := \lim_{x\to\infty}\rho(x)$ to be finite. E.g., \texttt{"Welsh"} does \emph{not} have a finite rejection point, but \emph{does} have bounded $\rho$, and hence a well defined $\rho(\infty)$, and we \emph{can} use it in \texttt{lmrob()}.\footnote{E-mail Oct.~18, 2014 to Manuel and Werner, proposing to change the definition of ``redescending''.}

%% \section{Weak Redescenders}
%% \subsection{t_nu score functions}
%% t_1 (=Cauchy) has been propagated as "Lorentzian merit function"
%%   regression for outlier detection

\paragraph{Weakly redescending $\psi$ functions.}\ Note that the above definition does require a finite rejection point $x_r$. Consider, e.g., the score function $s(x) = -f'(x)/f(x)$ of the Cauchy ($= t_1$) distribution, $s(x) = 2x/(1+x^2)$: it is non-monotone and ``redescends'' to 0 for $x\to \pm\infty$, and $\psi_C(x) := s(x)/2$ also fulfills ${\psi_C}'(0) = 1$, but it has $x_r=\infty$ and hence $\psi_C()$ is \emph{not} a redescending $\psi$-function in our sense. As such functions appear, e.g., in the MLE for $t_\nu$, we call $\psi$-functions fulfilling $\lim_{x\to\infty}\psi(x) = 0$ \emph{weakly redescending}. Note that they naturally fall into two subcategories, namely those with a \emph{finite} $\rho$-limit, i.e., $\rho(\infty) := \lim_{x\to\infty}\rho(x)$, and those, such as the $t_\nu$ score functions above, for which $\rho(x)$ is unbounded even though $\rho' = \psi$ tends to zero.

%% --> ../../TODO section 'Psi/Rho/Chi/Wgt Functions'
%%               ~~~~~~~~~~
%%
%% FIXME: where?? MM: can no longer find it in Hampel et al(1986) \citet{hamfrrs86}.
%% FIXME: 0) Mention our psi_func class // and the C interface for "the other" functions
%%  -----    i.e., we currently have *both* and in addition there is all
%%    the (to be *deprecated* !) ../R/biweight-funs.R  (& ../man/tukeyChi.Rd & ../man/tukeyPsi1.Rd)
%%
%% FIXME: 1) explain plot() {the plot method of psi_func}
%% FIXME: 2) Show how to compute asymptotic efficiency and breakdown point:
%%    -------
%%   a) end of ../../tests/psi-rho-etc.R  has  aeff.P() and bp.P()  and chkP()
%%      which now uses the psi_func class to compute these *analytically*
%%   b) Of course, Manuel had used the numeric integration only,
%%      in ../../R/lmrob.MM.R, lmrob.efficiency(psi, cc, ...) and lmrob.bp(psi, cc, ...)
%%                             ~~~~~~~~~~~~~~~~~~
%%   c) *REALLY* nice general solution is via PhiI() in ../../R/psi-rho-funs.R
%%      for all piecewise polynomial psi()/rho()       ~~~~~~~~~~~~~~~~~~~~~~
%%\clearpage

\section{Monotone $\psi$-Functions}
Monotone $\psi$-functions lead to convex $\rho$-functions, such that the corresponding M-estimators are defined uniquely. Historically, the ``Huber function'' was the first $\psi$-function, proposed by Peter Huber in \citet{HubP64}.

\clearpage
\subsection{Huber}
The family of Huber functions is defined as,
\begin{align*}
  \rho_k(x) = {}& \left\{ \begin{array}{ll}
    \frac{1}{2} x^2 & \mbox{ if } \abs{x} \leq k \\
    k(\abs{x} - \frac{k}{2})& \mbox{ if } \abs{x} > k
  \end{array} \right. \;,\\
  \psi_k(x) = {} & \left\{ \begin{array}{ll}
    x & \mbox{ if } \abs{x} \leq k \\
    k \ \sign(x)& \mbox{ if } \abs{x} > k
    %% -k & \mbox{ if } x < -k \\
    %%  k & \mbox{ if } x > k
  \end{array} \right. \;.
\end{align*}
The constant $k$ for $95\%$ efficiency of the regression estimator is $1.345$.
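As a quick numerical check (an added sketch; it assumes that \code{Mpsi()} and \code{Mwgt()} take the tuning constant $k$ as their \code{cc} argument), the Huber $\psi$ is the identity inside $[-k, k]$ and equal to $\pm k$ outside, with weights $w(x) = \psi(x)/x$:
<>=
k <- 1.345
x <- c(-3, -1.345, -0.5, 0.5, 1.345, 3)
cbind(x, psi = Mpsi(x, cc = k, psi = "huber"),
         wgt = Mwgt(x, cc = k, psi = "huber"))
@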
\begin{figure}[h] \centering <>= plot(huberPsi, x., ylim=c(-1.4, 5), leg.loc="topright", main=FALSE) @ \caption{Huber family of functions using tuning parameter $k = 1.345$.} \end{figure} \bigskip \section{Redescenders} For the MM-estimators and their generalizations available via \texttt{lmrob()} (and for some methods of \texttt{nlrob()}), the $\psi$-functions are all redescending, i.e., with finite ``rejection point'' $x_r = \sup\{t; \psi(t) > 0\} < \infty$. From \texttt{lmrob}, the psi functions are available via \texttt{lmrob.control}, or more directly, \texttt{.Mpsi.tuning.defaults}, <>= names(.Mpsi.tuning.defaults) @ %$ and their $\psi$, $\rho$, $\psi'$, and weight function $w(x) := \psi(x)/x$, are all computed efficiently via C code, and are defined and visualized in the following subsections. \clearpage \subsection{Bisquare} Tukey's bisquare (aka ``biweight'') family of functions is defined as, \begin{equation*} \tilde\rho_k(x) = \left\{ \begin{array}{cl} 1 - \bigl(1 - (x/k)^2 \bigr)^3 & \mbox{ if } \abs{x} \leq k \\ 1 & \mbox{ if } \abs{x} > k \end{array} \right.\;, \end{equation*} with derivative ${\tilde\rho_k}'(x) = 6\psi_k(x) / k^2$ where, \begin{equation*} \psi_k(x) = x \left( 1 - \left(\frac{x}{k}\right)^2\right)^2 \cdot I_{\{\abs{x} \leq k\}}\;. \end{equation*} The constant $k$ for $95\%$ efficiency of the regression estimator is $4.685$ and the constant for a breakdown point of $0.5$ of the S-estimator is $1.548$. Note that the \emph{exact} default tuning constants for M- and MM- estimation in \pkg{robustbase} are available via \code{.Mpsi.tuning.default()} and \code{.Mchi.tuning.default()}, respectively, e.g., here, % \begin{small} <>= print(c(k.M = .Mpsi.tuning.default("bisquare"), k.S = .Mchi.tuning.default("bisquare")), digits = 10) @ % \end{small} and that the \code{p.psiFun(.)} utility is available via %\begin{small} <>= <> @ %\end{small} %\enlargethispage{3ex} \begin{figure}[h] \centering <>= p.psiFun(x., "biweight", par = 4.685) @ \caption{Bisquare family functions using tuning parameter $k = 4.685$.} \end{figure} \clearpage \subsection{Hampel} The Hampel family of functions \citep{hamfrrs86} is defined as, \begin{align*} \tilde\rho_{a, b, r}(x) ={}& \left\{ \begin{array}{ll} \frac{1}{2} x^2 / C & \abs{x} \leq a \\ \left( \frac{1}{2}a^2 + a(\abs{x}-a)\right) / C & a < \abs{x} \leq b \\ \frac{a}{2}\left( 2b - a + (\abs{x} - b) \left(1 + \frac{r - \abs{x}}{r-b}\right) \right) / C & b < \abs{x} \leq r \\ 1 & r < \abs{x} \end{array} \right. \;, \\ \psi_{a, b, r}(x) ={}& \left\{ \begin{array}{ll} x & \abs{x} \leq a \\ a \ \sign(x) & a < \abs{x} \leq b \\ a \ \sign(x) \frac{r - \abs{x}}{r - b}& b < \abs{x} \leq r \\ 0 & r < \abs{x} \end{array} \right.\;, \end{align*} where $ C := \rho(\infty) = \rho(r) = \frac{a}{2}\left( 2b - a + (r - b) \right) = \frac{a}{2}(b-a + r)$. As per our standardization, $\psi$ has slope $1$ in the center. The slope of the redescending part ($x\in[b,r]$) is $-a/(r-b)$. If it is set to $-\frac 1 2$, as recommended sometimes, one has \begin{equation*} r = 2a + b\;. \end{equation*} Here however, we restrict ourselves to $a = 1.5 k$, $b = 3.5 k$, and $r = 8k$, hence a redescending slope of $-\frac 1 3$, and vary $k$ to get the desired efficiency or breakdown point. The constant $k$ for $95\%$ efficiency of the regression estimator is $0.902$ (0.9016085, to be exact) and the one for a breakdown point of $0.5$ of the S-estimator is $0.212$ (i.e., 0.2119163). 
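The corresponding exact defaults can be retrieved analogously to the bisquare case above (a small added check; it relies on the convention, stated above, that the default constants are $k \cdot (1.5, 3.5, 8)$):
<>=
cM <- .Mpsi.tuning.default("hampel")  # M-/MM-step constants (95% efficiency)
cS <- .Mchi.tuning.default("hampel")  # S-step constants (50% breakdown point)
rbind(cM, cS)
c(k.M = cM[1] / 1.5, k.S = cS[1] / 1.5)  # recover the factors k
@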
%% --> ../R/lmrob.MM.R, .Mpsi.tuning.defaults .Mchi.tuning.defaults \begin{figure}[h] \centering <>= ## see also hampelPsi p.psiFun(x., "Hampel", par = ## Default, but rounded: round(c(1.5, 3.5, 8) * 0.9016085, 1)) @ \caption{Hampel family of functions using tuning parameters $0.902 \cdot (1.5, 3.5, 8)$.} \end{figure} \clearpage \subsection{GGW}\label{ssec:ggw} The Generalized Gauss-Weight function, or \emph{ggw} for short, is a generalization of the Welsh $\psi$-function (subsection \ref{ssec:Welsh}). In \citet{ks2011} it is defined as, \begin{equation*} %% \label{eq:ggw} \psi_{a, b, c}(x) = \left\{ \begin{array}{ll} x & \abs{x} \leq c \\ \exp\left(-\frac{1}{2}\frac{(\abs{x} - c)^b}{a}\right)x & \abs{x} > c \end{array} \right. \;. \end{equation*} Our constants, fixing $b=1.5$, and minimial slope at $- \frac 1 2$, for $95\%$ efficiency of the regression estimator are $a = 1.387$, $b = 1.5$ and $c = 1.063$, and those for a breakdown point of $0.5$ of the S-estimator are $a = 0.204$, $b = 1.5$ and $c = 0.296$: <>= cT <- rbind(cc1 = .psi.ggw.findc(ms = -0.5, b = 1.5, eff = 0.95 ), cc2 = .psi.ggw.findc(ms = -0.5, b = 1.5, bp = 0.50)); cT @ Note that above, \code{cc*[1]}$= 0$, \code{cc*[5]}$ = \rho(\infty)$, and \code{cc*[2:4]}$ = (a, b, c)$. To get this from $(a,b,c)$, you could use <>= ipsi.ggw <- .psi2ipsi("GGW") # = 5 ccc <- c(0, cT[1, 2:4], 1) integrate(.Mpsi, 0, Inf, ccc=ccc, ipsi=ipsi.ggw)$value # = rho(Inf) @ \begin{figure}[h] \centering <>= p.psiFun(x., "GGW", par = c(-.5, 1, .95, NA)) @ \caption{GGW family of functions using tuning parameters $a=1.387$, $b=1.5$ and $c=1.063$.} \end{figure} \clearpage \subsection{LQQ} The ``linear quadratic quadratic'' $\psi$-function, or \emph{lqq} for short, was proposed by \citet{ks2011}. It is defined as, \begin{equation*} \psi_{b,c,s}(x) = \left\{ \begin{array}{ll} x & \abs{x} \leq c \\ \sign(x)\left(\abs{x} - \frac{s}{2b}\left(\abs{x} - c\right)^2 \right) & c < \abs{x} \leq b + c \\ \sign(x)\left(c+b-\frac{bs}{2} + \frac{s-1}{a} \left(\frac{1}{2}\tilde x^2 - a\tilde x\right) \right) & b + c < \abs{x} \leq a + b + c \\ 0 & \mbox{otherwise,} \end{array} \right. \end{equation*} where \begin{equation} \tilde x := \abs{x} - b - c \ \ \mathrm{and}\ \ a := (2c + 2b - bs)/(s-1).\label{lqq.a} \end{equation} The parameter $c$ determines the width of the central identity part. The sharpness of the bend is adjusted by $b$ while the maximal rate of descent is controlled by $s$ ($s = 1 - \min_x\psi'(x) > 1$). From (\ref{lqq.a}), the length $a$ of the final descent to $0$ is a function of $b$, $c$ and $s$. <>= cT <- rbind(cc1 = .psi.lqq.findc(ms= -0.5, b.c = 1.5, eff=0.95, bp=NA ), cc2 = .psi.lqq.findc(ms= -0.5, b.c = 1.5, eff=NA , bp=0.50)) colnames(cT) <- c("b", "c", "s"); cT @ If the minimal slope is set to $-\frac 1 2$, i.e., $s = 1.5$, and $b/c = 3/2 = 1.5$, the constants for $95\%$ efficiency of the regression estimator are $b=1.473$, $c=0.982$ and $s=1.5$, and those for a breakdown point of $0.5$ of the S-estimator are $b=0.402$, $c=0.268$ and $s=1.5$. 
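From (\ref{lqq.a}), these constants also determine the length $a$ of the final descent and hence the rejection point $x_r = a + b + c$; the following short added computation simply evaluates that formula for both sets of constants:
<>=
a.lqq <- function(b, c, s) (2*c + 2*b - b*s) / (s - 1)   # eq. (lqq.a)
rbind(eff95 = c(b = 1.473, c = 0.982, s = 1.5, a = a.lqq(1.473, 0.982, 1.5)),
      bp0.5 = c(b = 0.402, c = 0.268, s = 1.5, a = a.lqq(0.402, 0.268, 1.5)))
@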
\begin{figure}[h] \centering <>= p.psiFun(x., "LQQ", par = c(-.5,1.5,.95,NA)) @ \caption{LQQ family of functions using tuning parameters $b=1.473$, $c=0.982$ and $s=1.5$.} \end{figure} \clearpage \subsection{Optimal} The optimal $\psi$ function as given by \citet[Section~5.9.1]{MarRMY06}, \begin{equation*} \psi_c(x) = \sign(x)\left(-\frac{\varphi'(\abs{x}) + c} {\varphi(\abs{x})}\right)_+\;, \end{equation*} where $\varphi$ is the standard normal density, $c$ is a constant and $t_+ := \max(t, 0)$ denotes the positive part of $t$. Note that the \pkg{robustbase} implementation uses rational approximations originating from the \pkg{robust} package's implementation. That approximation also avoids an anomaly for small $x$ and has a very different meaning of $c$. The constant for $95\%$ efficiency of the regression estimator is $1.060$ and the constant for a breakdown point of $0.5$ of the S-estimator is $0.405$. \begin{figure}[h] \centering <>= p.psiFun(x., "optimal", par = 1.06, leg.loc="bottomright") @ \caption{`Optimal' family of functions using tuning parameter $c = 1.06$.} \end{figure} \clearpage \subsection{Welsh}\label{ssec:Welsh} The Welsh $\psi$ function is defined as, %% FIXME: REFERENCE MISSING %\def\xk{\frac{x}{k}} \def\xk{x/k} %\def\xkdt{-\frac{1}{2}\left(\xk\right)^2} \def\xkdt{- \left(\xk\right)^2 / 2} \begin{align*} \tilde\rho_k(x) ={}& 1 - \exp\bigl(\xkdt\bigr) \\ \psi_k(x) ={}& k^2\tilde\rho'_k(x) = x\exp\bigl(\xkdt\bigr) \\ \psi'_k(x) ={}& \bigl(1 - \bigl(\xk\bigr)^2\bigr) \exp\bigl(\xkdt\bigr) \end{align*} The constant $k$ for $95\%$ efficiency of the regression estimator is $2.11$ and the constant for a breakdown point of $0.5$ of the S-estimator is $0.577$. Note that GGW (subsection \ref{ssec:ggw}) is a 3-parameter generalization of Welsh, matching for $ b = 2 $, $ c = 0 $, and $ a = k^2$ (see R code there): <>= ccc <- c(0, a = 2.11^2, b = 2, c = 0, 1) (ccc[5] <- integrate(.Mpsi, 0, Inf, ccc=ccc, ipsi = 5)$value) # = rho(Inf) stopifnot(all.equal(Mpsi(x., ccc, "GGW"), ## psi[ GGW ](x; a=k^2, b=2, c=0) == Mpsi(x., 2.11, "Welsh")))## psi[Welsh](x; k) @ \begin{figure}[h] \centering <>= p.psiFun(x., "Welsh", par = 2.11) @ \caption{Welsh family of functions using tuning parameter $k = 2.11$.} \end{figure} \bibliographystyle{chicago} \bibliography{robustbase} \end{document} robustbase/vignettes/robustbase.bib0000644000176200001440000001361012737467727017311 0ustar liggesusers@PREAMBLE{ " " # "\providecommand{\noop}[1]{} " # " \ifx\undefined\SfSbibINI\else\SfSbibINI\fi" } @PREAMBLE{ " " # "\providecommand{\noop}[1]{} " # " \ifx\undefined\SfSbibINI\else\SfSbibINI\fi" } @Manual{robustbase-pkg, title = {robustbase: Basic Robust Statistics}, author = {Martin Maechler and Valentin Todorov and Andreas Ruckstuhl and Matias Salibian-Barrera and Manuel Koller and Eduardo L. T. Conceicao}, year = 2016, note = {R package version 0.92-6}, url = {http://CRAN.R-project.org/package=robustbase}, url = {http://robustbase.r-forge.r-project.org/}, } @article{RouPvD99, author = {Peter J. 
Rousseeuw and Katrien van~Driessen}, title = {A Fast Algorithm for the Minimum Covariance Determinant Estimator}, year = 1999, journal = {Technometrics}, volume = 41, pages = {212--223}, number = 3, month = aug, issn = {0040-1706}, numpages = 12, url = {http://dx.doi.org/10.2307/1270566}, doi = {10.2307/1270566}, publisher = {American Society for Quality Control and American Statistical Association}, address = {Alexandria, Va, USA}, keywords = {breakdown value, multivariate location and scatter, outlier detection, regression, robust estimation}, } @article{PisGvAW02, author = {Pison, G. and Van Aelst, S. and Willems, G.}, year = 2002, title = {Small sample corrections for LTS and MCD}, journal = {Metrika}, volume = 55, pages = {111--123}, number = {1-2}, doi = {10.1007/s001840200191}, issn = {0026-1335}, url = {http://dx.doi.org/10.1007/s001840200191}, publisher = {Springer-Verlag Berlin Heidelberg}, keywords = {Key words: Robustness; Least Trimmed Squares estimator; Minimum Covariance Determinant estimator; Bias}, language = {English} } %% ------ was ./lmrob_simulation.bib ------------------------ %% ~~~~~~~~~~~~~~~~~~~~ @Article{berrendero2007maximum, title = {{On the maximum bias functions of MM-estimates and constrained M-estimates of regression}}, author = {Berrendero, J.R. and Mendes, B.V.M. and Tyler, D.E.}, journal = {Annals of statistics}, volume = 35, number = 1, pages = 13, year = 2007, publisher = {IMS INSTITUTE OF MATHEMATICAL STATISTICS} } @TechReport{croux03, author = {Croux, C. and Dhaene, G. and Hoorelbeke, D.}, title = {Robust standard errors for robust estimators}, institution = {Dept. of Applied Economics, K.U. Leuven}, year = 2003 } @Article{fernandez1998bayesian, title = {On Bayesian Modeling of Fat Tails and Skewness}, author = {Fern{\'a}ndez, C. and Steel, M.F.J.}, journal = {Journal of the American Statistical Association}, volume = 93, number = 441, pages = {359--371}, year = 1998, publisher = {American Statistical Association} } @article{HubP64, author = {Peter J. Huber}, title = "Robust estimation of a location parameter", year = 1964, journal = {Ann. Math. Statist.}, volume = 35, pages = {73--101}, } @Book{hubpr09, author = {Peter J. Huber and Elvezio M. Ronchetti}, title = {Robust Statistics, Second Edition}, publisher = {Wiley and Sons Inc.}, address = {NY}, year = 2009 } @Article{ks2011, title = "Sharpening Wald-type inference in robust regression for small samples", journal = "Computational Statistics \& Data Analysis", volume = 55, number = 8, pages = "2504--2515", year = 2011, issn = "0167-9473", doi = "DOI: 10.1016/j.csda.2011.02.014", url = "http://www.sciencedirect.com/science/article/pii/S0167947311000739", author = "Manuel Koller and Werner A. Stahel", keywords = "MM-estimator", keywords1 = "Robust regression", keywords2 = "Robust inference" } @Misc{ks2014, author = "Manuel Koller and Werner A. 
Stahel", year = 2014, title = {Nonsingular subsampling for regression {S}~estimators with categorical predictors}, journal = {under review (2012 version from arXiv)}, keywords = "MM-estimator", keywords1 = "Robust regression", } @Comment url = {http://arxiv.org/abs/1208.5595}, @Misc{kolm2012, author = {Manuel Koller}, year = 2012, title = {Nonsingular subsampling for S-estimators with categorical predictors}, url = {http://arxiv.org/abs/1208.5595}, keywords = "MM-estimator", keywords1 = "Robust regression", } @Article{maronna2009correcting, title = {Correcting {MM} estimates for "fat" data sets}, volume = 54, number = 12, issn = "0167-9473", risfield_0_m3 = "doi: DOI: 10.1016/j.csda.2009.09.015", url = "http://www.sciencedirect.com/science/article/B6V8V-4X6VMB1-5/2/3a9a08575ea5e5e69ad06d720c627ec9" , author = "Ricardo A. Maronna and Victor J. Yohai", journal = "Computational Statistics \& Data Analysis", pages = "3168--3173", year = 2010 } @Book{MarRMY06, author = {Ricardo A. Maronna and R. Douglas Martin and Victor J. Yohai}, title = {Robust Statistics, Theory and Methods}, year = 2006, publisher = {John Wiley \& Sons, Ltd}, pages = 408, ISBN = {0-470-01092-4}, series = {Wiley Series in Probility and Statistics}, } @Book{hamfrrs86, author = {Frank Hampel and Elvezio Ronchetti and Peter Rousseeuw and Werner Stahel}, title = {Robust Statistics: The Approach Based on Influence Functions}, year = 1986, address = {N.Y.}, publisher = {Wiley} } @Manual{R-Lang, title = {R: A Language and Environment for Statistical Computing}, author = {{R Core Team}}, organization = {R Foundation for Statistical Computing}, address = {Vienna, Austria}, year = {2016}, url = {https://www.R-project.org/}, } @Article{RenOVicM2010, author = {Olivier Renaud and Maria-Pia Victoria-Feser}, title = {A robust coefficient of determination for regression}, journal = {Journal of Statistical Planning and Inference}, year = 2010, volume = 140, pages = {1852--1862}, annote = {robust $R^2$} } robustbase/R/0000755000176200001440000000000014124272432012625 5ustar liggesusersrobustbase/R/lmrob.M.S.R0000644000176200001440000001602013626040715014461 0ustar liggesuserslmrob.lar <- function(x, y, control = lmrob.control(), ...) { ## LAR : Least Absolute Residuals -- i.e. L_1 M-estimate ## this function is identical to lmRob.lar of the robust package ## '...': to be called as 'init(**, mf)' from lmrob() x <- as.matrix(x) p <- ncol(x) n <- nrow(x) stopifnot(p > 0, n >= p, length(y) == n, is.numeric(control$rel.tol)) storage.mode(x) <- "double" storage.mode(y) <- "double" bet0 <- 0.773372647623 ## bet0 = pnorm(0.75); only for normalizing scale=SIGMA tmpn <- double(n) tmpp <- double(p) z1 <- .Fortran(rllarsbi, ##-> ../src/rllarsbi.f x, y, as.integer(n), as.integer(p), as.integer(n), as.integer(n), as.double(control$rel.tol), NIT=integer(1), K=integer(1), KODE=integer(1), SIGMA=double(1), THETA=tmpn, RS=tmpn, SC1=tmpn, SC2=tmpp, SC3=tmpp, SC4=tmpp, BET0=as.double(bet0))[c("THETA","SIGMA","RS","NIT","KODE")] if (z1[5] > 1) stop("calculations stopped prematurely in rllarsbi\n", "(probably because of rounding errors).") names(z1) <- c("coefficients", "scale", "residuals", "iter", "status") ## c("THETA", "SIGMA", "RS", "NIT", "KODE") z1$converged <- TRUE length(z1$coefficients) <- p z1 } splitFrame <- function(mf, x = model.matrix(mt, mf), type = c("f","fi", "fii")) { mt <- attr(mf, "terms") type <- match.arg(type) x <- as.matrix(x) p <- ncol(x) ## --- split categorical and interactions of categorical vars. 
## from continuous variables factors <- attr(mt, "factors") factor.idx <- attr(mt, "dataClasses") %in% c("factor", "character") if (!any(factor.idx)) ## There are no factors return(list(x1.idx = rep.int(FALSE, p), x1=matrix(NA_real_,nrow(x),0L), x2=x)) switch(type, ## --- include interactions cat * cont in x1: fi = { factor.asgn <- which(factor.idx %*% factors > 0) }, ## --- include also continuous variables that interact with factors in x1: ## make sure to include interactions of continuous variables ## interacting with categorical variables, too fii = { factor.asgn <- numeric(0) factors.cat <- factors factors.cat[factors.cat > 0] <- 1L ## fix triple+ interactions factors.cat[, factor.idx %*% factors == 0] <- 0L for (i in 1:ncol(factors)) { comp <- factors[,i] > 0 ## if any of the components is a factor: include in x1 and continue if (any(factor.idx[comp])) { factor.asgn <- c(factor.asgn, i) } else { ## if there is an interaction of this term with a categorical var. tmp <- colSums(factors.cat[comp,,drop=FALSE]) >= sum(comp) if (any(tmp)) { ## if no other continuous variables are involved ## include in x1 and continue ## if (identical(factors[!comp, tmp], factors.cat[!comp, tmp])) if (!all(colSums(factors[!factor.idx & !comp, tmp, drop=FALSE]) > 0)) factor.asgn <- c(factor.asgn, i) } } } }, ## --- do not include interactions cat * cont in x1: f = { factor.asgn <- which(factor.idx %*% factors & !(!factor.idx) %*% factors) }, stop("unknown split type")) x1.idx <- attr(x, "assign") %in% c(0, factor.asgn) ## also include intercept names(x1.idx) <- colnames(x) ## x1: factors and (depending on type) interactions of / with factors ## x2: continuous variables list(x1 = x[, x1.idx, drop=FALSE], x1.idx = x1.idx, x2 = x[, !x1.idx, drop=FALSE]) } ##' Compute M-S-estimator for linear regression ---> ../man/lmrob.M.S.Rd lmrob.M.S <- function(x, y, control, mf, split = splitFrame(mf, x, control$split.type)) { if (ncol(split$x1) == 0) { warning("No categorical variables found in model. Reverting to S-estimator.") return(lmrob.S(x, y, control)) } if (ncol(split$x2) == 0) { warning("No continuous variables found in model. 
Reverting to L1-estimator.") return(lmrob.lar(x, y, control)) } ## this is the same as in lmrob.S(): if (length(seed <- control$seed) > 0) { if (exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) { seed.keep <- get(".Random.seed", envir = .GlobalEnv, inherits = FALSE) on.exit(assign(".Random.seed", seed.keep, envir = .GlobalEnv)) } assign(".Random.seed", seed, envir = .GlobalEnv) ## why not set.seed(seed) } x1 <- split$x1 x2 <- split$x2 storage.mode(x1) <- "double" storage.mode(x2) <- "double" storage.mode(y) <- "double" c.chi <- .psi.conv.cc(control$psi, control$tuning.chi) traceLev <- as.integer(control$trace.lev) z <- .C(R_lmrob_M_S, ## NB: If you change this, adapt ../inst/xtraR/m-s_fns.R x1, x2, y, res=double(length(y)), n=length(y), p1=ncol(x1), p2=ncol(x2), nResample = as.integer(control$nResample), max_it_scale=as.integer(control$maxit.scale), scale=double(1), b1=double(ncol(x1)), b2=double(ncol(x2)), tuning_chi=as.double(c.chi), ipsi = .psi2ipsi(control$psi), bb=as.double(control$bb), K_m_s=as.integer(control$k.m_s), max_k=as.integer(control$k.max), rel_tol=as.double(control$rel.tol), inv_tol=as.double(control$solve.tol), scale_tol=as.double(control$scale.tol), converged = logical(1), trace_lev = traceLev, orthogonalize=TRUE, subsample=TRUE, descent=TRUE, mts=as.integer(control$mts), ss=.convSs(control$subsampling) )[c("b1","b2", "res","scale", "converged")] conv <- z$converged ## FIXME? warning if 'conv' is not ok ?? ## coefficients : idx <- split$x1.idx cf <- numeric(length(idx)) cf[ idx] <- z$b1 cf[!idx] <- z$b2 ## set method argument in control control$method <- 'M-S' obj <- list(coefficients = cf, scale = z$scale, residuals = z$res, rweights = lmrob.rweights(z$res, z$scale, control$tuning.chi, control$psi), ## ../src/lmrob.c : m_s_descent() notes that convergence is *not* guaranteed converged = TRUE, descent.conv = conv, control = control) if (control$method %in% control$compute.outlier.stats) obj$ostats <- outlierStats(obj, x, control) obj } robustbase/R/biweight-funs.R0000644000176200001440000000446012420464132015524 0ustar liggesusers#### These Chi() and Psi() used to be called by lmrob() functions #### but no longer --> Have interface via .psi2ipsi() and .C(..) #### FIXME: integrate these with the psi-rho objects --> ./psi-rho-funs.R ## In the vignette ../vignettes/psi_functions.Rnw, we call this ## scaled \rho "\tilde{\rho}" ##- Maronna et al (2006) define their rho to be standardized ##- (only if possible <==> only if redescending psi !) ##- {TODO: *Where* in the Hampel_et_al book ??? } ## Hampel et al (1986): \chi(x) := \rho(x) / \rho(\infty) ## ====== ## <==> chi() is a scaled version of rho(.) such that ## \chi(\infty) = \max_x \chi(x) = 1 ## ==> Chi'() is just a scaled version of psi() : ## with current scale (new for psi()): ## i) Chi'(x, c) == (6/c^2) Psi(x,c) ## ==> ii) Chi''(x,c) == (6/c^2) Psi'(x,c) ## and Chi (x, c) == (6/c^2) Rho(x,c), where Psi(.) = Rho'(.) tukeyChi <- function(x, cc, deriv = 0) { .Deprecated("Mchi") x <- x / cc x2 <- x*x out <- x2 > 1 switch(deriv + 1, { ## deriv = 0 r <- x2*(3 + x2*(-3 + x2)) r[out] <- 1 }, { ## deriv = 1 r <- 6/cc * x * (1-x2)^2 r[out] <- 0 }, { ## deriv = 2 r <- 6/(cc^2) * (1 - x2) * (1 - 5*x2) r[out] <- 0 }, stop("deriv must be in {0,1,2}")) r } ## we call this '*Psi1' such as to not be confounded with ## the (future!) S4 object tukeyPsi() ! 
tukeyPsi1 <- function(x, cc, deriv = 0) { .Deprecated("Mpsi") ## This version of psi() is scaled such that psi'(0) = 1 x2 <- (x / cc)^2 if(deriv < 0) out <- x2 > 1 else in. <- x2 < 1 switch(deriv + 2, { ## deriv = -1 c. <- cc^2/6 r <- c.*(1 - (1- x2)^3) r[out] <- c. r }, { ## deriv = 0 in. * x * (1-x2)^2 }, { ## deriv = 1 in. * (1 - x2) * (1 - 5*x2) }, { ## deriv = 2 in. * 4*x/cc^2 * (5*x2 - 3) }, stop("deriv must be in {-1,0,1,2}")) } if(FALSE) tukeyPsi1Ex <- function (x, cc, deriv = 0) ## tukeyPsi1Ex <- function (x, cc = 4.685, deriv = 0) ## ^^^^^^^^^ { ## This version of psi() is scaled such that psi'(0) = 1 u <- pmin((x/cc)^2, 1) if(deriv < 0) return((1 - (1-u)^3)*cc^2/6) if(deriv == 0) return(x * (1 - u)^2) return((1 - u) * (1 - 5 * u)) } robustbase/R/adjoutlyingness.R0000644000176200001440000002147514052244671016207 0ustar liggesusers#### -*- mode: R; kept-new-versions: 30; kept-old-versions: 20 -*- #### MC-adjusted Outlyingness #### ------------------------ ### ### Original code from the web site from the Antwerpen Statistics Group : ### http://www.agoras.ua.ac.be/Robustn.htm ### which has a "MC" section and for the software links to ### ftp://ftp.win.ua.ac.be/pub/software/agoras/newfiles/mc.tar.gz ### and that contains mcrsoft/adjoutlyingness.R ##_NEW_ (2014): moved from Antwerpen to Leuwen, ## ===> http://wis.kuleuven.be/stat/robust/software ## has several links to 'robustbase', and S-plus code ## http://wis.kuleuven.be/stat/robust/Programs/adjusted-boxplot/adjusted-boxplot.ssc ## (copy in ../misc/Adjusted-boxplot.ssc ## MM [ FIXME ]: ## ----------- ## 1) Use *transposed* B[] and A[] (now called 'E') matrices -- DONE ## 2) use IQR() instead of quantile(., .75) - quantile(., .25) ##--> but only *after* testing original code ## ^^^^^^^^^^^^^^^^^^^^^^^^ adjOutlyingness <- function(x, ndir = 250, p.samp = p, clower=4, cupper=3, IQRtype = 7, alpha.cutoff = 0.75, coef = 1.5, qr.tol = 1e-12, keep.tol = 1e-12, only.outlyingness = FALSE, maxit.mult = max(100, p), trace.lev = 0, ## these are all passed to mc() {when applied to the projected data} mcReflect = n <= 100, mcScale = TRUE, mcMaxit = 2*maxit.mult, mcEps1 = 1e-12, mcEps2 = 1e-15, mcTrace = max(0, trace.lev-1)) ## Skewness-Adjusted Outlyingness { x <- data.matrix(x) n <- nrow(x) p <- ncol(x) stopifnot(n >= 1, p >= 1, p.samp >= p, is.numeric(x)) if (p <= n) { B <- matrix(0, p, ndir) E <- matrix(1, p.samp, 1) x. <- unname(x) # for speed in subsequent subsetting and solve maxit <- as.integer(maxit.mult * ndir) ## ^^ original code had 'Inf', i.e. no iter.count check; ## often, maxit == ndir would suffice if(trace.lev >= 2) p10 <- 10 ^ max(0, min(6 - trace.lev, floor(log10(maxit)))) i <- 1L it <- 0L while (i <= ndir && (it <- it+1L) < maxit) { ## we sort to get *identical* projections instead of "almost" P <- x.[sort(sample.int(n, p.samp)), , drop=FALSE] if ((qrP <- qr(P, tol = qr.tol))$rank == p) { B[,i] <- solve(qrP, E, tol = qr.tol) ## if(trace.lev >= 2) cat(" it=",it,"; found direction # ", i,"\n", sep="") i <- i+1L } else if(trace.lev >= 2) { if(it %% p10 == 0) cat(" it=",it,": rank(qr(P ..)) = ", qrP$rank, " < p = ",p,"\n", sep="") } } if(it == maxit) { rnk.x <- qr(x, tol = qr.tol)$rank if(rnk.x < p) stop("Matrix 'x' is not of full rank: rankM(x) = ",rnk.x," < p = ",p, "\n Use fullRank(x) instead") ## else stop("** direction sampling iterations were not sufficient. 
Maybe try increasing 'maxit.mult'") } Bnorm <- sqrt(colSums(B^2)) Nx <- mean(abs(x.)) ## so the comparison is scale-equivariant: keep <- Bnorm*Nx > keep.tol if(!all(keep)) { if(trace.lev) cat("keeping ", sum(keep), "(out of", length(keep),") normalized directions\n") Bnorm <- Bnorm[ keep ] B <- B[, keep , drop=FALSE] } else if(trace.lev) cat("keeping *all* ",length(keep)," normalized directions\n") B <- B / rep(Bnorm, each = nrow(B)) # normalized B {called 'A' in orig.code} } else { stop('More dimensions than observations: not yet implemented') ## MM: In LIBRA(matlab) they have it implemented: ## seed=0; ## nrich1=n*(n-1)/2; ## ndirect=min(250,nrich1); ## true = (ndirect == nrich1); ## B=extradir(x,ndir,seed,true); % n*ri ## ======== % Calculates ndirect directions through ## % two random choosen data points from data ## for i=1:size(B,1) ## Bnorm(i)=norm(B(i,:),2); ## end ## Bnorm=Bnorm(Bnorm > 1.e-12); %ndirect*1 ## B=B(Bnorm > 1.e-12,:); %ndirect*n ## A=diag(1./Bnorm)*B; %ndirect*n } ## NB: colSums( B^2 ) == 1 Y <- x %*% B # (n x p) %*% (p, nd') == (n x nd'); ## nd' = ndir.final := ndir - {those not in 'keep'} ## Compute and sweep out the median med <- colMedians(Y) if(trace.lev) { cat("med <- colMedians(Y): ", length(med), " values; summary():\n") print(summary(med)) } Y <- Y - rep(med, each=n) ## central :<==> non-adjusted <==> "classical" outlyingness central <- clower == 0 && cupper == 0 if(!central) { ## MM: mc() could be made faster if we could tell it that med(..) = 0 ## vv tmc <- apply(Y, 2L, mc, doReflect=mcReflect, doScale=mcScale, maxit=mcMaxit, eps1=mcEps1, eps2=mcEps2, trace.lev = mcTrace) ## original Antwerpen *wrongly*: tmc <- mc(Y) if(trace.lev) { cat("Columnwise mc() got ", length(tmc), " values; summary():\n") print(summary(tmc)) } } Q13 <- apply(Y, 2, quantile, c(.25, .75), names=FALSE, type = IQRtype) Q1 <- Q13[1L,]; Q3 <- Q13[2L,] IQR <- Q3 - Q1 ## NOTA BENE(MM): simplified definition of tup/tlo here and below ## 2014-10-18: "flipped" sign (which Pieter Setaert (c/o Mia H) proposed, Jul.30,2014: tup <- Q3 + coef* (if(central) IQR else IQR*exp( cupper*tmc*(tmc >= 0) + clower*tmc*(tmc < 0))) tlo <- Q1 - coef* (if(central) IQR else IQR*exp(-clower*tmc*(tmc >= 0) - cupper*tmc*(tmc < 0))) ## Note: all(tlo < med & med < tup) # where med = 0 if(trace.lev >= 3) { print( cbind(tlo, Q1, Q3, tup) ) } ## Instead of the loop: ## for (i in 1:ndir) { ## tup[i] <- max(Y[Y[,i] < tup[i], i]) ## tlo[i] <- -min(Y[Y[,i] > tlo[i], i]) ## ## MM: FIXED typo-bug : ^^^ this was missing! ## ## But after the fix, the function stops "working" for longley.. ## ## because tlo[] becomes 0 too often, YZ[.,.] = c / 0 = Inf ! ## } Yup <- Ylo <- Y Yup[Y >= rep(tup, each=n)] <- -Inf Ylo[Y <= rep(tlo, each=n)] <- Inf y.up <- apply(Yup, 2, max) # = max{ Y[i,] ; Y[i,] < tup[i] } y.lo <- -apply(Ylo, 2, min) # = -min{ Y[i,] ; Y[i,] > tlo[i] } if(trace.lev) { cat(length(y.up), "lower & upper Y (:= X - med(.)) values:\n") print(summary(y.lo)) print(summary(y.up)) } tY <- t(Y) ## Note: column-wise medians are all 0 : "x_i > m" <==> y > 0 ## Note: this loop is pretty fast for (j in 1:n) { # when y = (X-med) = 0 ==> adjout = 0 rather than ## 0 / 0 --> NaN; e.g, in set.seed(3); adjOutlyingness(longley) non0 <- 0 != (y <- tY[,j]); y <- y[non0]; I <- (y > 0) D <- I*y.up[non0] + (1 - I)*y.lo[non0] if(trace.lev >= 3) { cat(sprintf("j=%2d: #{non0}= %2d; quantile(D)=\n", j, sum(non0))) print(quantile(D), digits=3) } tY[non0, j] <- abs(y) / D } ## We get +Inf above for "small n"; e.g. 
set.seed(11); adjOutlyingness(longley) if(trace.lev) { cat("outlyingnesses for all directions (of which max(.) will be chosen:\n") print(quantile(tY, digits=3)) } adjout <- apply(tY, 2, function(x) max(x[is.finite(x)])) ##---- --- if(abs(trace.lev %% 1 - 0.7) < 1e-3) { ## really not for the end user .. cat("Plotting outlyingnesses vs. observation i:\n") matplot(t(tY), log="y"); axis(2, at=1); abline(h=1, lty=3) Sys.sleep(2) ## somewhat revealing: 3 groups: very large | medium | very small (incl. 0 which are *not* plotted) matplot(t(tY), log="y", type="b"); axis(2, at=1); abline(h=1, lty=3) browser() ## <<<<<<<<<<<<<<<<<<< } if(only.outlyingness) adjout else { Qadj <- quantile(adjout, probs = c(1 - alpha.cutoff, alpha.cutoff)) mcadjout <- if(cupper != 0) mc(adjout, doScale=mcScale, eps1=mcEps1, eps2=mcEps2) else 0 ## === cutoff <- Qadj[2] + coef * (Qadj[2] - Qadj[1]) * (if(mcadjout > 0) exp(cupper*mcadjout) else 1) list(adjout = adjout, iter = it, ndir.final = sum(keep), MCadjout = mcadjout, Qalph.adjout = Qadj, cutoff = cutoff, nonOut = (adjout <= cutoff)) } } ##' Compute a "full rank" version of matrix x, ##' by removing columns (or rows when nrow(x) < ncol(x)), using qr() and it's pivots fullRank <- function(x, tol = 1e-7, qrx = qr(x, tol=tol)) { d <- dim(x) n <- d[[1L]]; p <- d[[2L]] if(n < p) return( t(fullRank(t(x), tol=tol)) ) ## else n >= p >= rank(.) rnk <- qrx$rank if(rnk == p) x else x[, qrx$pivot[seq_len(rnk)], drop=FALSE] } robustbase/R/anova.lmrob.R0000644000176200001440000001245212737461447015210 0ustar liggesusersanova.lmrob <- function(object, ..., test = c("Wald", "Deviance"), verbose=getOption("verbose")) { dotargs <- list(...) named <- if (is.null(names(dotargs))) logical(length(dotargs))# FALSE else (names(dotargs) != "") if (any(named)) warning("the following arguments to 'anova.lmrob' are invalid and \n", "dropped: ", pasteK(deparse(dotargs[named]))) dotargs <- dotargs[!named] test <- match.arg(test) ## method argument has to end with 'M' (req. 
for refitting) if (test == "Deviance" && !grepl('M$', object$control$method)) stop("For test = 'Deviance', the estimator chain has to end with 'M'") if (length(dotargs) > 0) { length.tl <- function(x) length(attr(terms(x),"term.labels")) isFormula <- vapply(dotargs, inherits, NA, what = "formula") h <- vapply(dotargs, length.tl, 0L) if(all(isFormula)) { if(any(h >= length.tl(object))) stop("The first object does not contain the largest model") modform <- dotargs } else { if(verbose) message("All models are refitted except the largest one") if(any(h >= length.tl(object))) { h <- c(length.tl(object),h) dotargs <- c(list(object), dotargs)[order(h, decreasing = TRUE)] object <- dotargs[[1]] if(!inherits(object, "lmrob")) stop("anova.lmrob() only works for 'lmrob' objects") dotargs <- dotargs[-1] } modform <- lapply(dotargs, formula) } initCoef <- lapply(dotargs, coef) return(anovaLmrobList(object, modform, initCoef, test = test)) } ## ## "'Anova Table' for a single model object stop("'Anova Table' for a single model not yet implemented") } anovaLmrobList <- function (object, modform, initCoef, test) { responses <- as.character(lapply(modform, function(x) deparse(x[[2]]))) if (!all(responses == deparse(formula(object)[[2]]))) stop("Not the same response used in the fitted models") ## nobs <- length(object$residuals) nmodels <- length(modform) + 1 tbl <- matrix(rep(NA, nmodels*4), ncol = 4) tbl[1,1] <- nobs[1] - length(coef(object)) obj0 <- object for(k in 2:nmodels) { obj0 <- anovaLmrobPair(obj0, modform[[k-1]], initCoef[[k-1]], test = test) tbl[k,] <- obj0$anova obj0$scale <- object$scale } ## return dimnames(tbl) <- list(1:nmodels, c("pseudoDf", "Test.Stat", "Df", "Pr(>chisq)")) title <- switch(test, Wald = "Robust Wald Test Table", Deviance = "Robust Deviance Table", stop("invalid 'test'")) variables <- c(list(formula(terms(object))), modform) topnote <- paste("Model ", format(1:nmodels), ": ", variables, sep = "", collapse = "\n") note <- paste("Largest model fitted by lmrob(), i.e.", object$control$method) ## paste("Models fitted by method '", methods[1], "'", sep="") structure(as.data.frame(tbl), heading = c(title, "", topnote, note,""), class = c("anova", "data.frame")) } anovaLmrobPair <- function(FMfit, reduced.model, initCoef, test) { ## 'FM': full model; 'RM' : reduced model X <- model.matrix(FMfit, data = FMfit$model) FMod <- FMfit$qr$pivot[1:FMfit$rank] asgn <- attr(X, "assign") FMt <- terms(FMfit) RMt <- terms(reduced.model) FMtl <- attr(FMt, "term.labels") RMtl <- attr(RMt, "term.labels") RMnumtl <- match(RMtl , FMtl, nomatch = -1) if(attr(RMt, "intercept") == 1) RMnumtl <- c(0, RMnumtl) if(any(is.na(match(RMnumtl, unique(asgn))))) stop("Models are not nested!") RMod0 <- seq(along = asgn)[!is.na(match(asgn, RMnumtl))] RMod <- intersect(RMod0, FMod) if (length(FMod) == length(RMod)) stop("Models are not strictly nested") H0ind <- which(!FMod %in% RMod) H0coef <- coef(FMfit)[H0ind] df <- length(H0coef) pp <- FMfit$rank switch(test, "Wald" = { t.cov <- FMfit$cov t.chisq <- sum(H0coef * solve(t.cov[H0ind, H0ind], H0coef)) ## return c(FMfit, list(anova = c(nrow(X)-pp+df, t.chisq, df, pchisq(as.vector(t.chisq), df = df, lower.tail = FALSE)))) }, "Deviance" = { y <- FMfit$residuals + FMfit$fitted.values s0 <- FMfit$scale fCtrl <- FMfit$control psi <- function(u, deriv = 0) Mpsi(u, cc = fCtrl$tuning.psi, psi = fCtrl$psi, deriv) iC <- if(is.null(initCoef)) { res <- as.vector(y - X[,RMod] %*% FMfit$coef[RMod]) psiRes <- psi(res/s0) if(sum(abs(psiRes) < 1e-08) > 0.6*nrow(X)) stop("Please 
fit the nested models by lmrob") FMfit$coef[RMod] } else { idx <- !is.na(initCoef) if (any(idx != RMod0 %in% RMod)) stop("NA coefs in full and reduced model do not match") initCoef[idx] } RMfit <- lmrob..M..fit(x = X[,RMod, drop=FALSE], y = y, beta.initial = iC, scale = s0, control = fCtrl, method = fCtrl$method) FMres <- as.vector(y - X[,FMod] %*% FMfit$coef[FMod]) RMres <- RMfit$resid ## as.vector(y - X[,RMod] %*% RMfit$coef) FM_sRho <- sum(psi(FMres/s0, deriv = -1)) RM_sRho <- sum(psi(RMres/s0, deriv = -1)) tauStar <- mean(psi(FMres/s0, deriv = 1)) / mean(psi(FMres/s0)^2, deriv = 0) t.chisq <- 2*tauStar*(RM_sRho - FM_sRho) ## return c(RMfit, list(anova = c(nrow(X)-pp+df, t.chisq, df, pchisq(as.vector(t.chisq), df = df, lower.tail = FALSE)))) }, stop("test ", test, " not yet implemented")) } ## anovaLmrobPair robustbase/R/glmrobMqle.R0000644000176200001440000004264513325654420015067 0ustar liggesusers#### Mallows quasi-likelihood estimator of E. Cantoni and E. Ronchetti (2001) #### based originally on Eva Cantoni's S-plus code "robGLM" ## FIXME{MM}: All these expression()s and eval()s -- once were really slick and fast. ## ----- Nowadays, with 'codetools' and the byte-compiler, they "just don't fit anymore" ## including those globalVariables() {also in other places!}: globalVariables(c("residP", "residPS", "dmu.deta", "snu"), add=TRUE) ##' @title ##' @param wts a character string \dQuote{weights.on.x} specifying how weights should be computed ##' *or* a numeric vector of final weights in which case nothing is computed. ##' @param X n x p design matrix aka model.matrix() ##' @param intercept logical, if true, X[,] has an intercept column which should ##' not be used for rob.wts ##' @return n-vector of non-negative weights ##' @author Martin Maechler robXweights <- function(wts, X, intercept=TRUE) { stopifnot(length(d <- dim(X)) == 2, is.logical(intercept)) nobs <- d[1] if(d[2]) { ## X has >= 1 column, and hence there *are* coefficients in the end if(is.character(wts)){ switch(wts, "none" = rep.int(1, nobs), "hat" = wts_HiiDist(X)^2, # = (1 - Hii)^2 "robCov" = wts_RobDist(X, intercept, covFun = MASS::cov.rob), ## MCD is currently problematic: many singular subsamples "covMcd" = wts_RobDist(X, intercept, covFun = covMcd), stop("Weighting method", sQuote(wts), " is not implemented")) } ## (new; 2013-07-05; -> robustbase 0.9-9) else if(is.list(wts)) { if(length(wts) == 1 && is.function(covF <- wts[[1]])) wts_RobDist(X, intercept, covFun = covF) else stop("if a list, weights.on.x must contain a covariance function such as covMcd()") } else if(is.function(wts)) { wts(X, intercept) } else { if(!is.numeric(wts) || length(wts) != nobs) ## FIXME: "when not a string, a list, or a function, then ..." stop(gettextf("weights.on.x needs %d none-negative values", nobs), domain=NA) if(any(wts) < 0) stop("All weights.on.x must be none negative") } } else ## p = ncoef == 0 {maybe intercept, but that's not relevant here} rep.int(1,nobs) } ##' @param intercept logical, if true, X[,] has an intercept column which should ##' not be used for rob.wts glmrobMqle <- function(X, y, weights = NULL, start = NULL, offset = NULL, family, weights.on.x = "none", control = glmrobMqle.control(), intercept = TRUE, trace = FALSE) { ## To DO: ## o weights are not really implemented as *extra* user weights; rather as "glm-weights" ## o offset is not fully implemented (really? -- should have test case!) 
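    ## Outline (Cantoni & Ronchetti (2001), cf. file header): iterate a
    ## Fisher-scoring-type step for the Mallows quasi-likelihood estimating
    ## equations, where the Pearson residuals are Huberized at 'tcc' and the
    ## design-based weights 'w.x' (from 'weights.on.x') downweight leverage
    ## points in X.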
if(!is.matrix(X)) X <- as.matrix(X) ## never used: ## xnames <- dimnames(X)[[2]] ## ynames <- if (is.matrix(y)) rownames(y) else names(y) nobs <- NROW(y) stopifnot(nobs == nrow(X)) if (is.null(weights)) weights <- rep.int(1, nobs) else if(any(weights <= 0)) stop("All weights must be positive") if (is.null(offset)) offset <- rep.int(0, nobs) else if(!all(offset==0)) warning("'offset' not fully implemented") variance <- family$variance linkinv <- family$linkinv if (!is.function(variance) || !is.function(linkinv)) stop("illegal 'family' argument") mu.eta <- family$mu.eta if (is.null(valideta <- family$valideta)) valideta <- function(eta) TRUE if (is.null(validmu <- family$validmu)) validmu <- function(mu) TRUE ncoef <- ncol(X) w.x <- robXweights(weights.on.x, X=X, intercept=intercept) ### Initializations stopifnot(control$maxit >= 1, (tcc <- control$tcc) >= 0) ## note that etastart and mustart are used to make 'family$initialize' run etastart <- NULL; mustart <- NULL ## note that 'weights' are used and set by binomial()$initialize ! eval(family$initialize) ## --> n, mustart, y and weights (=ni) ni <- as.vector(weights)# dropping attributes for computation ## if(is.null(start)) start <- glm.fit(x = X, y = y, weights = weights, offset = offset, family = family)$coefficients if(any(ina <- is.na(start))) { cat("initial start 'theta' has NA's; eliminating columns X[, j];", "j = ", pasteK(which(ina)),"\n") theta.na <- start X <- X[, !ina, drop = FALSE] start <- glm.fit(x = X, y = y, weights = weights, offset = offset, family = family)$coefficients if(any(is.na(start))) stop("start 'theta' has still NA's .. badly singular x\n") ## FIXME ncoef <- length(start) } thetaOld <- theta <- as.vector(start) # as.v*(): dropping attributes eta <- as.vector(X %*% theta) mu <- linkinv(eta) # mu estimates pi (in [0,1]) at the binomial model if (!(validmu(mu) && valideta(eta))) stop("Cannot find valid starting values: You need help") ## switch(family$family, "binomial" = { Epsi.init <- EpsiBin.init Epsi <- EpsiBin EpsiS <- EpsiSBin Epsi2 <- Epsi2Bin phiEst <- phiEst.cl <- 1 }, "poisson" = { Epsi.init <- EpsiPois.init Epsi <- EpsiPois EpsiS <- EpsiSPois Epsi2 <- Epsi2Pois phiEst <- phiEst.cl <- expression({1}) }, "gaussian" = { Epsi.init <- EpsiGaussian.init Epsi <- EpsiGaussian EpsiS <- EpsiSGaussian Epsi2 <- Epsi2Gaussian phiEst.cl <- phiGaussianEst.cl phiEst <- phiGaussianEst }, "Gamma" = { ## added by ARu Epsi.init <- EpsiGamma.init Epsi <- EpsiGamma EpsiS <- EpsiSGamma Epsi2 <- Epsi2Gamma phiEst.cl <- phiGammaEst.cl phiEst <- phiGammaEst }, ## else stop(gettextf("family '%s' not yet implemented", family$family), domain=NA) ) sV <- NULL # FIXME workaround for codetools comp.V.resid <- expression({ Vmu <- variance(mu) if (any(is.na(Vmu))) stop("NAs in V(mu)") if (any(Vmu == 0)) stop("0s in V(mu)") sVF <- sqrt(Vmu) # square root of variance function residP <- (y - mu)* sni/sVF # Pearson residuals }) comp.scaling <- expression({ sV <- sVF * sqrt(phi) residPS <- residP/sqrt(phi) # scaled Pearson residuals }) comp.Epsi.init <- expression({ ## d mu / d eta : dmu.deta <- mu.eta(eta) if (any(is.na(dmu.deta))) stop("NAs in d(mu)/d(eta)") ## "Epsi init" : H <- floor(mu*ni - tcc* sni*sV) K <- floor(mu*ni + tcc* sni*sV) eval(Epsi.init) }) ### Iterations if(trace && ncoef) { cat("Initial theta: \n") local({names(theta) <- names(start); print(theta) }) digits <- max(1, getOption("digits") - 5) w.th.1 <- 6+digits # width of one number; need 8 for 2 digits: "-4.8e-11" width.th <- ncoef*(w.th.1 + 1) - 1 cat(sprintf("%3s | %*s | 
%12s\n", "it", width.th, "d{theta}", "rel.change")) mFormat <- function(x, wid) { r <- formatC(x, digits=digits, width=wid) sprintf("%*s", wid, sub("e([+-])0","e\\1", r)) } } sni <- sqrt(ni) eval(comp.V.resid) #-> (Vmu, sVF, residP) phi <- eval(phiEst.cl) ## Determine the range of phi values based on the distribution of |residP| Rphi <- c(1e-12, 3*median(abs(residP)))^2 conv <- FALSE if(ncoef) for (nit in 1:control$maxit) { eval(comp.scaling) #-> (sV, residPS) eval(comp.Epsi.init) ## Computation of alpha and (7) using matrix column means: cpsi <- pmax.int(-tcc, pmin.int(residPS,tcc)) - eval(Epsi) EEq <- colMeans(cpsi * w.x * sni/sV * dmu.deta * X) ## ## Solve 1/n (t(X) %*% B %*% X) %*% delta.coef = EEq DiagB <- eval(EpsiS) /(sni*sV) * w.x * (ni*dmu.deta)^2 if(any(n0 <- ni == 0)) DiagB[n0] <- 0 # instead of NaN Dtheta <- solve(crossprod(X, DiagB*X)/nobs, EEq) if (any(!is.finite(Dtheta))) { warning("Non-finite coefficients at iteration ", nit) break } theta <- thetaOld + Dtheta eta <- as.vector(X %*% theta) + offset mu <- linkinv(eta) ## estimation of the dispersion parameter eval(comp.V.resid) phi <- eval(phiEst) ## Check convergence: relative error < tolerance relE <- sqrt(sum(Dtheta^2)/max(1e-20, sum(thetaOld^2))) conv <- relE <= control$acc if(trace) { cat(sprintf("%3d | %*s | %12g\n", nit, width.th, paste(mFormat(Dtheta, w.th.1), collapse=" "), relE)) } if(conv) break thetaOld <- theta } ## end of iteration else { ## ncoef == 0 conv <- TRUE nit <- 0 } if (!conv) warning("Algorithm did not converge") eps <- 10 * .Machine$double.eps switch(family$family, "binomial" = { if (any(mu/weights > 1 - eps) || any(mu/weights < eps)) warning("fitted probabilities numerically 0 or 1 occurred") }, "poisson" = { if (any(mu < eps)) warning("fitted rates numerically 0 occurred") }) eval(comp.V.resid) #-> (Vmu, sVF, residP) eval(comp.scaling) #-> (sV, residPS) ## Estimated asymptotic covariance of the robust estimator if(ncoef) { eval(comp.Epsi.init) alpha <- colMeans(eval(Epsi) * w.x * sni/sV * dmu.deta * X) DiagA <- eval(Epsi2) / (ni*sV^2)* w.x^2* (ni*dmu.deta)^2 matQ <- crossprod(X, DiagA*X)/nobs - tcrossprod(alpha, alpha) DiagB <- eval(EpsiS) / (sni*sV)* w.x * (ni*dmu.deta)^2 if(any(n0 <- ni == 0)) DiagB[n0] <- 0 # instead of NaN matM <- crossprod(X, DiagB*X)/nobs matMinv <- solve(matM) asCov <- matMinv %*% matQ %*% matMinv / nobs } else { ## ncoef == 0 matM <- matQ <- asCov <- matrix(NA_real_, 0,0) } if(any(ina)) {# put NA's back, extending theta[] to "original length" ok <- !ina theta.na[ok] <- theta ; theta <- theta.na ## also extend the "p x p" matrices with NA's -- ##No : lm() and glm() also do *not* do this ##No p <- length(theta) ##No nm <- names(theta) ##No M <- matrix(NA_real_, p, p, dimnames = list(nm,nm)) ##No Mn <- M; Mn[ok, ok] <- asCov ; asCov <- Mn ##No Mn <- M; Mn[ok, ok] <- matM ; matM <- Mn ##No Mn <- M; Mn[ok, ok] <- matQ ; matQ <- Mn } w.r <- pmin(1, tcc/abs(residPS)) names(mu) <- names(eta) <- names(residPS) # re-add after computation list(coefficients = theta, residuals = residP, # s.resid = residPS, fitted.values = mu, w.r = w.r, w.x = w.x, ni = ni, dispersion = phi, cov = asCov, matM = matM, matQ = matQ, tcc = tcc, family = family, linear.predictors = eta, deviance = NULL, iter = nit, y = y, converged = conv) } ## NB: X is model.matrix() aka design matrix used; typically including an intercept wts_HiiDist <- function(X) { ## Hii := diag( tcrossprod( qr.Q(qr(X)) ) ) == rowSums( qr.Q(qr(X)) ^2 ) : x <- qr(X) Hii <- rowSums(qr.qy(x, diag(1, nrow = NROW(X), ncol = x$rank))^2) 
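    ## Hii is the diagonal of the hat matrix, i.e., the leverages; return
    ## 1 - Hii, so that high-leverage observations get small weight
    ## (robXweights() squares this for weights.on.x = "hat").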
(1-Hii) } ##' Compute robustness weights depending on the design 'X' only, ##' using robust(ified) Mahalanobis distances. ##' This is an auxiliary function for robXweights() activated typically by ##' weights.on.x = "..." from regression functions ##' @title Compute Robust Weights based on Robustified Mahalanobis - Distances ##' @param X n x p numeric matrix ##' @param intercept logical; should be true iff X[,1] is a column with the intercept ##' @param covFun function for computing a \bold{robust} covariance matrix; ##' e.g., MASS::cov.rob(), or covMcd(). ##' @return n-vector of non-negative weights. ##' @author Martin Maechler wts_RobDist <- function(X, intercept, covFun) { D2 <- if(intercept) { ## X[,] has intercept column which should not be used for rob.wts X <- X[, -1, drop=FALSE] Xrc <- covFun(X) mahalanobis(X, center = Xrc$center, cov = Xrc$cov) } else { ## X[,] can be used directly if(!is.matrix(X)) X <- as.matrix(X) Xrc <- covFun(X) S <- Xrc$cov + tcrossprod(Xrc$center) mahalanobis(X, center = FALSE, cov = S) } p <- ncol(X) ## E[chi^2_p] = p 1/sqrt(1+ pmax.int(0, 8*(D2 - p)/sqrt(2*p))) } ## MM: 'acc' seems a misnomer to me, but it's inherited from MASS::rlm glmrobMqle.control <- function(acc = 1e-04, test.acc = "coef", maxit = 50, tcc = 1.345) { if (!is.numeric(acc) || acc <= 0) stop("value of acc must be > 0") if (test.acc != "coef") stop("Only 'test.acc = \"coef\"' is currently implemented") ## if (!(any(test.vec == c("coef", "resid")))) ## stop("invalid argument for test.acc") if (!is.numeric(maxit) || maxit <= 0) stop("maximum number of iterations must be > 0") if (!is.numeric(tcc) || tcc <= 0) stop("value of the tuning constant c (tcc) must be > 0") list(acc = acc, test.acc = test.acc, maxit = maxit, tcc = tcc) } ### ----------------- E[ f(psi ( X ) ) ] ------------------------------- ## MM: These are now expressions instead of functions ## since 'Epsi*' and 'Epsi2*' are *always* called together ## and 'EpsiS*' when called is always after the other two ## ==> do common computations only once in Epsi*.init ==> more efficient! ## ## FIXME(2): Some of these fail when Huber's "c", 'tcc' is = +Inf ## ----- --> ../../robGLM1/R/rglm.R ## FIXME: Do use a "robFamily", a *list* of functions ## ------ which all have the same environment ## ===> can get same efficiency as expressions, but better OOP ### --- Poisson -- family --- EpsiPois.init <- expression( { dpH <- dpois(H, mu); dpH1 <- dpois(H-1, mu) dpK <- dpois(K, mu); dpK1 <- dpois(K-1, mu) pHm1 <- ppois(H-1, mu) ; pH <- pHm1 + dpH # = ppois(H,*) pKm1 <- ppois(K-1, mu) ; pK <- pKm1 + dpK # = ppois(K,*) E2f <- mu*(dpH1 - dpH - dpK1 + dpK) + pKm1 - pHm1 }) EpsiPois <- expression( { tcc*(1 - pK - pH) + mu*(dpH - dpK)/sV }) Epsi2Pois <- expression( { ## Calculation of E(psi^2) for the diagonal elements of A in matrix Q: tcc^2 * (pH + 1 - pK) + E2f }) EpsiSPois <- expression( { ## Calculation of E(psi*s) for the diagonal elements of B in the ## expression matrix M = 1/n t(X) %*% B %*% X: tcc*(dpH + dpK) + E2f / sV }) ### --- Binomial -- family --- EpsiBin.init <- expression({ pK <- pbinom(K, ni, mu) pH <- pbinom(H, ni, mu) pKm1 <- pbinom(K-1, pmax.int(0, ni-1), mu) pHm1 <- pbinom(H-1, pmax.int(0, ni-1), mu) pKm2 <- pbinom(K-2, pmax.int(0, ni-2), mu) pHm2 <- pbinom(H-2, pmax.int(0, ni-2), mu) ## QlV = Q / V, where Q = Sum_j (j - mu_i)^2 * P[Y_i = j] ## i.e. Q = Sum_j j(j-1)* P[.] + ## (1- 2*mu_i) Sum_j j * P[.] + ## mu_i^2 Sum_j P[.] 
QlV <- mu/Vmu*(mu*ni*(pK-pH) + (1 - 2*mu*ni) * ifelse(ni == 1, (H <= 0)*(K >= 1), pKm1 - pHm1) + (ni - 1) * mu * ifelse(ni == 2, (H <= 1)*(K >= 2), pKm2 - pHm2)) }) EpsiBin <- expression( { tcc*(1 - pK - pH) + ifelse(ni == 1, (- (H < 0) + (K >= 1) ) * sV, (pKm1 - pHm1 - pK + pH) * mu * sni/sV) }) Epsi2Bin <- expression( { ## Calculation of E(psi^2) for the diagonal elements of A in matrix Q: tcc^2*(pH + 1 - pK) + QlV }) EpsiSBin <- expression( { ## Calculation of E(psi*s) for the diagonal elements of B in the ## expression matrix M = (X' B X)/n mu/Vmu*(tcc*(pH - ifelse(ni == 1, H >= 1, pHm1)) + tcc*(pK - ifelse(ni == 1, K > 0, pKm1))) + ifelse(ni == 0, 0, QlV / (sni*sV)) }) ### --- Gaussian -- family --- EpsiGaussian.init <- expression({ dc <- dnorm(tcc) pc <- pnorm(tcc) }) EpsiGaussian <- expression( 0 ) EpsiSGaussian <- expression( 2*pc-1 ) Epsi2Gaussian <- expression( 2*tcc^2*(1-pc)-2*tcc*dc+2*pc-1 ) phiGaussianEst.cl <- expression( { ## Classical estimation of the dispersion paramter phi = sigma^2 sum(((y - mu)/mu)^2)/(nobs - ncoef) }) phiGaussianEst <- expression( { sphi <- mad(residP, center=0)^2 }) ### --- Gamma -- family --- Gmn <- function(t, nu) { ## Gm corrresponds to G * nu^((nu-1)/2) / Gamma(nu) snu <- sqrt(nu) snut <- snu+t r <- numeric(length(snut)) ok <- snut > 0 r[ok] <- { nu <- nu[ok]; snu <- snu[ok]; snut <- snut[ok] exp((nu-1)/2*log(nu) - lgamma(nu) - snu*snut + nu*log(snut)) } r } EpsiGamma.init <- expression({ nu <- 1/phi ## form parameter nu snu <- 1/sqrt(phi) ## == sqrt (nu) pPtc <- pgamma(snu + c(-tcc,tcc), shape=nu, rate=snu) pMtc <- pPtc[1] pPtc <- pPtc[2] aux2 <- tcc*snu GLtcc <- Gmn(-tcc,nu) GUtcc <- Gmn( tcc,nu) }) EpsiGamma <- expression( tcc*(1-pPtc-pMtc) + GLtcc - GUtcc ) EpsiSGamma <- expression( ((GLtcc - GUtcc) + snu*(pPtc-pMtc))/mu ) Epsi2Gamma <- expression({ (tcc^2*(pMtc+1-pPtc) + (pPtc-pMtc) + (GLtcc*(1-aux2) - GUtcc*(1+aux2))/snu ) }) phiGammaEst.cl <- expression( { ## Classical moment estimation of the dispersion parameter phi sum(((y - mu)/mu)^2)/(nobs-ncoef) }) phiGammaEst <- expression( { ## robust estimation of the dispersion parameter by ## Huber's proposal 2 sphi <- uniroot(Huberprop2, interval=Rphi, ns.resid=residP, mu=mu, Vmu=Vmu, tcc=tcc)$root }) Huberprop2 <- function(phi, ns.resid, mu, Vmu, tcc) { eval(EpsiGamma.init) compEpsi2 <- eval(Epsi2Gamma) nobs <- length(mu) ## return h := sum(pmax.int(-tcc, pmin.int(ns.resid*snu, tcc))^2) - nobs*compEpsi2 } if(FALSE) ## no-eval version Huberprop2 <- function(phi, ns.resid, mu, Vmu, tcc) { nobs <- length(mu) nu <- 1/phi ## form parameter nu snu <- 1/sqrt(phi) ## sqrt (nu) pPtc <- pgamma(snu + c(-tcc,tcc), shape=nu, rate=snu) pMtc <- pPtc[1] pPtc <- pPtc[2] ts <- tcc*snu GLtcc <- Gmn(-tcc,nu) *(1-ts)/snu GUtcc <- Gmn( tcc,nu) *(1+ts)/snu ## compEpsi2 <- tcc^2 + (pPtc - pMtc)*(1-tcc^2) + GLtcc - GUtcc ## return h := sum(pmax.int(-tcc, pmin.int(ns.resid*snu, tcc))^2) - nobs*compEpsi2 } robustbase/R/qnsn.R0000644000176200001440000001274614123440375013743 0ustar liggesusers### Note: till 2010, a slightly wrong constant = 2.2219 was in use. 
### Error reported by Peter Ruckdeschel, U.Bayreuth, 15.Juli 2010 ### correct constant == 1 / (sqrt(2) * qnorm(5/8)) == 2.219144 ### -- but wrong constant, 2.2219, is already in the the original Fortran qn.f Qn.corr <- 2.2219 / 2.21914 ##' Qn finite sample correction factor (not exported, but "available") ##' Version 1 Qn.finite.c <- function(n) (if (n %% 2) 1.6069 +(-2.333 - 3.1/n)/n # n odd else 3.6667 +( 2.591 - 5.5/n)/n # n even )/n + 1 ## Version built on res <- cbind(Res.sml, Res.mid) ## and the models there Qn.finite.c <- function(n) (if (n %% 2) 1.60188 +(-2.1284 - 5.172/n)/n # n odd else 3.67561 +( 1.9654 +(6.987 - 77/n)/n)/n # n even )/n + 1 Qn <- function(x, constant = NULL, finite.corr = is.null(constant) && missing(k), na.rm = FALSE, k = choose(n %/% 2 + 1, 2), warn.finite.corr = TRUE) { ## Purpose: Rousseeuw and Croux's Q_n() robust scale estimator ## Author: Martin Maechler, Date: 14 Mar 2002, 10:43 if (na.rm) x <- x[!is.na(x)] else if(anyNA(x)) return(NA) n <- length(x) if(n == 0) return(NA) else if(n == 1) return(0.) else if(!is.integer(n)) stop("not yet implemented for large vectors") nn2 <- choose(n, 2) # = n*(n-1)/2 (double!) dflt.k <- if(dflt.c <- is.null(constant)) missing(k) else FALSE stopifnot(is.numeric(k <- as.double(k)), k == trunc(k), 1 <= k, k <= nn2, ## but k *may* be vector is.integer(l_k <- length(k))) if(missing(finite.corr)) # smarter than "visual default" finite.corr <- dflt.c && (dflt.k <- dflt.k || (l_k == 1 && k == choose(n %/% 2 + 1, 2))) if(dflt.c) # define constant constant <- if(dflt.k) 2.21914 # == old default ("true value" rounded to 6 significant digits) else 1/(sqrt(2) * qnorm(((k-1/2)/nn2 + 1)/2)) ## cannot pass +/- Inf to .C() {"hack"}: if(any(nFin <- is.infinite(x))) x[nFin] <- sign(x[nFin]) * .Machine$double.xmax r <- constant * .C(Qn0, as.double(x), n, k, l_k, res = double(l_k))$res if (finite.corr) { if(!dflt.k && warn.finite.corr) warning("finite sample corrections are not corrected for non-default 'k'") ## FIXME: MM--- using the above qnorm((k-1/2)/nn2 + 1)/2) should need *less* finite correction !! ## ----- ~~ >>> simulation needed if (n <= 12) ## n in 2:12 --> n-1 in 1:11 ## n=2: E[Q_2] = E|X - Y| = sqrt(pi)/2, fc = sqrt(pi)/2/2.21914 r* c(.399356, # ~= fc = 0.3993560233 ## These are from MM's simulation("Res3"), Nsim = 2^27 ~= 134 mio: ## ~/R/MM/Pkg-ex/robustbase/Qn-simulation.R .99365, .51321, .84401, .61220, .85877, .66993, .87344, .72014, .88906, .75743)[n - 1L] else r / Qn.finite.c(n) } else r } ## This is the old version -- available for back "compatibility": Qn.old <- function(x, constant = 2.2219, finite.corr = missing(constant)) { ## Purpose: Rousseeuw and Croux's Q_n() robust scale estimator ## Author: Martin Maechler, Date: 14 Mar 2002, 10:43 n <- length(x) if(n == 0) return(NA) else if(n == 1) return(0.) else if(!is.integer(n)) stop("not yet implemented for large vectors") h <- n %/% 2L + 1L k <- h*(h-1)/2 r <- constant * .C(Qn0, as.double(x), n, k, 1L, res = double(1))$res if (finite.corr) (if (n <= 9) { # n in 2:9 --> n-1 in 1:8 c(.399,.994, .512,.844, .611,.857, .669,.872)[n - 1] } else { if (n %% 2) ## n odd n / (n + 1.4) else ## n even n / (n + 3.8) } ) * r else r } Sn <- function(x, constant = 1.1926, finite.corr = missing(constant), na.rm = FALSE) { ## Purpose: Rousseeuw and Croux's S_n() robust scale estimator ## Author: Martin Maechler, Date: 14 Mar 2002, 10:43 if (na.rm) x <- x[!is.na(x)] else if(anyNA(x)) return(NA) n <- length(x) if(n == 0) return(NA) else if(n == 1) return(0.) 
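    ## h = floor(n/2) + 1 and k = choose(h, 2):  Q_n is (up to the constant)
    ## the k-th order statistic of the n*(n-1)/2 pairwise differences
    ## |x_i - x_j|, i < j.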
## cannot pass +/- Inf to .C() {"hack"}: if(any(nFin <- is.infinite(x))) x[nFin] <- sign(x[nFin]) * .Machine$double.xmax r <- constant * .C(Sn0, as.double(x), n, as.integer(!is.unsorted(x)),# is.sorted res = double(1), a2 = double(n))$res ## NB: a2[] could be used for confidence intervals and other estimates! if (finite.corr) ( if (n <= 9) { c(0.743, # n = 2 1.851, 0.954,# 3 & 4 1.351, 0.993,# 5 & 6 1.198, 1.005,# 7 & 8 1.131)[n - 1] } else if (n %% 2) # n odd, >= 11 n / (n - 0.9) else # n even, >= 10 1 ) * r else r } wgt.himedian <- function(x, weights = rep(1,n)) { ## Purpose: weighted hiMedian of x ## Author: Martin Maechler, Date: 14 Mar 2002 n <- length(x <- as.double(x)) stopifnot(storage.mode(weights) %in% c("integer", "double")) if(n != length(weights)) stop("'weights' must have same length as 'x'") ## if(is.integer(weights)) message("using integer weights") .C(if(is.integer(weights)) wgt_himed_i else wgt_himed, x, n, weights, res = double(1))$res } ## To be used directly as 'scaleFun' in 'covOGK()' : s_Qn <- function(x, mu.too = FALSE, ...) c(if(mu.too) median(x), Qn(x, ...)) s_Sn <- function(x, mu.too = FALSE, ...) c(if(mu.too) median(x), Sn(x, ...)) robustbase/R/covPlot.R0000644000176200001440000002667113434014060014404 0ustar liggesusers#### This is from the R package #### #### rrcov : Scalable Robust Estimators with High Breakdown Point #### #### by Valentin Todorov ### This program is free software; you can redistribute it and/or modify ### it under the terms of the GNU General Public License as published by ### the Free Software Foundation; either version 2 of the License, or ### (at your option) any later version. ### ### This program is distributed in the hope that it will be useful, ### but WITHOUT ANY WARRANTY; without even the implied warranty of ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ### GNU General Public License for more details. ### ### You should have received a copy of the GNU General Public License ### along with this program; if not, a copy is available at ## http://www.r-project.org/Licenses/ ## I would like to thank Peter Filzmoser for providing the initial code of ## this function. plot.mcd <- function(x, which=c("all", "dd","distance","qqchi2","tolEllipsePlot","screeplot"), classic= FALSE, ask = (which[1] == "all" && dev.interactive()), cutoff = NULL, id.n, labels.id = rownames(x$X), cex.id = 0.75, label.pos = c(4,2), tol = 1e-7, ...) { if (!inherits(x, "mcd")) stop("Use only with 'mcd' objects") covPlot(x$X, which= which, classic= classic, ask= ask, m.cov = x, cutoff= cutoff, id.n = id.n, labels.id, cex.id = cex.id, label.pos = label.pos, tol = tol, ...) } covPlot <- function(x, which = c("all", "dd", "distance", "qqchi2", "tolEllipsePlot", "screeplot"), classic = FALSE, ask = (which[1] == "all" && dev.interactive()), m.cov = covMcd(x), cutoff = NULL, id.n, labels.id = rownames(x), cex.id = 0.75, label.pos = c(4,2), tol = 1e-7, ...) { ##@bdescr ## Make plots based on the covariance structure of a data set: ## dd - distance-distance plot: Robust distances versus ## Mahalanobis distances ## distance - a plot of the robust distances ## qqchi2 - a qq-plot of the robust distances versus the ## quantiles of the chi-squared distribution ## tolEllipsePlot- a tolerance ellipse ## screeplot- a screeplot of the eigenvalues ov the covariance matrix ## ## Distance Plot: ## Draw a Distance-Distance Plot: Plots the robust distances ## versus the classical Mahalanobis distances as introduced by ## Rousseeuw, P. J., and van Zomeren, B. C. (1990). 
Unmasking ## Multivariate Outliers and Leverage Points. Journal of the American ## Statistical Association, 85, 633-639. ## ## The dashed line is the set of points where the robust distance is ## equal to the classical distance. ## The horizontal and vertical dotted lines are drawn at values equal cutoff ## which defaults to square root of the 97.5% quantile of a chi-squared ## distribution with p degrees of freedom. Points beyond these lines can ## be considered outliers. ## ##@edescr ## ##@in x : [matrix] A data.frame or matrix, n > 2*p ##@in which : [character] A plot option, one of: ## classic: index plot of the classical mahalanobis distances ## robust : index plot of the robust mahalanobis distances ## dd : distance-distance plot ## index : parallel index plot of classical and robust distances ## all : all three plots --- this is the default ## ##@in classic : [logical] If true the classical plot will be displayed too ## default is classic = FALSE ##@in m.cov : [list] An object like class "mcd" - only its attributes ## center and cov will be used ##@in cutoff : [number] The cutoff value for the distances ##@in id.n : [number] number of observations to be identified with a label. ## Defaults to the number of observations with ## distance larger than cutoff -- missing is propagated ##@in tol : [number] tolerance to be used for computing the inverse ## - see 'solve'. defaults to 1e-7 ## NOTE: The default tolerance 1e-7, will not work for some example ## data sets, like milk or aircraft myscreeplot <- function(x, m.cov = covMcd(x)) { erob <- eigen(m.cov$cov,symmetric = TRUE, only.values = TRUE)$values eclass <- eigen(var(x), symmetric = TRUE, only.values = TRUE)$values leg.txt <- c("Robust", "Classical") leg.col <- c("green", "red") leg.pch <- c(1,24) leg.lty <- c("solid", "dotted") eall <- c(erob,eclass) ylim <- c( min(eall), max(eall)) plot(erob, ylim=ylim, ylab="Eigenvalues", xlab="Index", type="n") legend("topright", leg.txt, pch = leg.pch, lty = leg.lty, col = leg.col) lines(erob, type="o", pch= leg.pch[1], lty= leg.lty[1], col=leg.col[1]) lines(eclass, type="o", pch= leg.pch[2], lty= leg.lty[2], col=leg.col[2]) title(main = "Scree plot") } mydistplot <- function(x, cutoff, classic = FALSE, id.n) { ## Index Plot: ## Plot the vector x (robust or mahalanobis distances) against ## the observation indexes. Identify by a label the id.n ## observations with largest value of x. If id.n is not supplied, ## calculate it as the number of observations larger than cutoff. ## Use cutoff to draw a horisontal line. ## Use classic = FALSE/TRUE to choose the label of the vertical axes n <- length(x) if(missing(id.n)) # maybe propagated id.n <- length(which(x > cutoff)) ylab <- paste("Square Root of", if(classic) "Mahalanobis" else "Robust", "distance") plot(x, type = "p", ylab = ylab, xlab = "Index", main = "Distance Plot") label(1:n, x, id.n) abline(h = cutoff) } myddplot <- function(md, rd, cutoff, id.n) { ## Distance-Distance Plot: ## Plot the vector y = rd (robust distances) against ## x = md (mahalanobis distances). Identify by a label the id.n ## observations with largest rd. If id.n is not supplied, calculate ## it as the number of observations larger than cutoff. Use cutoff ## to draw a horisontal and a vertical line. Draw also a dotted line ## with a slope 1. 
n <- length(md) if(missing(id.n)) # maybe propagated id.n <- length(which(rd > cutoff)) xlab <- "Mahalanobis distance" ylab <- "Robust distance" plot(md, rd, type = "p", xlab = xlab, ylab = ylab, main = "Distance-Distance Plot") label(md, rd, id.n) abline(0, 1, lty = 2) abline(v = cutoff, h = cutoff) } qqplot <- function(x, p, cutoff = sqrt(qchisq(0.975, p)), classic = FALSE, id.n) { ## Chisquare QQ-Plot: ## Plot the vector x (robust or mahalanobis distances) against ## the square root of the quantiles of the chi-squared distribution ## with p degrees of freedom. ## Identify by a label the id.n observations with largest value of x. ## If id.n is not supplied, calculate it as the number of observations ## larger than cutoff. ## Use classic = FALSE/TRUE to choose the label of the vertical axes ## parameters and preconditions n <- length(x) if(missing(id.n)) # maybe propagated id.n <- length(which(x > cutoff)) qq <- sqrt(qchisq(((1:n)-1/3)/(n+1/3), p)) x <- sort(x, index.return = TRUE) ix <- x$ix x <- x$x ylab <- paste(if(classic) "Mahalanobis" else "Robust", "distance") xlab <- "Square root of the quantiles of the chi-squared distribution" plot(qq, x, xlab = xlab, ylab = ylab, main = "Chisquare QQ-Plot") label(qq, x, id.n, ind = (n-id.n+1):n, labs = ix) abline(0, 1, lty = 2) } ## end{qqplot} label <- function(x, y, id.n, ind = sort.list(y, decreasing = TRUE)[1:id.n], labs = labels.id, adj.x = TRUE) { if(id.n > 0) { ## label the largest 'id.n' y-values labpos <- if(adj.x) label.pos[1+ as.numeric(x > mean(range(x)))] else 3 text(x[ind], y[ind], labs[ind], cex = cex.id, xpd = TRUE, pos = labpos, offset = 0.25) } } ## Begin{covPlot} -- arguments checking of preconditions if(is.data.frame(x)) x <- data.matrix(x) if(!is.matrix(x) || !is.numeric(x)) stop("x is not a numeric dataframe or matrix.") n <- dim(x)[1] p <- dim(x)[2] if(!is.numeric(m.cov$center) || !is.numeric(m.cov$cov)) stop("argument 'm.cov' must have numeric components 'center' and 'cov'") if(length(m.cov$center) != p) stop("Data set and provided center have different dimensions!") ## ?covPlot says it only needs 'cov' and 'center' ## Maybe should be smarter and *test* for non-singularity if(is.numeric(m.cov$crit) && m.cov$crit == 0) stop( "The covariance matrix is singular!") if(is.null(cutoff)) cutoff <- sqrt(qchisq(0.975, p)) ## now "more in line" with plot.lm()'s labeling: if(is.null(labels.id)) labels.id <- as.character(1:n) if(!missing(id.n) && !is.null(id.n)) { id.n <- as.integer(id.n) if(id.n < 0 || id.n > n) stop(sQuote("id.n")," must be in {1,..,",n,"}") } which <- match.arg(which) md <- sqrt(mahalanobis(x, colMeans(x), var(x), tol = tol)) rd <- sqrt(mahalanobis(x, m.cov$center, m.cov$cov, tol = tol)) ## *Never* here : par(mfrow = c(1,1), pty = "m") op <- if (ask) par(ask = TRUE) else list() on.exit(par(op)) if(which == "all" || which == "distance") { if(classic) { opr <- if(prod(par("mfrow")) == 1) par(mfrow = c(1,2), pty = "m") else list() } ## index plot of mahalanobis distances: mydistplot(rd, cutoff, id.n = id.n) if(classic) { ## index plot of robust distances: mydistplot(md, cutoff, classic = TRUE, id.n = id.n) par(opr) } } if(which == "all" || which == "dd") { myddplot(md, rd, cutoff = cutoff, id.n = id.n) # distance-distance plot } if(which == "all" || which == "qqchi2") { if(classic) { opr <- if(prod(par("mfrow")) == 1) par(mfrow = c(1,2), pty = "m") else list() } ## qq-plot of the robust distances versus the ## quantiles of the chi-squared distribution qqplot(rd, p, cutoff = cutoff, id.n = id.n) if(classic) { ## 
qq-plot of the mahalanobis distances qqplot(md, p, cutoff = cutoff, classic = TRUE, id.n = id.n) par(opr) } } if(which == "all" || which == "tolEllipsePlot") { if(p == 2) tolEllipsePlot(x, m.cov = m.cov, cutoff = cutoff, id.n = id.n, classic = classic, tol = tol) else if(which != "all") warning("For tolerance ellipses the dimension 'p' must be 2!") } if(which == "all" || which == "screeplot") { myscreeplot(x, m.cov = m.cov) } } ## end { covPlot } ## ddplot <- function(x,...) { ## covPlot(x, which="dd", ...) ## } ## distplot <- function(x,...) { ## covPlot(x, which="distance", ...) ## } ## chi2qqplot <- function(x,...) { ## covPlot(x, which="qqchi2", ...) ## } ## ellipse() exists in other packages ## ellipse <- function(x,...) { ## covPlot(x, which="tolEllipsePlot", ...) ## } robustbase/R/rrcov.control.R0000644000176200001440000000430112440116711015554 0ustar liggesusers## rrcov : Scalable Robust Estimators with High Breakdown Point ## ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, a copy is available at ## http://www.r-project.org/Licenses/ ## "FIXME": If you change this, you must "sync" with ../man/rrcov.control.Rd ## 1) covMcd()'s default in ./covMcd.R ## 2) ltsReg.default()'s default in ./ltsReg.R ## 3) covComed()s default in ./comedian.R rrcov.control <- function(alpha = 1/2, method = c("covMcd", "covComed", "ltsReg"), nsamp = 500, nmini = 300, kmini = 5, seed = NULL, tolSolve = 1e-14, scalefn = "hrv2012", maxcsteps = 200, trace = FALSE, wgtFUN = "01.original", beta, use.correction = identical(wgtFUN, "01.original"), adjust = FALSE) { method <- match.arg(method) if(missing(beta) || !is.numeric(beta)) beta <- c("covMcd" = 0.975, "ltsReg" = 0.9875, "covComed" = 0.95)[[method]] list(alpha=alpha, nsamp=nsamp, nmini=as.integer(nmini), kmini=as.integer(kmini), seed = as.integer(seed), tolSolve=tolSolve, scalefn=scalefn, maxcsteps=as.integer(maxcsteps), trace=trace, wgtFUN=wgtFUN, beta=beta, use.correction=use.correction, adjust=adjust) } ## allow direct fast access: .scalefn.default <- eval(formals(rrcov.control)$scalefn) ## Only for back compatibility, as some new args did not exist pre 2013-04, ## and callers of ltsReg() / covMcd() may use a "too small" 'control' list: getDefCtrl <- function(nm, defCtrl = rrcov.control()) { callerEnv <- parent.frame() if(is.null(get(nm, envir = callerEnv))) assign(nm, defCtrl[[nm]], envir=callerEnv) } robustbase/R/adjbox.R0000644000176200001440000000735513762451710014236 0ustar liggesusers#### Skewness (MC) - Adjusted Boxplots ### modeled closely after boxplot() etc in R/src/library/graphics/R/boxplot.R : adjbox <- function(x, ...) UseMethod("adjbox") adjbox.default <- function (x, ..., range = 1.5, doReflect=FALSE, width = NULL, varwidth = FALSE, notch = FALSE, outline = TRUE, names, plot = TRUE, border = par("fg"), col = NULL, log = "", pars = list(boxwex = 0.8, staplewex = 0.5, outwex = 0.5), horizontal = FALSE, add = FALSE, at = NULL) { args <- list(x, ...) 
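    ## Usage sketch (not run; the data below are simulated, not from the package):
    ##   set.seed(7); z <- rlnorm(200)  # a right-skewed sample
    ##   adjbox(z)   # skewness-adjusted (medcouple-based) fences, see adjboxStats()
    ##   boxplot(z)  # compare with the classical Tukey boxplot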
namedargs <- if(!is.null(attributes(args)$names)) attributes(args)$names != "" else logical(length(args))# all FALSE ## pars <- c(args[namedargs], pars) groups <- if(is.list(x)) x else args[!namedargs] if(0 == (n <- length(groups))) stop("invalid first argument") if(length(class(groups))) groups <- unclass(groups) if(missing(names)) names <- 1:n names(groups) <- names cls <- sapply(groups, function(x) class(x)[1]) cl <- if(all(cls == cls[1])) cls[1] # else NULL for (i in 1:n) groups[i] <- list(adjboxStats(unclass(groups[[i]]), coef=range, doReflect=doReflect)) # do.conf=notch) stats <- matrix(0, nrow=5, ncol=n) conf <- fence <- matrix(0, nrow=2, ncol=n) ng <- out <- group <- numeric(0) ct <- 1 for(i in groups) { stats[,ct] <- i$stats conf [,ct] <- i$conf fence[,ct] <- i$fence ng <- c(ng, i$n) if((lo <- length(i$out))) { out <- c(out,i$out) group <- c(group, rep.int(ct, lo)) } ct <- ct+1 } if(length(cl) && cl != "numeric") oldClass(stats) <- cl z <- list(stats = stats, n = ng, conf = conf, fence = fence, out = out, group = group, names = names) if(plot) { if(is.null(pars$boxfill) && is.null(args$boxfill)) pars$boxfill <- col do.call("bxp", c(list(z, notch = notch, width = width, varwidth = varwidth, log = log, border = border, pars = pars, outline = outline, horizontal = horizontal, add = add, at = at), args[namedargs])) invisible(z) } else z } adjbox.formula <- function (formula, data = NULL, ..., subset, na.action = NULL) { if(missing(formula) || (length(formula) != 3)) stop("'formula' missing or incorrect") m <- match.call(expand.dots = FALSE) if(is.matrix(eval(m$data, parent.frame()))) m$data <- as.data.frame(data) m$... <- NULL m$na.action <- na.action # force use of default for this method ## require(stats, quietly = TRUE): model.frame m[[1]] <- as.name("model.frame") mf <- eval(m, parent.frame()) response <- attr(attr(mf, "terms"), "response") adjbox(split(mf[[response]], mf[-response]), ...) } ## modeled after boxplot.stats() from R/src/library/grDevices/R/calc.R : adjboxStats <- function(x, coef = 1.5, a = -4, b = 3, do.conf = TRUE, do.out = TRUE, ...) 
{ if(coef < 0) stop("'coef' must not be negative") nna <- !is.na(x) n <- sum(nna)# including +/- Inf stats <- fivenum(x, na.rm = TRUE) iqr <- diff(stats[c(2, 4)]) fence <- rep(NA_real_, 2) if(coef == 0) do.out <- FALSE # no whiskers to be drawn else { ## coef > 0 out <- if (!is.na(iqr)) { medc <- mc(x, ..., na.rm = TRUE) fence <- if (medc >= 0) c(stats[2] - coef * exp(a * medc) * iqr, stats[4] + coef * exp(b * medc) * iqr) else c(stats[2] - coef * exp(-b * medc) * iqr, stats[4] + coef * exp(-a * medc) * iqr) x < fence[1] | fence[2] < x } else !is.finite(x) if (any(out[nna], na.rm = TRUE)) stats[c(1, 5)] <- range(x[!out], na.rm = TRUE) } conf <- if (do.conf) stats[3] + c(-1.58, 1.58) * iqr/sqrt(n) list(stats = stats, n = n, conf = conf, fence = fence, out = if (do.out) x[out & nna] else numeric(0)) } robustbase/R/nlregrob.R0000644000176200001440000004760313266661623014606 0ustar liggesusers#### nlrob.() functions for high breakdown point nlrob() methods ## concept (and original version) from lme4/R/lmer.R getOptfun <- function(optimizer, needArgs = c("fn","par","lower","control")) { if (((is.character(optimizer) && optimizer=="optimx") || deparse(substitute(optimizer))=="optimx") && !("package:optimx") %in% search()) stop(shQuote("optimx")," package must be loaded in order to ", "use ",shQuote('optimizer="optimx"')) optfun <- if (is.character(optimizer)) tryCatch(get(optimizer), error=function(e) NULL) else optimizer if (is.null(optfun)) stop("couldn't find optimizer function ",optimizer ) if (!is.function(optfun)) stop("non-function specified as optimizer") if (any(is.na(match(needArgs, names(formals(optfun)))))) stop("optimizer function must use (at least) formal parameters ", pasteK(sQuote(needArgs))) optfun } ##' Utility for all nlrob.(): Find how and where to get parameter ##' names from, also check lower, upper, and replicate if needed. ##' ##' @param lower possibly unnamed numeric vector ##' @param upper as \code{lower}; both will be replicated to ##' \code{length(pnames)} if that is specified and longer. ##' @param var.nms character vector of which 'pnames' must be a subset of. ##' @param envir if not missing and an \code{\link{environment}: possibly assign ##' 'lower', 'upper' of full length in the environment \code{envir}. .fixupArgs <- function(lower, upper, var.nms, envir) { if(is.null(pnames <- names(lower))) pnames <- names(upper) if(is.null(pnames)) stop("Provide 'upper' or 'lower' with names()") if(any(is.na(match(pnames, var.nms)))) stop("parameter names must appear in 'formula'") hasE <- !missing(envir) && is.environment(envir) npar <- length(pnames) if (npar > 1 && length(lower) == 1) { if(hasE) envir$lower <- rep.int(lower, npar) } else if (length(lower) != npar) stop(gettextf("lower must be either of length %d, or length 1", npar)) if (npar > 1 && length(upper) == 1) { if(hasE) envir$upper <- rep.int(upper, npar) } else if (length(upper) != npar) stop(gettextf("upper must be either of length %d, or length 1", npar)) stopifnot(is.numeric(lower), is.numeric(upper), lower <= upper) pnames } nlrob.MM <- function(formula, data, lower, upper, tol = 1e-6, psi = c("bisquare", "lqq", "optimal", "hampel"), init = c("S", "lts"), ctrl = nlrob.control("MM", psi=psi, init=init, fnscale=NULL, tuning.chi.scale = .psi.conv.cc(psi, .Mchi.tuning.defaults[[psi]]), tuning.psi.M = .psi.conv.cc(psi, .Mpsi.tuning.defaults[[psi]]), optim.control = list(), optArgs = list(...)), ...) 
{ if(missing(ctrl)) { init <- match.arg(init) psi <- match.arg(psi) force(ctrl) # } else { init <- ctrl$ init psi <- ctrl$ psi } c1 <- ctrl$tuning.chi.scale c2 <- ctrl$tuning.psi.M if(is.character(ctrl$optimizer)) { ### TODO } else if(is.function(ctrl$optimizer)) { ### TODO } else stop(gettextf("'%s' must be character string or function, but is \"%s\"", "ctrl$optimizer", class(ctrl$optimizer)), domain=NA) ## Preliminary psi-specific checks / computations: switch(psi, "lqq" = { # lqqMax = rho(Inf), used in rho.inv() *and* 'constant': c12 <- c1[1]+c1[2] lqqMax <- (c1[1]*c1[3] - 2*c12)/(1-c1[3]) + c12}) rho1 <- function(t) Mchi(t, c1, psi) rho2 <- function(t) Mchi(t, c2, psi) rho.inv <- switch(psi, "bisquare" = function(y) { ## Find x := u^2 which solves cubic eq. 3*x - 3*x^2 + x^3 = y ## <==> (x-1)^3 + 1 = y <==> (1-x)^3 = 1-y <==> x = 1 - (1-y)^(1/3) ## (where we assume 0 <= y <= 1, i.e, y-1 < 0) c1 * sqrt(1 - (1 - y)^(1/3)) }, "lqq" = function(y) { uniroot( function(x) rho1(x) - y, lower = 0, upper = lqqMax )$root }, "optimal" = function(y) { ## Salibian-Barrera, Matias, Willems, Gert, and Zamar, Ruben (2008). ## The fast-tau estimator for regression. ## Journal of Computational and Graphical Statistics 17, 659-682. sqrt(y/1.38) * c1 * 3 }, "hampel" = function(y) { C <- MrhoInf(c1, psi) a <- c1[1]; b <- c1[2]; r <- c1[3] if (y <= a/C) sqrt(2*C*y) else if (y <= (2*b - a)/C) 0.5*a + C/a*y else r + sqrt( r^2 - ( (r - b)*(2*C/a*y + (b - a)) - b*r ) ) }, stop(gettextf("Psi function '%s' not supported yet", psi))) M_scale <- function(sigma, u) sum( rho1(u/sigma) )/nobs - 0.5 objective.initial <- switch(init, "lts" = function(par) { ## and (h, formula, data, pnames) y.hat <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) sum(sort.int( (y - y.hat)^2, partial = h )[1:h]) }, "S" = function(par) { ## and (constant, formula, data, pnames) y.hat <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) res <- y - y.hat ## Rousseeuw, Peter J., and Leroy, Annick M. (1987). ## Robust Regression & Outlier Detection. ## John Wiley & Sons, New York, p. 137. med_abs_res <- median(abs(res)) uniroot(M_scale, lower = constant[1L] * med_abs_res, upper = constant[2L] * med_abs_res, u = res )$ root ## == 'sigma' }, stop(gettextf("Initialization 'init = \"%s\"' not supported (yet)", init))) objective.M <- function(par, sigma) { y.hat <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) sum(rho2( (y - y.hat)/sigma )) } ## => psi(.) / wgt(.) for robustness weights is ## Mpsi(x, c2, psi) or Mwgt(x, c2, psi) formula <- as.formula(formula) dataName <- substitute(data) varNames <- all.vars(formula) obsNames <- rownames(data <- as.data.frame(data)) data <- as.list(data)# to be used as such if (length(formula) == 2L) { ## as nls formula[[3L]] <- formula[[2L]] formula[[2L]] <- 0 } npar <- length(pnames <- .fixupArgs(lower, upper, varNames, environment())) ## ^^^^^^^^^ -> possibly changes (lower, upper) in envir. y <- eval(formula[[2L]], data) nobs <- length(y) stopifnot(nobs >= npar) if (is.null(fnscale <- ctrl$ fnscale)) fnscale <- sum((y - mean(y))^2) ctrl$fnscale <- NULL # remove it there stopifnot(is.numeric(fnscale), fnscale > 0) ## is used in M_scale() in any case, and in init-estim. 
if "S" constant <- c( switch(psi, bisquare = 1/c1, lqq = 1/lqqMax, optimal = 1/c1 * 1/3, hampel = 1/c1[3]), if(nobs %% 2) 2/rho.inv(2/(nobs+2)) else 1/rho.inv(1/(nobs+1))) switch(init, lts = h <- (nobs + npar + 1)%/%2) ## FIXME: "optimizer": initial <- do.call(JDEoptim, c(list(lower, upper, objective.initial, tol=tol, fnscale=fnscale), ctrl$optArgs)) names(initial$par) <- pnames res <- y - eval( formula[[3L]], c(data, initial$par) ) med_abs_res <- median(abs(res)) sigma <- uniroot( M_scale, lower = constant[1L] * med_abs_res, upper = constant[2L] * med_abs_res, u = res )$root M <- optim(initial$par, objective.M, sigma = sigma, method = "L-BFGS-B", lower = lower, upper = upper, control = c(list(fnscale = initial$value, parscale = initial$par), ctrl$optim.control), hessian = TRUE) ## 'hessian': experimental - FIXME: eliminate if unused coef <- setNames(M$par, pnames) status <- if (M$convergence == 0) "converged" else if (M$convergence == 1) "maximum number of iterations reached without convergence" else M$message fit <- eval( formula[[3L]], c(data, coef) ) names(fit) <- obsNames structure(list(call = match.call(), formula=formula, nobs=nobs, coefficients = coef, fitted.values = fit, residuals = y - fit, crit = M$value, initial = initial, Scale = sigma, status = status, counts = M$counts, data = dataName, hessian = M$hessian, ctrl=ctrl), class = "nlrob") } ## nlrob.MM nlrob.tau <- function(formula, data, lower, upper, tol = 1e-6, psi = c("bisquare", "optimal"), ctrl = nlrob.control("tau", psi=psi, fnscale=NULL, tuning.chi.scale = NULL, tuning.chi.tau = NULL, optArgs = list(...)), ...) { if(missing(ctrl)) { psi <- match.arg(psi) force(ctrl) # } else { psi <- ctrl$ psi } if(is.null(.chi.s <- ctrl$tuning.chi.scale)) .chi.s <- switch(psi, bisquare = list(b = 0.20, cc = 1.55), optimal = list(b = 0.5, cc = 0.405)) if(is.null(.chi.t <- ctrl$tuning.chi.tau)) .chi.t <- switch(psi, bisquare = list(b = 0.46, cc = 6.04), optimal = list(b = 0.128, cc = 1.060)) b1 <- .chi.s$b c1 <- .chi.s$cc b2 <- .chi.t$b c2 <- .chi.t$cc ## Preliminary psi-specific checks / computations: switch(psi, "bisquare" = { b1 <- b1/MrhoInf(c1, psi) b2 <- b2/MrhoInf(c2, psi) }) rho1 <- function(t) Mchi(t, c1, psi) rho2 <- function(t) Mchi(t, c2, psi) rho.inv <- switch(psi, "bisquare" = function(y) { c1 * sqrt(1 - (1 - y)^(1/3)) }, "optimal" = function(y) { ## Salibian-Barrera, Matias, Willems, Gert, and Zamar, Ruben (2008). ## The fast-tau estimator for regression. ## Journal of Computational and Graphical Statistics 17, 659-682. sqrt(y/1.38) * c1 * 3 }) M_scale <- function(sigma, u) sum( rho1(u/sigma) )/nobs - b1 tau_scale2 <- function(u, sigma) sigma^2 * 1/b2*sum( rho2(u/sigma) )/nobs objective <- function(par) { fit <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) res <- y - fit ## Rousseeuw, Peter J., and Leroy, Annick M. (1987). ## Robust Regression & Outlier Detection. ## John Wiley & Sons, New York, p. 137. med_abs_res <- median(abs(res)) sigma <- uniroot( M_scale, lower = constant[1L] * med_abs_res, upper = constant[2L] * med_abs_res, u = res )$root tau_scale2(res, sigma) } formula <- as.formula(formula) dataName <- substitute(data) varNames <- all.vars(formula) obsNames <- rownames(data <- as.data.frame(data)) data <- as.list(data)# to be used as such if (length(formula) == 2L) { ## as nls formula[[3L]] <- formula[[2L]] formula[[2L]] <- 0 } npar <- length(pnames <- .fixupArgs(lower, upper, varNames, environment())) ## ^^^^^^^^^ -> possibly changes (lower, upper) in envir. 
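    ## Recap of the tau-estimator pieces defined above: 'sigma' is the M-scale
    ## solving  (1/n) * sum_i rho1(r_i/sigma) = b1 , and the criterion minimized
    ## is the squared tau-scale  sigma^2 * (1/(n*b2)) * sum_i rho2(r_i/sigma) .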
y <- eval(formula[[2L]], data) nobs <- length(y) stopifnot(nobs >= npar) if (is.null(fnscale <- ctrl$ fnscale)) fnscale <- mean((y - mean(y))^2) ctrl$fnscale <- NULL # remove it there stopifnot(is.numeric(fnscale), fnscale > 0) constant <- c( switch(psi, bisquare = 1/c1, optimal = 1/c1 * 1/3), if (nobs %% 2) 2/rho.inv(2/(nobs+2)) else 1/rho.inv(1/(nobs+1))) optRes <- do.call(JDEoptim, c(list(lower, upper, objective, tol=tol, fnscale=fnscale), ctrl$optArgs)) iter <- optRes$iter status <- if (optRes$convergence == 0) "converged" else paste("failed to converge in", iter, "steps") coef <- setNames(optRes$par, pnames) fit <- eval( formula[[3L]], c(data, coef) ) names(fit) <- obsNames structure(list(call = match.call(), formula=formula, nobs=nobs, coefficients = coef, fitted.values = fit, residuals = y - fit, crit = optRes$value, Scale = sqrt(optRes$value), status = status, iter = iter, data = dataName, ctrl=ctrl), class = "nlrob") } ## nlrob.tau nlrob.CM <- function(formula, data, lower, upper, tol = 1e-6, psi = c("bisquare", "lqq", "welsh", "optimal", "hampel", "ggw"), ctrl = nlrob.control("CM", psi=psi, fnscale=NULL, tuning.chi = NULL, optArgs = list(...)), ...) { if(missing(ctrl)) { psi <- match.arg(psi) force(ctrl) # } else { psi <- ctrl$ psi } if (is.null(t.chi <- ctrl$tuning.chi)) t.chi <- switch(psi, bisquare = list(b = 0.5, cc = 1, c = 4.835), stop("unable to find constants for psi function")) ## FIXME: b <- t.chi$b ## b = epsilon (in paper) = fraction of outlier ~= breakdown cc <- t.chi$cc ## cc = k; make c <- t.chi$c ## c = the factor in objective c*rho(.) - log(sigma) rho <- function(t) Mchi(t, cc, psi) M_scale <- function(sigma, u) sum( rho(u/sigma) )/nobs - b formula <- as.formula(formula) dataName <- substitute(data) varNames <- all.vars(formula) obsNames <- rownames(data <- as.data.frame(data)) data <- as.list(data)# to be used as such if (length(formula) == 2L) { ## as nls formula[[3L]] <- formula[[2L]] formula[[2L]] <- 0 } npar <- length(pnames <- .fixupArgs(lower,upper, c(varNames,"sigma"),environment())) ## ^^^^^^^^^ -> possibly changes (lower, upper) in envir. 
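    ## Recap of the CM ("constrained M") criterion used below: when "sigma" is a
    ## parameter, minimize  c * (1/n) * sum_i rho(r_i/sigma) + log(sigma)  subject
    ## to the M-scale constraint  (1/n) * sum_i rho(r_i/sigma) - b <= 0 , which is
    ## passed to JDEoptim() via 'constr'.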
if ("sigma" %in% pnames) { if ("sigma" %in% varNames || "sigma" %in% names(data)) stop("As \"sigma\" is in 'pnames', do not use it as variable or parameter name in 'formula'") stopifnot(lower[pnames == "sigma"] >= 0) objective <- function(par) { par <- setNames(par, pnames) fit <- eval( formula[[3L]], c(data, par) ) sigma <- par[["sigma"]] c * sum(rho( (y - fit)/sigma ))/nobs + log(sigma) } con <- function(par) { par <- setNames(par, pnames) fit <- eval( formula[[3L]], c(data, par) ) M_scale(par[["sigma"]], y - fit) } } else { ## hmm, this case *really* is not CM properly objective <- function(par) { fit <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) resid <- y - fit sigma <- mad(resid) c * sum(rho( resid/sigma ))/nobs + log(sigma) } con <- NULL } y <- eval(formula[[2L]], data) nobs <- length(y) stopifnot(nobs >= npar) if (is.null(fnscale <- ctrl$ fnscale)) fnscale <- mean((y - mean(y))^2) ctrl$fnscale <- NULL # remove it there stopifnot(is.numeric(fnscale), fnscale > 0) optRes <- do.call(JDEoptim, c(list(lower, upper, objective, constr=con, tol=tol, fnscale=fnscale), ctrl$optArgs)) iter <- optRes$iter status <- if (optRes$convergence == 0) "converged" else paste("failed to converge in", iter, "steps") coef <- setNames(optRes$par, pnames) fit <- eval( formula[[3L]], c(data, coef) ) names(fit) <- obsNames structure(list(call = match.call(), formula=formula, nobs=nobs, coefficients = coef, fitted.values = fit, residuals = y - fit, crit = optRes$value, status = status, iter = iter, data = dataName, ctrl=ctrl), class = "nlrob") } ## nlrob.CM nlrob.mtl <- function(formula, data, lower, upper, tol = 1e-6, ctrl = nlrob.control("mtl", cutoff = 2.5, optArgs = list(...)), ...) { stopifnot(is.numeric(cutoff <- ctrl[["cutoff"]]), length(cutoff) >= 1) trim <- function(t) { # t = residuals Res, or Res / sigma t <- sort.int(t) i <- which(t >= cutoff) h <- if (length(i) > 0) max(hlow, floor(min( (i - 1)/(2*pnorm(t[i]) - 1) ))) else nobs list(h = h, t = t) } formula <- as.formula(formula) dataName <- substitute(data) varNames <- all.vars(formula) obsNames <- rownames(data <- as.data.frame(data)) data <- as.list(data)# to be used as such if (length(formula) == 2L) { ## as nls formula[[3L]] <- formula[[2L]] formula[[2L]] <- 0 } npar <- length(pnames <- .fixupArgs(lower,upper, c(varNames,"sigma"), environment())) ## ^^^^^^^^^ -> possibly changes (lower, upper) in envir. constant <- log(2*pi) if ("sigma" %in% pnames) { if ("sigma" %in% varNames || "sigma" %in% names(data)) stop("As \"sigma\" is in 'pnames', do not use it as variable or parameter name in 'formula'") stopifnot(lower[pnames == "sigma"] >= 0) objective <- function(par) { par <- setNames(par, pnames) fit <- eval( formula[[3L]], c(data, par) ) sigma <- par[["sigma"]] tp <- trim( abs( (y - fit)/sigma ) ) h <- tp$h h*(constant + 2*log(sigma)) + sum(tp$t[1L:h]^2) } } else { ## hmm... 
this is not really MTL objective <- function(par) { fit <- eval( formula[[3L]], c(data, setNames(par, pnames)) ) resid <- y - fit sigma <- mad(resid) tp <- trim( abs(resid/sigma) ) h <- tp$h h*(constant + 2*log(sigma)) + sum(tp$t[1L:h]^2) } } y <- eval(formula[[2L]], data) nobs <- length(y) stopifnot(nobs >= npar) if (is.null(fnscale <- ctrl$ fnscale)) fnscale <- sum((y - mean(y))^2) ctrl$fnscale <- NULL # remove it there stopifnot(is.numeric(fnscale), fnscale > 0) hlow <- (nobs + npar + 1)%/%2 optRes <- do.call(JDEoptim, c(list(lower, upper, objective, tol=tol, fnscale=fnscale), ctrl$optArgs)) coef <- setNames(optRes$par, pnames) crit <- optRes$value iter <- optRes$iter status <- if (optRes$convergence == 0) "converged" else paste("failed to converge in", iter, "steps") fit <- eval( formula[[3L]], c(data, coef) ) names(fit) <- obsNames resid <- y - fit quan <- trim( resid/(if ("sigma" %in% pnames) coef["sigma"] else mad(resid)) )$h structure(list(call = match.call(), formula=formula, nobs=nobs, coefficients = coef, fitted.values = fit, residuals = resid, crit = crit, quan = quan, status = status, iter = iter, data = dataName, ctrl = ctrl), class = "nlrob") } ## nlrob.mtl nlrob.control <- function(method, psi = c("bisquare", "lqq", "welsh", "optimal", "hampel", "ggw"), init = c("S", "lts"), optimizer = "JDEoptim", optArgs = list(), ...) { psi <- match.arg(psi) init <- match.arg(init) dots <- list(...) argNms <- names(dots) ##' argument or default -> return list of length 1 a. <- function(nm,def) { L <- list( if(nm %in% argNms) dots[[nm]] else def ) names(L) <- nm L } switch(method, "M" = { list(method = method) # not yet used }, "MM" = { c(list(method = method, init = init, psi = psi), a.("fnscale", NULL), a.("tuning.chi.scale", .psi.conv.cc(psi, .Mchi.tuning.defaults[[psi]])), a.("tuning.psi.M", .psi.conv.cc(psi, .Mpsi.tuning.defaults[[psi]])), a.("optim.control", list()), list(optimizer = optimizer, optArgs = optArgs)) }, "tau" = { c(list(method = method, psi = psi), a.("fnscale", NULL), a.("tuning.chi.scale", NULL), a.("tuning.chi.tau", NULL), list(optimizer = optimizer, optArgs = optArgs)) }, "CM" = { c(list(method = method, psi = psi), a.("fnscale", NULL), a.("tuning.chi", NULL), list(optimizer = optimizer, optArgs = optArgs)) }, "mtl" = { c(list(method = method), a.("fnscale", NULL), a.("cutoff", 2.5), list(optimizer = optimizer, optArgs = optArgs)) }, stop("Method ", method, "not correctly supported yet")) } robustbase/R/mc.R0000644000176200001440000000634313762451710013362 0ustar liggesusers## Left Medcouple lmc <- function(x, na.rm = FALSE, ...) { -mc(x[x <= median(x, na.rm = na.rm)], na.rm = na.rm, ...) } ## Right Medcouple rmc <- function(x, na.rm = FALSE, ...) { mc(x[x >= median(x, na.rm = na.rm)], na.rm = na.rm, ...) } ## ## Generic function ## mc <- function (x, ...) ## UseMethod("mc") ## ## Default method (for numeric vectors): ## mc.default <- function(x, na.rm = FALSE, mc <- function(x, na.rm = FALSE, doReflect = (length(x) <= 100) , doScale = TRUE # <- chg default to 'FALSE' ? , eps1 = 1e-14, eps2 = 1e-15 # << new in 0.93-2 (2018-07..) , maxit = 100, trace.lev = 0 , full.result = FALSE ) { x <- as.numeric(x) ina <- is.na(x) if (na.rm) x <- x[!ina] else if (any(ina)) return(NA_real_) ## ==> x is NA-free from here on ## if(length(l.. <- list(...))) ## stop("In mc(): invalid argument(s) : ", ## paste(sQuote(names(l..)), collapse=","), call. 
= FALSE) rr <- mcComp(x, doReflect, doScale=doScale, eps1=eps1, eps2=eps2, maxit=maxit, trace.lev=trace.lev) if(!(conv1 <- rr[["converged"]]) | (doReflect && !(conv2 <- rr[["converged2"]]))) { stop("mc(): not 'converged' ", if(!conv1) paste("in", rr[["iter"]], "iterations"), if(doReflect && !conv2) paste(if(!conv1)" *and*", "'reflect part' in", rr[["iter2"]], "iterations"), "; try enlarging eps1, eps2 !?\n") } m <- if (doReflect) (rr[["medc"]] - rr[["medc2"]]) / 2 else rr[["medc"]] structure(m, mcComp = if(full.result) rr) } ## eps1 = 1e-13, eps2 = eps1 <==> original code which only had 'eps = 1e-13' ## hardcoded in C code. ## These defaults do *not* make sense here, but in mc(). ## However, currently they are used in ../tests/mc-etc.R mcComp <- function(x, doReflect, doScale, eps1, eps2, maxit = 1000, trace.lev = 1) { stopifnot(is.logical(doReflect), length(doReflect) == 1L, !is.na(doReflect), is.logical(doScale), length(doScale) == 1L, !is.na(doScale), is.1num(eps1), eps1 >= 0, is.1num(eps2), eps2 >= 0, length(maxit <- as.integer(maxit)) == 1, length(trace.lev <- as.integer(trace.lev)) == 1 ) ## Assumption [from caller, = mc()]: 'x' has no NAs (but can have +-Inf) x <- as.numeric(x) n <- as.integer(length(x)) eps <- as.double(c(eps1, eps2)) c.iter <- c(maxit, trace.lev) ## NAOK=TRUE: to allow +/- Inf to be passed ans <- .C(mc_C, x, n, eps = eps, iter = c.iter, medc = double(1) , doScale = doScale , NAOK=TRUE)[c("medc", "eps", "iter")] it <- ans[["iter"]] ans[["converged"]] <- it[2] == 1 ans[["iter"]] <- it[1] if (doReflect) { ## also compute on reflected data a2 <- .C(mc_C, -x, n, eps2 = eps, iter2 = c.iter, medc2 = double(1) , doScale = doScale , NAOK=TRUE)[c("medc2", "iter2", "doScale")] it <- a2[["iter2"]] a2[["converged2"]] <- it[2] == 1 a2[["iter2"]] <- it[1] c(ans, a2) } else ans } robustbase/R/lmrobPredict.R0000644000176200001440000001667412221620231015402 0ustar liggesusers# File .../lmrobPredict.R # Part of the R package 'robustbase', http://www.R-project.org # Based on predict.lm (cf. src/library/stats/R/lm.R) # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # http://www.r-project.org/Licenses/ # Note that '# *rob' indicate adjustment for the robust case predict.lmrob <- function(object, newdata, se.fit = FALSE, scale = NULL, df = NULL, # *rob interval = c("none", "confidence", "prediction"), level = .95, type = c("response", "terms"), terms = NULL, na.action = na.pass, pred.var = res.var/weights, weights = 1, ...) 
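## Usage sketch (not run; model and data are only an illustration):
##   m <- lmrob(Y ~ ., data = coleman)
##   predict(m, interval = "prediction")               # columns fit, lwr, upr
##   predict(m, newdata = coleman[1:3, ], se.fit = TRUE)$se.fit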
{ tt <- terms(object) if(!inherits(object, "lmrob") && !inherits(object, "glmrob")) # *rob warning("calling predict.lm() ...") # *rob if(missing(newdata) || is.null(newdata)) { mm <- X <- model.matrix.lm(object) mmDone <- TRUE offset <- object$offset } else { Terms <- delete.response(tt) m <- model.frame(Terms, newdata, na.action = na.action, xlev = object$xlevels) if(!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m) X <- model.matrix(Terms, m, contrasts.arg = object$contrasts) offset <- rep.int(0, nrow(X)) if (!is.null(off.num <- attr(tt, "offset"))) for(i in off.num) offset <- offset + eval(attr(tt, "variables")[[i+1]], newdata) if (!is.null(object$call$offset)) offset <- offset + eval(object$call$offset, newdata) mmDone <- FALSE } n <- length(object$residuals) # NROW(qr(object)$qr) p <- object$rank if(is.null(p)) { # *rob df <- Inf p <- sum(!is.na(coef(object))) piv <- seq_len(p) } else { p1 <- seq_len(p) piv <- if(p) qr(object)$pivot[p1] } if(p < ncol(X) && !(missing(newdata) || is.null(newdata))) warning("prediction from a rank-deficient fit may be misleading") beta <- object$coefficients X.piv <- X[, piv, drop = FALSE] predictor <- drop(X.piv %*% beta[piv]) if (!is.null(offset)) predictor <- predictor + offset interval <- match.arg(interval) if (interval == "prediction") { if (missing(newdata)) { # *rob: this and next if statement are combined warning("Predictions on current data refer to _future_ responses") if (missing(weights)) { w <- weights(object) # *rob if (!is.null(w)) { weights <- w warning("Assuming prediction variance inversely proportional to weights used for fitting") } } } if (!missing(newdata) && missing(weights) && !is.null(object$weights) && missing(pred.var)) warning("Assuming constant prediction variance even though model fit is weighted") if (inherits(weights, "formula")){ if (length(weights) != 2L) stop("'weights' as formula should be one-sided") d <- if(missing(newdata) || is.null(newdata)) model.frame(object) else newdata weights <- eval(weights[[2L]], d, environment(weights)) } }## "prediction" interval type <- match.arg(type) if(se.fit || interval != "none") {# *rob: whole 'then' statement is different df <- object$df.residual res.var <- if (is.null(scale)) object$s^2 else scale^2 ip <- if(type != "terms") diag(X.piv %*% object$cov %*% t(X.piv)) else rep.int(0, n) } if (type == "terms") { ## type == "terms" ------------ if(!mmDone){ mm <- model.matrix.lm(object) # *rob: call of model.matrix.lm # instead of model.matrix mmDone <- TRUE } aa <- attr(mm, "assign") ll <- attr(tt, "term.labels") hasintercept <- attr(tt, "intercept") > 0L if (hasintercept) ll <- c("(Intercept)", ll) aaa <- factor(aa, labels = ll) asgn <- split(order(aa), aaa) if (hasintercept) { asgn$"(Intercept)" <- NULL if(!mmDone){ mm <- model.matrix.lm(object) # *rob: call of model.matrix.lm # instead of model.matrix mmDone <- TRUE } avx <- colMeans(mm) termsconst <- sum(avx[piv] * beta[piv]) } nterms <- length(asgn) if(nterms > 0) { predictor <- matrix(ncol = nterms, nrow = NROW(X)) dimnames(predictor) <- list(rownames(X), names(asgn)) if (se.fit || interval != "none") { ip <- predictor # *rob: just this assignment is needed } if(hasintercept) X <- sweep(X, 2L, avx, check.margin=FALSE) unpiv <- rep.int(0L, NCOL(X)) unpiv[piv] <- p1 for (i in seq.int(1L, nterms, length.out = nterms)) { iipiv <- asgn[[i]] # Columns of X, ith term ii <- unpiv[iipiv] # Corresponding rows of cov iipiv[ii == 0L] <- 0L predictor[, i] <- if(any(iipiv > 0L)) X[, iipiv, drop = FALSE] %*% beta[iipiv] else 
0 if (se.fit || interval != "none"){ ip[, i] <- if(any(iipiv > 0L)){# *rob: next steps modified h.X <- X[, iipiv, drop = FALSE] diag(h.X %*% object$cov[ii, ii] %*% t(h.X)) } else 0 } } if (!is.null(terms)) { predictor <- predictor[, terms, drop = FALSE] if (se.fit) ip <- ip[, terms, drop = FALSE] } } else { # no terms predictor <- ip <- matrix(0, n, 0L) } attr(predictor, 'constant') <- if (hasintercept) termsconst else 0 } ### Now construct elements of the list that will be returned if(interval != "none") { tfrac <- qt((1 - level)/2, df) hwid <- tfrac * switch(interval, confidence = sqrt(ip), prediction = sqrt(ip+pred.var) ) if(type != "terms") { predictor <- cbind(predictor, predictor + hwid %o% c(1, -1)) colnames(predictor) <- c("fit", "lwr", "upr") } else { if (!is.null(terms)) hwid <- hwid[, terms, drop = FALSE] lwr <- predictor + hwid upr <- predictor - hwid } } if(se.fit || interval != "none") { se <- sqrt(ip) if (type == "terms" && !is.null(terms)) se <- se[, terms, drop = FALSE] } if(missing(newdata) && !is.null(na.act <- object$na.action)) { predictor <- napredict(na.act, predictor) if(se.fit) se <- napredict(na.act, se) } if(type == "terms" && interval != "none") { if(missing(newdata) && !is.null(na.act)) { lwr <- napredict(na.act, lwr) upr <- napredict(na.act, upr) } list(fit = predictor, se.fit = se, lwr = lwr, upr = upr, df = df, residual.scale = sqrt(res.var)) } else if (se.fit) list(fit = predictor, se.fit = se, df = df, residual.scale = sqrt(res.var)) else predictor } robustbase/R/huber.R0000644000176200001440000000570313175561506014072 0ustar liggesusers ## A modified "safe" (and more general) Huber estimator: huberM <- function(x, k = 1.5, weights = NULL, tol = 1e-06, mu = if(is.null(weights)) median(x) else wgt.himedian(x, weights), s = if(is.null(weights)) mad(x, center=mu) else wgt.himedian(abs(x - mu), weights), se = FALSE, warn0scale = getOption("verbose")) { ## Author: Martin Maechler, Date: 6 Jan 2003, ff ## implicit 'na.rm = TRUE': if(any(i <- is.na(x))) { x <- x[!i] if(!is.null(weights)) weights <- weights[!i] } n <- length(x) sum.w <- if(!is.null(weights)) { stopifnot(is.numeric(weights), weights >= 0, length(weights) == n) sum(weights) } else n it <- 0L NA. <- NA_real_ if(sum.w == 0) # e.g 'x' was all NA return(list(mu = NA., s = NA., it = it, se = NA.)) # instead of error if(se && !is.null(weights)) stop("Std.error computation not yet available for the case of 'weights'") if (s <= 0) { if(s < 0) stop("negative scale 's'") if(warn0scale && n > 1) warning("scale 's' is zero -- returning initial 'mu'") } else { wsum <- if(is.null(weights)) sum else function(u) sum(u * weights) repeat { it <- it + 1L y <- pmin(pmax(mu - k * s, x), mu + k * s) mu1 <- wsum(y) / sum.w if (abs(mu - mu1) < tol * s) break mu <- mu1 } } list(mu = mu, s = s, it = it, SE = if(se) s * sqrt(tauHuber(x, mu=mu, s=s, k=k) / n) else NA.) } ## this is a compatible improvement of MASS' huber() : ## 1) returning median() if mad()=0 ## 2) " NA when y has only NAs (or length 0) if(FALSE) huber <- function (y, k = 1.5, tol = 1e-06) { y <- y[!is.na(y)] n <- length(y) if(n == 0) # e.g 'y' was all na return(list(mu = NA, s = NA))# instead of error mu <- median(y) s <- mad(y) if (s == 0) { # FIXME? 
make this warning optional if(n > 1) warning("scale MAD is zero for this sample") } else repeat { yy <- pmin(pmax(mu - k * s, y), mu + k * s) mu1 <- sum(yy)/n if (abs(mu - mu1) < tol * s) break mu <- mu1 } list(mu = mu, s = s) } ## Originally from /u/ftp/NDK/Source-NDK-9/R/rg2-fkt.R : tauHuber <- function(x, mu, k=1.5, s = mad(x), resid = (x - mu)/s) { ## Purpose: Correction factor Tau for the variance of Huber-M-Estimators ## ------------------------------------------------------------------------- ## Arguments: x = data, mu = location, k = tuning parameter of Huber Psi-function ## ------------------------------------------------------------------------- ## Author: Rene Locher Update: R. Frisullo 23.4.02; M.Maechler (as.log(); s, resid) inr <- abs(resid) <= k ## psi <- ifelse(inr, resid, sign(resid)*k) # psi (x) -- more efficiently: psi <- resid; out <- which(!inr); if(length(out)) psi[out] <- sign(resid[out]) * k psiP <- as.logical(inr)# = ifelse(abs(resid) <= k, 1, 0) # psi'(x) length(x) * sum(psi^2) / sum(psiP)^2 } robustbase/R/MTestimador2.R0000644000176200001440000003657212553432042015276 0ustar liggesusers##-*- mode: R; kept-new-versions: 50; kept-old-versions: 50 -*- #### MT Estimators: [M]-Estimators based on [T]ransformations #### ------------- Valdora & Yohai (2013) ##' Defining the spline to compute the center of the rho function ##' @title Provide mu(lambda) as spline function ##' @param cw tuning parameter for rho ##' @return a function, resulting from \code{\link{splinefun}} ##' @author Victor Yohai; many changes: Martin Maechler mk.m_rho <- function(cw, opt.method = c("L-BFGS-B", "Brent", "Nelder-Mead", "BFGS", "CG", "SANN"), ##optim(): method = c("Nelder-Mead", "BFGS", "CG", "L-BFGS-B", "SANN", "Brent"), ## MM: 'Brent' seems best overall lambda = c(seq(0,2.9, by=0.1), seq(3,100)), reltol = sqrt(.Machine$double.eps), trace = 0, sFile = paste0("MTesSpl_", format(cw), ".rda"), recompute = getOption("robustbase:m_rho_recompute", FALSE)) { ## FIXME: Solution without files, but rather cache inside an environment ## ------ For the default cw, cache even in robustbase namespace! ## Instead of saving splinefun() ... just save (lambda, mm.la), it is much smaller if(recompute) { useFile <- FALSE } else { useFile <- file.exists(sFile) if (useFile) { ## load the spline load(sFile)#-> 'm.approx' ## check if its cw was very close to this one: if(cw.ok <- is.numeric(cw0 <- environment(m.approx)$cw)) cw.ok <- (abs(cw - cw0) < 0.001) } } if(!useFile || !cw.ok) { nl <- length(lambda) mm.la <- numeric(nl) s.la <- sqrt(lambda) ## MM: Speedwise, Brent > L-BFGS-B > BFGS > .. for cw >= ~ 1.5 ## L-BFGS-B > Brent for cw ~= 1 opt.method <- match.arg(opt.method) oCtrl <- list(reltol=reltol, trace=trace) if(opt.method %in% c("Brent", "L-BFGS-B")) { ## use bounds if(opt.method == "L-BFGS-B")# yuck! why is this necessary!! 
oCtrl <- list(factr = 1/(10*reltol), trace=trace) for(i in seq_len(nl)) mm.la[i] <- optim(s.la[i], espRho, lam = lambda[i], cw = cw, method = opt.method, control = oCtrl, lower = 0, upper = .01 + 2*s.la[i])$par } else { for(i in seq_len(nl)) mm.la[i] <- optim(s.la[i], espRho, lam = lambda[i], cw = cw, method = opt.method, control = oCtrl)$par } m.approx <- splinefun(lambda, mm.la, method = "monoH.FC") e <- environment(m.approx) assign("lambda.max", max(lambda), envir=e) assign("cw", cw, envir=e) save(m.approx, file = sFile) } m.approx } ## result 'm.approx' will be used in mm(.), and "everywhere" below ####################################################### ##' Tukey's Bisquare (aka "biweight") rho function: rho~() = rho scaled to have rho(Inf) = 1 rho <- function(x,cw) pmin(1, 1 - (1-(x/cw)^2)^3) ## faster: rho <- function(x,cw) Mchi(x, cc=cw, psi="tukey") ## NB: in sumaConPesos(), mm(.), ... we make use of the fact that rho(Inf) = 1 psi <- function(x,cw, deriv=0) Mpsi(x, cc=cw, psi="tukey", deriv=deriv) espRho <- function(lam, xx, cw) { ## compute E := E_lambda [ rho_{cw}( sqrt(Y)-xx ) ], given (lambda, xx, cw) ## for Y ~ Pois(lambda) ; rho(.) = Tukey's Bisquare ## ==> E = \sum_{k=0}^\infty rho( sqrt(k)-xx, .) * dpois(k, .) k <- seq(as.integer((max(0,xx-cw))^2), as.integer((xx+cw)^2)+1L) inner <- (rhoS.k <- rho(sqrt(k)-xx, cw)) < 1 ii <- k[inner] terminos <- rhoS.k[inner] * dpois(ii,lam) if((len.ii <- length(ii)) > 0) { primero <- ii[1] ultimo <- ii[len.ii] ppois(primero-1,lam) + sum(terminos) + ppois(ultimo,lam, lower.tail=FALSE) } else 1 } ################################################# ##' @title Compute m(lambda) := the value of x minimizing espRho(lambda, x, cw) ##' @param lam numeric vector of non-negative values \lambda ##' @param m.approx the spline function to be used for "small" lambda, from mk.m_rho() ##' @return mm <- function(lam, m.approx) { la.max <- environment(m.approx)$lambda.max z <- ((m <- lam) <= la.max) m[z] <- m.approx(lam[z]) if(any(i <- !z)) m[i] <- sqrt(lam[i]) m } ############################################################################### ##' @title Compute the loss function for MT-glmrob() ##' @param beta beta (p - vector) ##' @param x design matrix (n x p) ##' @param y (Poisson) response (n - vector) ##' @param cw tuning parameter 'c' for rho_c(.) ##' @param w weight vector (length n) ##' @param m.approx the spline for the inner part of m_c(.) ##' @return \sum_{i=1}^n w_i \rho(\sqrt(y_i) - m( g(x_i ` \beta) ) ) ##' where g(.) = exp(.) for the Poisson family sumaConPesos <- function(beta,x,y,w, cw, m.approx) { eta <- x %*% beta s <- rho(sqrt(y) - mm(exp(eta), m.approx), cw) sum(s*w) } ############################################################################### beta0IniCP <- function(x,y,cw,w, m.approx, nsubm, trace.lev = 1) { ## computes the initial estimate using subsampling with concentration step stopifnot(is.matrix(x), (nsubm <- as.integer(nsubm)) >= 1) p <- ncol(x) n <- nrow(x) s2.best <- Inf; b.best <- rep(NA_real_, p) kk <- 0 for(l in 1:nsubm) { if(trace.lev) { if(trace.lev > 1) cat(sprintf("%3d:",l)) else cat(".", if(l %% 50 == 0) paste0(" ",l,"\n")) } i.sub <- sample(n, p) estim0 <- as.vector( betaExacto(x[i.sub,], y[i.sub]) ) if(any(is.na(estim0))) ## do not use it next eta <- as.vector(x %*% estim0) ## adev := abs( 1/2 * dev.residuals(.) 
) ## y+(y==0) : log(0) |-> -Inf fine; but if eta == -Inf, we'd get NaN adev <- abs(y*(log(y+(y == 0)) - eta) - (y-exp(eta))) ## poisson()'s dev.resids(): 2 * wt * (y * log(ifelse(y == 0, 1, y/mu)) - (y - mu)) ## == 2*wt* (y * ifelse(y == 0, 0, log(y) - log(mu)) - (y - mu)) ## == 2*wt* ifelse(y == 0, mu, y*(log(y) - log(mu)) - (y - mu)) ## where mu <- exp(eta) if(trace.lev > 1) cat(sprintf(" D=%11.7g ", sum(adev))) half <- ceiling(n/2) srt.d <- sort(adev, partial=half) podador <- adev <= srt.d[half] # those smaller-equal than lo-median(.) xPod <- x[podador,] yPod <- y[podador] length(xPod) + length(yPod) # codetools fitE <- tryCatch(glm(yPod ~ xPod-1, family = poisson()), error = function(e)e) if(inherits(fitE, "error")) { message("glm(.) {inner subsample} error: ", fitE$message) if(trace.lev > 1) cat("\n") ## s2[l] <- Inf } else { ## glm() call succeeded betapod <- as.vector( fitE$coefficients ) if(any(is.na(betapod))) ## do not use it next kk <- kk+1 s2 <- sumaConPesos(betapod, x=x, y=y, w=w, cw=cw, m.approx=m.approx) ## estim.ini[l,] <- betapod if(trace.lev > 1) cat(sprintf("s2=%14.9g", s2)) if(s2 < s2.best) { if(trace.lev > 1) cat(" New best!\n") b.best <- betapod s2.best <- s2 } else if(trace.lev > 1) cat("\n") } } ## s0 <- order(s2) ## beta0ini <- estim.ini[s0[1],] list(beta = b.best, nOksamples = kk, s2 = s2.best) }## beta0IniCP() ##################################################################### betaExacto <- function(x,y) { ## to each subsample assign the maximum likelihood estimator and ## fixing the case mle has NA components p <- ncol(x) fitE <- tryCatch(glm.fit(x=x, y=y, family = poisson()), ## TODO , weights = weights, offset = offset error = function(e)e) if(inherits(fitE, "error")) { message("betaExacto glm(.) error: ", fitE$message) return(rep(NA_real_, p)) } ## else -- glm() succeeded ## if_needed_-- MM finds it unneeded ## beta. <- fitE $ coefficients ## sinNas <- na.exclude(beta.) ## long <- length(sinNas) ## lugaresNas <- na.action(sinNas)[1:(p-long)] ## beta.SinNas <- beta. ## beta.SinNas[lugaresNas] <- 0 ## beta.SinNas fitE $ coefficients } ###--- Utilities for Asymptotic Covariance Matrix ----------------- ##' computes the First Derivative of mm() mmd <- function(lam,cw, m.approx) { qq1 <- qpois(.001,lam) qq2 <- qpois(.999,lam) ind <- qq1:qq2 k.. <- sqrt(ind) - mm(lam, m.approx) dP <- dpois(ind,lam) rr1 <- (-dP+(ind*dP/lam)) * psi(k..,cw) rr2 <- dP*psi(k..,cw, deriv=1) rr1 <- sum(rr1) rr2 <- sum(rr2) list(ind=ind, rr1=rr1, rr2=rr2, d = rr1/rr2) } ##' computes the Second Derivative of mm() mmdd <- function(lam,cw, m.approx) { out <- mmd(lam,cw, m.approx) ## FIXME: can reuse even more from mmd() ! ind <- out[["ind"]] NUM <- out[[2]] DEN <- out[[3]] mm1 <- out[["d"]] ## = mm'(.) k.. 
<- sqrt(ind) - mm(lam, m.approx) dP <- dpois(ind,lam) NUMP <- ddpois(ind,lam) * psi(k..,cw) - (-dP+(ind*dP/lam))* psi(k..,cw, deriv=1) * mm1 DENP <- (-dP+(ind*dP/lam)) * psi(k..,cw, deriv=1) - dP*psi(k..,cw, deriv=2) * mm1 NUMP <- sum(NUMP) DENP <- sum(DENP) (NUMP*DEN - DENP*NUM) / DEN^2 } ############################################################### ddpois <- function(x,lam) { ## The second derivative of the Poisson probability function dpois(x,lam)*(1-(2*x/lam)+((x^2)/(lam^2))-(x/(lam^2))) } ##' Compute asymptotic covariance matrix of the MT estimator covasin <- function(x,y,beta,cw, m.approx,w) { p <- ncol(x) n <- length(y) mm1 <- mm2 <- numeric(n) de <- nu <- matrix(0,p,p) lam <- x%*%beta elam <- exp(lam) r <- sqrt(y) - mm(elam, m.approx) psi0 <- psi(r,cw) psi1 <- psi(r,cw, deriv=1) for ( i in 1:n) { ## FIXME: Make more efficient!! {mmd is used in mmdd()) mm1[i] <- mmd (elam[i], cw, m.approx)[[4]] mm2[i] <- mmdd(elam[i], cw, m.approx) } nu1 <- w*psi0*mm1*elam de1 <- -psi1*(mm1^2)*(elam^2)+psi0*mm2*(elam^2)+psi0*mm1*elam de1 <- w*de1 for (i in 1:n) { ## FIXME (?) -- can be vectorized zzt <- tcrossprod(x[i,]) nu <- nu+ (nu1[i]^2)*zzt de <- de+ de1[i]*zzt } nu <- nu/n de <- solve(de/n) ## Cov_{asympt.} = de %*% nu %*% t(de) / n } ## cw = 2.1, nsubm = 500, maxitOpt = 200, tolOpt = 1e-6, glmrobMT.control <- function(cw = 2.1, nsubm = 500, acc = 1e-06, maxit = 200) { if (!is.numeric(acc) || acc <= 0) stop("value of acc must be > 0") ## if (test.acc != "coef") ## stop("Only 'test.acc = \"coef\"' is currently implemented") ## if (!(any(test.vec == c("coef", "resid")))) ## stop("invalid argument for test.acc") if (!is.numeric(nsubm) || nsubm <= 0) stop("number of subsamples must be > 0") if (!is.numeric(maxit) || maxit <= 0) stop("maximum number of iterations must be > 0") if (!is.numeric(cw) || cw <= 0) stop("value of the tuning constant c (cw) must be > 0") list(cw=cw, nsubm=nsubm, acc=acc, maxit=maxit) } ################################################################################### ##' @param intercept logical, if true, x[,] has an intercept column which should ##' not be used for rob.wts glmrobMT <- function(x,y, weights = NULL, start = NULL, offset = NULL, family = poisson(), weights.on.x = "none", control = glmrobMT.control(...), intercept = TRUE, trace.lev = 1, ...) { ## MAINFUNCTION Computes the MT or WMT estimator for Poisson regression with intercept starting from the estimator computed in the function ## beta0IniC. ## INPUT ## x design matrix with nrows and p columns. ## y respone vector of length n ## cw tuning constant. Default value 2.1 ## iweigths indicator for weights penalizing high leverage points, iweights=1 indicates to use weights iweights=0 ## indicate notto use way. Default value is iw=0, Our simulation study suggests not to use weights. ## nsubm Number of subsamples. 
Default calue nsubm=500 ## OUTPUT ##$initial is the inital estimate (first component is the intercept) ##$final is the final estimate (first component is the intercept) ##$nsamples is the number of well conditioned subsamples ## REQUIRED PACKAGES: tools, rrcov stopifnot(is.numeric(cw <- control$cw), cw > 0, is.numeric(nsubm <- control$nsubm)) if(family$family != "poisson") stop("Currently, only family 'poisson' is supported for the \"MT\" estimator") n <- nrow(x) p <- ncol(x) if (is.null(weights)) weights <- rep.int(1, n) else if(any(weights <= 0)) stop("All weights must be positive") if(!is.null(offset)) stop("non-trivial 'offset' is not yet implemented") ## if (is.null(offset)) ## offset <- rep.int(0, n) else if(!all(offset==0)) ## warning("'offset' not fully implemented") linkinv <- family$linkinv variance <- family$variance ## Copy-paste from ./glmrobMqle.R [overkill currently: Poisson has sni == ni == 1] ni <- as.vector(weights) sni <- sqrt(ni) comp.V.resid <- expression({ Vmu <- variance(mu) if (any(is.na(Vmu))) stop("NAs in V(mu)") if (any(Vmu == 0)) stop("0s in V(mu)") sVF <- sqrt(Vmu) # square root of variance function residP <- (y - mu)* sni/sVF # Pearson residuals }) m.approx <- mk.m_rho(cw) w <- robXweights(weights.on.x, x, intercept=intercept) if(is.null(start)) { if(trace.lev) cat("Computing initial estimate with ", nsubm, " sub samples:\n") out <- beta0IniCP(x, y, cw = cw, w = w, m.approx = m.approx, nsubm = nsubm, trace.lev = trace.lev) start <- out[[1]] } else { ## user provided start: if(!is.numeric(start) || length(start) != p) stop(gettextf("'start' must be an initial estimate of beta, of length %d", p), domain=NA) } oCtrl <- list(trace = trace.lev, maxit = control$maxit, ## "L-BFGS-B" specific lmm = 9, factr = 1/(10*control$acc)) if(trace.lev) cat("Optim()izing sumaConPesos()\n") ### FIXME: quite slow convergence e.g. for the Possum data ( ../tests/glmrob-1.R ) ### ----- maybe improve by providing gradient ?? estim2 <- optim(start, sumaConPesos, method = "L-BFGS-B", x = x, y = y, w = w, cw = cw, m.approx = m.approx, control = oCtrl) o.counts <- estim2$counts if(estim2$convergence) ## there was a problem warning("optim(.) non-convergence: ", estim2$convergence, if(nzchar(estim2$message)) paste0("\n", estim2$message)) beta <- estim2$par cov <- covasin(x,y, beta=beta, cw=cw, m.approx=m.approx, w=w) eta <- as.vector(x %*% beta) # + offset mu <- linkinv(eta) eval(comp.V.resid)#-> residP ==(here!) 
== residPS ## As sumaConPesos() computes ## eta <- x %*% beta ## s <- rho(sqrt(y) - mm(exp(eta), m.approx), cw) ## sum(s*w) ## we could say that "psi(x) / x" -- weights would be w.r <- Mwgt(sqrt(y) - mm(exp(eta), m.approx), cw, psi="tukey") names(mu) <- names(eta) <- names(residP) # re-add after computation names(beta) <- names(start) <- nmB <- colnames(x) ## maybe: dimnames(cov) <- list(nmB, nmB) list(coefficients = beta, initial = start, family = poisson(), # <- only case for now coefficients = beta, residuals = residP, # s.resid = residPS, fitted.values = mu, linear.predictors = eta, cov = cov, nsubm = nsubm, "nOksub" = out[[2]], converged = (estim2$convergence == 0), iter = o.counts[[1]], optim.counts = o.counts, optim.control = oCtrl, cw=cw, weights.on.x=weights.on.x, w.x = w, w.r = w.r) } robustbase/R/glmrobMqle-DQD.R0000644000176200001440000001276412150222661015465 0ustar liggesusers#### Quasi-Deviance Differences --- for Model Selection #### --------------------------------------------------- -> ./anova-glmrob.R ## MM: These function names are really too long ## but then, they are hidden in the name space ... ## (Maybe it would be nice to do this as one function with "family" .. ) glmrobMqleDiffQuasiDevB <- function(mu, mu0, y, ni, w.x, phi, tcc) { ## f.cnui <- function(u, y, ni, tcc) { pr <- u/ni Vmu <- pr * (1 - pr) ## = binomial()$variance residP <- (y-pr)*sqrt(ni/Vmu) ## First part: nui nui <- pmax.int(-tcc, pmin.int(tcc, residP)) ## Second part: Enui H <- floor(u - tcc*sqrt(ni*Vmu)) K <- floor(u + tcc*sqrt(ni*Vmu)) ## Actually, floor is not needed because pbinom() can cope ## with noninteger values in the argument q! ## what follows is similar to glmrob.Mqle.EpsiB except a ## different vectorisation h1 <- (if(ni == 1) as.numeric(- (H < 0) + (K >= 1) ) * sqrt(Vmu) else (pbinom(K-1,1,pr) - pbinom(H-1,ni-1,pr) - pbinom(K,ni,pr) + pbinom(H,ni,pr)) * pr * sqrt(ni/Vmu)) ## pmax was needed to get numeric returns from pbinom Enui <- (tcc*(1 - pbinom(K,ni,pr) - pbinom(H,ni,pr)) + h1) return((nui - Enui) / sqrt(ni*Vmu)) } ## f.cnui() nobs <- length(mu) stopifnot(nobs > 0) QMi <- numeric(nobs) ## Numerical integrations for(i in 1:nobs) QMi[i] <- integrate(f.cnui, y = y[i], ni = ni[i], tcc = tcc, subdivisions = 200, lower = mu[i]*ni[i], upper = mu0[i]*ni[i])$value ## robust quasi-deviance ## -2*(sum(QMi1)-sum(QMi2)) ## Andreas' interpretation of (4) and (5) ## -2*(sum(QMi1)-sum(QMi2)/nobs) ## Eva's interpretation of (4) and (5) ## According to Andreas' interpretation -2*sum(QMi*w.x) } ## glmrobMqleDiffQuasiDevB glmrobMqleDiffQuasiDevPois <- function(mu, mu0, y, ni, w.x, phi, tcc) { ## f.cnui <- function(u, y, ni, tcc) { Vmu <- u ## = poisson()$variance residP <- (y-u)/sqrt(Vmu) ## First part: nui nui <- pmax.int(-tcc, pmin.int(tcc, residP)) ## Second part: Enui H <- floor(u - tcc*sqrt(Vmu)) K <- floor(u + tcc*sqrt(Vmu)) ## what follows is similar to Epsipois except a ## different vectorisation h1 <- u/sqrt(Vmu)*(dpois(H,u)- dpois(K,u)) Enui <- tcc*(1 - ppois(K,u) - ppois(H,u)) + h1 return((nui - Enui) / sqrt(Vmu)) } nobs <- length(mu) stopifnot(nobs > 0) QMi <- numeric(nobs) ## Numerical integrations for(i in 1:nobs) QMi[i] <- integrate(f.cnui, y = y[i], ni = ni[i], tcc = tcc, lower = mu[i], upper = mu0[i])$value ## robust quasi-deviance ## -2*(sum(QMi1)-sum(QMi2)) ## Andreas' interpretation of (4) and (5) ## -2*(sum(QMi1)-sum(QMi2)/nobs) ## Eva's interpretation of (4) and (5) ## According to Andreas' interpretation -2*sum(QMi*w.x) }## glmrobMqleDiffQuasiDevPois 
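## Sketch of how these robust quasi-deviance differences are used -- via
## anova() on "glmrob" fits, see ./anova-glmrob.R.  Not run; the simulated
## data and the chosen test are only an illustration:
if(FALSE) {
    set.seed(1)
    dd <- data.frame(x1 = rnorm(100), x2 = rnorm(100))
    dd$y <- rpois(100, lambda = exp(0.5 + 0.4 * dd$x1))
    m1 <- glmrob(y ~ x1 + x2, family = poisson, data = dd) # default "Mqle" fit
    m0 <- glmrob(y ~ x1,      family = poisson, data = dd)
    anova(m0, m1, test = "QD")  # robust quasi-deviance difference test
}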
glmrobMqleDiffQuasiDevGamma <- function(mu, mu0, y, ni, w.x, phi, tcc, variant = c("V1", "Eva1", "Andreas1")) { ## Notation similar to the discrete case (Cantoni & Ronchetti, 2001) f.cnui <- function(u, y, ni, phi, tcc) { s.ph <- sqrt(phi) ## First part: nui sV <- s.ph * u ## = sqrt(dispersion * Gamma()$variance) residP <- (y-u)/sV nui <- pmax.int(-tcc, pmin.int(tcc, residP)) ## Second part: Enui ## what follows is similar to glmrob.Mqle.Epsipois except a ## different vectorisation nu <- 1/phi ## form parameter nu snu <- 1/s.ph ## sqrt (nu) pPtmc <- pgamma(snu - tcc, shape=nu, rate=snu) pPtpc <- pgamma(snu + tcc, shape=nu, rate=snu) Enui <- tcc*(1-pPtpc-pPtmc) + Gmn(-tcc,nu) - Gmn( tcc,nu) ( nui/sV - Enui/u*s.ph ) } f.cnui1 <- function(u, y, ni, phi, tcc) { ## First part: nui sV <- sqrt(phi) * u ## = sqrt(dispersion * Gamma()$variance) residP <- (y-u)/sV nui <- pmax.int(-tcc, pmin.int(tcc, residP)) (nui / sV) } f.cnui2 <- function(u, y, ni, phi, tcc) { ## First part: nui s.ph <- sqrt(phi) sV <- s.ph * u ## = sqrt(dispersion * Gamma()$variance) snu <- 1/s.ph ## sqrt (nu) ## Second part: Enui ## what follows is similar to EpsiGamma except a ## different vectorisation nu <- 1/phi ## form parameter nu pPtmc <- pgamma(snu - tcc, shape=nu, rate=snu) pPtpc <- pgamma(snu + tcc, shape=nu, rate=snu) Enui <- tcc*(1-pPtpc-pPtmc) + Gmn(-tcc,nu) - Gmn( tcc,nu) return(Enui/u * s.ph) } nobs <- length(mu) stopifnot(nobs > 0) variant <- match.arg(variant) ## robust quasi-deviance if(variant == "V1") { QMi <- numeric(nobs) ## Numerical integrations for(i in 1:nobs) QMi[i] <- integrate(f.cnui, y = y[i], ni = ni[i], phi=phi, tcc = tcc, lower = mu[i], upper = mu0[i])$value -2*sum(QMi*w.x) } else { ## "Eva1" or "Andreas1"; Using two terms QMi1 <- QMi2 <- numeric(nobs) for(i in 1:nobs) QMi1[i] <- integrate(f.cnui1, y = y[i], ni = ni[i], phi=phi, tcc = tcc, lower = mu[i], upper = mu0[i])$value for(i in 1:nobs) QM2i[i] <- integrate(f.cnui2, y = y[i], ni = ni[i], phi=phi, tcc = tcc, lower = mu[i], upper = mu0[i])$value if(variant == "Eva1") { ## Eva Cantoni's interpretation of (4) and (5) -2*(sum(QMi1)-sum(QMi2)/nobs) } else if (variant == "Andreas1") { ## Andreas' interpretation of (4) and (5) -2*(sum(QMi1)-sum(QMi2)) } else stop("invalid 'variant': ", variant) } } robustbase/R/nlrob.R0000644000176200001440000004656413266661732014116 0ustar liggesusers## Directly use nls()-internals, i.e., its 'm', to get a next 'start' (coef-like list): ## (In principle useful also outside robustbase) .nls.get.start <- function(nls.m) { ## stopifnot(is.list(nls.m), is.function(gg <- nls.m$getPars), ## is.environment(em <- environment(gg))) stopifnot(is.list(nls.m), is.environment(em <- environment(nls.m$getPars))) mget(names(em$ind), em$env) } nlrob <- function (formula, data, start, lower, upper, weights = NULL, na.action = na.fail, method = c("M", "MM", "tau", "CM", "mtl"), psi = .Mwgt.psi1("huber", cc=1.345), scale = NULL, test.vec = c("resid", "coef", "w"), maxit = 20, tol = 1e-06, acc, algorithm = "default", doCov = FALSE, model = FALSE, control = if(method == "M") nls.control() else nlrob.control(method, optArgs = list(trace=trace), ...), trace = FALSE, ...) { ## Purpose: ## Robust fitting of nonlinear regression models. The fitting is ## done by iterated reweighted least squares (IWLS) as in rlm() of ## the package MASS. In addition, see also 'nls'. 
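    ## A small usage sketch (not run; start values are only indicative):
    ##   DNase1 <- DNase[DNase$Run == 1, ]
    ##   fm <- nlrob(density ~ Asym/(1 + exp((xmid - log(conc))/scal)),
    ##               data = DNase1, start = list(Asym = 3, xmid = 0, scal = 1))
    ##   summary(fm)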
## ## --> see the help file, ?nlrob (or ../man/nlrob.Rd in the source) ## ------------------------------------------------------------------------- ##- some checks mf <- call <- match.call() # << and more as in nls() formula <- as.formula(formula) if (length(formula) != 3) stop("'formula' should be a formula of the type 'y ~ f(x, alpha)'") ## Had 'acc'; now use 'tol' which is more universal; 'acc' should work for a while if(!missing(acc) && is.numeric(acc)) { if(!missing(tol)) stop("specifying both 'acc' and 'tol' is invalid") tol <- acc message("The argument 'acc' has been renamed to 'tol'; do adapt your code.") } method <- match.arg(method) dataName <- substitute(data) hasWgts <- !missing(weights) # not eval()ing ! ## we don't really need 'start' for non-"M" methods, but for the following logic, ## Want 'dataClasses' -> need 'mf' --> 'varNames' -> 'pnames' -> 'start' : varNames <- all.vars(formula) var.nms <- c(varNames, if(method %in% c("CM", "mtl")) "sigma") # <--> "sigma" part of 'pnames' ## FIXME: nls() allows a missing 'start'; we allow *iff* lower | upper has names: if(missing(start) && (!missing(lower) || !missing(upper))) pnames <- .fixupArgs(lower, upper, var.nms) else if(length(pnames <- names(start)) != length(start)) stop("'start' or 'lower' or 'upper' must be fully named (list or numeric vector)") else if(any(is.na(match(pnames, var.nms)))) # check also in .fixupArgs() stop("parameter names must appear in 'formula'") ## If it is a parameter it is not a variable varNames <- varNames[is.na(match(varNames, pnames))] ## do now: need 'dataClasses', hence the model.frame 'mf' for all 'method' cases obsNames <- rownames(data <- as.data.frame(data)) ## From nls: using model.weights() e.g. when formula 'weights = sqrt()' mf$formula <- # replace by one-sided linear model formula as.formula(paste("~", paste(varNames, collapse = "+")), env = environment(formula)) mf[c("start", "lower", "upper", "method", "psi", "scale", "test.vec", "maxit", "tol", "acc", "algorithm", "doCov", "model", "control", "trace")] <- NULL mf[[1L]] <- quote(stats::model.frame) mf <- eval.parent(mf) dataCl <- attr(attr(mf, "terms"), "dataClasses") ## mf <- as.list(mf) if(method != "M") { if(hasWgts) ## FIXME .. should not be hard, e.g. 
for MM stop("specifying 'weights' is not yet supported for method ", method) if(!missing(psi)) warning(gettextf("For method = \"%s\", currently 'psi' must be specified via 'control'", method), domain=NA) ## lifted from Martin's 'sfsmisc' package : missingCh <- function(x, envir = parent.frame()) { eval(substitute(missing(VAR), list(VAR=as.name(x))), envir = envir) } aNms <- c("start", "na.action", "test.vec", "maxit", "algorithm", "doCov") not.missA <- !vapply(aNms, missingCh, NA, envir=environment()) if(any(not.missA)) { warning(sprintf(ngettext(sum(not.missA), "For method = \"%s\", argument %s is not made use of", "For method = \"%s\", arguments %s are not made use of"), method, pasteK(sQuote(aNms[not.missA]))), domain=NA) } force(control) fixAns <- function(mod) { mod$call <- call # replace the nlrob.() one mod$data <- dataName # (ditto) ctrl <- mod$ctrl if(is.character(psi <- ctrl$psi) && is.numeric(cc <- ctrl$tuning.psi.M)) {# MM: psi <- .Mwgt.psi1(psi, cc=cc) res.sc <- with(mod, residuals/Scale) mod$psi <- psi mod$w <- # as we have no 'weights' yet mod$rweights <- psi(res.sc) } ## else mod$rweights <- mod$psi <- NULL mod$dataClasses <- dataCl if(model) mod$model <- mf mod } ## {fixAns} ## switch(method, ## source for these is all in >>> nlregrob.R <<< "MM" = { return(fixAns(nlrob.MM (formula, data, lower=lower, upper=upper, tol=tol, ctrl= control))) }, "tau" = { return(fixAns(nlrob.tau(formula, data, lower=lower, upper=upper, tol=tol, ctrl= control))) }, "CM" = { return(fixAns(nlrob.CM (formula, data, lower=lower, upper=upper, tol=tol, ctrl= control))) }, "mtl" = { return(fixAns(nlrob.mtl(formula, data, lower=lower, upper=upper, tol=tol, ctrl= control))) }) } ## {non-"M" methods} ## ## else: method == "M", original method, the only one based on 'nls' : env <- environment(formula) if (is.null(env)) env <- parent.frame() if (!((is.list(start) && all(sapply(start, is.numeric))) || (is.vector(start) && is.numeric(start)))) stop("'start' must be a named list or numeric vector") test.vec <- match.arg(test.vec) if(missing(lower)) lower <- -Inf if(missing(upper)) upper <- +Inf updateScale <- is.null(scale) if(!updateScale) { ## keep initial scale fixed through iterations (e.g. for "MM") if(is.1num(scale) && scale > 0) Scale <- scale else stop("'scale' must be NULL or a positive number") } nm <- "._nlrob.w" if (nm %in% c(varNames, pnames, names(data))) stop(gettextf("Do not use '%s' as a variable name or as a parameter name", nm), domain=NA) data <- as.list(data)# to be used as such ## 'mf' now defined before "dispatch" to method ! 
nobs <- nrow(mf) if (hasWgts) hasWgts <- !is.null(weights <- model.weights(mf)) if (hasWgts && any(weights < 0 | is.na(weights))) stop("'weights' must be nonnegative and not contain NAs") ## initialize testvec etc fit <- eval(formula[[3]], c(data, start), env) y <- eval(formula[[2]], data, env) coef <- unlist(start) resid <- y - fit ## if (any(is.na(data)) & options("na.action")$na.action == "na.omit") ## stop("if NAs are present, use 'na.exclude' to preserve the residuals length") irls.delta <- function(old, new) sqrt(sum((old - new)^2, na.rm = TRUE)/ max(1e-20, sum(old^2, na.rm = TRUE))) ## Robust loop -- IWLS / IRLS iterations converged <- FALSE status <- "converged" method.exit <- FALSE for (iiter in seq_len(maxit)) { if (trace) cat("robust iteration", iiter, "\n") previous <- get(test.vec) if(updateScale) Scale <- median(abs(resid), na.rm = TRUE)/0.6745 if (Scale == 0) { convi <- 0 method.exit <- TRUE warning(status <- "could not compute scale of residuals") ## FIXME : rather use a "better" Scale in this case, e.g., ## ----- Scale <- min(abs(resid)[resid != 0]) } else { w <- psi(resid/Scale) if (hasWgts) w <- w * weights data$._nlrob.w <- w ## use a variable name the user "will not" use ._nlrob.w <- NULL # workaround for codetools "bug" ## Case distinction against "wrong warning" as long as ## we don't require R > 3.0.2: out <- if(identical(lower, -Inf) && identical(upper, Inf)) nls(formula, data = data, start = start, algorithm = algorithm, trace = trace, weights = ._nlrob.w, na.action = na.action, control = control) else nls(formula, data = data, start = start, algorithm = algorithm, trace = trace, lower=lower, upper=upper, weights = ._nlrob.w, na.action = na.action, control = control) coef <- unlist(start <- .nls.get.start(out$m)) ## same sequence as in start! Ok for test.vec: resid <- residuals(out) convi <- irls.delta(previous, get(test.vec)) } converged <- convi <= tol if (converged) break else if (trace) cat(sprintf(" --> irls.delta(previous, %s) = %g -- *not* converged\n", test.vec, convi)) }## for( iiter ...) if(!converged || method.exit) { warning(st <- paste("failed to converge in", maxit, "steps")) status <- if(method.exit) { converged <- FALSE; paste(status, st, sep="; ") } else st } if(hasWgts) { ## or just out$weights ?? tmp <- weights != 0 w[tmp] <- w[tmp]/weights[tmp] } ## --- Estimated asymptotic covariance of the robust estimator rw <- psi(res.sc <- resid/Scale) asCov <- if(!converged || !doCov) NA else { ## a version of .vcov.m(.) below AtWAinv <- chol2inv(out$m$Rmat()) dimnames(AtWAinv) <- list(names(coef), names(coef)) tau <- mean(rw^2) / mean(psi(res.sc, d=TRUE))^2 AtWAinv * Scale^2 * tau } if(is.null(call$algorithm)) call$algorithm <- algorithm ## returned object: == out$m$fitted() [FIXME?] 
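## (The list assembled below also carries, i.a., 'coefficients', 'Scale', 'rweights'
##  and 'cov'; the accessors defined later in this file, e.g. sigma.nlrob(),
##  residuals.nlrob() and summary.nlrob(), work from these components.)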
fit <- setNames(eval(formula[[3]], c(data, start)), obsNames) structure(class = c("nlrob", "nls"), list(m = out$m, call = call, formula = formula, new.formula = formula, nobs = nobs, coefficients = coef, working.residuals = as.vector(resid), fitted.values = fit, residuals = y - fit, Scale=Scale, w=w, rweights = rw, cov = asCov, test.vec=test.vec, status=status, iter=iiter, psi=psi, data = dataName, dataClasses = dataCl, model = if(model) mf, control = control)) } ##' @title The nlrob() method used ##' @param obj an \code{"nlrob"} object ##' @return characer string .method.nlrob <- function(obj) if(inherits(obj, "nls")) "M" else obj$ctrl$method .vcov.m <- function(object, Scale, resid.sc) { if(.method.nlrob(object) == "M") { AtWAinv <- chol2inv(object$m$Rmat()) stopifnot(length(Scale) == 1, Scale >= 0, is.numeric(resid.sc), length(resid.sc) == nobs(object), is.character(nms.coef <- names(coef(object))), length(nms.coef) == nrow(AtWAinv), is.function(psi <- object$psi)) dimnames(AtWAinv) <- list(nms.coef, nms.coef) tau <- mean(psi(resid.sc)^2) / mean(psi(resid.sc, d=TRUE))^2 AtWAinv * Scale^2 * tau } else if(is.function(psi <- object$psi)) { form <- object$formula ## call method="M", with fixed Scale mM <- nlrob(form, data = eval(object$data, environment(form)), method = "M", start = coef(object), psi = psi, scale = Scale, doCov=TRUE) mM$cov ## stop(".vcov.m() not yet implemented for nlrob.MM objects") ## using 'chol(): --- is wrong, unfortunately ## AtWAinv <- chol2inv(chol(object$hessian)) } else { NA ## instead of error } } ## The 'nls' method is *not* correct formula.nlrob <- function(x, ...) x$formula sigma.nlrob <- function(object, ...) if(!is.null(s <- object$Scale)) s else object$coefficients[["sigma"]] estimethod <- function(object, ...) UseMethod("estimethod") estimethod.nlrob <- function(object, ...) if(is.list(object$m) && inherits(object, "nls")) "M" else object$ctrl$method fitted.nlrob <- function (object, ...) { val <- as.vector(object$fitted.values) if (!is.null(object$na.action)) val <- napredict(object$na.action, val) ##MM: attr(val, "label") <- "Fitted values" val } ## formula() works "by default" predict.nlrob <- function (object, newdata, ...) { if (missing(newdata)) return(as.vector(fitted(object))) if (!is.null(cl <- object$dataClasses)) .checkMFClasses(cl, newdata) if(estimethod(object) == "M") # also for start = list(..) object$m$predict(newdata) else eval(formula(object)[[3]], c(as.list(newdata), coef(object))) } print.nlrob <- function (x, ...) { cat("Robustly fitted nonlinear regression model", if((meth <- .method.nlrob(x)) != "M") paste0(" (method ", meth, ")"), "\n", sep="") cat(" model: ", deparse(formula(x)), "\n") cat(" data: ", deparse(x$data), "\n") print(coef(x), ...) cat(" status: ", x$status, "\n") invisible(x) } residuals.nlrob <- function (object, type = c("response", "working", "pearson"), ...) { type <- match.arg(type) R <- switch(type, "pearson"= { stop("type 'pearson' is not yet implemented") ## as.vector(object$working.residuals) }, "working"= { ## FIXME(?): from nls, these used to *contain* weights, but no longer object$working.residuals }, "response"= { object$residuals }, stop("invalid 'type'"))# ==> programming error, as we use match.arg() if (!is.null(object$na.action)) R <- naresid(object$na.action, R) ## FIXME: add 'names'! ##MM no labels; residuals.glm() does neither: attr(val, "label") <- "Residuals" R } vcov.nlrob <- function (object, ...) 
{ if(is.numeric(cv <- object$cov)) cv else { sc <- object$Scale .vcov.m(object, Scale = sc, resid.sc = as.vector(object$residuals) / sc) } } summary.nlrob <- function (object, correlation = FALSE, symbolic.cor = FALSE, ...) { w <- object$w ## weights * rweights, scaled such that sum(w)=1 n <- sum(w > 0) param <- coef(object) p <- length(param) rdf <- n - p no <- names(object) no <- no[match(c("formula", "residuals", "Scale", "w", "rweights", "cov", "call", "status", "counts", "iter", "control", "ctrl"), no, 0L)] ans <- object[no] conv <- ans$status == "converged" if(is.null(sc <- ans$Scale)) ans$Scale <- sc <- sigma(object) if(conv && !is.matrix(ans$cov)) ans$cov <- .vcov.m(object, Scale = sc, resid.sc = as.vector(object$residuals) / sc) if((ok.cov <- is.matrix(ans$cov))) if(!all(dim(ans$cov) == p)) stop("'cov' must be a p x p matrix") ans$df <- c(p, rdf) cf <- if(ok.cov) { se <- sqrt(diag(ans$cov)) tval <- param/se cbind(param, se, tval, 2 * pt(abs(tval), rdf, lower.tail = FALSE)) } else cbind(param, NA, NA, NA) dimnames(cf) <- list(names(param), c("Estimate", "Std. Error", "t value", "Pr(>|t|)")) ans$coefficients <- cf if(correlation && ok.cov && rdf > 0) { ans$correlation <- ans$cov / outer(se, se) ans$symbolic.cor <- symbolic.cor } class(ans) <- "summary.nlrob" ans } print.summary.nlrob <- function (x, digits = max(3, getOption("digits") - 3), symbolic.cor = x$symbolic.cor, signif.stars = getOption("show.signif.stars"), ...) { cat("\nCall:\n") cat(paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n\n", sep = "") ## cat("\nFormula: ") ## cat(paste(deparse(x$formula), sep = "\n", collapse = "\n"), "\n", sep = "") if(is.null(ctrl <- x$ctrl)) meth <- "M" else { meth <- ctrl$method cat("Method \"", meth, if(!is.null(cc <- ctrl$init)) paste0("\", init = \"", cc), if(!is.null(ps <- ctrl$psi )) paste0("\", psi = \"", ps), "\"\n", sep="") } resid <- x$residuals df <- x$df rdf <- df[2L] cat(if (!is.null(x$weights) && diff(range(x$weights))) "Weighted ", "Residuals:\n", sep = "") if (rdf > 5L) { nam <- c("Min", "1Q", "Median", "3Q", "Max") rq <- if (NCOL(resid) > 1) structure(apply(t(resid), 1, quantile), dimnames = list(nam, dimnames(resid)[[2]])) else setNames(quantile(resid), nam) print(rq, digits = digits, ...) } else print(resid, digits = digits, ...) cat("\nParameters:\n") printCoefmat(x$coefficients, digits = digits, signif.stars = signif.stars, ...) if(x$status == "converged") { cat("\nRobust residual standard error:", format(signif(x$Scale, digits)), "\n") correl <- x$correlation if (!is.null(correl)) { p <- NCOL(correl) if (p > 1) { cat("\nCorrelation of Parameter Estimates:\n") if(is.logical(symbolic.cor) && symbolic.cor) { print(symnum(correl, abbr.colnames = NULL)) } else { correl <- format(round(correl, 2), nsmall = 2, digits = digits) correl[!lower.tri(correl)] <- "" print(correl[-1, -p, drop=FALSE], quote = FALSE) } } } if(is.null(ctrl)) cat("Convergence in", x$iter, "IRWLS iterations\n\n") else { if(length(it <- ctrl$iter) == 1) cat("Convergence in", it, "iterations\n\n") else if(length(cnts <- x$counts) > 0) cat("Convergence after", cnts[["function"]], "function and", cnts[["gradient"]],"gradient evaluations\n\n") else ## length(it) >= 2 : cat("Convergence\n\n") } if(!is.null(x$rweights)) summarizeRobWeights(x$rweights, digits = digits, ...) 
} else if(meth == "M") cat("** IRWLS iterations did *not* converge!\n\n") else cat("** Iterations did *not* converge!\n\n") invisible(x) } ## Confint(): ideally built on profile, the same as stats:::confint.nls() ## -------- which eventually calls stats:::profile.nls ## Also, do emulate (to some extent) ## str(lme4:::confint.merMod) ## function (object, parm, level = 0.95, method = c("profile", "Wald", "boot"), ## zeta, nsim = 500, boot.type = c("perc", "basic", "norm"), quiet = FALSE, ## oldNames = TRUE, ...) confint.nlrob <- function(object, parm, level = 0.95, method = c("profile", "Wald", "boot"), zeta, nsim = 500, boot.type = c("perc", "basic", "norm"), quiet = FALSE, oldNames = TRUE, ...) { method <- match.arg(method) boot.type <- match.arg(boot.type) if (!missing(parm) && !is.numeric(parm) && method %in% c("profile", "boot")) stop("for method='", method, "', 'parm' must be specified as an integer") switch(method, profile = { stop("profile() method not yet implemented for \"nlrob\" objects. Use method = \"Wald\".") ## hence unused for now : if (!quiet) message("Computing profile confidence intervals ...") utils::flush.console() pp <- if (missing(parm)) { profile(object, signames = oldNames, ...) } else { profile(object, which = parm, signames = oldNames, ...) } confint(pp, level = level, zeta = zeta) }, Wald = { cf <- coef(object) pnames <- names(cf) if (missing(parm)) parm <- pnames else if (is.numeric(parm)) parm <- pnames[parm] a <- (1 - level)/2 a <- c(a, 1 - a) ## for now, a short version of R's formatting in quantile.default(): format_perc <- function(x, digits = max(2L, getOption("digits"))) paste0(formatC(x, format = "fg", width = 1, digits = digits)) pct <- format_perc(a, 3) fac <- qnorm(a) ci <- array(NA, dim = c(length(parm), 2L), dimnames = list(parm, pct)) sdiag <- function(x) if (length(x) == 1) c(x) else diag(x) ses <- sqrt(sdiag(vcov(object)[parm, parm])) ci[] <- cf[parm] + ses %o% fac ci }, boot = { stop("\"boot\" method not yet implemented for \"nlrob\" objects. Use confint(*, method = \"Wald\").") }) } robustbase/R/glmrobPredict.R0000644000176200001440000000547712113111622015547 0ustar liggesusers# File .../glmrobPredict.R # Part of the R package 'robustbase', http://www.R-project.org # Based on predict.glm (cf. src/library/stats/R/) # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # A copy of the GNU General Public License is available at # http://www.r-project.org/Licenses/ # # Note that '# *rob' indicate adjustment for the robust case predict.glmrob <- function(object, newdata = NULL, type = c("link", "response", "terms"), se.fit = FALSE, dispersion = NULL, terms=NULL, na.action = na.pass, ...) { type <- match.arg(type) na.act <- object$na.action object$na.action <- NULL # kill this for predict.lm calls object$rweights <- object$w.r # *rob: used for predict.lmrob() if (!se.fit) { ## No standard errors if(missing(newdata)) { pred <- switch(type, link = object$linear.predictors, response = object$fitted.values, terms = predict.lmrob(object, se.fit=se.fit, scale = 1, type="terms", terms=terms, ...) 
# *rob ) if(!is.null(na.act)) pred <- napredict(na.act, pred) } else { pred <- predict.lmrob(object, newdata, se.fit, scale = 1, type = if(type=="link") "response" else type, terms = terms, na.action = na.action, ...) # *rob switch(type, response = {pred <- family(object)$linkinv(pred)}, link =, terms= ) } } else { ## summary.survreg has no ... argument. if(inherits(object, "survreg")) dispersion <- 1. if(is.null(dispersion) || dispersion == 0) dispersion <- summary(object, dispersion=dispersion)$dispersion residual.scale <- as.vector(sqrt(dispersion)) pred <- predict.lmrob(object, newdata, se.fit, scale = residual.scale, type = if(type=="link") "response" else type, terms = terms, na.action = na.action, ...) # *rob fit <- pred$fit se.fit <- pred$se.fit switch(type, response = { se.fit <- se.fit * abs(family(object)$mu.eta(fit)) fit <- family(object)$linkinv(fit) }, link =, terms=) if( missing(newdata) && !is.null(na.act) ) { fit <- napredict(na.act, fit) se.fit <- napredict(na.act, se.fit) } pred <- list(fit=fit, se.fit=se.fit, residual.scale=residual.scale) } pred } robustbase/R/anova-glmrob.R0000644000176200001440000001337512425760522015351 0ustar liggesusers anova.glmrob <- function(object, ..., test = c("Wald", "QD", "QDapprox")) { dotargs <- list(...) if (!is.null(names(dotargs))) { named <- (names(dotargs) != "") if (any(named)) { warning("the following arguments to 'anova.glmrob' are invalid and", "dropped:\n", pasteK(deparse(dotargs[named]))) dotargs <- dotargs[!named] } } is.glmrob <- vapply(dotargs, inherits, NA, what="glmrob") if(!all(is.glmrob) || !inherits(object, "glmrob")) stop("anova.glmrob() only works for 'glmrob' objects") test <- match.arg(test) if (length(dotargs) > 0) anovaGlmrobList(c(list(object), dotargs), test=test) else { ## ## "'Anova Table' for a single model object stop("'Anova Table' for a single model object not yet implemented") } } anovaGlmrobList <- function (object, test=NULL) { nmodels <- length(object) stopifnot(nmodels >= 2) responses <- as.character(lapply(object, function(x) deparse(formula(x)[[2]]))) if (!all(responses == responses[1])) stop("Not the same response used in the fitted models") nobs <- sapply(object, function(x) length(x$residuals)) if (any(nobs != nobs[1])) stop("models were not all fitted to the same size of dataset") methods <- as.character(lapply(object, function(x) x$method)) if(!all(methods == methods[1])) stop("Not the same method used for fitting the models") note <- paste("Models fitted by method '", methods[1], "'", sep="") tccs <- sapply(object, function(x) length(x$tcc)) if(!all(tccs == tccs[1])) stop("Not the same tuning constant c used in the robust fits") ## tbl <- matrix(rep(NA, nmodels*4), ncol = 4) tbl[1,1] <- nobs[1] - length(coef(object[[1]])) for(k in 2:nmodels) tbl[k,] <- anovaGlmrobPair(object[[k-1]], object[[k]], test=test) ## return dimnames(tbl) <- list(1:nmodels, c("pseudoDf", "Test.Stat", "Df", "Pr(>chisq)")) title <- switch(test, Wald = "Robust Wald Test Table", QD = "Robust Quasi-Deviance Table", QDapprox = "Robust Quasi-Deviance Table Based on a Quadratic Approximation", "") variables <- lapply(object, function(x) paste(deparse(formula(x)), collapse = "\n")) topnote <- paste("Model ", format(1:nmodels), ": ", variables, sep = "", collapse = "\n") structure(as.data.frame(tbl), heading = c(title, "", topnote, note,""), class = c("anova", "data.frame")) } anovaGlmrobPair <- function(obj1, obj2, test) { if(length(coef(obj1)) < length(coef(obj2))){ Sign <- 1 full.mfit <- obj2 reduced.mfit <- obj1 } else { 
Sign <- -1 full.mfit <- obj1 reduced.mfit <- obj2 } X <- model.matrix(full.mfit) asgn <- attr(X, "assign") tt <- terms(full.mfit) tt0 <- terms(reduced.mfit) tl <- attr(tt, "term.labels") tl0 <- attr(tt0, "term.labels") numtl0 <- match(tl0 , tl, nomatch = -1) if(attr(tt0, "intercept") == 1) numtl0 <- c(0, numtl0) if(any(is.na(match(numtl0, unique(asgn))))) stop("Models are not nested!") mod0 <- seq(along = asgn)[!is.na(match(asgn, numtl0))] if (length(asgn) == length(mod0)) stop("Models are not strictly nested") H0ind <- setdiff(seq(along = asgn), mod0) H0coef <- coef(full.mfit)[H0ind] df <- length(H0coef) pp <- df + length(mod0) if(test == "Wald") { t.cov <- full.mfit$cov t.chisq <- sum(H0coef * solve(t.cov[H0ind, H0ind], H0coef)) statistic <- c(chisq = t.chisq) } else if(full.mfit$method=="Mqle" && (test == "QD" || test == "QDapprox")) { matM <- full.mfit$matM if(test == "QDapprox") { ## Difference of robust quasi-deviances ## via the asymptotically equivalent quadratic form matM11 <- matM[mod0, mod0, drop=FALSE] matM12 <- matM[mod0, H0ind, drop=FALSE] matM22 <- matM[H0ind, H0ind, drop=FALSE] matM22.1 <- matM22 - crossprod(matM12, solve(matM11, matM12)) Dquasi.dev <- nrow(X) * c(H0coef %*% matM22.1 %*% H0coef) } else { quasiDev <- switch(full.mfit$family$family, poisson = glmrobMqleDiffQuasiDevPois, binomial = glmrobMqleDiffQuasiDevB, Gamma = glmrobMqleDiffQuasiDevGamma, stop("This family is not implemented")) ## note that qdev and qdev0 do depend on an incorrectly specified ## lower limits in the integration. But this does't matter in ## the following difference, because the difference does not ## deepend on it! (Hence I could use the centered nui ## (cnui= nui - Enui) in quasiDev as the function to be integrated. Dquasi.dev <- quasiDev(mu = full.mfit$fitted.values, mu0 = reduced.mfit$fitted.values, y = full.mfit$y, ni = full.mfit$ni, w.x = full.mfit$w.x, phi=full.mfit$dispersion, tcc = full.mfit$tcc) } ## Asymptotic distribution: variance and weights of the sum of chi2 matQ <- full.mfit$matQ matM11inv <- solve(matM[mod0,mod0]) Mplus <- matrix(0, ncol = pp, nrow = pp) Mplus[mod0, mod0] <- matM11inv d.ev <- Re(eigen(matQ %*% (solve(matM)-Mplus), only.values=TRUE)$values) d.ev <- d.ev[1:df] ## just the q (=df) lagest eigenvalues are needed if(any(d.ev < 0)) warning("some eigenvalues are negative") ## p-value: exact computation for q=1, approximated for q>1 (q=df) statistic <- c(quasi.dev = Dquasi.dev/mean(d.ev)) } else stop("non-implemented test method:", test, "for fitting method", full.mfit$method) ## return c(nrow(X)-pp+df*(Sign<0), Sign*statistic, Sign*df, pchisq(as.vector(statistic), df=df, lower.tail = FALSE)) } robustbase/R/lmrob.R0000644000176200001440000006304613537200341014071 0ustar liggesusers ### The first part of lmrob() much cut'n'paste from lm() - on purpose! lmrob <- function(formula, data, subset, weights, na.action, method = 'MM', model = TRUE, x = !control$compute.rd, y = FALSE, singular.ok = TRUE, contrasts = NULL, offset = NULL, control = NULL, init = NULL, ...) { ## to avoid problems with setting argument ## call lmrob.control here either with or without method arg. if (miss.ctrl <- missing(control)) control <- if (missing(method)) lmrob.control(...) else lmrob.control(method = method, ...) else if (length(list(...))) ## "sophisticated version" of chk.s(...) warning("arguments .. in ", sub(")$", "", sub("^list\\(", "", deparse(list(...), control = c()))), " are disregarded.\n", " Maybe use lmrob(*, control=lmrob.control(....) 
with all these.") ret.x <- x ret.y <- y cl <- match.call() mf <- match.call(expand.dots = FALSE) m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"), names(mf), 0) mf <- mf[c(1, m)] mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") mf <- eval(mf, parent.frame()) mt <- attr(mf, "terms") # allow model.frame to update it y <- model.response(mf, "numeric") w <- as.vector(model.weights(mf)) if(!is.null(w) && !is.numeric(w)) stop("'weights' must be a numeric vector") offset <- as.vector(model.offset(mf)) if(!is.null(offset) && length(offset) != NROW(y)) stop(gettextf("number of offsets is %d, should equal %d (number of observations)", length(offset), NROW(y)), domain = NA) if (!miss.ctrl && !missing(method) && method != control$method) { warning("The 'method' argument is different from 'control$method'\n", "Using the former, method = ", method) control$method <- method } if (is.empty.model(mt)) { x <- NULL singular.fit <- FALSE ## to avoid problems below z <- list(coefficients = if(is.matrix(y)) matrix(NA_real_, 0, ncol(y)) else numeric(), residuals = y, scale = NA, fitted.values = 0 * y, cov = matrix(NA_real_,0,0), weights = w, rank = 0, df.residual = if(!is.null(w)) sum(w != 0) else NROW(y), converged = TRUE, iter = 0) if(!is.null(offset)) { z$fitted.values <- offset z$residuals <- y - offset z$offset <- offset } } else { x <- model.matrix(mt, mf, contrasts) contrasts <- attr(x, "contrasts") assign <- attr(x, "assign") p <- ncol(x) if(!is.null(offset)) y <- y - offset if (!is.null(w)) { ## checks and code copied/modified from lm.wfit ny <- NCOL(y) n <- nrow(x) if (NROW(y) != n | length(w) != n) stop("incompatible dimensions") if (any(w < 0 | is.na(w))) stop("missing or negative weights not allowed") zero.weights <- any(w == 0) if (zero.weights) { save.r <- y save.w <- w save.f <- y ok <- w != 0 nok <- !ok w <- w[ok] x0 <- x[nok, , drop = FALSE] x <- x[ ok, , drop = FALSE] n <- nrow(x) y0 <- if (ny > 1L) y[nok, , drop = FALSE] else y[nok] y <- if (ny > 1L) y[ ok, , drop = FALSE] else y[ok] ## add this information to model.frame as well ## need it in outlierStats() ## ?? could also add this to na.action, then ## naresid() would pad these as well. attr(mf, "zero.weights") <- which(nok) } wts <- sqrt(w) save.y <- y x <- wts * x y <- wts * y } ## check for singular fit if(getRversion() >= "3.1.0") { z0 <- .lm.fit(x, y, tol = control$solve.tol) piv <- z0$pivot } else { z0 <- lm.fit(x, y, tol = control$solve.tol) piv <- z0$qr$pivot } rankQR <- z0$rank singular.fit <- rankQR < p if (rankQR > 0) { if (singular.fit) { if (!singular.ok) stop("singular fit encountered") pivot <- piv p1 <- pivot[seq_len(rankQR)] p2 <- pivot[(rankQR+1):p] ## to avoid problems in the internal fitting methods, ## split into singular and non-singular matrices, ## can still re-add singular part later dn <- dimnames(x) x <- x[,p1] attr(x, "assign") <- assign[p1] ## needed for splitFrame to work } if (is.function(control$eps.x)) control$eps.x <- control$eps.x(max(abs(x))) if (!is.null(ini <- init)) { if (is.character(init)) { init <- switch(init, "M-S" = lmrob.M.S(x, y, control, mf=mf), "S" = lmrob.S (x, y, control), stop('init must be "S", "M-S", function or list')) if(ini == "M-S") { ## "M-S" sometimes reverts to "S": ini <- init$control$method ## if(identical(ini, "M-S")) ## control$method <- paste0(ini, control$method) } } else if (is.function(init)) { init <- init(x=x, y=y, control=control, mf=mf) } else if (is.list(init)) { ## MK: set init$weights, init$residuals here ?? 
## (needed in lmrob..D..fit) ## or disallow method = D... ? would need to fix also ## lmrob.kappa: tuning.psi / tuning.chi choice if (singular.fit) { ## make sure the initial coefficients vector matches ## to the reduced x init$coef <- na.omit(init$coef) if (length(init$coef) != ncol(x)) stop("Length of initial coefficients vector does not match rank of singular design matrix x") } } else stop("unknown init argument") stopifnot(is.numeric(init$coef), is.numeric(init$scale)) ## modify (default) control$method, possibly dropping first letter: if (control$method == "MM" || substr(control$method, 1, 1) == "S") control$method <- substring(control$method, 2) ## check for control$cov argument if (class(init)[1] != "lmrob.S" && control$cov == '.vcov.avar1') control$cov <- ".vcov.w" } z <- lmrob.fit(x, y, control, init=init) #-> ./lmrob.MM.R ## --------- if(is.character(ini) && !grepl(paste0("^", ini), control$method)) control$method <- paste0(ini, control$method) if (singular.fit) { coef <- numeric(p) coef[p2] <- NA coef[p1] <- z$coefficients names(coef) <- dn[[2L]] z$coefficients <- coef ## Update QR decomposition (z$qr) ## pad qr and qraux with zeroes (columns that were pivoted to the right in z0) d.p <- p-rankQR n <- NROW(y) z$qr[c("qr","qraux","pivot")] <- list(matrix(c(z$qr$qr, rep.int(0, d.p*n)), n, p, dimnames = list(dn[[1L]], dn[[2L]][piv])), ## qraux: c(z$qr$qraux, rep.int(0, d.p)), ## pivot: piv) } } else { ## rank 0 z <- list(coefficients = if (is.matrix(y)) matrix(NA_real_,p,ncol(y)) else rep.int(NA_real_, p), residuals = y, scale = NA, fitted.values = 0 * y, cov = matrix(NA_real_,0,0), rweights = rep.int(NA_real_, NROW(y)), weights = w, rank = 0, df.residual = NROW(y), converged = TRUE, iter = 0, control=control) if (is.matrix(y)) colnames(z$coefficients) <- colnames(x) else names(z$coefficients) <- colnames(x) if(!is.null(offset)) z$residuals <- y - offset } if (!is.null(w)) { z$residuals <- z$residuals/wts z$fitted.values <- save.y - z$residuals z$weights <- w if (zero.weights) { # compute residuals, fitted, wts... also for the 0-weight obs coef <- z$coefficients coef[is.na(coef)] <- 0 f0 <- x0 %*% coef ## above ok := (w != 0); nok := (w == 0) if (ny > 1) { save.r[ok, ] <- z$residuals save.r[nok, ] <- y0 - f0 save.f[ok, ] <- z$fitted.values save.f[nok, ] <- f0 } else { save.r[ok] <- z$residuals save.r[nok] <- y0 - f0 save.f[ok] <- z$fitted.values save.f[nok] <- f0 } z$residuals <- save.r z$fitted.values <- save.f z$weights <- save.w rw <- z$rweights z$rweights <- rep.int(0, length(save.w)) z$rweights[ok] <- rw } } } if(!is.null(offset)) z$fitted.values <- z$fitted.values + offset z$na.action <- attr(mf, "na.action") z$offset <- offset z$contrasts <- contrasts z$xlevels <- .getXlevels(mt, mf) z$call <- cl z$terms <- mt z$assign <- assign if(control$compute.rd && !is.null(x)) z$MD <- robMD(x, attr(mt, "intercept"), wqr=z$qr) if (model) z$model <- mf if (ret.x) z$x <- if (singular.fit || (!is.null(w) && zero.weights)) model.matrix(mt, mf, contrasts) else x if (ret.y) z$y <- if (!is.null(w)) model.response(mf, "numeric") else y class(z) <- "lmrob" z } if(getRversion() < "3.1.0") globalVariables(".lm.fit") ##' @title Warn about extraneous arguments in the "..." (of its caller) ##' @return ##' @author Martin Maechler, June 2012 chk.s <- function(...) { if(length(list(...))) warning("arguments ", sub(")$", '', sub("^list\\(", '', deparse(list(...), control=c()))), " are disregarded in\n ", deparse(sys.call(-1), control=c()), call. 
= FALSE) } ##' Robust Mahalanobis Distances ##' internal function, used in lmrob() and plot.lmrob() ##' also "wanted" by 'robustloggamma' pkg robMD <- function(x, intercept, wqr, ...) { ## NB: 'wqr' only needed when covMcd() is not (entirely) successful if(intercept == 1) x <- x[, -1, drop=FALSE] if(ncol(x) >= 1) { rob <- tryCatch(covMcd(x, ...), warning = function(w) structure("covMcd produced a warning", class="try-error", condition = w), error = function(e) structure("covMcd failed with an error", class="try-error", condition = e)) if (inherits(rob, "try-error")) { warning("Failed to compute robust Mahalanobis distances, reverting to robust leverages.") .lmrob.hat(wqr = wqr) } else sqrt( mahalanobis(x, rob$center, rob$cov) ) } ## else NULL } ### Method Functions for class lmrob objects ### ### ---------------------------------------- ### ## Many are just wrapper functions for the respective .lm methods ## ---- sorted *ALPHABETICALLY* ---- alias.lmrob <- function(object, ...) { ## Purpose: provide alias() for lmrob objects ## Cannot use alias.lm directly, since it requires a "clean" object$qr, ## i.e., without the robustness weights if (is.null(x <- object[["x"]])) x <- model.matrix(object) weights <- weights(object) if (!is.null(weights) && diff(range(weights))) x <- x * sqrt(weights) object$qr <- qr(x) class(object) <- "lm" alias(object) } ## R (3.1.0)-devel copy of case.names.lm() ...../R/src/library/stats/R/lm.R case.names.lmrob <- function(object, full = FALSE, ...) { w <- weights(object) dn <- names(residuals(object)) if(full || is.null(w)) dn else dn[w!=0] } ## coef(): no own method ==> using coef.default(OO) == OO$coefficients ## ------------- ## use confint.lm instead of confint.default ## mainly to get t instead of normal quantiles ## Either imported from 'stats' or then copy-paste-defined in ./zzz.R : confint.lmrob <- confint.lm dummy.coef.lmrob <- dummy.coef.lm family.lmrob <- function(object, ...) gaussian() ## == stats:::family.lm ## fitted.default works for "lmrob" ## base::kappa.lm() is "doomed"; call what kappa.lm() has been calling for years: kappa.lmrob <- function(z, ...) kappa.qr(z$qr, ...) ## == kappa.lm(z, ...) ## instead of stats:::qr.lm() qrLmr <- function(x) { if(!is.list(r <- x$qr)) stop("lmrob object does not have a proper 'qr' component. Rank zero?") r } ## Basically the same as stats:::labels.lm -- FIXME: rank 0 fits? labels.lmrob <- function(object, ...) { tl <- attr(object$terms, "term.labels") asgn <- object$assign[qrLmr(object)$pivot[seq_len(object$rank)]] tl[unique(asgn)] } ## Works via lm's method [which is still exported]: model.matrix.lmrob <- model.matrix.lm ## identical to stats:::nobs.lm {but that is hidden .. and small to copy}: nobs.lmrob <- function(object, ...) if (!is.null(w <- object$weights)) sum(w != 0) else NROW(object$residuals) if(FALSE) ## now replaced with more sophsticated in ./lmrobPredict.R ## learned from MASS::rlm() : via "lm" as well predict.lmrob <- function (object, newdata = NULL, scale = NULL, ...) { class(object) <- c(class(object), "lm") object$qr <- qr(sqrt(object$rweights) * object$x) predict.lm(object, newdata = newdata, scale = object$s, ...) } print.summary.lmrob <- function (x, digits = max(3, getOption("digits") - 3), symbolic.cor = x$symbolic.cor, signif.stars = getOption("show.signif.stars"), showAlgo = TRUE, ...) 
{ cat("\nCall:\n", paste(deparse(x$call, width.cutoff=72), sep = "\n", collapse = "\n"), "\n", sep = "") control <- lmrob.control.minimal(x$control) cat(" \\--> method = \"", control$method, '"\n', sep = "") ## else cat("\n") resid <- x$residuals df <- x$df rdf <- df[2L] cat(if (!is.null(x$weights) && diff(range(x$weights))) "Weighted ", "Residuals:\n", sep = "") if (rdf > 5L) { nam <- c("Min", "1Q", "Median", "3Q", "Max") rq <- if (NCOL(resid) > 1) structure(apply(t(resid), 1, quantile), dimnames = list(nam, dimnames(resid)[[2]])) else setNames(quantile(resid), nam) print(rq, digits = digits, ...) } else print(resid, digits = digits, ...) ## FIXME: need to catch rdf == 0? if( length(x$aliased) ) { if( !(x$converged) ) { if (x$scale == 0) { cat("\nExact fit detected\n\nCoefficients:\n") } else { cat("\nAlgorithm did not converge\n") if (control$method == "S") cat("\nCoefficients of the *initial* S-estimator:\n") else cat(sprintf("\nCoefficients of the %s-estimator:\n", control$method)) } printCoefmat(x$coef, digits = digits, signif.stars = signif.stars, ...) } else { if (nsingular <- df[3L] - df[1L]) cat("\nCoefficients: (", nsingular, " not defined because of singularities)\n", sep = "") else cat("\nCoefficients:\n") coefs <- x$coefficients if(!is.null(aliased <- x$aliased) && any(aliased)) { cn <- names(aliased) coefs <- matrix(NA, length(aliased), 4, dimnames=list(cn, colnames(coefs))) coefs[!aliased, ] <- x$coefficients } printCoefmat(coefs, digits = digits, signif.stars = signif.stars, na.print="NA", ...) cat("\nRobust residual standard error:", format(signif(x$scale, digits)),"\n") if(nzchar(mess <- naprint(x$na.action))) cat(" (",mess,")\n", sep = "") if(!is.null(x$r.squared) && x$df[1] != attr(x$terms, "intercept")) { cat("Multiple R-squared: ", formatC(x$r.squared, digits = digits)) cat(",\tAdjusted R-squared: ", formatC(x$adj.r.squared, digits = digits), "\n") } correl <- x$correlation if (!is.null(correl)) { p <- NCOL(correl) if (p > 1) { cat("\nCorrelation of Coefficients:\n") if (is.logical(symbolic.cor) && symbolic.cor) { print(symnum(correl), abbr.colnames = NULL) } else { correl <- format(round(correl, 2), nsmall = 2, digits = digits) correl[!lower.tri(correl)] <- "" print(correl[-1, -p, drop = FALSE], quote = FALSE) } } } cat("Convergence in", x$iter, "IRWLS iterations\n") } cat("\n") if (!is.null(rw <- x$rweights)) { if (any(zero.w <- x$weights == 0)) rw <- rw[!zero.w] eps.outlier <- if (is.function(EO <- control$eps.outlier)) EO(nobs(x)) else EO summarizeRobWeights(rw, digits = digits, eps = eps.outlier, ...) } } else cat("\nNo Coefficients\n") if (showAlgo && !is.null(control)) printControl(control, digits = digits, drop. = "method") invisible(x) } print.lmrob <- function(x, digits = max(3, getOption("digits") - 3), ...) 
{ cat("\nCall:\n", cl <- deparse(x$call, width.cutoff=72), "\n", sep = "") control <- lmrob.control.minimal(x$control) if(!any(grepl("method *= *['\"]", cl)))## 'method = ".."' not explicitly visible above cat(" \\--> method = \"", control$method, '"\n', sep = "") else cat("\n") if(length((cf <- coef(x)))) { if( x$converged ) cat("Coefficients:\n") else { if (x$scale == 0) { cat("Exact fit detected\n\nCoefficients:\n") } else { cat("Algorithm did not converge\n\n") if (control$method == "S") cat("Coefficients of the *initial* S-estimator:\n") else cat(sprintf("Coefficients of the %s-estimator:\n", control$method)) } } print(format(cf, digits = digits), print.gap = 2, quote = FALSE) } else cat("No coefficients\n") cat("\n") invisible(x) } print.lmrob.S <- function(x, digits = max(3, getOption("digits") - 3), showAlgo = TRUE, ...) { cat("S-estimator lmrob.S():\n") if(length((cf <- coef(x)))) { if (x$converged) cat("Coefficients:\n") else if (x$scale == 0) cat("Exact fit detected\n\nCoefficients:\n") else cat("Algorithm did not converge\n\n") print(format(cf, digits = digits), print.gap = 2, quote = FALSE) } else cat("No coefficients\n") cat("scale = ",format(x$scale, digits=digits), "; ", if(x$converged)"converged" else "did NOT converge", " in ", x$k.iter, " refinement steps\n") if (showAlgo && !is.null(x$control)) printControl(x$control, digits = digits, drop. = "method") invisible(x) } ## practically identical to stats:::qr.lm : qr.lmrob <- function (x, ...) { if (is.null(r <- x$qr)) stop("lmrob object does not have a proper 'qr' component. Rank must be zero") r } residuals.lmrob <- function(object, ...) residuals.lm(object, ...) ## even simpler than residuals.default(): residuals.lmrob.S <- function(obj) obj$residuals summary.lmrob <- function(object, correlation = FALSE, symbolic.cor = FALSE, ...) { if (is.null(object$terms)) stop("invalid 'lmrob' object: no terms component") p <- object$rank df <- object$df.residual #was $degree.freedom sigma <- object[["scale"]] aliased <- is.na(coef(object)) cf.nms <- c("Estimate", "Std. 
Error", "t value", "Pr(>|t|)") if (p > 0) { n <- p + df p1 <- seq_len(p) se <- sqrt(if(length(object$cov) == 1L) object$cov else diag(object$cov)) est <- object$coefficients[object$qr$pivot[p1]] tval <- est/se ans <- object[c("call", "terms", "residuals", "scale", "rweights", "na.action", "converged", "iter", "control")] if (!is.null(ans$weights)) ans$residuals <- ans$residuals * sqrt(object$weights) ## 'df' vector, modeled after summary.lm() : ans$df <- c(p, rdf, NCOL(Qr$qr)) ## where p <- z$rank ; rdf <- z$df.residual ; Qr <- qr.lm(object) ans$df <- c(p, df, NCOL(object$qr$qr)) ans$coefficients <- if( ans$converged) cbind(est, se, tval, 2 * pt(abs(tval), df, lower.tail = FALSE)) else cbind(est, if(sigma <= 0) 0 else NA, NA, NA) dimnames(ans$coefficients) <- list(names(est), cf.nms) if (p != attr(ans$terms, "intercept")) { df.int <- if (attr(ans$terms, "intercept")) 1L else 0L ## This block is based on code by Olivier Renaud resid <- object$residuals pred <- object$fitted.values resp <- if (is.null(object[["y"]])) pred + resid else object$y wgt <- object$rweights ## scale.rob <- object$scale ## correction = E[wgt(r)] / E[psi'(r)] = E[wgt(r)] / E[r*psi(r)] ctrl <- object$control c.psi <- ctrl$tuning.psi psi <- ctrl$psi correc <- if (psi == 'ggw') { if (isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.95, NA)))) 1.121708 else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.95, NA)))) 1.163192 else if (isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.85, NA)))) 1.33517 else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.85, NA)))) 1.395828 else lmrob.E(wgt(r), ctrl) / lmrob.E(r*psi(r), ctrl) } else if (any(psi == .Mpsi.R.names) && isTRUE(all.equal(c.psi, .Mpsi.tuning.default(psi)))) { switch(psi, bisquare = 1.207617, welsh = 1.224617, # 1.2246131 optimal = 1.068939, hampel = 1.166891, lqq = 1.159232, stop('unsupported psi function -- should not happen')) } else lmrob.E(wgt(r), ctrl) / lmrob.E(r*psi(r), ctrl) resp.mean <- if (df.int == 1L) sum(wgt * resp)/sum(wgt) else 0 yMy <- sum(wgt * (resp - resp.mean)^2) rMr <- sum(wgt * resid^2) ans$r.squared <- r2correc <- (yMy - rMr) / (yMy + rMr * (correc - 1)) ans$adj.r.squared <- 1 - (1 - r2correc) * ((n - df.int) / df) } else ans$r.squared <- ans$adj.r.squared <- 0 ans$cov <- object$cov if(length(object$cov) > 1L) dimnames(ans$cov) <- dimnames(ans$coefficients)[c(1,1)] if (correlation) { ans$correlation <- ans$cov / outer(se, se) ans$symbolic.cor <- symbolic.cor } } else { ## p = 0: "null model" ans <- object ans$df <- c(0L, df, length(aliased)) ans$coefficients <- matrix(ans$coefficients[0L], 0L, 4L, dimnames = list(NULL, cf.nms)) ans$r.squared <- ans$adj.r.squared <- 0 ans$cov <- object$cov } ans$aliased <- aliased # used in print method ans$sigma <- sigma # 'sigma': in summary.lm() & 'fit.models' pkg if (is.function(ans$control$eps.outlier)) ans$control$eps.outlier <- ans$control$eps.outlier(nobs(object)) if (is.function(ans$control$eps.x)) ans$control$eps.x <- if(!is.null(o.x <- object[['x']])) ans$control$eps.x(max(abs(o.x))) ## else NULL structure(ans, class = "summary.lmrob") } ## R (3.1.0)-devel copy of variable.names.lm() ...../R/src/library/stats/R/lm.R variable.names.lmrob <- function(object, full = FALSE, ...) { if(full) dimnames(qrLmr(object)$qr)[[2L]] else if(object$rank) dimnames(qrLmr(object)$qr)[[2L]][seq_len(object$rank)] else character() } vcov.lmrob <- function (object, cov = object$control$cov, complete = TRUE, ...) 
{ if(!is.null(object$cov) && (missing(cov) || identical(cov, object$control$cov))) .vcov.aliased(aliased = is.na(coef(object)), object$cov, complete= if(is.na(complete)) FALSE else complete) else { ## cov is typically = ".vcov.w" or ".vcov.avar1", but can be *any* user func. lf.cov <- if (!is.function(cov)) get(cov, mode = "function") else cov lf.cov(object, complete=complete, ...) } } sigma.lmrob <- function(object, ...) object$scale weights.lmrob <- function(object, type = c("prior", "robustness"), ...) { type <- match.arg(type) res <- if (type == "prior") { ## Issue warning only if called from toplevel. Otherwise the warning pop ## up at quite unexpected places, e.g., case.names(). if (is.null(object[["weights"]]) && identical(parent.frame(), .GlobalEnv)) warning("No weights defined for this object. Use type=\"robustness\" argument to get robustness weights.") object[["weights"]] } else object[["rweights"]] if (is.null(object$na.action)) res else naresid(object$na.action, res) } #### functions hidden in namespace #### printControl <- function(ctrl, digits = getOption("digits"), str.names = "seed", drop. = character(0), header = "Algorithmic parameters:", ...) { ## Purpose: nicely and sensibly print a 'control' structure ## currently for lmrob(), glmrob() ## Author: Martin Maechler, Date: 31 May 2006 PR <- function(LST, ...) if(length(LST)) print(unlist(LST), ...) cat(header,"\n") is.str <- (nc <- names(ctrl)) %in% str.names do. <- !is.str & !(nc %in% drop.) is.ch <- vapply(ctrl, is.character, NA) real.ctrl <- vapply(ctrl, function(x) length(x) > 0 && is.numeric(x) && any(x %% 1 != 0), NA) PR(ctrl[do. & real.ctrl], digits = digits, ...) ## non-real, non-char ones (typically integers), but dropping 0-length ones PR(ctrl[do. & !is.ch & !real.ctrl], ...) ## char ones PR(ctrl[do. & is.ch], ...) if(any(is.str)) for(n in nc[is.str]) { cat(n,":") str(ctrl[[n]], vec.len = 2) ## 'vec.len = 2' is smaller than normal, but nice for Mersenne seed } } summarizeRobWeights <- function(w, digits = getOption("digits"), header = "Robustness weights:", eps = 0.1 / length(w), eps1 = 1e-3, ...) { ## Purpose: nicely print a "summary" of robustness weights stopifnot(is.numeric(w)) cat(header,"\n") cat0 <- function(...) cat('', ...) n <- length(w) if(n <= 10) print(w, digits = digits, ...) else { n1 <- sum(w1 <- abs(w - 1) < eps1) n0 <- sum(w0 <- abs(w) < eps) if(any(w0 & w1)) warning("weights should not be both close to 0 and close to 1!\n", "You should use different 'eps' and/or 'eps1'") if(n0 > 0 || n1 > 0) { if(n0 > 0) { formE <- function(e) formatC(e, digits = max(2, digits-3), width=1) i0 <- which(w0) maxw <- max(w[w0]) c3 <- paste0("with |weight| ", if(maxw == 0) "= 0" else paste("<=", formE(maxw)), " ( < ", formE(eps), ");") cat0(if(n0 > 1) { cc <- sprintf("%d observations c(%s)", n0, strwrap(paste(i0, collapse=","))) c2 <- " are outliers" paste0(cc, if(nchar(cc)+ nchar(c2)+ nchar(c3) > getOption("width")) "\n ", c2) } else sprintf("observation %d is an outlier", i0), c3, "\n") } if(n1 > 0) cat0(ngettext(n1, "one weight is", sprintf("%s%d weights are", if(n1 == n)"All " else '', n1)), "~= 1.") n.rem <- n - n0 - n1 if(n.rem <= 0) { # < 0 possible if w0 & w1 overlap if(n1 > 0) cat("\n") return(invisible()) } cat0("The remaining", ngettext(n.rem, "one", sprintf("%d ones", n.rem)), "are") if(is.null(names(w))) names(w) <- as.character(seq(along = w)) w <- w[!w1 & !w0] if(n.rem <= 10) { cat("\n") print(w, digits = digits, ...) 
return(invisible()) } else cat(" summarized as\n") } print(summary(w, digits = digits), digits = digits, ...) } } robustbase/R/ltsPlot.R0000644000176200001440000002347213434014060014413 0ustar liggesusers#### This is from the R package #### #### rrcov : Scalable Robust Estimators with High Breakdown Point #### #### by Valentin Todorov ### This program is free software; you can redistribute it and/or modify ### it under the terms of the GNU General Public License as published by ### the Free Software Foundation; either version 2 of the License, or ### (at your option) any later version. ### ### This program is distributed in the hope that it will be useful, ### but WITHOUT ANY WARRANTY; without even the implied warranty of ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ### GNU General Public License for more details. ### ### You should have received a copy of the GNU General Public License ### along with this program; if not, a copy is available at ### http://www.r-project.org/Licenses/ plot.lts <- function(x, which = c("all", "rqq","rindex", "rfit", "rdiag"), classic = FALSE, ask = (which[1] == "all" && dev.interactive()), id.n, ...) { if (!inherits(x, "lts")) stop("Use only with 'lts' objects") ltsPlot(x, which, classic, ask, id.n, ...) } ltsPlot <- function(x, which = c("all", "rqq","rindex", "rfit", "rdiag"), classic = FALSE, ask = FALSE, id.n, ...) { ##@bdescr ## Make plots for model checking and outlier detection based on ## the LTS regression estimates: ## rqq - normal quantile plot of the LTS and LS residuals ## rindex - standardized LTS/LS Residuals versus index ## rfit - standardized LTS/LS Residuals versus fitted values ## rdiag - regression diagnostic plot ## ##@edescr ## ##@in x : [object] An lts object ##@in which : [character] A plot option, one of: ## rqq: ## rdiag: ## rfit: ## rindex: ## default is "rqq" ##@in classic : [logical] If true the classical plot will be displayed too ## default is classic=FALSE ##@in id.n : [number] number of observations to be identified with a label. label <- function(x, y, ord, lab, id.n, ...) { if(id.n) { n <- length(y) which <- order(ord)[(n - id.n + 1):n] lab <- if(missing(lab)) which else lab[which] ## how to adjust the labels? ## a) adj=0.1 ## b) x=x+xrange ## c) pos=4 (to the left of the observation) ## d) additionaly to pos specify offset=0.2 (fraction of a character) xrange <- par("usr") xrange <- (xrange[2] - xrange[1])/50 text(x[which], y[which], pos = 4, offset = 0.2, lab, ...) } } ## The R function 'qqline' (package::stats) adds a line to a ## normal quantile-quantile plot which passes through the ## first and third quartiles. In S this function returns the ## slope and intercept of the line, but not in R. ## Here we need the slope and intercept in order to sort the ## residuals according to their distance from the line. myqqline <- function(y, datax = FALSE, ...) { y <- quantile(y[!is.na(y)],c(0.25, 0.75)) x <- qnorm(c(0.25, 0.75)) if(datax) { slope <- diff(x)/diff(y) int <- x[1] - slope*y[1] } else { slope <- diff(y)/diff(x) int <- y[1]-slope*x[1] } abline(int, slope, ...) invisible(list(int = int, slope = slope)) } myqqplot <- function(r, classic = FALSE, lab, id.n, ...) { ## Normal QQ-plot of residuals: ## Produces a Quantile-Quantile plot in which the vector r is plotted ## against the quantiles of a standard normal distribution. 
xlab <- "Quantiles of the standard normal distribution" ylab <- if(classic) "Standardized LS residual" else "Standardized LTS residual" qq <- qqnorm(r, mgp = mgp, xlab = xlab, ylab = ylab, ...) ll <- myqqline(r, lty = 2, ...) ord <- abs(qq$y - ll$int - ll$slope * qq$x) label(qq$x, qq$y, ord, lab, id.n, ...) } indexplot <- function(r, scale, classic = FALSE, lab, id.n, ...) { ## Index plot of standardized residuals: ## Plot the vector r (LTS or LS residuals) against ## the observation indexes. Identify by a label the id.n ## observations with largest value of r. ## Use classic=FALSE/TRUE to choose the label of the vertical axes ## VT:: 26.12.2004 if(scale == 0) stop("Index plot of standardized residuals is not avalable if scale = 0") xlab <- "Index" ylab <- if(classic) "Standardized LS residual" else "Standardized LTS residual" x <- 1:length(r) y <- r/scale ylim <- c(min(-3, min(y)), max(3, max(y))) plot(x, y, ylim = ylim, mgp = mgp, xlab = xlab, ylab = ylab, ...) label(x, y, ord = abs(y), lab, id.n, ...) abline(h = 0, lty = 4, ...) abline(h = c(-2.5, 2.5), ...) mtext(c("-2.5","2.5"), side = 4, line = 1.2, at = c(-2.5, 2.5), ...) title(main = "Residuals vs Index") } ##' Tukey-Anscombe Plot (rename ?!) fitplot <- function(obj, classic = FALSE, lab, id.n, ...) { ## Standardized residuals vs Fitted values plot: ## Plot the vector r (LTS or LS residuals) against ## the corresponding fitted values. Identify by a ## label the id.n observations with largest value of r. ## Use classic=FALSE/TRUE to choose the label of the vertical axes ## VT:: 26.12.2004 if(obj$scale == 0) stop("Standardized residuals vs Fitted values plot is not avalable if scale = 0") ## x <- obj$X %*% as.matrix(obj$coef) x <- obj$fitted.values y <- obj$residuals/obj$scale ylim <- c(min(-3, min(y)), max(3, max(y))) yname <- names(obj$scale) xlab <- paste("Fitted :", yname) ylab <- if(classic) "Standardized LS residual" else "Standardized LTS residual" plot(x, y, ylim = ylim, mgp = mgp, xlab = xlab, ylab = ylab, ...) label(x, y, ord = abs(y), lab, id.n, ...) abline(h = 0, lty = 4, ...) abline(h = c(-2.5, 2.5), ...) mtext(c("-2.5","2.5"), side = 4, line = 1.2, at = c(-2.5, 2.5), ...) title(main = "Residuals vs Fitted") } ## fitplot() rdiag <- function(obj, classic = FALSE, lab, id.n, ...) { ## Regression diagnostic plot: ## Plot the vector of the standardized residuals against ## the robust distances of the predictor variables ## Identify by a label the id.n observations with largest value of r. 
## Use classic=FALSE/TRUE to choose the label of the vertical axes p <- if(obj$intercept) length(obj$coef) - 1 else length(obj$coef) if(p <= 0) warning("Diagnostic plot is not available for univar\niate location and scale estimation") ## VT:: 26.12.2004 if(obj$scale <= 0) stop("Regression Diagnostic plot is not avalable if scale = 0") if(is.null(obj$RD)) stop("Regression Diagnostic plot is not avalable: option mcd=F was set in ltsReg().") if(obj$RD[1] == "singularity") stop("The MCD covariance matrix was singular.") if(classic) { xlab <- "Mahalanobis distance" ylab <- "Standardized LS residual" } else { xlab <- "Robust distance computed by MCD" ylab <- "Standardized LTS residual" } ## VT:: 18.01.20045 ## set id.n to the number of all outliers: ## regression outliers (weight==0)+ leverage points (RD > cutoff) if(missing(id.n)) { id.n <- length(unique(c(which(obj$RD > sqrt(qchisq(0.975, p))), which(obj$lts.wt == 0)))) } quant <- max(c(sqrt(qchisq(0.975, p)), 2.5)) x <- obj$RD y <- obj$residuals/obj$scale ## xlim <- c(0, max(quant + 0.1, max(x))) ylim <- c(min(-3, min(y)), max(3, max(y))) plot(x, y, ylim = ylim, mgp = mgp, xlab = xlab, ylab = ylab, main = "Regression Diagnostic Plot", ...) ord <- apply(abs(cbind(x/2.5, y/quant)), 1, max) label(x, y, ord = ord, lab, id.n, ...) abline(v = quant, h = c(-2.5, 2.5), ...) mtext(c("-2.5","2.5"), side = 4, line = 1.2, at = c(-2.5, 2.5), ...) } ## rdiag() ## parameters and preconditions which <- match.arg(which) r <- residuals(x) n <- length(r) id.n.missing <- missing(id.n) || is.null(id.n) ## if id.n is missing, it will be set to a default for each plot. if(!id.n.missing) { id.n <- as.integer(id.n) if(id.n < 0 || id.n > n) stop("'id.n' must be in {1,..,",n,"}") } mgp <- c(2.5, 1, 0) # set the margin line (in 'mex' units) for the: ## - axis title, ## - axis labels and ## - axis line. ## The default is 'c(3, 1, 0)'. if(!classic) par(mfrow = c(1,1), pty = "m") else { par(mfrow = c(1,2), pty = "m") ## calculate the LS regression (using LTS with alpha = 1) ## if intercept, obj$X is augmented with a column of 1s - remove it if(x$intercept && # model with intercept length(dim(x$X)) == 2 && # X is 2-dimensional (nc <- ncol(x$X)) > 1 && # X has more than 1 column all(x$X[,nc] == 1)) # the last column of X is all 1s X <- x$X[, -nc] else X <- x$X obj.cl <- ltsReg(X, x$Y, intercept = x$intercept, alpha = 1) } if (ask) { op <- par(ask = TRUE) on.exit(par(op)) } ## set id.n to the number of regression outliers (weight==0): nx <- if(id.n.missing) length(which(x$lts.wt == 0)) else id.n if(which == "all" || which == "rqq") { ## VT::20.12.2006 - the standardized residuals are in x$resid ## - no change for the other plot functions - the residuals will be standardized ## inside indexplot(), fitplot(), etc myqqplot(x$resid, id.n = nx, ...) # normal QQ-plot of the LTS residuals if(classic) # normal QQ-plot of the LS residuals myqqplot(obj.cl$resid, classic = TRUE, id.n = nx, ...) } if(which == "all" || which == "rindex") { indexplot(x$residuals, x$scale, id.n = nx, ...) # index plot of the LTS residuals if(classic) # index plot of the LS residuals indexplot(obj.cl$residuals, obj.cl$scale, classic = TRUE, id.n = nx, ...) } if(which == "all" || which == "rfit") { fitplot(x, id.n = nx, ...) if(classic) fitplot(obj.cl, classic = TRUE, id.n = nx, ...) } if(which == "all" || which == "rdiag") { rdiag(x, id.n = id.n, ...) if(classic) rdiag(obj.cl, classic = TRUE, id.n = id.n, ...) 
} } robustbase/R/comedian.R0000644000176200001440000001401712441664610014535 0ustar liggesusers### -*- mode: R ; delete-old-versions: never -*- ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, a copy is available at ## http://www.r-project.org/Licenses/ ### From package 'Biobase' (has only rowMedians + rowQ) / 'matrixStats' ### MM: all type checking now in C ## --- TODO: implement hasNA=NA ==> do check maybe differently than = TRUE ## --> ../src/rowMedians.c + ../src/rowMedians_TYPE-template.h colMedians <- function(x, na.rm=FALSE, hasNA=TRUE, keep.names=TRUE) .Call(R_rowMedians, x, na.rm, hasNA, FALSE, keep.names) rowMedians <- function(x, na.rm=FALSE, hasNA=TRUE, keep.names=TRUE) .Call(R_rowMedians, x, na.rm, hasNA, TRUE, keep.names) ### Maria Anna di Palma, without consistency factor 15.11.2014 ### Fixes by Valentin Todorov ### Martin Maechler: added mad() consistency factor, 27.11.2014 ### new name, class; more compatible to 'covMcd' covComed <- function (X, n.iter = 2, reweight = FALSE, tolSolve = control$ tolSolve,# had 1e-10 hardwired {now 1e-14 default} trace = control$ trace, wgtFUN = control$ wgtFUN, control = rrcov.control()) { ## ATTENTION ## ## Med(abs(X))^2=Med(X*X) only if the number of rows is odd d <- dim(X <- as.matrix(X)) n <- d[1] p <- d[2] if(is.character(wgtFUN)) wgtFUN <- .wgtFUN.covComed[[wgtFUN]](p=p, n=n, control) if(!is.function(wgtFUN)) stop("'wgtFUN' must be a function or a string specifying such a function") madX <- apply(X, 2, mad) I.mad <- 1/madX rho <- I.mad * COM(X) * rep(I.mad, each = p) ## better than ## D <- diag(1/madX) ## rho <- D %*% COM(X) %*% t(D) U <- svd(rho, p, nv = 0L)$u ## DD <- diag(madX) ## Q <- DD %*% U ## invQ <- solve(Q) ## == t(U) %*% D -- since U is orthogonal! t.inv.Q <- I.mad * U # = t(solve(Q)) = t(t(U) * D) == t(D) U = D U Z <- X %*% t.inv.Q ## much faster than for (i in 1:n) Z[i,] <- invQ %*% X[i,] out <- comedian(rho, Z, X) ## Mahalanobis distance for(it in seq_len(n.iter))# allow n.iter = 0 out <- comedian(out$S., out$Z, X) mm <- colMedians(out$Z) mx <- drop(out$Q %*% mm) ## MM: These are "raw" distances compared to covMcd() mah <- mahalanobis(X, mx, out$S., tol = tolSolve) ## compute weights weights <- wgtFUN(mah) covW <- cov.wt(X, wt=weights)[c("cov", "center", "n.obs")] covW$weights <- if(reweight) { ## above 'mah' = 'raw.mah' .. 
==> allow another reweighting as in covMcd() covW$raw.weights <- weights covW$mah <- mahalanobis(X, covW$center, covW$cov, tol = tolSolve) wgtFUN(mah) } else # no re-weighting weights structure(class = "comed", c(list(Z = out$Z, raw.cov = out$S., raw.center = mx, raw.mah = mah, wgtFUN=wgtFUN), covW)) } ##' Martin Maechler's simple proposal for an *adaptive* cutoff ##' i.e., one which does *not* reject outliers in good samples asymptotically .COM.adaptWgt.c <- function(n,p, eps = 0.2 / n^0.3) { ## default eps ==> 1-eps(n=100) ~= 0.95; 1-eps(n=10) ~= 0.90 ## using upper tail: 1.4826 * qchisq(eps, p, lower.tail=FALSE) / qchisq(0.5, p) } ## Default wgtFUN() constructors for covComed(): .wgtFUN.covComed <- list("01.original" = function(p, ...) { cMah <- .COM.adaptWgt.c(p=p, eps = 0.05)# 1 - eps = 0.95 function(d) as.numeric(d < median(d)*cMah) }, "01.flex" = function(p, n, control) { ## 'beta' instead of 0.95 stopifnot(is.1num(beta <- control$beta), 0 <= beta, beta <= 1) cMah <- 1.4826 * qchisq(beta, p) / qchisq(0.5, p) function(d) as.numeric(d < median(d)*cMah) }, "01.adaptive" = function(p, n, ...) { ## 'beta_n' instead of 0.975 cMah <- .COM.adaptWgt.c(n,p) function(d) as.numeric(d < cMah) }, "sm1.flex" = function(p, n, control) { ## 'beta' / smooth weight stopifnot(is.1num(beta <- control$beta), 0 <= beta, beta <= 1) cMah <- 1.4826 * qchisq(beta, p) / qchisq(0.5, p) function(d) smoothWgt(d / median(d), c=cMah, h = 1) }, "sm1.adaptive" = function(p, n, ...) { cMah <- .COM.adaptWgt.c(n=n, p=p) function(d) smoothWgt(d / median(d), c = cMah, h = 1) }, "sm2.adaptive" = function(p, n, ...) { cMah <- .COM.adaptWgt.c(n=n, p=p) function(d) smoothWgt(d / median(d), c = cMah, h = 2) } ) comedian <- function (rho, Z, X) { p <- ncol(X) U <- svd(rho, nv = 0L)$u madX <- apply(X, 2, mad) I.mad <- 1/madX ## D <- diag(madX) ## Q <- D %*% U Q <- madX * U ## invQ <- solve(Q) t.inv.Q <- I.mad * U # = t(solve(Q)) = t(t(U) * D) == t(D) U = D U Z <- X %*% t.inv.Q ## for (i in 1:n) Z[i,] <- invQ %*% X[i,] madZ <- apply(Z, 2, mad) list(Q=Q, Z=Z, S. = tcrossprod(Q * rep(madZ, each=p))) ## better than ## S. = Q %*% diag(madZ)^2 %*% t(Q) } COM <- function(X) { ## Comedian *with* consistency factor. Falk(1997) was without it. stopifnot(is.1num(p <- ncol(X)), p >= 1) med <- colMedians(X) Y <- sweep(X, 2L, med, `-`) COM <- matrix(0., p,p) madY <- numeric(p) for(i in 1:p) { madY[i] <- madYi <- mad(Yi <- Y[,i]) for(j in seq_len(i-1)) { # j <= i ==> madY[j] "exists" COM[j,i] <- COM[i,j] <- median(Yi * Y[,j]) / (madYi * madY[j]) ## COM[i,j] <- median((Y[,i])*(Y[,j])) ## COM[i,j] <- (1.4826^2)*median((Y[,i])*(Y[,j])) } ## j == i : COM[i,i] <- median(Yi^2) / (madYi^2) } ## return [ 1.4826 = formals(mad)$constant = consistency factor of mad()] 1.4826^2 * COM } robustbase/R/psi-rho-funs.R0000644000176200001440000004200312737461431015310 0ustar liggesusers#### Psi(), Rho(), weight() etc functions for M-Estimation and extensions ## Use an S4 class for such function classes ## Follow a similar idea as nlsModel() {in "stats"} which returns ## a list of functions sharing a common {non-small!} environment ## NOTA BENE: Experiments etc are currently in ../misc/experi-psi-rho-funs.R ## --------- (FIXME: move those to ../tests/psi-rho-etc.R and the vignette ## ../vignettes//psi_functions.Rnw (and see ../inst/xtraR/plot-psiFun.R) ## ---> look for 'FIXME' below !!! ## ------- ### A. (Symmetric) Location / Regression ## A single function(x, tuningPars) ## a. 1st argument 'x', numeric; must work vectorized on x ## b. 
further arguments: tuning parameters *with a default* setClass("functionX", contains = "function", validity = function(object) { ## "function" is already because of 'contains' if(names(ff <- formals(object))[1] != "x") return("first argument must be 'x'") f0 <- object(0) fI <- object(Inf) if(!identical(c(f0,fI), object(c(0,Inf)))) return("F(x, *) does not vectorize in 'x'") ## Otherwise : valid TRUE }) ## A functional --- i.e. function of "tuning pars only", such as ## Ep(hc) = Int_{-Inf}^{+Inf} psi(x; hc)^2 dnorm(x) dx ##' This one is *not* checked for vectorization: needed when length(k) > 1 setClass("functionXal", contains = "function") ##' Here F(k) must vectorize in k setClass("functionXal1", contains = "functionXal", validity = function(object) { f0 <- object(0) fI <- object(Inf) if(!identical(c(f0,fI), object(c(0,Inf)))) return("F(k) = I_k[f(.)] does not vectorize in 'k'") ## Otherwise : valid TRUE }) setClass("psi_func", slots = c(rho = "functionX", psi = "functionX", ## psi(x) == d/dx rho(x) = x * wgt(x) wgt = "functionX", ## wgt(x) == psi(x) / x Dpsi = "functionX",## psi'(x) == d/dx psi(x) = rho''(x) Dwgt = "functionX", ## wgt'(x) == d/dx wgt(x) ## tuning parameters, i.e., formals(rho)[-1] tDefs = "numeric",## *named* values of tuning parameters ## FIXME !! {see 4 lines below} Erho = "functionXal", # = E_X[rho(X)]; X~N(0,1); Epsi2 = "functionXal", # = E_X[psi(X)^2]; X~N(0,1); 'A' EDpsi = "functionXal", # = E_X[psi'(X)]; X~N(0,1); 'B' ## name = "character", xtras = "list" ## for flexible extensions.. )) ## FIXME: need other E[] than just wrt N(0,1) ## ----- e.g. for robglm(), need E[] wrt Gamma(.) ### Constructors / "Examples" [the examples are the objects, we'll really use!] psiFunc <- function(rho,psi,wgt, Dpsi,Dwgt, Erho=NULL, Epsi2=NULL, EDpsi=NULL, name, ...) { lent <- length(dotsargs <- list(...)) ## '...' must contain all tuning parameters and their defaults: ## NOTA BENE: Now want at least one tuning parameter.. "worst case": a dummy stopifnot(lent >= 1, length(nt <- names(dotsargs)) == lent, all(nchar(nt)) >= 1) ## Definition of Dwgt is optional if (missing(Dwgt)) Dwgt <- .defDwgt(psi, Dpsi) ## rho, psi,... checking: must have argument names argn <- c("x", nt) for(fnam in list("rho", "psi", "wgt", "Dpsi", "Dwgt", "Erho", "Epsi2", "EDpsi")) { f <- get(fnam, inherits = FALSE) ef <- environment(f) nf <- names(formals(f)) # "x" and "k" for Huber's if (fnam %in% c("Erho", "Epsi2", "EDpsi")) { if(!identical(nf, argn[-1])) stop("arguments of function '",fnam,"' are (", paste(nf, collapse=","),") but should be (", paste(argn[-1],collapse=","),").") formals(f) <- dotsargs } else { if(!identical(nf, argn)) stop("arguments of function '",fnam,"' are (", paste(nf, collapse=","),") but should be (", paste(argn,collapse=","),").") formals(f)[-1] <- dotsargs } environment(f) <- ef assign(fnam, f, inherits = FALSE) } fnctl.typ <- if(lent == 1 && length(dotsargs[[1]]) == 1) "functionXal1" else "functionXal" new("psi_func", rho = new("functionX", rho), psi = new("functionX", psi), wgt = new("functionX", wgt), Dpsi= new("functionX", Dpsi), Dwgt= new("functionX", Dwgt), ## tNams = if(lent) nt else character(0), tDefs = unlist(dotsargs), Erho = new(fnctl.typ, Erho), Epsi2= new(fnctl.typ, Epsi2), EDpsi= new(fnctl.typ, EDpsi), name = if (missing(name)) character(0) else name, xtras= list(tuningP = dotsargs)) } ## Generate default Dwgt function ## Unfortunately, MM can't see how to make this works nicely; ## ._.. 
= args should really be something like 'x, k' {no parens}: .defDwgt <- function(psi, Dpsi) { args <- formals(Dw <- psi)# -> same formals body(Dw) <- substitute({ y <- .X. .X. <- .X.[not0 <- .X. != 0] y[not0] <- ( Dpsi(._..) - psi(._..)/.X. ) / .X. y }, list(.X. = as.name(names(args[1])), ._.. = args)) environment(Dw) <- environment() Dw } ## so we use this "less nice" variant: .defDwgt <- function(psi, Dpsi) { nf <- names(formals(psi)) eval(parse(text = gsub("_,_", paste(nf, collapse=","), gsub("x", nf[1], "function(_,_) { y <- x x <- x[not0 <- x != 0] y[not0] <- ( Dpsi(_,_) - psi(_,_)/x ) / x y }")))) } chgDefaults <- function(object, ...) standardGeneric("chgDefaults") setMethod("chgDefaults", signature("psi_func"), function(object, ...) { lent <- length(dotsargs <- list(...)) ## '...' must contain all tuning parameters and their defaults: stopifnot(lent >= 1, length(nt <- names(dotsargs)) == lent, all(nchar(nt)) >= 1) ## rho "..." must conform to rho, etc: nf <- names(ff <- formals(object@rho)) if(!identical(nf[-1], nt)) stop("invalid tuning parameter names: ", paste(nt, collapse=",")," instead of ", paste(nf[-1],collapse=","),".") for(fnam in list("rho", "psi", "wgt", "Dpsi", "Dwgt", "Erho", "Epsi2", "EDpsi")) { f <- slot(object, fnam) ef <- environment(f) if (is(f, "functionXal")) formals(f) <- dotsargs else formals(f)[-1] <- dotsargs environment(f) <- ef ## lowlevel {faster than}: slot(..) <- new("functionX", f) slot(object, fnam)@.Data <- f } object@tDefs <- unlist(dotsargs) if(identical(nt, names(object@xtras$tuningP)))# TODO: should update even if there are others object@xtras$tuningP <- setNames(eval(dotsargs), nm=nt) object }) .sprintPsiFunc <- function(x, short=FALSE, round=3) { v <- x@tDefs n <- names(v) ## do not print a single dummy parameter "." if (length(n) == 1 && n == ".") v <- numeric(0) if (!length(name <- x@name)) name <- "" if (!short) name <- sprintf("%s psi function", name) if (length(v) >= 1) { if (short) paste(name, paste(n, round(v, round), sep = "=", collapse = "\n"), sep = "\n") else paste0(name, " (", pasteK(n, round(v, round), sep = " = "), ")") } else name } setMethod("show", signature("psi_func"), function(object) cat(.sprintPsiFunc(object), "\n")) ## moved here from inst/xtraR/plot-psiFun.R; called plot.psiFun originally matplotPsi <- function(x, m.psi, psi, par, main = "full", col = c("black", "red3", "blue3", "dark green"), leg.loc = "right", lty = 1, ...) { ## Original Author: Martin Maechler, Date: 13 Aug 2010, 10:17 ## Modified by Manuel Koller, Date: 7 Jan 2013 fExprs <- quote(list(rho(x), psi(x), {psi*minute}(x), w(x) == psi(x)/x, {w*minute}(x))) ## build legend map <- if (is.null(colnames(m.psi))) { 1:(ncol(m.psi)+1) } else { c(1, c(rho=2, psi=3, Dpsi=4, wgt=5, Dwgt=6)[colnames(m.psi)]) } fExprs <- fExprs[map] ## ... title if(is.character(main)) { shortMain <- (main == "short") elist <- list(FF = if(shortMain) fExprs[[2]] else fExprs, PSI = psi, PPP = paste(formatC(par), collapse=",")) tit <- if(shortMain) substitute(FF ~ "etc, with" ~ psi*"-type" == PSI(PPP), elist) else substitute(FF ~~ ~~ " with "~~ psi*"-type" == PSI(PPP), elist) } else tit <- NULL ## plot matplot(x, m.psi, col=col, lty=lty, type="l", main = tit, ylab = quote(f(x)), xlab = quote(x), ...) 
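    ## (next: zero reference lines, then a legend built by turning 'fExprs' into an
    ##  expression(...) of the plotted curves and evaluating it)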
abline(h=0,v=0, lty=3, col="gray30") fE <- fExprs; fE[[1]] <- as.name("expression") legend(leg.loc, inset=.02, eval(fE), col=col, lty=lty, bty="n") invisible(cbind(x=x, m.psi)) } setMethod("plot", signature(x = "psi_func"), function(x, y, which = c("rho", "psi", "Dpsi", "wgt", "Dwgt"), main = "full", col = c("black", "red3", "blue3", "dark green", "light green"), leg.loc = "right", ...) { ## x: psi_func object ## y: points to plot at (x-Axis in plot) which <- match.arg(which, several.ok = TRUE) if(missing(y)) y <- seq(-5, 10, length=1501) ## For backcompatibility: if(!is.null(sm <- list(...)$shortMain)) { if(!missing(main)) stop("You cannot specify both 'main' and the deprecated 'shortMain'") warning("'shortMain' is deprecated and will get defunct.\n", "Use 'main = \"short\"' instead of 'shortMain = TRUE'") if(sm) main <- "short" } tmp <- lapply(which, function(name) slot(x, name)(y)) m.psi <- do.call(cbind, tmp) colnames(m.psi) <- which matplotPsi(y, m.psi, x@name, unlist(formals(x@rho)[-1]), main=main, col=col, leg.loc=leg.loc, ...) }) ##-------- TODO: Rather write short __vignette__ with these formulae: ##' \Phi_j(t) := \int_{-\infty}^t u^j \phi(u) \;du ##' --------- where \phi(.) (= \code{dnorm()}) ##' is the density of the standard normal distribution N(0,1). ##' @title "Truncated" Moments of the Gaussian: Int u^j phi(u) du ##' @param t numeric vector ##' @param j an integer (valued scalar), >= 0 ##' @return Phi_j(t), i.e. a numeric vector of the same length as t. ##' @author Martin Maechler PhiI <- function(t, j = 0) { stopifnot(j == as.integer(j), length(j) == 1, is.numeric(t)) if(j >= 4) ## recursion formula -t^(j-1)*dnorm(t) + (j-1)* PhiI(t, j-2) else switch(j+1, ## 0: pnorm(t), ## 1: -dnorm(t), ## 2: pnorm(t) - t*dnorm(t), ## 3: -(2 + t^2)*dnorm(t)) } if(FALSE) { ## Checking PhiI() visually: tt <- seq(-4,10, length=64) j.max <- 5 oo <- sfsmisc::mult.fig(j.max+1, main = "Checking PhiI(., j)", marP=-c(1,1,1,0)) cols <- c("red2", adjustcolor("blue", 0.25)) for(j in 0:j.max) { curve(PhiI(x, j=j), -4, 10, col=cols[1], main = bquote(j == .(j))) if(j == j.max %/% 2) legend("right", c("PhiI()", "integrate(..)"), col=cols, lwd = c(1,3), lty = c(1,3), inset = 1/40) I <- sapply(tt, function(t) integrate(function(u) u^j * dnorm(u), -Inf, t)$value) lines(tt, I, col= cols[2], lwd=3, lty = 3) } par(oo$old.par) } ## Huber: huberPsi <- psiFunc(rho = function(x, k) { r <- u <- abs(x); I <- u < k r[ I] <- u[I]^2 / 2 r[!I] <- k*(u[!I] - k / 2) r }, psi = function(x, k) pmin.int(k, pmax.int(-k, x)), wgt = function(x, k) pmin.int(1, k/abs(x)), Dpsi = function(x, k) abs(x) <= k, Erho = function(k) {iP <- pnorm(k, lower=FALSE) 1/2 - iP + k*(dnorm(k) - k*iP)}, Epsi2= function(k) ifelse(k < 10, 1 - 2*(k*dnorm(k) + (1-k*k)*pnorm(k, lower=FALSE)), 1), EDpsi= function(k) 2*pnorm(k) - 1, name = "Huber", ## the tuning pars and default: k = 1.345) ## Hampel: hampelPsi <- psiFunc(rho = function(x, k) { u <- abs(x) a <- k[1] ; b <- k[2]; r <- k[3] Lg <- r <= u I <- u < a m1 <- !I & (I2 <- u < b) # a <= u < b : 'constant' m2 <- !I2 & !Lg # b <= u < r : 'descending' x[ I] <- u[I]^2 / 2 x[m1] <- a*(a/2 + (u[m1] - a)) ##x[m2]<- a*(a/2 + (b - a)) + a*(u^2 - b^2)/(2*(r - b)) ##x[m2]<- a*(b - a/2) + a*(u^2 - b^2)/(2*(r - b)) x[m2] <- a*(b - a/2 + (u[m2] - b)*(r - (b+u[m2])/2)/(r - b)) ##u=r: a*(b - a/2 + (b + r)/2) x[Lg] <- a/2*(b - a + r) x }, psi = function(x, k) { ## this is "optimized" ==> factors faster than using ifelse()! 
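                       ## Hampel's three-part psi with k = c(a, b, r):  psi(x) = x on [0, a],
                       ## = a on [a, b], descends linearly from a to 0 on [b, r], and = 0
                       ## beyond r (and odd in x).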
u <- abs(x) lrg <- k[3] <= u mid <- k[1] < u & !lrg # constant _and_ descending ## x is result for |x| < k[1] x[lrg] <- 0 if(any(mid)) x[mid] <- k[1] * sign(x[mid])* pmin.int(1, (u[mid] - k[3])/(k[2] - k[3])) x }, wgt = function(x, k) { x <- abs(x) lrg <- k[3] <= x I <- x < k[1] mid <- !I & !lrg # contains constant and descending x[I] <- 1 x[lrg] <- 0 if(any(mid)) x[mid] <- k[1] / x[mid] * pmin.int(1, (x[mid] - k[3])/(k[2] - k[3])) x }, Dpsi = function(x, k) { stopifnot(length(k) == 3, diff(k) >= 0) # for now u <- abs(x) lrg <- k[3] <= u I <- u < k[1] m1 <- !I & (I2 <- u < k[2]) # k_1 <= u < k_2: 'constant' m2 <- !I2 & !lrg # k_2 <= u < k_3 : 'descending' x[lrg | m1] <- 0 x[I ] <- 1 x[m2] <- k[1] / (k[2] - k[3]) x }, Erho = function(k) { names(k) <- c("a","b","r") a <- k[["a"]] ; b <- k[["b"]]; r <- k[["r"]] ph <- dnorm(k) Ph <- pnorm(k) ## rho(x) = c0 for |x| >= r c0 <- a/2*(b - a + r) ## coeff. of rho(x) = a/2(c1 + c2|x| + c2 x^2), for |x| in [b,r] D2 <- r - b c1 <- -(a*r+ b*(b-a)) / D2 c2 <- 2*r / D2 c3 <- - 1 / D2 dPh.rb <- Ph[["r"]] - Ph[["b"]] dph.rb <- ph[["r"]] - ph[["b"]] ## Phi_2(r) - Phi_2(b) := dPh2.rb <- Ph[["r"]] - Ph[["b"]] - r*ph[["r"]] + b*ph[["b"]] ## E[rho(X)] = ## [0,a] : 2* 1/2*(Phi_2(a) - Phi_2(0)) (Ph[["a"]]-a*ph[["a"]] - 1/2) + ## [a,b] : 2* a*( -a/2*(Phi(b) - Phi(a)) + (Phi_1(b) - Phi_1(a)) ) 2*a*(-a/2*(Ph[["b"]]-Ph[["a"]]) + (ph[["a"]] - ph[["b"]])) + ## the upper two can be simplified to ## -1/2 + a*ph[["a"]] + (1+a^2)*Ph[["a"]] -2*a*ph[["b"]] - a^2*Ph[["b"]] + ## [b,r] : a*(c1*dPh.rb + c2*(-dph.rb) + c3*dPh2.rb) + ## [r,Inf] : 2*c0*(1 - Ph[["r"]]) } , Epsi2 = function(k) ## E[psi^2]=: 'A' in Hampel et al.(1986), p.150 { names(k) <- c("a","b","r") a <- k[["a"]] ; r <- k[["r"]] ph <- dnorm(k) Ph <- pnorm(k) Ph2 <- Ph - k*ph # = Phi_2(k) {see PhiI(.) above} 2*(Ph2[["a"]] - 1/2 + a^2*(Ph[["b"]] - Ph[["a"]]) + (a / (r - k[["b"]]))^2 * ( r^2 *(Ph[["r"]] - Ph[["b"]]) -2*r *(ph[["b"]] - ph[["r"]]) + Ph2[["r"]] - Ph2[["b"]])) }, EDpsi= function(k) ## E[psi'] =: 'B' in Hampel et al.(1986) { a <- k[1] ; b <- k[2]; r <- k[3] 2*(pnorm(a) - 1/2 - a* (pnorm(r) - pnorm(b)) / (r - b)) }, name = "Hampel", ## the tuning pars and default: k = c(2,4,8) / 1.345)# 1/1.345 = 0.7435 ## TODO: Biweight : ## ---- -------- but note that we have ## (non-S4) ./biweight-funs.R already {used by lmrob.*()} ## ~~~~~~~~~~~~~~~ if(FALSE) tukeyPsi <- c() ########## ## maybe TODO: Optimal tanh() estimator for location ### B. M-Estimators of Scale --- need chi() and slightly different functionals ### --- ---------------------- ### ## one "challenge" is the a(b) needed in chi(x; a,b) = [x^2 -1 -a]_b^b ## for V-optimal M-Estimates of scale ## --> but that's solved (!) in ./scale-chi-opt.R ## ~~~~~~~~~~~~~~~~~ ## Then, I'd also want the optimal chi for s robustbase/R/plot.lmrob.R0000644000176200001440000001164712514020510015036 0ustar liggesusers## MM: more following of plot.lm() : ~/R/D/r-devel/R/src/library/stats/R/plot.lm.R plot.lmrob <- function (x, which = 1:5, caption = c("Standardized residuals vs. Robust Distances", "Normal Q-Q vs. Residuals", "Response vs. Fitted Values", "Residuals vs. Fitted Values" , "Sqrt of abs(Residuals) vs. 
Fitted Values"), panel = if(add.smooth) panel.smooth else points, sub.caption = deparse(x$call), main = "", compute.MD = TRUE, # maybe (n < 1000 && p < 20) ask = prod(par("mfcol")) < length(which) && dev.interactive(), id.n = 3, labels.id = names(residuals(x)), cex.id = 0.75, label.pos = c(4,2), qqline = TRUE, add.smooth = getOption("add.smooth"), ..., p = 0.025) { if (!inherits(x, "lmrob")) stop("Use only with 'lmrob' objects") if (!is.numeric(which) || any(which < 1) || any(which > 5)) stop("'which' must be in 1:5") show <- rep(FALSE, 5) show[which] <- TRUE r <- residuals(x) n <- length(r) sr <- r/x$scale yh <- fitted(x) if (is.null(id.n)) id.n <- 0 else { id.n <- as.integer(id.n) if(id.n < 0L || id.n > n) stop(gettextf("'id.n' must be in {1,..,%d}", n), domain = NA) } if(id.n > 0L) { ## label the largest residuals if(is.null(labels.id)) labels.id <- paste(1L:n) iid <- 1L:id.n show.r <- sort.list(abs(r), decreasing = TRUE)[iid] ## if(any(show[2L:3L])) ## show.rs <- sort.list(abs(rs), decreasing = TRUE)[iid] text.id <- function(x, y, ind, adj.x = TRUE) { labpos <- if(adj.x) label.pos[1+as.numeric(x > mean(range(x)))] else 3 text(x, y, labels.id[ind], cex = cex.id, xpd = TRUE, pos = labpos, offset = 0.25) } } one.fig <- prod(par("mfcol")) == 1 if (ask) { op <- par(ask = TRUE) on.exit(par(op)) } if (show[1]) { if(is.null(x[['MD']]) && compute.MD) { message("recomputing robust Mahalanobis distances") x$MD <- ## need to recompute robMD(x = if(!is.null(x[['x']])) x$x else if(!is.null(x[['model']])) model.matrix(x, x$model) else stop("need 'model' or 'x' component for robust Mahalanobis distances"), intercept = attr(x$terms,"intercept"), wqr = x$qr) ## try to "cache" them with the object .ge <- .GlobalEnv if(identical(parent.frame(), .ge) && exists((cnx <- as.character(match.call()[["x"]])), .ge)) { assign(cnx, x, envir = .ge) message("saving the robust distances 'MD' as part of ", sQuote(cnx)) } } if(!is.null(xD <- x[['MD']])) { if (p < 0 || p > 1) stop ("Tolerance range must be between 0% to 100%") else chi <- sqrt( qchisq(p = 1-p, df = x$rank) ) ylim <- range(sr, na.rm=TRUE) if(id.n > 0) ylim <- extendrange(r = ylim, f = 0.08) plot(xD, xlab = "Robust Distances", sr, ylab = "Robust Standardized residuals", ylim=ylim, main = main, type = "n", ...) panel(xD, sr, ...) mtext(caption[1], 3, 0.25) if (one.fig) title(sub = sub.caption, ...) if(id.n > 0) { y.id <- sr[show.r] y.id[y.id < 0] <- y.id[y.id < 0] - strheight(" ")/3 text.id(xD[show.r], y.id, show.r) } abline(h = c(2.5,-2.5), lty = 3) abline(v = chi, lty = 3) } } if (show[2L]) { ## Normal qq <- qqnorm(r, ylab = "Residuals", main = main,...) if(qqline) qqline(r, lty = 3, col = "gray50") mtext(caption[2], 3, 0.25) if (one.fig) title(sub = sub.caption, ...) if(id.n > 0) text.id(qq$x[show.r], qq$y[show.r], show.r) } if (show[3]) { y <- if(!is.null(x[['model']])) model.response(x$model) else yh + r m1 <- min(yh,y) m2 <- max(yh,y) plot(yh, y, xlab = "Fitted Values", ylab = "Response", xlim = c(m1,m2), ylim = c(m1,m2), main = main, type = "n", ...) panel(yh, y, ...) mtext(caption[3], 3, 0.25) if (one.fig) title(sub = sub.caption, ...) if(id.n > 0) text.id(yh[show.r], y[show.r], show.r) abline(a = 0,b = 1) } if (show[4]) { plot(yh, r, xlab = "Fitted Values", ylab = "Residuals", main = main, type = "n", ...) panel(yh, r, ...) mtext(caption[4], 3, 0.25) if (one.fig) title(sub = sub.caption, ...) 
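        ## label the 'id.n' most extreme residuals; labels of negative residuals are
        ## shifted down by strheight(" ")/3, presumably to keep them clear of the points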
if(id.n > 0) { y.id <- r[show.r] y.id[y.id < 0] <- y.id[y.id < 0] - strheight(" ")/3 text.id(yh[show.r], y.id, show.r) } abline(h = c(2.5*x$scale,0,-2.5*x$scale), lty = 3) } if (show[5]) { sqrtabsr <- sqrt(abs(r)) plot(yh, sqrtabsr, xlab = "Fitted Values", ylab = "Sqrt of abs(Residuals)", main = main, type = "n", ...) panel(yh, sqrtabsr, ...) mtext(caption[5], 3, 0.25) if (one.fig) title(sub = sub.caption, ...) if(id.n > 0) text.id(yh[show.r], sqrtabsr[show.r], show.r) } invisible() } robustbase/R/OGK.R0000644000176200001440000002043414123440375013375 0ustar liggesusers####========== Pairwise methods for covariance / correlation ================= ### From: Kjell Konis ### To: R-SIG-Robust@stat.math.ethz.ch, Ricardo Maronna ... ### Cc: Rand Wilcox ... ### Subject: Re: [RsR] [R] M-estimator R function question ### Date: Mon, 5 Dec 2005 10:29:11 +0000 ### Here is an implementation of the OGK estimator completely in R. I ### haven't touched it for a while and I forget how thoroughly I tested ### it so use it with a bit of caution. ### http://www.stats.ox.ac.uk/~konis/pairwise.q ### -------------------------------------------- ##------------------------------------------------------------------------------- ## Computes the orthogonalized pairwise covariance matrix estimate described in ## in Maronna and Zamar (2002). ## Use: pairwise(X, 2, gk.sigmamu, gk, hard.rejection) for the ## Gnanadesikan-Kettenring estimate. ## Alternatively, supply your own functions. ## MM replaced sweep(X, 1, .., '*') which is inefficient! ## == used crossprod() where appropriate ## ## I don't like the names gk.sigmamu() and gk(), ## "gk":= Gnanadesikan-Kettenring; particularly not for the Tau-estimator ## which is not at all related to G.-K. ## ---> replacements s/gk.sigmamu/scaleTau2/ ## s/gk/covGK/ ## -- also in the line of the many other cov*() functions I've renamed ## s/pairwise/covOGK/ ## NOTA BENE: Is *now* consistent, since MM made scaleTau2() consistent ### Documentation -----> ../man/covOGK.Rd ## ============= ================ ##' Compute the mahalanobis distances for *diagonal* var/cov matrix: ##' @param x n x p numeric data matrix ##' @param center numeric p-vector (or length 1 - is recycled) or FALSE ##' @param sd numeric p-vector of "standard deviations" ##' @examples all.equal(mahalanobisD(x, FALSE, sd), ##' mahalanobis (x, rep(0,p), diag(sd^2))) mahalanobisD <- function(x, center, sd) { ## Compute the mahalanobis distances (for diagonal cov). if(!isFALSE(center)) x <- sweep(x, 2L, center, check.margin=FALSE) rowSums(sweep(x, 2L, sd, '/', check.margin=FALSE)^2) } covOGK <- function(X, n.iter = 2, sigmamu, rcov = covGK, weight.fn = hard.rejection, keep.data = FALSE, ...) { stopifnot(n.iter >= 1) call <- match.call() X <- as.matrix(X) p <- ncol(X) if(p < 2) stop("'X' must have at least two columns") Z <- X # as we use 'X' for the (re)weighting U <- diag(p) A <- list() ## Iteration loop. for(iter in 1:n.iter) { ## only a few iterations ## Compute the vector of standard deviations d and ## the covariance matrix U. d <- apply(Z, 2L, sigmamu, ...) Z <- sweep(Z, 2L, d, '/', check.margin=FALSE) for(i in 2:p) { # only need lower triangle of U for(j in 1:(i - 1)) U[i, j] <- rcov(Z[ ,i], Z[ ,j], ...) 
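            ## (for the default  rcov = covGK  this is Gnanadesikan-Kettenring's pairwise
            ##  step,  U[i,j] = ( s(Z[,i] + Z[,j])^2 - s(Z[,i] - Z[,j])^2 ) / 4  for a
            ##  robust scale s(); see covGK() further below)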
} ## Compute the eigenvectors of U and store them as columns of E: ## eigen(U, symmetric) only needs left/lower triangle E <- eigen(U, symmetric = TRUE)$vectors ## Compute A and store it for each iteration A[[iter]] <- d * E ## Project the data onto the eigenvectors Z <- Z %*% E } ## End of orthogonalization iterations. ## Compute the robust location and scale estimates for ## the transformed data. sqrt.gamma <- apply(Z, 2L, sigmamu, mu.too = TRUE, ...) center <- sqrt.gamma[1, ] sqrt.gamma <- sqrt.gamma[2, ] distances <- mahalanobisD(Z, center, sd=sqrt.gamma) ## From the inside out compute the robust location and ## covariance matrix estimates. See equation (5). ## MM[FIXME]: 1st iteration (often the only one!) can be made *much* faster ## ----- covmat <- diag(sqrt.gamma^2) for(iter in n.iter:1) { covmat <- A[[iter]] %*% covmat %*% t(A[[iter]]) center <- A[[iter]] %*% center } center <- as.vector(center) ## Compute the reweighted estimate. First, compute the ## weights using the user specified weight function. weights <- weight.fn(distances, p, ...) sweights <- sum(weights) ## Then compute the weighted location and covariance ## matrix estimates. ## MM FIXME 2 : Don't need any of this, if all weights == 1 ## ----- (which is not uncommon) ==> detect that "fast" wcenter <- colSums(X * weights) / sweights Z <- sweep(X, 2L, wcenter, check.margin=FALSE) * sqrt(weights) wcovmat <- crossprod(Z) / sweights list(center = center, cov = covmat, wcenter = wcenter, wcov = wcovmat, weights = weights, distances = distances, n.iter = n.iter, sigmamu = deparse(substitute(sigmamu)), weight.fn = deparse(substitute(weight.fn)), rcov = deparse(substitute(rcov)), call = call, ## data.name = data.name, data = if(keep.data) X) } ## a version with weights and consistency (but only one tuning const!!) ## is in /u/maechler/R/other-people/Mspline/Mspline/R/scaleTau.R ## scaleTau2 <- function(x, c1 = 4.5, c2 = 3.0, na.rm = FALSE, consistency = TRUE, mu0 = median(x), sigma0 = median(x.), # = MAD(x) {without consistency factor} mu.too = FALSE, iter = 1, tol.iter = 1e-7) { if(na.rm) x <- x[!is.na(x)] n <- length(x) x. <- abs(x - mu0) stopifnot(is.numeric(sigma0), length(sigma0) == 1) # error, not NA .. if(is.na(sigma0))# not needed (?) || (!na.rm && anyNA(x.))) return(c(if(mu.too) mu0, sigma0)) if(sigma0 <= 0) { # no way to get tau-estim. if(!missing(sigma0)) warning("sigma0 =", sigma0," ==> scaleTau2(.) = 0") return(c(if(mu.too) mu0, 0)) } stopifnot(iter >= 1, iter == as.integer(iter), # incl. iter=TRUE is.numeric(tol.iter), tol.iter > 0) nEs2 <- if(!isFALSE(consistency)) { Erho <- function(b) ## E [ rho_b ( X ) ] X ~ N(0,1) 2*((1-b^2)*pnorm(b) - b * dnorm(b) + b^2) - 1 Es2 <- function(c2) ## k^2 * E[ rho_{c2} (X' / k) ] , where X' ~ N(0,1), k= qnorm(3/4) Erho(c2 * qnorm(3/4)) ## the asymptotic E[ sigma^2(X) ] is Es2(c2), {Es2(3) ~= 0.925} : ## TODO: 'n-2' below will probably change; ==> not yet documented ## ---- ==> ~/R/MM/STATISTICS/robust/1d-scaleTau2-small.R ## and ~/R/MM/STATISTICS/robust/1d-scale.R (if(consistency == "finiteSample") n-2 else n) * Es2(c2) } else n sTau2 <- function(sig0) { # also depends on (x., x, c1,c2, Es2) mu <- if(c1 > 0) { # "bi-weight" {in a way that works also with x.=Inf}: w <- pmax(0, 1 - (x. 
/ (sig0 * c1))^2)^2 if(!is.finite(s.xw <- sum(x*w))) { ## x*w \-> NaN when (x,w) = (Inf,0) wpos <- w > 0 w <- w[wpos] s.xw <- sum(x[wpos]*w) } s.xw / sum(w) } else mu0 x <- (x - mu) / sig0 rho <- x^2 rho[rho > c2^2] <- c2^2 ## return c(m = mu, ## basically sqrt(sigma2) := sqrt( sigma0^2 / n * sum(rho) ) : s = sig0 * sqrt(sum(rho)/nEs2)) } # { sTau2() } s0 <- sigma0 if(isTRUE(iter)) iter <- 100000 # "Inf" repeat { m.s <- sTau2(s0) s. <- m.s[["s"]] if((iter <- iter - 1) <= 0 || is.na(s.) || abs(s. - s0) <= tol.iter * s.) break s0 <- s. # and iterate further } ## return c(if(mu.too) m.s[["m"]], s.) } ## Two other simple 'scalefun' to be used for covOGK; ## s_Qn(), s_Sn() are in ./qnsn.R s_mad <- function(x, mu.too= FALSE, na.rm = FALSE) { if (na.rm) x <- x[!is.na(x)] mx <- median(x) c(if(mu.too) mx, mad(x, center = mx)) } s_IQR <- function(x, mu.too= FALSE, na.rm = FALSE) { Qx <- quantile(x, (1:3)/4, na.rm = na.rm, names = FALSE) c(if(mu.too) Qx[2], (Qx[3] - Qx[1]) * 0.5 * formals(mad)$constant) } covGK <- function(x, y, scalefn = scaleTau2, ...) { ## Gnanadesikan-Kettenring's, based on 4*Cov(X,Y) = Var(X+Y) - Var(X-Y) (scalefn(x + y, ...)^2 - scalefn(x - y, ...)^2) / 4 } hard.rejection <- function(distances, p, beta = 0.9, ...) { d0 <- median(distances) * qchisq(beta, p) / qchisq(0.5, p) wts <- double(length(distances))# == 0, but wts[distances <= d0] <- 1.0 wts } ##-- TODO "pairwise QC" ... etc ##--> ~maechler/R/MM/STATISTICS/robust/pairwise-new.R robustbase/R/detmcd.R0000644000176200001440000004123012441664745014224 0ustar liggesusers### -*- mode: R ; delete-old-versions: never -*- ##' Computes the MCD estimator of a multivariate data sets in a \emph{deterministic} ##' way. ##' ##' The MCD estimator is given by the subset of h observations with smallest ##' covariance determinant. The MCD location estimate is then ##' the mean of those h points, and the MCD scatter estimate is ##' their covariance matrix. The default value of h is roughly ##' 0.75n (where n is the total number of observations), but the ##' user may choose each value between n/2 and n. Based on the ##' raw estimates, weights are assigned to the observations such ##' that outliers get zero weight. The reweighted MCD estimator ##' is then given by the mean and covariance matrix of the cases ##' with non-zero weight. ## ##' To compute an approximate MCD estimator deterministically, six initial robust h-subsets are ##' constructed based on robust transformations of variables or robust and ##' fast-to-compute estimators of multivariate location and shape. Then ##' C-steps are applied on these h-subsets until convergence. Note that the ##' resulting algorithm is not fully affine equivariant, but it is often ##' faster than the FAST-MCD algorithm which is affine equivariant ##' (see covMcd()). ##' Note that this function can not handle exact fit situations: if the ##' raw covariance matrix is singular, the program is stopped. In that ##' case, it is recommended to apply the covMcd() function. ##' ##' The MCD method is intended for continuous variables, and assumes that ##' the number of observations n is at least 5 times the number of variables p. ##' If p is too large relative to n, it would be better to first reduce ##' p by variable selection or robust principal components (see the functions ##' robust principal components in package 'rrcov'). ##' ##' @title Compute the MCD estimator of multivariate data in a deterministic way ##' @references ##' Hubert, M., Rousseeuw, P.J. and Verdonck, T. 
(2012), ##' "A deterministic algorithm for robust location and scatter", Journal of ##' Computational and Graphical Statistics, in press. ##' @param x a numerical matrix. The columns represent variables, and rows represent observations. ##' @param h The quantile of observations whose covariance determinant will ##' be minimized. Any value between n/2 and n may be specified. ##' @param hsets.init If one gives here already a matrix with for each column an ##' ordering of the observations (first the one with smallest statistical ##' distance), then the initial shape estimates are not calculated. ##' Default value = NULL. ##' @param save.hsets ##' @param full.h ##' @param scalefn function (or "rule") to estimate the scale. ##' @param maxcsteps ##' @param warn.nonconv.csteps ##' @param warn.wrong.obj.conv ##' @param trace ##' @param names ##' @return ##' @author Valentin Todorov; many tweaks by Martin Maechler .detmcd <- function(x, h, hsets.init=NULL, save.hsets = missing(hsets.init), full.h = save.hsets, scalefn, maxcsteps = 200, warn.nonconv.csteps = getOption("robustbase:warn.nonconv.csteps", TRUE), warn.wrong.obj.conv = getOption("robustbase:warn.wrong.obj.conv",FALSE), trace = as.integer(trace), names = TRUE) { stopifnot(length(dx <- dim(x)) == 2, h == as.integer(h), h >= 1) n <- dx[1] p <- dx[2] stopifnot(p >= 1, n >= 1) scalefn <- robScalefn(scalefn, n) ## kmini <- 5 # number of sub-data sets(if we use them some day) ## # for now we use it as number of rows in the returned ## # matrix 'coeff' for exact fit (also not used currently). ## cutoff <- qchisq(0.975, p) ## chimed <- qchisq(0.5, p) ## Center and scale the data vnms <- colnames(x) # speedup only: store and put back at end z <- doScale(unname(x), center=median, scale=scalefn) z.center <- z$center z.scale <- z$scale z <- z$x ## Assume that 'hsets.init' already contains h-subsets: the first h observations each if(is.null(hsets.init)) { hsets.init <- r6pack(z, h=h, full.h=full.h, scaled=TRUE, scalefn=scalefn) dh <- dim(hsets.init) } else { ## user specified, (even just *one* vector): if(is.vector(hsets.init)) hsets.init <- as.matrix(hsets.init) dh <- dim(hsets.init) if(dh[1] < h || dh[2] < 1) stop("'hsets.init' must be a h' x L matrix (h' >= h) of observation indices") ## TODO?: We could *extend* the sets to large h, even all n ## ====> could input the 'best' sets, also e.g. from fastmcd if(full.h && dh[1] != n) warning("'full.h' is true, but 'hsets.init' has less than n rows") ## stop("When 'full.h' is true, user specified 'hsets.init' must have n rows") if(min(hsets.init) < 1 || max(hsets.init) > n) stop("'hsets.init' must be in {1,2,...,n}; n = ", n) } nsets <- ncol(hsets.init)# typically 6, currently ## Some initializations. hset.csteps <- integer(nsets) bestobj <- Inf for(i in 1:nsets) { if(trace) { if(trace >= 2) cat(sprintf("H-subset %d = observations c(%s):\n-----------\n", i, pasteK(hsets.init[1:h,i]))) else cat(sprintf("H-subset %d: ", i)) } for(j in 1:maxcsteps) { if(j == 1) { obs_in_set <- hsets.init[1:h,i] # start with the i-th initial set } else { # now using 'svd' from last step score <- (z - rep(svd$center, each=n)) %*% svd$loadings mah <- mahalanobisD(score, center=FALSE, sd = sqrt(abs(svd$eigenvalues))) obs_in_set <- sort.list(mah)[1:h] #, partial = 1:h not yet } ## [P,T,L,r,centerX,meanvct] = classSVD(data(obs_in_set,:)); svd <- classPC(z[obs_in_set, ,drop=FALSE], signflip=FALSE) obj <- sum(log(svd$eigenvalues)) if(svd$rank < p) { ## FIXME --> return exact fit property rather than stop() ?? 
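                ## a rank-deficient h-subset means the h selected observations lie on a
                ## (lower-dimensional) hyperplane; this "exact fit" case is not handled by
                ## .detmcd() -- as noted above, covMcd()'s FAST-MCD branch should be used then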
stop('More than h of the observations lie on a hyperplane.') ## TODO exactfit <- TRUE ## coeff <- ... } if(j >= 2 && obj == prevdet) { ## MM:: 2014-10-25: objective function check is *not* good enough: if(identical(obs_in_set, prevobs)) break ## else : if(warn.wrong.obj.conv) warning(sprintf( "original detmcd() wrongly declared c-step convergence (obj=%g, i=%d, j=%d)", obj, i,j)) } prevdet <- obj prevobs <- obs_in_set } hset.csteps[i] <- j # how many csteps necessary to converge. if(trace) cat(sprintf("%3d csteps, obj=log(det|.|)=%g", j, obj)) if(obj < bestobj) { if(trace) cat(" = new optim.\n") ## bestset : the best subset for the whole data. ## bestobj : objective value for this set. ## initmean, initcov : the mean and covariance matrix of this set bestset <- obs_in_set bestobj <- obj initmean <- svd$center L <- svd$loadings ## MM speedup: was L Diag L' = L %*% diag(svd$eigenvalues) %*% t(L) initcov <- tcrossprod(L * rep(svd$eigenvalues, each=nrow(L)), L) ## raw.initcov <- initcov ## rew.Hsubsets.Hopt <- bestset ind.best <- i # to determine which subset gives best results. } else if(obj == bestobj) ## store as well: ind.best <- c(ind.best, i) else if(trace) cat("\n") } ## for(i in 1:nsets) if(warn.nonconv.csteps && any(eq <- hset.csteps == maxcsteps)) { p1 <- paste(ngettext(sum(eq), "Initial set", "Initial sets"), pasteK(which(eq))) warning(sprintf("%s did not converge in maxcsteps=%d concentration steps", p1, maxcsteps), domain=NA) } ## reweighting <- FALSE # it happens in covMcd() ## if(reweighting) { ## svd <- classPC(z[bestset, ], signflip=FALSE) # [P,T,L,r,centerX,meanvct] = classSVD(data(bestset,:)); ## mah <- mahalanobisD((z - rep(svd$center, each=n)) %*% svd$loadings, ## FALSE, sqrt(abs(svd$eigenvalues))) ## sortmah <- sort(mah) ## } ## factor <- sortmah[h]/qchisq(h/n, p) ## raw.cov <- factor*initcov ## raw.cov <- initcov ## We express the results in the original units [restoring var.names]: raw.cov <- initcov * tcrossprod(z.scale) raw.center <- initmean * z.scale + z.center if(names) { dimnames(raw.cov) <- list(vnms, vnms) names(raw.center) <- vnms } raw.objective <- bestobj + 2*sum(log(z.scale)) # log(det = obj.best * prod(z.scale)^2) ## raw.mah <- mahalanobis(x, raw.center, raw.cov, tol=1E-14) ## medi2 <- median(raw.mah) list(initcovariance = raw.cov, initmean = raw.center, best = bestset, mcdestimate = raw.objective, # determinant (goes to crit) ## , weights=NULL# FIXME - goes to raw.weights iBest = ind.best, n.csteps = hset.csteps, initHsets = if(save.hsets) hsets.init, exactfit = 0 # <- FIXME ## once we'd test for exact fit, we'd return: ## , coeff=matrix(rep(0, kmini*p), nrow=kmini) ## , kount=0 # FIXME ) } ## .detmcd() robScalefn <- function(scalefn, n) { if(missing(scalefn) || is.null(scalefn)) scalefn <- .scalefn.default if(is.function(scalefn)) scalefn else switch(scalefn, ## Hubert, Rousseeuw, Verdonck, JCGS 2012 : "hrv2012" = if(n < 1000) Qn else scaleTau2, ## Version of 2014: "v2014" = if(n < 5000) Qn else scaleTau2, ## otherwise stop(gettextf("Invalid scalefn='%s': must be function or a valid string", scalefn), domain=NA)) } doScale <- function (x, center, scale) { stopifnot(is.numeric(p <- ncol(x))) ## MM: follow standard R's scale.default() as much as possible centerFn <- is.function(center) doIt <- if(centerFn) { centerName <- deparse(substitute(center)) # "median" typically center <- apply(x, 2L, center) TRUE } else { if(length(center) == p && is.numeric(center)) TRUE else if(missing(center) || is.null(center)) { center <- 0; FALSE } else stop(gettextf("'%s' 
must be a function, numeric vector of length p, or NULL", "center"), domain=NA) } if(doIt) x <- sweep(x, 2L, center, `-`, check.margin=FALSE) scaleFn <- is.function(scale) doIt <- if(scaleFn) { scale <- apply(x, 2L, scale) TRUE } else { if(length(scale) == p && is.numeric(scale)) TRUE else if(missing(scale) || is.null(scale)) { scale <- 1 FALSE } else stop(gettextf("'%s' must be a function, numeric vector of length p, or NULL", "scale"), domain=NA) } if(doIt) { if(any(is.na(scale)) || any(scale < 0)) stop("provide better scale; must be all positive") if(any(s0 <- scale == 0)) { ## FIXME: ### Better and easier alternative (and as "FAST MCD"): return "singular cov.matrix" ### since scale 0 ==> more than 50% points are on hyperplane x[,j] == const. ## find scale if there is any variation; otherwise use s := 1 S <- if(centerFn && centerName == "median") abs else function(.) abs(. - median(.)) non0Q <- function(u) { alph <- c(10:19, 19.75)/20 # not all the way to '1' {=> finite qnorm()} qq <- quantile(S(u), probs=alph, names=FALSE) if(any(pos <- qq != 0)) { ## the first non-0 if there is one i <- which.max(pos) qq[i] / qnorm((alph[i] + 1)/2) } else 1 } scale[s0] <- apply(x[,s0, drop=FALSE], 2L, non0Q) } x <- sweep(x, 2L, scale, `/`, check.margin = FALSE) } ## return list(x=x, center=center, scale=scale) } ##' @title Robust Distance based observation orderings based on robust "Six pack" ##' @param x n x p data matrix ##' @param h integer ##' @param full.h full (length n) ordering or only the first h? ##' @param scaled is 'x' is already scaled? otherwise, apply doScale(x, median, scalefn) ##' @param scalefn function to compute a robust univariate scale. ##' @return a h' x 6 matrix of indices from 1:n; if(full.h) h' = n else h' = h r6pack <- function(x, h, full.h, scaled=TRUE, scalefn = rrcov.control()$scalefn) { ## As the considered initial estimators Sk may have very ## inaccurate eigenvalues, we try to 'improve' them by applying ## a transformation similar to that used in the OGK algorithm. ## ## After that compute the corresponding distances, order them and ## return the indices initset <- function(data, scalefn, P, h) { stopifnot(length(d <- dim(data)) == 2, length(h) == 1, h >= 1) n <- d[1] stopifnot(h <= n) lambda <- doScale(data %*% P, center=median, scale=scalefn)$scale sqrtcov <- P %*% (lambda * t(P)) ## == P %*% diag(lambda) %*% t(P) sqrtinvcov <- P %*% (t(P) / lambda) ## == P %*% diag(1/lambda) %*% t(P) estloc <- colMedians(data %*% sqrtinvcov) %*% sqrtcov centeredx <- (data - rep(estloc, each=n)) %*% P sort.list(mahalanobisD(centeredx, FALSE, lambda))[1:h]# , partial = 1:h } ## ## Compute the raw OGK estimator. For m(.) and s(.) (robust ## univariate estimators of location and scale) use the median ## and Qn for reasons of simplicity (no choice of tuning parameters) ## and to be consistent with the other components of DetMCD. 
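    ## ogkscatter() below computes the pairwise (Gnanadesikan-Kettenring) scatter matrix
    ## of Y with scalefn() and, for only.P = TRUE, returns just its eigenvectors P;
    ## these serve as projection directions for the 6th initial estimate further down.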
## ogkscatter <- function(Y, scalefn, only.P = TRUE) { stopifnot(length(p <- ncol(Y)) == 1, p >= 1) U <- diag(p) for(i in seq_len(p)[-1L]) {# i = 2:p sYi <- Y[,i] ii <- seq_len(i - 1L) for(j in ii) { sYj <- Y[,j] U[i,j] <- (scalefn(sYi + sYj)^2 - scalefn(sYi - sYj)^2) / 4 } ## also set the upper triangle U[ii,i] <- U[i,ii] } ## now done above: U <- lower.tri(U) * U + t(U) # U <- tril(U, -1) + t(U) P <- eigen(U, symmetric=TRUE)$vectors if(only.P) return(P) ## else : Z <- Y %*% t(P) sigz <- apply(Z, 2, scalefn) lambda <- diag(sigz^2) list(P=P, lambda=lambda) } stopifnot(length(dx <- dim(x)) == 2) n <- dx[1] p <- dx[2] ## If scalefn is missing or is NULL, use Qn for smaller data sets (n < 1000) ## and tau-scale of Yohai and Zamar (1988) otherwise. scalefn <- robScalefn(scalefn, n) ## If the data was not scaled already (scaled=FALSE), center and scale using ## the median and the provided function 'scalefn'. if(!scaled) { ## Center and scale the data to (0, 1) - robustly x <- doScale(x, center=median, scale=scalefn)$x } nsets <- 6 hsets <- matrix(integer(), h, nsets) ## Determine 6 initial estimates (ordering of obs) ## 1. Hyperbolic tangent of standardized data y1 <- tanh(x) R1 <- cor(y1) P <- eigen(R1, symmetric=TRUE)$vectors hsets[,1] <- initset(x, scalefn=scalefn, P=P, h=h) ## 2. Spearmann correlation matrix R2 <- cor(x, method="spearman") P <- eigen(R2, symmetric=TRUE)$vectors hsets[,2] <- initset(x, scalefn=scalefn, P=P, h=h) ## 3. Tukey normal scores y3 <- qnorm((apply(x, 2L, rank) - 1/3)/(n + 1/3)) R3 <- cor(y3, use = "complete.obs") P <- eigen(R3, symmetric=TRUE)$vectors hsets[,3] <- initset(x, scalefn=scalefn, P=P, h=h) ## 4. Spatial sign covariance matrix znorm <- sqrt(rowSums(x^2)) ii <- znorm > .Machine$double.eps x.nrmd <- x x.nrmd[ii,] <- x[ii, ] / znorm[ii] SCM <- crossprod(x.nrmd)# / (n-1) not needed for e.vectors P <- eigen(SCM, symmetric=TRUE)$vectors hsets[,4] <- initset(x, scalefn=scalefn, P=P, h=h) ## 5. BACON ind5 <- order(znorm) half <- ceiling(n/2) Hinit <- ind5[1:half] covx <- cov(x[Hinit, , drop=FALSE]) P <- eigen(covx, symmetric=TRUE)$vectors hsets[,5] <- initset(x, scalefn=scalefn, P=P, h=h) ## 6. Raw OGK estimate for scatter P <- ogkscatter(x, scalefn, only.P = TRUE) hsets[,6] <- initset(x, scalefn=scalefn, P=P, h=h) ## Now combine the six pack : if(full.h) hsetsN <- matrix(integer(), n, nsets) for(k in 1:nsets) ## sort each of the h-subsets in *increasing* Mah.distances { xk <- x[hsets[,k], , drop=FALSE] svd <- classPC(xk, signflip=FALSE) # [P,T,L,r,centerX,meanvct] = classSVD(xk) if(svd$rank < p) ## FIXME: " return("exactfit") " stop('More than half of the observations lie on a hyperplane.') score <- (x - rep(svd$center, each=n)) %*% svd$loadings ord <- order(mahalanobisD(score, FALSE, sqrt(abs(svd$eigenvalues)))) if(full.h) hsetsN[,k] <- ord else hsets[,k] <- ord[1:h] } ## return if(full.h) hsetsN else hsets } ## {r6pack} robustbase/R/covMcd.R0000644000176200001440000012226713175631765014212 0ustar liggesusers### This is originally from the R package #### #### rrcov : Scalable Robust Estimators with High Breakdown Point #### #### by Valentin Todorov ## I would like to thank Peter Rousseeuw and Katrien van Driessen for ## providing the initial code of this function. ## This program is free software; you can redistribute it and/or modify ## it under the terms of the GNU General Public License as published by ## the Free Software Foundation; either version 2 of the License, or ## (at your option) any later version. 
## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, a copy is available at ## http://www.r-project.org/Licenses/ ## No longer hidden in namespace : ## easier to explain when user-available & documented if h.alpha.n <- function(alpha, n, p) { ## Compute h(alpha) := size of subsample, given alpha, (n,p) ## Same function for covMcd() and ltsReg() n2 <- (n+p+1) %/% 2 floor(2 * n2 - n + 2 * (n - n2) * alpha) } ## MM: the way it's set up, *must* be kept in sync with rrcov.control()'s ## defaults --> ./rrcov.control.R : covMcd <- function(x, cor = FALSE, raw.only = FALSE, alpha = control$ alpha, nsamp = control$ nsamp, nmini = control$ nmini, kmini = control$ kmini, scalefn=control$scalefn, maxcsteps=control$maxcsteps, initHsets = NULL, save.hsets = FALSE, names = TRUE, seed = control$ seed, tolSolve = control$ tolSolve, # had 1e-10 hardwired {now 1e-14 default} trace = control$ trace, use.correction = control$ use.correction, wgtFUN = control$ wgtFUN, control = rrcov.control()) { logdet.Lrg <- 50 ## <-- FIXME add to rrcov.control() and then use that ## Analyze and validate the input parameters ... if(length(seed) > 0) { if(length(seed) < 3 || seed[1L] < 100) stop("invalid 'seed'. Must be compatible with .Random.seed !") if(exists(".Random.seed", envir=.GlobalEnv, inherits=FALSE)) { seed.keep <- get(".Random.seed", envir=.GlobalEnv, inherits=FALSE) on.exit(assign(".Random.seed", seed.keep, envir=.GlobalEnv)) } assign(".Random.seed", seed, envir=.GlobalEnv) } ## For back compatibility, as some new args did not exist pre 2013-04, ## and callers of covMcd() may use a "too small" 'control' list: defCtrl <- if(missing(control)) control else rrcov.control() if(missing(wgtFUN)) getDefCtrl("wgtFUN", defCtrl) if(is.null (nmini)) getDefCtrl("nmini", defCtrl) ## vt::03.02.2006 - added options "best" and "exact" for nsamp ## nsamp will be further analized in the wrapper .fastmcd() if(is.numeric(nsamp) && nsamp <= 0) stop("Invalid number of trials nsamp = ",nsamp, "!") if(is.data.frame(x)) x <- data.matrix(x, rownames.force=FALSE) else if (!is.matrix(x)) x <- matrix(x, length(x), 1, dimnames = if(names) list(names(x), deparse(substitute(x)))) if(!names) dimnames(x) <- NULL # (speedup) ## drop all rows with missing values (!!) : ok <- is.finite(x %*% rep.int(1, ncol(x))) x <- x[ok, , drop = FALSE] if(!length(dx <- dim(x))) stop("All observations have missing values!") n <- dx[1]; p <- dx[2] if(names) dimn <- dimnames(x) ## h(alpha) , the size of the subsamples h <- h.alpha.n(alpha, n, p) if(n <= p + 1) # ==> floor((n+p+1)/2) > n - 1 -- not Ok stop(if (n <= p) # absolute barrier! "n <= p -- you can't be serious!" else "n == p+1 is too small sample size for MCD") ## else if(n < 2 * p) { ## p+1 < n < 2p warning("n < 2 * p, i.e., possibly too small sample size") ## was stop("Need at least 2*(number of variables) observations ") } ## jmin <- (n + p + 1) %/% 2 ## if(alpha < 1/2) ## FIXME? shouldn't we rather test 'alpha < jmin/n' ? 
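    ## (note: h.alpha.n(1/2, n, p) == (n+p+1) %/% 2 == jmin in general; e.g.
    ##  h.alpha.n(1/2, 100, 5) == 53, h.alpha.n(3/4, 100, 5) == 76, and h.alpha.n(1, n, p) == n)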
## stop("The MCD must cover at least", jmin, "observations") ## MM: I think this should be sufficient; ## we should even omit the (n < 2p) warning if(h > n) stop("Sample size n < h(alpha; n,p) := size of \"good\" subsample") else if(2*h < n) warning("subsample size h < n/2 may be too small") if(is.character(wgtFUN)) { if(is.function(mkWfun <- .wgtFUN.covMcd[[wgtFUN]])) wgtFUN <- mkWfun(p=p, n=n, control) } if(!is.function(wgtFUN)) stop(gettextf("'wgtFUN' must be a function or one of the strings %s.", pasteK(paste0('"',names(.wgtFUN.covMcd),'"'))), domain=NA) ## vt::03.02.2006 - raw.cnp2 and cnp2 are vectors of size 2 and will ## contain the correction factors (concistency and finite sample) ## for the raw and reweighted estimates respectively. Set them ## initially to 1. If use.correction is false (not the default), ## the finite sample correction factor will not be used ## (neither for the raw estimates nor for the reweighted ones) raw.cnp2 <- cnp2 <- c(1,1) ans <- list(call = match.call(), nsamp = nsamp, method = sprintf("MCD(alpha=%g ==> h=%d)", alpha, h)) if(h == n) { ## <==> alpha ~= 1 : Just compute the classical estimates -------- mcd <- cov(x) #MM: was cov.wt(x)$cov loc <- as.vector(colMeans(x)) obj <- determinant(mcd, logarithm = TRUE)$modulus[1] if ( -obj/p > logdet.Lrg ) { ans$cov <- mcd if(names) dimnames(ans$cov) <- list(dimn[[2]], dimn[[2]]) if (cor) ans$cor <- cov2cor(ans$cov) ans$center <- loc if(names && length(dimn[[2]])) names(ans$center) <- dimn[[2]] ans$n.obs <- n ans$singularity <- list(kind = "classical") weights <- 1 } else { mah <- mahalanobis(x, loc, mcd, tol = tolSolve) ## VT:: 01.09.2004 - bug in alpha=1 weights <- wgtFUN(mah) # 0/1 sum.w <- sum(weights) ans <- c(ans, cov.wt(x, wt = weights, cor = cor)) ## cov.wt() -> list("cov", "center", "n.obs", ["wt", "cor"]) ## Consistency factor for reweighted MCD -- ok for default wgtFUN only: FIXME if(sum.w != n) { cnp2[1] <- .MCDcons(p, sum.w/n) ans$cov <- ans$cov * cnp2[1] } obj <- determinant(mcd, logarithm = TRUE)$modulus[1] if( -obj/p > logdet.Lrg ) { ans$singularity <- list(kind = "reweighted.MCD") } else { mah <- mahalanobis(x, ans$center, ans$cov, tol = tolSolve) weights <- wgtFUN(mah) # 0/1 } } ans$alpha <- alpha ans$quan <- h ans$raw.cov <- mcd ans$raw.center <- loc if(names && !is.null(nms <- dimn[[2]])) { names(ans$raw.center) <- nms dimnames(ans$raw.cov) <- list(nms,nms) } ans$crit <- obj # was exp(obj); but log-scale is "robust" against under/overflow ans$method <- paste(ans$method, "\nalpha = 1: The minimum covariance determinant estimates based on", n, "observations \nare equal to the classical estimates.") ans$mcd.wt <- rep.int(NA, length(ok)) ans$mcd.wt[ok] <- weights if(names && length(dimn[[1]])) names(ans$mcd.wt) <- dimn[[1]] ans$wt <- NULL ans$X <- x if(names) { if(length(dimn[[1]])) dimnames(ans$X)[[1]] <- names(ans$mcd.wt)[ok] else dimnames(ans$X) <- list(seq(along = ok)[ok], NULL) } if(trace) cat(ans$method, "\n") ans$raw.cnp2 <- raw.cnp2 ans$cnp2 <- cnp2 class(ans) <- "mcd" return(ans) } ## end { alpha = 1 <==> h = n } mcd <- if(nsamp == "deterministic") { ans$method <- paste("Deterministic", ans$method) .detmcd (x, h, hsets.init = initHsets, save.hsets=save.hsets, # full.h=full.h, scalefn=scalefn, maxcsteps=maxcsteps, trace=as.integer(trace), names=names) } else { ans$method <- paste0("Fast ", ans$method, "; nsamp = ", nsamp, "; (n,k)mini = (", nmini,",",kmini,")") .fastmcd(x, h, nsamp, nmini, kmini, trace=as.integer(trace)) } ## Compute the consistency correction factor for the raw MCD ## (see 
calfa in Croux and Haesbroeck) calpha <- .MCDcons(p, h/n) ## VT::19.3.2007 correct <- if(use.correction) .MCDcnp2(p, n, alpha) else 1. raw.cnp2 <- c(calpha, correct) if(p == 1) { ## ==> Compute univariate location and scale estimates ans$method <- paste("Univariate", ans$method) scale <- sqrt(calpha * correct) * as.double(mcd$initcovariance) center <- as.double(mcd$initmean) if(abs(scale - 0) < 1e-07) { ans$singularity <- list(kind = "identicalObs", q = h) ans$raw.cov <- ans$cov <- matrix(0) ans$raw.center <- ans$center <- center ans$n.obs <- n ans$alpha <- alpha ans$quan <- h if(names && !is.null(nms <- dimn[[2]][1])) { names(ans$raw.center) <- names(ans$center) <- nms dimnames(ans$raw.cov) <- dimnames(ans$cov) <- list(nms,nms) } ans$crit <- -Inf # = log(0) weights <- as.numeric(abs(x - center) < 1e-07) # 0 / 1 } ## end { scale ~= 0 } else { ## Compute the weights for the raw MCD in case p=1 weights <- wgtFUN(((x - center)/scale)^2) # 0/1 sum.w <- sum(weights) ans <- c(ans, cov.wt(x, wt = weights, cor=cor)) if(sum.w != n) { cdelta.rew <- .MCDcons(p, sum.w/n) ## VT::19.3.2007 correct.rew <- if(use.correction) .MCDcnp2.rew(p, n, alpha) else 1. cnp2 <- c(cdelta.rew, correct.rew) ans$cov <- cdelta.rew * correct.rew * ans$cov } ans$alpha <- alpha ans$quan <- h ans$raw.cov <- as.matrix(scale^2) ans$raw.center <- as.vector(center) if(names && !is.null(nms <- dimn[[2]][1])) { dimnames(ans$raw.cov) <- list(nms,nms) names(ans$raw.center) <- nms } ans$crit <- ## log(det) = log(sum(sort((x - as.double(mcd$initmean))^2, partial = h)[1:h])/max(1,h-1)) center <- ans$center scale <- as.vector(sqrt(ans$cov)) weights <- wgtFUN(((x - center)/scale)^2) } ## end{ scale > 0 } } ## end p=1 else { ## p >= 2 : --------------------------------------------------------- ## Apply correction factor to the raw estimates ## and use them to compute weights mcd$initcovariance <- matrix(calpha * correct * mcd$initcovariance, p,p) if(raw.only || mcd$exactfit != 0) { ## If not all observations are in general position, i.e. more than ## h observations lie on a hyperplane, the program still yields ## the MCD location and scatter matrix, the latter being singular ## (as it should be), as well as the equation of the hyperplane. dim(mcd$coeff) <- c(5, p) ans$cov <- ans$raw.cov <- mcd$initcovariance ans$center <- ans$raw.center <- as.vector(mcd$initmean) if(names && !is.null(nms <- dimn[[2]])) { dimnames(ans$cov) <- list(nms, nms) names(ans$center) <- nms } ans$n.obs <- n if(raw.only) { ans$raw.only <- TRUE } else { ## no longer relevant: ## if(mcd$exactfit == -1) ## stop("The program allows for at most ", mcd$kount, " observations.") ## if(mcd$exactfit == -2) ## stop("The program allows for at most ", mcd$kount, " variables.") if(!(mcd$exactfit %in% c(1,2,3))) stop("Unexpected 'exactfit' code ", mcd$exactfit, ". Please report!") ## new (2007-01) and *instead* of older long 'method' extension; ## the old message is still *printed* via .MCDsingularityMsg() ## ## exactfit is now *passed* to result instead of coded into 'message': ans$singularity <- list(kind = "on.hyperplane", exactCode = mcd$exactfit, p = p, h = h, count = mcd$kount, coeff = mcd$coeff[1,]) } ans$alpha <- alpha ans$quan <- h if(names && !is.null(nms <- dimn[[2]])) { names(ans$raw.center) <- nms dimnames(ans$raw.cov) <- list(nms,nms) } ans$crit <- -Inf # = log(0) weights <- mcd$weights } ## end (raw.only || exact fit) else { ## have general position (exactfit == 0) : ------------------------ ## FIXME? 
here, we assume that mcd$initcovariance is not singular: mah <- mahalanobis(x, mcd$initmean, mcd$initcovariance, tol = tolSolve) weights <- wgtFUN(mah) sum.w <- sum(weights) ans <- c(ans, cov.wt(x, wt = weights, cor=cor)) ## simple check for singularity, much cheaper than determinant() below: sing.rewt <- any(apply(ans$cov == 0, 2, all)) ## Compute and apply the consistency correction factor for ## the reweighted cov if(!sing.rewt && sum.w != n) { cdelta.rew <- .MCDcons(p, sum.w/n) ## VT::19.3.2007 correct.rew <- if(use.correction) .MCDcnp2.rew(p, n, alpha) else 1. cnp2 <- c(cdelta.rew, correct.rew) ans$cov <- cdelta.rew * correct.rew * ans$cov } ##vt:: add also the best found subsample to the result list ans$best <- sort(as.vector(mcd$best)) ans$alpha <- alpha ans$quan <- h ans$raw.cov <- mcd$initcovariance ans$raw.center <- as.vector(mcd$initmean) if(names && !is.null(nms <- dimn[[2]])) { names(ans$raw.center) <- nms dimnames(ans$raw.cov) <- list(nms,nms) } ans$raw.weights <- weights ans$crit <- mcd$mcdestimate # now in log scale! ## 'mah' already computed above ans$raw.mah <- mah ## mahalanobis(x, ans$raw.center, ans$raw.cov, tol = tolSolve) ## Check if the reweighted scatter matrix is singular. if(sing.rewt || - determinant(ans$cov, logarithm = TRUE)$modulus[1]/p > logdet.Lrg) { ans$singularity <- list(kind = paste0("reweighted.MCD", if(sing.rewt)"(zero col.)")) ans$mah <- mah } else { mah <- mahalanobis(x, ans$center, ans$cov, tol = tolSolve) ans$mah <- mah weights <- wgtFUN(mah) } } ## end{ not exact fit } } ## end{ p >= 2 } ans$mcd.wt <- rep.int(NA, length(ok)) ans$mcd.wt[ok] <- weights if(names) { if(length(dimn[[1]])) names(ans$mcd.wt) <- dimn[[1]] if(length(dimn[[1]])) dimnames(x)[[1]] <- names(ans$mcd.wt)[ok] else dimnames(x) <- list(seq(along = ok)[ok], NULL) } ans$X <- x ans$wt <- NULL if(trace) cat(ans$method, "\n") ans$raw.cnp2 <- raw.cnp2 ans$cnp2 <- cnp2 if(nsamp == "deterministic") ans <- c(ans, mcd[c("iBest","n.csteps", if(save.hsets) "initHsets")]) class(ans) <- "mcd" ## warn if we have a singularity: if(is.list(ans$singularity)) warning(paste(strwrap(.MCDsingularityMsg(ans$singularity, ans$n.obs)), collapse="\n"), domain=NA) ## return ans } ## {covMcd} smoothWgt <- function(x, c, h) { ## currently drops all attributes, dim(), names(), etc ## maybe add 'keep.attributes = FALSE' (and pass to C) .Call(R_wgt_flex, x, c, h) } ##' Martin Maechler's simple proposal for an *adaptive* cutoff ##' i.e., one which does *not* reject outliers in good samples asymptotically .MCDadaptWgt.c <- function(n,p) { eps <- 0.4 / n ^ 0.6 # => 1-eps(n=100) ~= 0.975; 1-eps(n=10) ~= 0.90 ## using upper tail: qchisq(eps, p, lower.tail=FALSE) } ## Default wgtFUN()s : .wgtFUN.covMcd <- list("01.original" = function(p, ...) { cMah <- qchisq(0.975, p) function(d) as.numeric(d < cMah) }, "01.flex" = function(p, n, control) { ## 'control$beta' instead of 0.975 ## FIXME: update rrcov.control() to accept 'beta' stopifnot(is.1num(beta <- control$beta), 0 <= beta, beta <= 1) cMah <- qchisq(beta, p) function(d) as.numeric(d < cMah) }, "01.adaptive" = function(p, n, ...) { ## 'beta_n' instead of 0.975 cMah <- .MCDadaptWgt.c(n,p) function(d) as.numeric(d < cMah) }, "sm1.orig" = function(p, n, ...) { cMah <- qchisq(0.975, p) function(d) smoothWgt(d, c = cMah, h = 1) }, "sm2.orig" = function(p, n, ...) { cMah <- qchisq(0.975, p) function(d) smoothWgt(d, c = cMah, h = 2) }, "sm1.adaptive" = function(p, n, ...) 
{ cMah <- .MCDadaptWgt.c(n,p) function(d) smoothWgt(d, c = cMah, h = 1) }, "sm2.adaptive" = function(p, n, ...) { cMah <- .MCDadaptWgt.c(n,p) function(d) smoothWgt(d, c = cMah, h = 2) }, "smE.adaptive" = function(p, n, ...) { cMah <- .MCDadaptWgt.c(n,p) ## TODO: find "theory" for h = f(cMah), or better c=f1(n,p); h=f2(n,p) function(d) smoothWgt(d, c = cMah, h = max(2, cMah/4)) } ) .MCDsingularityMsg <- function(singList, n.obs) { stopifnot(is.list(singList)) switch(singList$kind, "classical" = { "The classical covariance matrix is singular." }, "reweighted.MCD" = { "The reweighted MCD scatter matrix is singular." }, "identicalObs" = { sprintf("Initial scale 0 because more than 'h' (=%d) observations are identical.", singList$q) }, "on.hyperplane" = { stopifnot(c("p", "count", "coeff") %in% names(singList)) obsMsg <- function(m, n) paste("There are", m, "observations (in the entire dataset of", n, "obs.)", "lying on the") invisible(obsMsg)# <- codetools with(singList, c(switch(exactCode, ## exactfit == 1 : "The covariance matrix of the data is singular.", ## exactfit == 2 : c("The covariance matrix has become singular during", "the iterations of the MCD algorithm."), ## exactfit == 3: paste0("The ", h, "-th order statistic of the absolute deviation of variable ", which(coeff == 1), " is zero.")), if(p == 2) { paste(obsMsg(count, n.obs), "line with equation ", signif(coeff[1], digits= 5), "(x_i1-m_1) +", signif(coeff[2], digits= 5), "(x_i2-m_2) = 0", "with (m_1,m_2) the mean of these observations.") } else if(p == 3) { paste(obsMsg(count, n.obs), "plane with equation ", signif(coeff[1], digits= 5), "(x_i1-m_1) +", signif(coeff[2], digits= 5), "(x_i2-m_2) +", signif(coeff[3], digits= 5), "(x_i3-m_3) = 0", "with (m_1,m_2) the mean of these observations." ) } else { ## p > 3 ----------- paste(obsMsg(count, n.obs), "hyperplane with equation ", "a_1*(x_i1 - m_1) + ... + a_p*(x_ip - m_p) = 0", "with (m_1, ..., m_p) the mean of these observations", "and coefficients a_i from the vector a <- ", paste(deparse(zapsmall(coeff)), collapse="\n ")) })) }, ## Otherwise stop("illegal 'singularity$kind'") ) ## end{switch} } nobs.mcd <- function (object, ...) object$n.obs print.mcd <- function(x, digits = max(3, getOption("digits") - 3), print.gap = 2, ...) { cat("Minimum Covariance Determinant (MCD) estimator approximation.\n", "Method: ", x$method, "\n", sep="") if(!is.null(cl <- x$call)) { cat("Call:\n") dput(cl) } if(is.list(x$singularity)) cat(strwrap(.MCDsingularityMsg(x$singularity, x$n.obs)), sep ="\n") if(identical(x$nsamp, "deterministic")) cat("iBest: ", pasteK(x$iBest), "; C-step iterations: ", pasteK(x$n.csteps), "\n", sep="") ## VT::29.03.2007 - solve a conflict with fastmcd() in package robust - ## also returning an object of class "mcd" xx <- NA if(!is.null(x$crit)) xx <- format(x$crit, digits = digits) else if (!is.null(x$raw.objective)) xx <- format(log(x$raw.objective), digits = digits) cat("Log(Det.): ", xx , "\n\nRobust Estimate of Location:\n") print(x$center, digits = digits, print.gap = print.gap, ...) cat("Robust Estimate of Covariance:\n") print(x$cov, digits = digits, print.gap = print.gap, ...) invisible(x) } summary.mcd <- function(object, ...) { class(object) <- c("summary.mcd", class(object)) object } print.summary.mcd <- function(x, digits = max(3, getOption("digits") - 3), print.gap = 2, ...) { print.mcd(x, digits = digits, print.gap = print.gap, ...) 
# see above ## hmm, maybe not *such* a good idea : if(!is.null(x$cor)) { cat("\nRobust Estimate of Correlation: \n") dimnames(x$cor) <- dimnames(x$cov) print(x$cor, digits = digits, print.gap = print.gap, ...) } cat("\nEigenvalues:\n") print(eigen(x$cov, only.values = TRUE)$values, digits = digits, ...) if(!is.null(x$mah)) { cat("\nRobust Distances: \n") print(summary(x$mah, digits = digits), digits = digits, ...) } if(!is.null(wt <- x$mcd.wt)) summarizeRobWeights(wt, digits = digits) invisible(x) } ## NOTE: plot.mcd() is in ./covPlot.R ! ## ---- ~~~~~~~~~~~ ### Consistency and Finite Sample Correction Factors ### .MCDcons() .MCDcnp2() & .MCDcnp2.rew() ### now exported and documented in ../man/covMcd.Rd ##' Compute the consistency correction factor for the MCD estimate ##' (see calfa in Croux and Haesbroeck) ##' @param p ##' @param alpha alpha ~= h/n = quan/n ##' also use for the reweighted MCD, calling with alpha = 'sum(weights)/n' MCDcons <- # <- *not* exported, but currently used in pkgs rrcov, rrcovNA .MCDcons <- function(p, alpha) { qalpha <- qchisq(alpha, p) caI <- pgamma(qalpha/2, p/2 + 1) / alpha 1/caI } MCDcnp2 <- # <- *not* exported, but currently used in pkg rrcovNA ##' Finite sample correction factor for raw MCD: .MCDcnp2 <- function(p, n, alpha) { stopifnot(0 <= alpha, alpha <= 1, length(alpha) == 1) if(p > 2) { ## "alfaq" "betaq" "qwaarden" coeffqpkwad875 <- matrix(c(-0.455179464070565, 1.11192541278794, 2, -0.294241208320834, 1.09649329149811, 3), ncol = 2) coeffqpkwad500 <- matrix(c(-1.42764571687802, 1.26263336932151, 2, -1.06141115981725, 1.28907991440387, 3), ncol = 2) y.500 <- log( - coeffqpkwad500[1, ] / p^coeffqpkwad500[2, ] ) y.875 <- log( - coeffqpkwad875[1, ] / p^coeffqpkwad875[2, ] ) A.500 <- cbind(1, - log(coeffqpkwad500[3, ] * p^2)) A.875 <- cbind(1, - log(coeffqpkwad875[3, ] * p^2)) coeffic.500 <- solve(A.500, y.500) coeffic.875 <- solve(A.875, y.875) fp.500.n <- 1 - exp(coeffic.500[1]) / n^coeffic.500[2] fp.875.n <- 1 - exp(coeffic.875[1]) / n^coeffic.875[2] } else if(p == 2) { fp.500.n <- 1 - exp( 0.673292623522027) / n^0.691365864961895 fp.875.n <- 1 - exp( 0.446537815635445) / n^1.06690782995919 } else if(p == 1) { fp.500.n <- 1 - exp( 0.262024211897096) / n^0.604756680630497 fp.875.n <- 1 - exp(-0.351584646688712) / n^1.01646567502486 } ## VT:18.04.2007 - use simulated correction factors for several p and n: ## p in [1, 20] n in [2*p, ...] 
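    ## The factors fp.500.n and fp.875.n computed above correspond to
    ## alpha = 0.5 and alpha = 0.875; for a general 'alpha' the factor is
    ## obtained below by linear interpolation on [0.5, 0.875] (and between
    ## fp.875.n and 1 for alpha > 0.875), and the reciprocal 1/fp.alpha.n
    ## is returned as the finite-sample correction factor.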
if(alpha == 0.5 && !is.na(fp.x <- MCDcnp2s$sim.0(p, n))) fp.500.n <- 1/fp.x fp.alpha.n <- if(alpha <= 0.875) fp.500.n + (fp.875.n - fp.500.n)/0.375 * (alpha - 0.5) else ## 0.875 < alpha <= 1 fp.875.n + (1 - fp.875.n)/0.125 * (alpha - 0.875) 1/fp.alpha.n } ## end{.MCDcnp2 } MCDcnp2.rew <- # <- *not* exported, but currently used in pkg rrcovNA ##' Finite sample correction factor for *REW*eighted MCD .MCDcnp2.rew <- function(p, n, alpha) { stopifnot(0 <= alpha, alpha <= 1, length(alpha) == 1) if(p > 2) { ## "alfaq" "betaq" "qwaarden" coeffrewqpkwad875 <- matrix(c(-0.544482443573914, 1.25994483222292, 2, -0.343791072183285, 1.25159004257133, 3), ncol = 2) coeffrewqpkwad500 <- matrix(c(-1.02842572724793, 1.67659883081926, 2, -0.26800273450853, 1.35968562893582, 3), ncol = 2) y.500 <- log( - coeffrewqpkwad500[1, ] / p^ coeffrewqpkwad500[2, ] ) y.875 <- log( - coeffrewqpkwad875[1, ] / p^ coeffrewqpkwad875[2, ] ) A.500 <- cbind(1, - log(coeffrewqpkwad500[3, ] * p^2)) coeffic.500 <- solve(A.500, y.500) A.875 <- cbind(1, - log(coeffrewqpkwad875[3, ] * p^2)) coeffic.875 <- solve(A.875, y.875) fp.500.n <- 1 - exp(coeffic.500[1]) / n^ coeffic.500[2] fp.875.n <- 1 - exp(coeffic.875[1]) / n^ coeffic.875[2] } else if(p == 2) { fp.500.n <- 1 - exp( 3.11101712909049 ) / n^ 1.91401056721863 fp.875.n <- 1 - exp( 0.79473550581058 ) / n^ 1.10081930350091 } else if(p == 1) { fp.500.n <- 1 - exp( 1.11098143415027 ) / n^ 1.5182890270453 fp.875.n <- 1 - exp( -0.66046776772861) / n^ 0.88939595831888 } ## VT:18.04.2007 - use simulated correction factors for several p and n: ## p in [1, 20] n in [2*p, ...] if(alpha == 0.5 && !is.na(fp.x <- MCDcnp2s$sim.rew(p, n))) fp.500.n <- 1/fp.x fp.alpha.n <- if(alpha <= 0.875) fp.500.n + (fp.875.n - fp.500.n)/0.375 * (alpha - 0.5) else ## 0.875 < alpha <= 1 fp.875.n + (1 - fp.875.n)/0.125 * (alpha - 0.875) 1/fp.alpha.n } ## end{.MCDcnp2.rew } .fastmcd <- function(x, h, nsamp, nmini, kmini, trace = 0) { dx <- dim(x) n <- dx[1] p <- dx[2] ## parameters for partitioning {equal to those in Fortran !!} ## kmini <- 5 ## nmini <- 300 stopifnot(length(kmini <- as.integer(kmini)) == 1, kmini >= 2L, is.1num(nmini), is.finite(nmaxi <- as.double(nmini)*kmini), nmaxi * p < .Machine$integer.max) nmaxi <- as.integer(nmaxi) km10 <- 10*kmini ## vt::03.02.2006 - added options "best" and "exact" for nsamp ## nLarge <- 100000 # was 5000 before Nov.2009 -- keep forever now; user can say "exact" if(is.numeric(nsamp) && (nsamp < 0 || nsamp == 0 && p > 1)) { nsamp <- -1 } else if(nsamp == "exact" || nsamp == "best") { if(n > 2*nmini-1) { warning("Options 'best' and 'exact' not allowed for n greater than 2*nmini-1 =", 2*nmini-1,".\nUsing default.\n") nsamp <- -1 } else { myk <- p + 1 ## was 'p'; but p+1 ("nsel = nvar+1") is correct nall <- choose(n, myk) msg <- paste("subsets of size", myk, "out of", n) if(nall > nLarge && nsamp == "best") { nsamp <- nLarge warning("'nsamp = \"best\"' allows maximally ", format(nLarge, scientific=FALSE), " subsets;\ncomputing these ", msg, immediate. = TRUE) } else { ## "exact" or ("best" & nall < nLarge) nsamp <- 0 ## all subsamples -> special treatment in Fortran if(nall > nLarge) { msg <- paste("Computing all", nall, msg) if(nall > 10*nLarge) warning(msg, "\n This may take a", if(nall/nLarge > 100) " very", " long time!\n", immediate. = TRUE) else message(msg) } } } } if(!is.numeric(nsamp) || nsamp == -1) { # still not defined ## set it to the default : nsamp.def <- rrcov.control()$nsamp warning(gettextf("Invalid number of trials nsamp=%s. 
Using default nsamp=%d.", format(nsamp), nsamp.def), domain=NA) nsamp <- nsamp.def } if(nsamp > (mx <- .Machine$integer.max)) { warning("nsamp > i_max := maximal integer -- not allowed;\n", " set to i_max = ", mx) nsamp <- mx } ## Allocate temporary storage for the Fortran implementation, ## directly in the .Fortran() call. ## (if we used C + .Call() we would allocate all there, and be quite faster!) .Fortran(rffastmcd, x = if(is.double(x)) x else as.double(x), n = as.integer(n), p = as.integer(p), ## = 'nvar' in Fortran nhalff = as.integer(h), nsamp = as.integer(nsamp), # = 'krep' nmini = as.integer(nmini), kmini = kmini, initcovariance = double(p * p), initmean = double(p), best = rep.int(as.integer(10000), h), mcdestimate = double(1), ## = 'det' weights = integer(n), exactfit = integer(1), # output indicator: 0: ok; 1: ..., 2: .. coeff = matrix(double(5 * p), nrow = 5, ncol = p), ## plane kount = integer(1), adjustcov = double(p * p), ## used in ltsReg() ! ## integer(1), ## << 'seed' no longer used temp = integer(n), index1 = integer(n), index2 = integer(n), indexx = integer(n), nmahad = double(n), ndist = double(n), am = double(n), am2 = double(n), slutn = double(n), med = double(p), mad = double(p), sd = double(p), means = double(p), bmeans= double(p), w = double(p), fv1 = double(p), fv2 = double(p), rec = double(p+1), sscp1 = double((p+1)*(p+1)), cova1 = double(p * p), corr1 = double(p * p), cinv1 = double(p * p), cova2 = double(p * p), cinv2 = double(p * p), z = double(p * p), cstock = double(10 * p * p), # (10,nvmax2) mstock = double(10 * p), # (10,nvmax) c1stock = double(km10 * p * p), # (km10,nvmax2) m1stock = double(km10 * p), # (km10,nvmax) dath = double(nmaxi * p), # (nmaxi,nvmax) cutoff = qchisq(0.975, p), chimed = qchisq(0.5, p), i.trace= as.integer(trace) )[ ## keep the following ones: c("initcovariance", "initmean", "best", "mcdestimate", "weights", "exactfit", "coeff", "kount", "adjustcov") ] } ## ## VT:18.04.2007 - use simulated correction factors for several p and n ## and alpha = 1/2 (the default in rrcov.control()) ## ~~~~~~~~~~~ ## p in [1, 20] n in [2*p, ...] ## see the modifications in.MCDcnp2() and.MCDcnp2.rew ## ## VT::08.06.2007 - fixed the simulated values (especially for p=1) ## VT::11.05.2007 - reduce the usage of the simulated correction factors to only those that ## are definitvely wrong (negative or very large). This is done by: ## a) reducing p.max ## b) reducing n.max ## NB: In general, "wrong" are the factors for the reweighted matrix, but whenever a simulated ## value for the reweighted is used, the corresponding simulated must be used for the raw too. 
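## Illustration (not run): covMcd() above rescales the raw MCD scatter by
## .MCDcons(p, h/n) * .MCDcnp2(p, n, alpha) when 'use.correction' is TRUE.
## A small sketch of how the two factors combine, with arbitrary example
## values p = 3, n = 50, alpha = 0.75 (these numbers are illustrative only):
if(FALSE) {
    p <- 3 ; n <- 50 ; alpha <- 0.75   # arbitrary example values
    c.cons <- .MCDcons(p, alpha)       # consistency correction (Croux & Haesbroeck)
    c.fin  <- .MCDcnp2(p, n, alpha)    # finite-sample correction for the raw MCD
    c.cons * c.fin  # total factor multiplying the raw scatter matrix
}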
## ## MM::2014-04 : MCDcnp2s <- local({ p.min <- 1L p.max <- 9L # was 20 ncol <- 20L # the number of column in the matrices n.min <- as.integer( ### p = 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 c(1, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36, 38, 40)) n.max <- as.integer( c(2, 6, 10, 13, 16, 18, 20, 20, 20, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60)) ##was c(22, 24, 26, 28, 30, 32, 34, 36, 38, 40, 42, 44, 46, 48, 50, 52, 54, 56, 58, 60) ## these are the right (simulated) values for n.max n.min.rew <- n.min n.max.rew <- n.max m.0 <- matrix( c(1, 3.075819, 1.515999, 2.156169, 1.480742, 1.765485, 1.460206, 1.603707, 1.427429, 1.504712, 1.334528, 1.48297, 1.355308, 1.383867, 1.319241, 1.36065, 1.307467, 1.365596, 1.255259, 1.352741, 1.239381, 3.15342, 1.799889, 2.258497, 1.688312, 1.906779, 1.548203, 1.724785, 1.500873, 1.573442, 1.417137, 1.540805, 1.395945, 1.472596, 1.394247, 1.377487, 1.337394, 1.369354, 1.333378, 1.3181, 1.313813, 1.315528, 2.12777, 2.718898, 1.993509, 2.220433, 1.820585, 1.97782, 1.672455, 1.770151, 1.587478, 1.685352, 1.539295, 1.584536, 1.499487, 1.50702, 1.41952, 1.449058, 1.393042, 1.432999, 1.369964, 1.400997, 1.333824, 2.950549, 2.145387, 2.382224, 1.927077, 2.032489, 1.8371, 1.877833, 1.710891, 1.756053, 1.620778, 1.657761, 1.558978, 1.56257, 1.508633, 1.534406, 1.46709, 1.468734, 1.432529, 1.455283, 1.386975, 1.417532, 2.229573, 2.494447, 2.016117, 2.190061, 1.877996, 1.978964, 1.767284, 1.836948, 1.677372, 1.743316, 1.616383, 1.655964, 1.55484, 1.594831, 1.502185, 1.543723, 1.467005, 1.491123, 1.44402, 1.446915, 1.401578, 2.580264, 2.109121, 2.240741, 1.944719, 2.043397, 1.821808, 1.89725, 1.748788, 1.786988, 1.659333, 1.697012, 1.610622, 1.616503, 1.538529, 1.562024, 1.499964, 1.529344, 1.474519, 1.483264, 1.441552, 1.434448, 2.165233, 2.320281, 2.007836, 2.086471, 1.884052, 1.950563, 1.76926, 1.843328, 1.708941, 1.741039, 1.627206, 1.644755, 1.580563, 1.593402, 1.527312, 1.568418, 1.501462, 1.502542, 1.464583, 1.467921, 1.431141, 2.340443, 2.048262, 2.161097, 1.926082, 1.995422, 1.81446, 1.853165, 1.738533, 1.784456, 1.679444, 1.696463, 1.612931, 1.629483, 1.548186, 1.580026, 1.52198, 1.531111, 1.482914, 1.484824, 1.442726, 1.447838, 2.093386, 2.185793, 1.948989, 2.02804, 1.867137, 1.907732, 1.771923, 1.800413, 1.691612, 1.720603, 1.642705, 1.649769, 1.589028, 1.598955, 1.539759, 1.55096, 1.503965, 1.50703, 1.471349, 1.469791, 1.436959, 2.218315, 1.997369, 2.041128, 1.887059, 1.928524, 1.79626, 1.827538, 1.716748, 1.735696, 1.658329, 1.664211, 1.599286, 1.611511, 1.553925, 1.562637, 1.516805, 1.529894, 1.476064, 1.482474, 1.453253, 1.458467, 2.0247, 2.07899, 1.921976, 1.949376, 1.824629, 1.851671, 1.744713, 1.765647, 1.683525, 1.685592, 1.625113, 1.624961, 1.571921, 1.581223, 1.535257, 1.537464, 1.497165, 1.504879, 1.468682, 1.469319, 1.448344, 2.092315, 1.941412, 1.969843, 1.844093, 1.866133, 1.766145, 1.783829, 1.703613, 1.709714, 1.646078, 1.654264, 1.594523, 1.598488, 1.545105, 1.555356, 1.514627, 1.521353, 1.483958, 1.487677, 1.449191, 1.459721, 1.958987, 1.985144, 1.87739, 1.879643, 1.786823, 1.799642, 1.720015, 1.724688, 1.663539, 1.662997, 1.609267, 1.615124, 1.56746, 1.562026, 1.520586, 1.52503, 1.493008, 1.502496, 1.471983, 1.468546, 1.435064, 1.994706, 1.880348, 1.894254, 1.805827, 1.815965, 1.744296, 1.743389, 1.665481, 1.681644, 1.624466, 1.626109, 1.584028, 1.5818, 1.54376, 1.547237, 1.504878, 1.515087, 1.479032, 1.47936, 1.450758, 1.45073, 1.892685, 1.91087, 1.825301, 1.827176, 1.745363, 1.746115, 1.693373, 
1.701692, 1.648247, 1.637112, 1.594648, 1.592013, 1.554849, 1.55013, 1.522186, 1.520901, 1.492606, 1.493072, 1.460868, 1.46733, 1.440956, 1.92771, 1.835696, 1.841979, 1.775991, 1.766092, 1.703807, 1.708791, 1.654985, 1.655917, 1.602388, 1.611867, 1.570765, 1.573368, 1.53419, 1.529033, 1.506767, 1.503596, 1.481126, 1.471806, 1.444917, 1.451682, 1.850262, 1.855034, 1.778997, 1.789995, 1.718871, 1.717326, 1.667357, 1.666291, 1.619743, 1.631475, 1.582624, 1.58766, 1.546302, 1.545063, 1.512222, 1.517888, 1.489127, 1.487271, 1.466722, 1.463618, 1.444137, 1.8709, 1.794033, 1.80121, 1.736376, 1.740201, 1.673776, 1.682541, 1.638153, 1.642294, 1.604417, 1.597721, 1.559534, 1.559108, 1.533942, 1.529348, 1.499517, 1.501586, 1.473147, 1.473031, 1.457615, 1.452348, 1.805753, 1.812952, 1.746549, 1.747222, 1.696924, 1.694957, 1.652157, 1.650568, 1.607807, 1.613666, 1.577295, 1.570712, 1.543704, 1.538272, 1.515369, 1.517113, 1.487451, 1.491593, 1.464514, 1.464658, 1.439359, 1.823222, 1.758781, 1.767358, 1.70872, 1.712926, 1.666956, 1.667838, 1.62077, 1.621445, 1.592891, 1.58549, 1.55603, 1.559042, 1.521501, 1.523342, 1.499913, 1.501937, 1.473359, 1.472522, 1.452613, 1.452448), ncol = ncol) m.rew <- matrix( c(1, 0.984724, 0.970109, 0.978037, 0.979202, 0.982933, 1.001461, 1.026651, 0.981233, 1.011895, 1.017499, 0.964323, 1.026574, 1.006594, 0.980194, 1.009828, 0.998083, 0.966173, 1.009942, 0.99916, 1.021521, 2.216302, 1.418526, 1.635601, 1.31402, 1.33975, 1.251798, 1.210917, 1.133114, 1.150666, 1.138732, 1.096822, 1.076489, 1.058343, 1.045746, 1.036743, 1.008929, 1.049537, 1.028148, 1.027297, 1.020578, 1.00074, 1.73511, 2.06681, 1.545905, 1.659655, 1.456835, 1.47809, 1.331966, 1.334229, 1.231218, 1.220443, 1.198143, 1.193965, 1.142156, 1.146231, 1.124661, 1.112719, 1.089973, 1.070606, 1.082681, 1.061243, 1.053191, 2.388892, 1.847626, 1.96998, 1.630723, 1.701272, 1.521008, 1.553057, 1.382168, 1.414555, 1.326982, 1.321403, 1.265207, 1.264856, 1.200418, 1.21152, 1.17531, 1.168536, 1.140586, 1.14457, 1.111392, 1.112031, 1.968153, 2.168931, 1.784373, 1.894409, 1.667912, 1.693007, 1.545176, 1.582428, 1.45319, 1.480559, 1.371611, 1.358541, 1.330235, 1.30264, 1.257518, 1.244156, 1.221907, 1.22455, 1.178965, 1.177855, 1.166319, 2.275891, 1.866587, 2.014249, 1.750567, 1.829363, 1.650019, 1.689043, 1.562539, 1.561359, 1.473378, 1.488554, 1.411097, 1.416527, 1.35117, 1.361044, 1.30205, 1.299037, 1.250265, 1.260083, 1.218665, 1.236027, 1.95771, 2.074066, 1.847385, 1.905408, 1.71393, 1.768425, 1.63908, 1.67234, 1.564992, 1.562337, 1.49229, 1.499573, 1.420813, 1.424067, 1.383947, 1.378726, 1.33062, 1.330071, 1.279404, 1.295302, 1.263947, 2.164121, 1.871024, 1.979485, 1.782417, 1.84489, 1.706023, 1.734857, 1.622782, 1.634869, 1.55196, 1.554423, 1.482325, 1.509195, 1.440726, 1.436328, 1.386335, 1.396277, 1.347939, 1.346732, 1.310242, 1.309371, 1.938822, 2.050409, 1.834863, 1.882536, 1.737494, 1.761608, 1.65742, 1.687579, 1.591863, 1.60158, 1.520982, 1.535234, 1.470649, 1.486485, 1.42892, 1.435574, 1.384132, 1.382329, 1.343281, 1.346581, 1.315111, 2.063894, 1.880094, 1.907246, 1.78278, 1.806648, 1.6952, 1.720922, 1.63084, 1.635274, 1.565423, 1.56171, 1.512015, 1.4986, 1.463903, 1.456588, 1.422856, 1.407325, 1.376724, 1.373923, 1.346464, 1.34259, 1.898389, 1.950406, 1.812053, 1.849175, 1.72649, 1.737651, 1.646719, 1.655112, 1.587601, 1.597894, 1.539877, 1.53329, 1.495054, 1.490548, 1.445249, 1.446037, 1.410272, 1.412274, 1.375797, 1.369604, 1.341232, 1.992488, 1.830452, 1.857314, 1.758686, 1.763822, 1.683215, 1.679543, 
1.619269, 1.608512, 1.565, 1.562282, 1.498869, 1.51325, 1.470912, 1.464654, 1.427573, 1.439301, 1.402308, 1.391006, 1.37074, 1.367573, 1.855502, 1.891242, 1.77513, 1.790618, 1.706443, 1.713098, 1.642896, 1.636577, 1.580366, 1.581752, 1.542937, 1.531668, 1.487894, 1.492039, 1.460304, 1.449762, 1.4219, 1.420953, 1.390137, 1.388677, 1.360506, 1.908277, 1.802091, 1.806128, 1.723757, 1.727249, 1.659883, 1.670056, 1.605209, 1.611481, 1.558846, 1.551762, 1.512951, 1.511515, 1.468948, 1.476073, 1.441508, 1.434997, 1.412687, 1.406782, 1.380452, 1.375924, 1.811415, 1.822311, 1.740544, 1.739355, 1.68127, 1.685342, 1.620281, 1.622572, 1.579611, 1.570103, 1.529881, 1.530097, 1.490041, 1.4947, 1.457329, 1.456344, 1.423363, 1.428653, 1.399988, 1.390069, 1.376594, 1.837723, 1.76039, 1.771031, 1.697404, 1.690915, 1.634409, 1.63713, 1.589594, 1.586521, 1.552974, 1.545571, 1.505923, 1.512794, 1.477833, 1.477821, 1.444241, 1.44452, 1.419258, 1.421297, 1.394924, 1.389393, 1.779716, 1.781271, 1.706031, 1.71224, 1.655099, 1.654284, 1.608878, 1.605955, 1.565683, 1.565938, 1.523594, 1.531235, 1.492749, 1.486786, 1.457635, 1.461416, 1.432472, 1.430164, 1.404441, 1.400021, 1.378273, 1.798932, 1.735577, 1.727031, 1.671049, 1.677601, 1.624427, 1.617626, 1.579533, 1.579987, 1.544635, 1.538715, 1.504538, 1.50726, 1.477163, 1.477084, 1.450861, 1.444496, 1.428416, 1.422813, 1.400185, 1.39552, 1.750193, 1.752145, 1.690365, 1.692051, 1.642391, 1.63858, 1.600144, 1.596401, 1.558305, 1.555932, 1.525968, 1.522984, 1.491563, 1.492554, 1.467575, 1.45786, 1.437545, 1.430893, 1.413983, 1.409386, 1.391943, 1.762922, 1.701346, 1.704996, 1.6556, 1.655548, 1.611964, 1.615219, 1.569103, 1.571079, 1.540617, 1.541602, 1.503791, 1.50195, 1.478069, 1.47678, 1.452458, 1.451732, 1.429144, 1.426547, 1.40363, 1.402647), ncol = ncol) rm(ncol) list( sim.0 = function(p, n) { p. <- p - p.min + 1L if(p.min <= p && p <= p.max && n.min[p.] <= n && n <= n.max[p.]) { nind <- n - n.min[p.] + 1L m.0[nind, p.] ##= } else NA }, sim.rew = function(p, n) { p. <- p - p.min + 1L if(p.min <= p && p <= p.max && n.min.rew[p.] <= n && n <= n.max.rew[p.]) { nind <- n - n.min.rew[p.] + 1L m.rew[nind, p.] ##=== } else NA }) }) ## end{MCDcnp2s} if(FALSE) { ## For experimentation: ls.str( ee <- environment(MCDcnp2s$sim.0) ) matplot(1:21, ee$m.0, type = "o", xlab = "p - p.min + 1") } robustbase/R/BYlogreg.R0000644000176200001440000003605014107541105014463 0ustar liggesusers#### http://www.econ.kuleuven.be/public/NDBAE06/programs/roblog/ : #### #### August 06, 2010 2:14 PM 9121 BYlogreg.r.txt == BYlogreg.R (*this* original) #### May 04, 2005 9:24 AM 6702 BYlogreg.txt == BYlogreg.R.~2005~ #### May 04, 2005 9:25 AM 6720 WBYlogreg.txt == WBYlogreg.R #### #### Sep. 27, 2017: available at #### #### NB: Splus original Version of this file: BYlogreg.ssc in the #### -- in FunctionsRob/ (from FunctionsRob.zip) from Wiley's book supplements #### http://www.wiley.com/legacy/wileychi/robust_statistics/robust.html #### see my ../misc/MMY-book/Wiley-supplements/FunctionsRob/BYlogreg.ssc ## Computation of the estimator of Bianco and Yohai (1996) in logistic regression ## ------------- ## Christophe Croux, Gentiane Haesbroeck ## (thanks to Kristel Joossens and Valentin Todorov for improving the code) - ## ==> Now "contains" both the *weighted* and regular, unweighted BY-estimator ## ## This program computes the estimator of Bianco and Yohai in ## logistic regression. By default, an intercept term is included ## and p parameters are estimated. 
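## Roughly, the code below parametrizes the fit as gamma = xi / sigma with
## ||xi|| = 1 and minimizes the objective mean(phiBY3((x %*% xi)/sigma, y, const))
## by alternating a one-dimensional minimization over sigma (via nlminb) with
## projected-gradient steps for xi on the unit sphere, halving the step size
## whenever a full step does not decrease the objective.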
## ## For more details we refer to ## Croux, C., and Haesbroeck, G. (2003), ## ``Implementing the Bianco and Yohai estimator for Logistic Regression'', ## Computational Statistics and Data Analysis, 44, 273-295 ## ## Changes by Martin Maechler, ---> ../man/BYlogreg.Rd ## ------------------ BYlogreg <- function(x0, y, initwml=TRUE, # w.x=NULL, addIntercept=TRUE, const=0.5, kmax = 1000, maxhalf = 10, sigma.min = 1e-4, trace.lev=0) { if(!is.numeric(y)) y <- as.numeric(y) ## if(!is.null(w.x)) ## warning("x weights 'w.x' are not yet made use of") if(!is.null(dim(y))) { if(ncol(y) != 1) stop("y is not onedimensional") y <- as.vector(y) } n <- length(y) if(is.data.frame(x0)) { x0 <- data.matrix(x0) } else if (!is.matrix(x0)) { x0 <- matrix(x0, length(x0), 1, dimnames = list(names(x0), deparse(substitute(x0)))) } if(nrow(x0) != n) stop("Number of observations in x and y not equal") na.x <- !is.finite(rowSums(x0)) na.y <- !is.finite(y) ok <- !(na.x | na.y) if(!all(ok)) { x0 <- x0[ok, , drop = FALSE] y <- y [ok] # y[ok, , drop = FALSE] } if(addIntercept) { x <- cbind("Intercept" = 1, x0) } else { # x0 := x without the "intercept column" x <- x0 all1 <- apply(x == 1, 2, all) if(any(all1)) x0 <- x[,!all1, drop = FALSE] else message("no intercept in the model") } dx <- dim(x) n <- dx[1] if(n == 0) stop("All observations have missing values!") p <- dx[2] # == ncol(x) family <- binomial() ## Computation of the initial value of the optimization process gstart <- if(initwml) { ###_ FIXME: Should allow many more schemes: ###_ 1) using MVE with much less singular cases ###_ 2) Instead of {0,1}-weighting with cutoff, w/ weights --> 0 *continuously* ### --> glm() with "prior" weights instead of 'subset' ## hp <- floor(n*(1-0.25))+1 ## mcdx <- cov.mcd(x0, quantile.used =hp,method="mcd") ## rdx=sqrt(mahalanobis(x0,center=mcdx$center,cov=mcdx$cov)) ## mcdx <- CovMcd(x0, alpha=0.75) ## rdx <- sqrt(getDistance(mcdx)) mcd <- covMcd(x0, alpha=0.75) ## ----- FIXME: argument! D <- sqrt( mahalanobis(mcd$X, mcd$center, mcd$cov) ) vc <- sqrt(qchisq(0.975, p-1)) ## ----- FIXME: 'vc' should be argument! wrd <- D <= vc ### FIXME_2: use weights and "weights.on.x' as in Mqle ( ./glmrobMqle.R ) ## glm(y~x0, family=binomial, subset = wrd)$coef glm.fit(x[wrd,,drop=FALSE], y[wrd], family=family)$coef } else { glm.fit(x, y, family=family)$coef } sigma1 <- 1/sqrt(sum(gstart^2)) xistart <- gstart*sigma1 stscores <- x %*% xistart ## Initial value for the objective function oldobj <- mean(phiBY3(stscores/sigma1, y, const)) converged <- FALSE kstep <- 1L while(kstep < kmax && !converged) { unisig <- function(sigma) mean(phiBY3(stscores/sigma, y, const)) ## ------ optimsig <- nlminb(sigma1, unisig, lower=0)# "FIXME" arguments to nlminb() ## ====== if(trace.lev) cat(sprintf("k=%2d, s1=%12.8g: => new s1= %12.8g", kstep, sigma1, optimsig$par))# MM: jhalf =!?= 1 here ?? sigma1 <- optimsig$par if(sigma1 < sigma.min) { if(trace.lev) cat("\n") warning(gettextf("Implosion: sigma1=%g became too small", sigma1)) kstep <- kmax #-> *no* convergence } else { ## gamma1 <- xistart/sigma1 scores <- stscores/sigma1 newobj <- mean(phiBY3(scores, y,const)) oldobj <- newobj grad.BY <- colMeans((derphiBY3(scores,y,const) %*% matrix(1,ncol=p))*x) h <- -grad.BY + (grad.BY %*% xistart) *xistart finalstep <- h/sqrt(sum(h^2)) if(trace.lev) { if(trace.lev >= 2) cat(sprintf(", obj=%12.9g: ", oldobj)) cat("\n") } ## FIXME repeat { ... } {{next 4 lines are also inside while(..) 
below}} xi1 <- xistart+finalstep xi1 <- xi1/sum(xi1^2) scores1 <- (x %*% xi1)/sigma1 newobj <- mean(phiBY3(scores1,y,const)) ## If 'newobj' is not better, try taking a smaller step size: hstep <- 1. jhalf <- 1L while(jhalf <= maxhalf & newobj > oldobj) { hstep <- hstep/2 xi1 <- xistart+finalstep*hstep xi1 <- xi1/sqrt(sum(xi1^2)) scores1 <- x %*% xi1/sigma1 newobj <- mean(phiBY3(scores1,y,const)) if(trace.lev >= 2) cat(sprintf(" jh=%2d, hstep=%13.8g => new obj=%13.9g\n", jhalf, hstep, newobj)) jhalf <- jhalf+1L } converged <- not.improved <- (jhalf > maxhalf && newobj > oldobj) if(not.improved) { ## newobj is "worse" and step halving did not improve message("Convergence Achieved") } else { jhalf <- 1L xistart <- xi1 oldobj <- newobj stscores <- x %*% xi1 kstep <- kstep+1L } } } ## while( kstep ) if(kstep == kmax) { warning(gettextf("No convergence in %d steps.", kstep), domain=NA) list(convergence=FALSE, objective=0, coefficients= rep(NA,p)) } else { gammaest <- xistart/sigma1 V <- vcovBY3(x, y, const, estim=gammaest, addIntercept=FALSE) list(convergence=TRUE, objective=oldobj, coefficients=gammaest, cov = V, sterror = sqrt(diag(V)), iter = kstep) } } ### -- FIXME: nlminb() allows many tweaks !! ### -- ----- but we use nlminb() for ONE-dim. minimization over { sigma >= 0 } - really?? ## MM: my version would rather use optimize() over over log(sigma) glmrobBY.control <- function(maxit = 1000, const = 0.5, maxhalf = 10) ## FIXME: sigma.min ## MM: 'acc' seems a misnomer to me, but it's inherited from MASS::rlm ## TODO acc = 1e-04, test.acc = "coef", tcc = 1.345) { ## if (!is.numeric(acc) || acc <= 0) ## stop("value of acc must be > 0") ## if (test.acc != "coef") ## stop("Only 'test.acc = \"coef\"' is currently implemented") ## if (!(any(test.vec == c("coef", "resid")))) ## stop("invalid argument for test.acc") if(!is.numeric(maxit) || maxit <= 0) stop("maximum number of \"kstep\" iterations must be > 0") if(!is.numeric(maxhalf) || maxhalf <= 0) stop("maximal number of *inner* step halvings must be > 0") ## if (!is.numeric(tcc) || tcc <= 0) ## stop("value of the tuning constant c (tcc) must be > 0") if(!is.numeric(const) || const <= 0) stop("value of the tuning constant c ('const') must be > 0") list(## acc = acc, consttest.acc = test.acc, const=const, maxhalf=maxhalf, maxit=maxit #, tcc = tcc ) } ##' @param intercept logical, if true, X[,] has an intercept column which should ##' not be used for rob.wts glmrobBY <- function(X, y, weights = NULL, start = NULL, offset = NULL, method = c("WBY","BY"), weights.on.x = "none", control = glmrobBY.control(...), intercept = TRUE, trace.lev = 0, ...) { ### THIS is *NOT* exported method <- match.arg(method) if(!is.null(weights) || any(weights != 1)) ## FIXME (?) stop("non-trivial prior 'weights' are not yet implemented for \"BY\"") if(!is.null(start)) stop(" 'start' cannot yet be passed to glmrobBY()") if(!is.null(offset)) stop(" 'offset' is not yet implemented for \"BY\"") const <- if(is.null(cc <- control$const )) 0.5 else cc kmax <- if(is.null(cc <- control$maxit )) 1e3 else cc maxhalf <- if(is.null(cc <- control$maxhalf)) 10 else cc if(!identical(weights.on.x, "none")) stop(gettextf("'weights.on.x = \"%s\"' is not implemented", format(weights.on.x)), domain=NA) ## w.x <- robXweights(weights.on.x, X=X, intercept=intercept) ## ## MM: all(?) the BY3() functions below would need to work with weights... 
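    ## Dispatch to BYlogreg(): for method = "WBY", 'initwml = TRUE' starts from a
    ## covMcd()-based weighted ML fit; plain "BY" starts from the ordinary ML fit.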
r <- BYlogreg(x0=X, y=y, initwml = (method == "WBY"), ## w.x=w.x, addIntercept = !intercept, ## add intercept if there is none const=const, kmax=kmax, maxhalf=maxhalf, ## FIXME sigma.min (is currently x-scale dependent !????) trace.lev=trace.lev) ## FIXME: make result more "compatible" with other glmrob() methods r } ### Functions needed for the computation of estimator of Bianco and Yohai ---------------------- ## From their paper: ## A last remark is worth mentioning: when huge outliers occur in ## the logistic regression setting, often numerical imprecision occurs in the computation ## of the deviances given by ## d(s;y_i)= -y_i log F(s) - (1-y_i) log{1-F(s)} . ## ## Instead of directly computing this expression, it can be seen that a ## numerically more stable and accurate formula is given by ## log(1 + exp(-abs(s))) + abs(s)* ((y-0.5)*s < 0) ## in which the second term equals abs(s) if the observation is misclassified, 0 otherwise. dev1 <- function(s,y) log(1+exp(-abs(s))) + abs(s)*((y-0.5)*s<0) dev2 <- function(s,y) log1p(exp(-abs(s))) + abs(s)*((y-0.5)*s<0) dev3 <- function(s,y) -( y * plogis(s, log.p=TRUE) + (1-y)*plogis(s, lower.tail=FALSE, log.p=TRUE)) ## MM[FIXME]: first tests indicate that dev3() is clearly more accurate than ## their dev1() !! ## MM{FIXME2}: In code below have (or "had") three cases of same formula, but ## with 's>0' instead of 's<0' : This is == dev?(-s, y) !! ## for now, 100% back-compatibility: devBY <- dev1 rm(dev1, dev2, dev3) ## MM: This is from my vignette, but *not* used log1pexp <- function(x) { if(has.na <- any(ina <- is.na(x))) { y <- x x <- x[ok <- !ina] } t1 <- x <= 18 t2 <- !t1 & (tt <- x <= 33.3) r <- x r[ t1] <- log1p(exp(x[t1])) r[ t2] <- { x2 <- x[t2]; x2 + exp(-x2) } r[!tt] <- x[!tt] if(has.na) { y[ok] <- r ; y } else r } phiBY3 <- function(s,y,c3) { s <- as.double(s) ## MM FIXME log(1 + exp(-.)) ... but read the note above !! --- dev. <- devBY(s,y) ## FIXME: GBY3Fs() computes the 'dev' above *again*, and ## GBY3Fsm() does with 's>0' instead of 's<0' rhoBY3(dev.,c3) + GBY3Fs(s,c3) + GBY3Fsm(s,c3) } rhoBY3 <- function(t,c3) { ec3 <- exp(-sqrt(c3)) t*ec3* (t <= c3) + (ec3*(2+(2*sqrt(c3))+c3) - 2*exp(-sqrt(t))*(1+sqrt(t)))* (t > c3) } psiBY3 <- function(t,c3) { exp(-sqrt(c3)) *(t <= c3) + exp(-sqrt( t)) *(t > c3) } ## MM: This is shorter (but possibly slower when most t are <= c3 : ## psiBY3 <- function(t,c3) exp(-sqrt(pmax(t, c3))) ##' d/dt psi(t, c3) derpsiBY3 <- function(t, c3) { r <- t r[in. <- (t <= c3)] <- 0 if(any(out <- !in.)) { t <- t[out] st <- sqrt(t) r[out] <- -exp(-st)/(2*st) } r } ## MM: FIXME this is not used above sigmaBY3 <- function(sigma,s,y,c3) { mean(phiBY3(s/sigma,y,c3)) } derphiBY3 <- function(s,y,c3) { Fs <- exp(-devBY(s,1)) ds <- Fs*(1-Fs) ## MM FIXME: use expm1() dev. <- devBY(s,y) Gprim1 <- devBY(s,1) Gprim2 <- devBY(-s,1) -psiBY3(dev.,c3)*(y-Fs) + ds*(psiBY3(Gprim1,c3) - psiBY3(Gprim2,c3)) } der2phiBY3 <- function(s, y, c3) { s <- as.double(s) Fs <- exp(-devBY(s,1)) ds <- Fs*(1-Fs) ## MM FIXME: use expm1() dev. <- devBY(s,y) Gprim1 <- devBY(s,1) Gprim2 <- devBY(-s,1) der2 <- derpsiBY3(dev.,c3)*(Fs-y)^2 + ds*psiBY3(dev.,c3) der2 <- der2+ ds*(1-2*Fs)*(psiBY3(Gprim1,c3) - psiBY3(Gprim2,c3)) der2 - ds*(derpsiBY3(Gprim1,c3)*(1-Fs) + derpsiBY3(Gprim2,c3)* Fs ) } GBY3Fs <- function(s,c3) { e.f <- exp(0.25)*sqrt(pi) ## MM FIXME: Fs = exp(..) and below use log(Fs) !! 
Fs <- exp(-devBY(s,1)) resGinf <- e.f*(pnorm(sqrt(2)*(0.5+sqrt(-log(Fs))))-1) ## MM FIXME: use expm1(): resGinf <- (resGinf+(Fs*exp(-sqrt(-log(Fs)))))*as.numeric(s <= -log(exp(c3)-1)) resGsup <- ((Fs*exp(-sqrt(c3)))+(e.f*(pnorm(sqrt(2)*(0.5+sqrt(c3)))-1))) * as.numeric(s > -log(exp(c3)-1)) resGinf + resGsup } GBY3Fsm <- function(s,c3) { e.f <- exp(0.25)*sqrt(pi) ## MM FIXME: Fsm = exp(..) and below use log(Fsm) !! Fsm <- exp(-devBY(-s,1)) resGinf <- e.f*(pnorm(sqrt(2)*(0.5+sqrt(-log(Fsm))))-1) ## MM FIXME: use expm1(): resGinf <- (resGinf+(Fsm*exp(-sqrt(-log(Fsm))))) * as.numeric(s >= log(exp(c3)-1)) resGsup <- ((Fsm*exp(-sqrt(c3)))+(e.f*(pnorm(sqrt(2)*(0.5+sqrt(c3)))-1))) * as.numeric(s < log(exp(c3)-1)) resGinf + resGsup } ## Compute the standard erros of the estimates - ## this is done by estimating the asymptotic variance of the normal ## limiting distribution of the BY estimator - as derived in Bianco ## and Yohai (1996) ## sterby3 <- function(x0, y, const, estim, addIntercept) { sqrt(diag(vcovBY3(x0, y, const=const, estim=estim, addIntercept=addIntercept))) } vcovBY3 <- function(z, y, const, estim, addIntercept) { stopifnot(length(dim(z)) == 2) if(addIntercept) z <- cbind(1, z) d <- dim(z) n <- d[1] p <- d[2] argum <- z %*% estim matM <- IFsqr <- matrix(0, p, p) for(i in 1:n) { myscalar <- as.numeric(der2phiBY3(argum[i],y[i], c3=const)) zzt <- tcrossprod(z[i,]) matM <- matM + myscalar * zzt IFsqr <- IFsqr + derphiBY3(argum[i],y[i], c3=const)^2 * zzt } matM <- matM/n matMinv <- solve(matM) IFsqr <- IFsqr/n ## Now, asymp.cov = matMinv %*% IFsqr %*% t(matMinv) ## provide vcov(): the full matrix (matMinv %*% IFsqr %*% t(matMinv))/n } robustbase/R/classPC.R0000644000176200001440000000361513710224622014303 0ustar liggesusers##' @title Simple Matrix Rank ====> ../man/rankMM.Rd rankMM <- function(A, tol = NULL, sv = svd(A,0,0)$d) { d <- dim(A) stopifnot(length(d)==2, length(sv) == min(d), min(d) >= 1L, all(diff(sv) <= 0)) # must be sorted decreasingly if(is.null(tol)) tol <- max(d) * .Machine$double.eps * abs(sv[1L]) else stopifnot(is.numeric(tol), tol >= 0) sum(sv >= tol) } ##' Flip the signs of the loadings ##' - comment from Stephan Milborrow .signflip <- function(loadings) { apply(loadings, 2L, function(x) if(x[which.max(abs(x))] < 0) -x else x) } ##' @title Classical Principal Components ... ==> ../man/classPC.Rd classPC <- function(x, scale=FALSE, center=TRUE, signflip=TRUE, via.svd = n > p, scores=FALSE) { if(!is.numeric(x) || !is.matrix(x)) stop("'x' must be a numeric matrix") else if((n <- nrow(x)) <= 1) stop("The sample size must be greater than 1 for svd") p <- ncol(x) x <- scale(x, center=center, scale=scale) ## ----- if(isTRUE(scale)) scale <- attr(x, "scaled:scale") if(isTRUE(center)) center <- attr(x, "scaled:center") if(via.svd) { svd <- svd(x, nu=0) rank <- rankMM(x, sv=svd$d) loadings <- svd$v[,1:rank, drop=FALSE] eigenvalues <- (svd$d[1:rank])^2 /(n-1) ## FIXME: here .^2; later sqrt(.) } else { ## n <= p; was "kernelEVD" e <- eigen(tcrossprod(x), symmetric=TRUE) evs <- e$values tolerance <- n * max(evs) * .Machine$double.eps rank <- sum(evs > tolerance) evs <- evs[ii <- seq_len(rank)] eigenvalues <- evs / (n-1) ## MM speedup, was: crossprod(..) 
%*% diag(1/sqrt(evs)) loadings <- crossprod(x, e$vectors[,ii]) * rep(1/sqrt(evs), each=p) } ## VT::15.06.2010 - signflip: flip the sign of the loadings if(signflip) loadings <- .signflip(loadings) list(rank=rank, eigenvalues=eigenvalues, loadings=loadings, scores = if(scores) x %*% loadings, center=center, scale=scale) } robustbase/R/AAA.R0000644000176200001440000001211613170446321013333 0ustar liggesusers ## if(getRversion() < "2.13") { ## nobs <- function (object, ...) UseMethod("nobs") ## ## also used for mlm fits *and* lmrob : ## nobs.lm <- function(object, ...) ## if(!is.null(w <- object$weights)) sum(w != 0) else NROW(object$residuals) ## ## for glmrob : ## nobs.glm <- function(object, ...) sum(!is.na(object$residuals)) ## } ## Here and in NAMESPACE: if(getRversion() < "3.1.0") { ## cut'n'paste from R's source src/library/stats/R/confint.R format.perc <- function(probs, digits) ## Not yet exported, maybe useful in other contexts: ## quantile.default() sometimes uses a version of it paste(format(100 * probs, trim = TRUE, scientific = FALSE, digits = digits), "%") confint.lm <- function(object, parm, level = 0.95, ...) { cf <- coef(object) pnames <- names(cf) if(missing(parm)) parm <- pnames else if(is.numeric(parm)) parm <- pnames[parm] a <- (1 - level)/2 a <- c(a, 1 - a) fac <- qt(a, object$df.residual) # difference from default method pct <- format.perc(a, 3) ci <- array(NA, dim = c(length(parm), 2L), dimnames = list(parm, pct)) ses <- sqrt(diag(vcov(object)))[parm] # gives NA for aliased parms ci[] <- cf[parm] + ses %o% fac ci } ## cut'n'paste from R's source src/library/stats/R/dummy.coef.R dummy.coef.lm <- function(object, use.na=FALSE, ...) { Terms <- terms(object) tl <- attr(Terms, "term.labels") int <- attr(Terms, "intercept") facs <- attr(Terms, "factors")[-1, , drop=FALSE] Terms <- delete.response(Terms) vars <- all.vars(Terms) xl <- object$xlevels if(!length(xl)) { # no factors in model return(as.list(coef(object))) } nxl <- setNames(rep.int(1, length(vars)), vars) tmp <- unlist(lapply(xl, length)) ## ?? vapply(xl, length, 1L) nxl[names(tmp)] <- tmp lterms <- apply(facs, 2L, function(x) prod(nxl[x > 0])) nl <- sum(lterms) args <- setNames(vector("list", length(vars)), vars) for(i in vars) args[[i]] <- if(nxl[[i]] == 1) rep.int(1, nl) else factor(rep.int(xl[[i]][1L], nl), levels = xl[[i]]) dummy <- do.call("data.frame", args) pos <- 0 rn <- rep.int(tl, lterms) rnn <- rep.int("", nl) for(j in tl) { i <- vars[facs[, j] > 0] ifac <- i[nxl[i] > 1] if(length(ifac) == 0L) { # quantitative factor rnn[pos+1] <- j } else if(length(ifac) == 1L) { # main effect dummy[ pos+1L:lterms[j], ifac ] <- xl[[ifac]] rnn[ pos+1L:lterms[j] ] <- as.character(xl[[ifac]]) } else { # interaction tmp <- expand.grid(xl[ifac]) dummy[ pos+1L:lterms[j], ifac ] <- tmp rnn[ pos+1L:lterms[j] ] <- apply(as.matrix(tmp), 1L, function(x) paste(x, collapse=":")) } pos <- pos + lterms[j] } ## some terms like poly(x,1) will give problems here, so allow ## NaNs and set to NA afterwards. 
mf <- model.frame(Terms, dummy, na.action=function(x)x, xlev=xl) mm <- model.matrix(Terms, mf, object$contrasts, xl) if(any(is.na(mm))) { warning("some terms will have NAs due to the limits of the method") mm[is.na(mm)] <- NA } coef <- object$coefficients if(!use.na) coef[is.na(coef)] <- 0 asgn <- attr(mm,"assign") res <- setNames(vector("list", length(tl)), tl) for(j in seq_along(tl)) { keep <- asgn == j ij <- rn == tl[j] res[[j]] <- setNames(drop(mm[ij, keep, drop=FALSE] %*% coef[keep]), rnn[ij]) } if(int > 0) { res <- c(list("(Intercept)" = coef[int]), res) } class(res) <- "dummy_coef" res } }# if R <= 3.1.0 ## Not exported, and only used because CRAN checks must be faster doExtras <- function() { interactive() || nzchar(Sys.getenv("R_robustbase_check_extra")) || identical("true", unname(Sys.getenv("R_PKG_CHECKING_doExtras"))) } if(getRversion() < "3.5") { isTRUE <- function(x) is.logical(x) && length(x) == 1L && !is.na(x) && x isFALSE <- function(x) is.logical(x) && length(x) == 1L && !is.na(x) && !x if(getRversion() < "3.3") { sigma <- function(object, ...) UseMethod("sigma") ## For completeness, and when comparing with nlrob() results: sigma.nls <- function(object, ...) ## sqrt ( sum( R_i ^ 2) / (n - p) ) : sqrt( deviance(object) / (nobs(object) - length(coef(object))) ) } } ## shortcut -- used often in print() etc: pasteK <- function(...) paste(..., collapse = ", ") ## stopifnot(..) helper : is.1num <- function(x) is.numeric(x) && length(x) == 1L ##' return 'x' unless it is NULL where you'd use 'orElse' `%||%` <- function(x, orElse) if(!is.null(x)) x else orElse ##' Augment a vcov-matrix by NA rows & cols when needed; from */R/src/library/stats/R/vcov.R .vcov.aliased <- asNamespace("stats")$.vcov.aliased %||% function(aliased, vc, complete = TRUE) { ## Checking for "NA coef": "same" code as in print.summary.lm() in ./lm.R : if(complete && nrow(vc) < (P <- length(aliased)) && any(aliased)) { ## add NA rows and columns in vcov cn <- names(aliased) VC <- matrix(NA_real_, P, P, dimnames = list(cn,cn)) j <- which(!aliased) VC[j,j] <- vc VC } else # default vc } robustbase/R/glmrob.R0000644000176200001440000002561213325654420014243 0ustar liggesusersglmrob <- function (formula, family, data, weights, subset, na.action, start = NULL, offset, method = c("Mqle", "BY", "WBY", "MT"), weights.on.x = c("none", "hat", "robCov", "covMcd"), control = NULL, model = TRUE, x = FALSE, y = TRUE, contrasts = NULL, trace.lev = 0, ...) 
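## Supported families: binomial, poisson, Gamma, gaussian; the "BY"/"WBY"
## methods apply to the binomial family only (both are checked in the body below).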
{ call <- match.call() if (is.character(family)) family <- get(family, mode = "function", envir = parent.frame()) if (is.function(family)) family <- family() fami <- family$family if(is.null(fami)) stop(gettextf("'%s' is not a valid family (see ?family)", as.character(call[["family"]])), domain=NA) if (!(fami %in% c("binomial", "poisson", "Gamma", "gaussian"))) { stop(gettextf("Robust GLM fitting not yet implemented for family %s", fami), domain=NA) } if (missing(data)) data <- environment(formula) ## mf <- match.call(expand.dots = FALSE) m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"), names(mf), 0) mf <- mf[c(1, m)] mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") mf <- eval(mf, parent.frame()) if(identical(method, "model.frame")) return(mf) mt <- attr(mf, "terms") Y <- model.response(mf, "any")# "numeric" or "factor" if (length(dim(Y)) == 1) { nm <- rownames(Y) dim(Y) <- NULL if (!is.null(nm)) names(Y) <- nm } X <- if (!is.empty.model(mt)) model.matrix(mt, mf, contrasts) else matrix(NA_real_, NROW(Y), 0) weights <- model.weights(mf) offset <- model.offset(mf) if (!is.null(weights) && any(weights < 0)) stop("'weights' must be non-negative") if (!is.null(offset) && length(offset) != NROW(Y)) stop(gettextf("Number of offsets is %d, should rather equal %d (number of observations)", length(offset), NROW(Y)), domain=NA) method <- match.arg(method) meth. <- if(method == "WBY") "BY" else method ### FIXME: the whole 'control' should be changed to "copy" lmrob() and lmrob.control() ## ------- --> *one* exported glmrob.control() function with 'method' and switch() inside... ## see >>> ./lmrob.MM.R if(is.null(control)) # -> use e.g., glmrobMqle.control() control <- get(paste0("glmrob", meth., ".control"))(...) if(missing(weights.on.x) || is.character(weights.on.x)) weights.on.x <- match.arg(weights.on.x) else if(!(is.function(weights.on.x) || is.list(weights.on.x) || (is.numeric(weights.on.x) && length(weights.on.x) == NROW(Y)))) stop("'weights.on.x' must be a string, function, list or numeric n-vector") if(!is.null(start) && !is.numeric(start)) { ## initialization methods if(!is.character(start)) stop("'start' must be a numeric vector, NULL, or a character string") start <- switch(start, "lmrob" =, "lmrobMM" = { if(!is.null(weights)) warnings("weights are not yet used in computing start estimate") lmrob.fit(x = X, y = family$linkinv(Y), control=lmrob.control())$coefficients }, stop("invalid 'start' string")) } fit <- switch(method, "cubif" = stop("For method 'cubif', use glmRob() from package 'robust'") , "Mqle" = ## --> ./glmrobMqle.R glmrobMqle(X = X, y = Y, weights = weights, start = start, offset = offset, family = family, weights.on.x = weights.on.x, control = control, intercept = attr(mt, "intercept") > 0, trace=trace.lev), "BY" =, "WBY" = { if(fami != "binomial") stop(gettextf( "method='%s' is only applicable for binomial family, but family=\"\"", method, fami), domain=NA) ### FIXME: use glmrobBY(..) 
with these arguments, including 'weights' glmrobBY(X=X, y=Y, weights=weights, start=start, method=method, ## == "BY" / "WBY" weights.on.x = weights.on.x, control = control, intercept = attr(mt, "intercept") > 0, trace.lev=trace.lev) }, "MT" = { glmrobMT(x=X,y=Y, weights=weights, start=start, offset = offset, family=family, weights.on.x=weights.on.x, control=control, intercept = attr(mt, "intercept") > 0, trace.lev=trace.lev) }, stop("invalid 'method': ", method)) ##- if (any(offset) && attr(mt, "intercept") > 0) { ##- fit$null.deviance <- glm.fit(x = X[, "(Intercept)", drop = FALSE], ##- y = Y, weights = weights, offset = offset, ##- control = control, intercept = TRUE)$deviance ##- } fit$na.action <- attr(mf, "na.action") if (model) fit$model <- mf if (x) fit$x <- X if (!y) ## fit$y <- NULL warning("setting 'y = FALSE' has no longer any effect") fit <- c(fit, list(call = call, formula = formula, terms = mt, data = data, offset = offset, control = control, method = method, prior.weights = if(is.null(weights)) rep.int(1, nrow(X)) else weights, contrasts = attr(X, "contrasts"), xlevels = .getXlevels(mt, mf))) class(fit) <- c("glmrob", "glm") fit } summary.glmrob <- function(object, correlation=FALSE, symbolic.cor=FALSE, ...) { dispersion <- object$dispersion if(is.null(dispersion)) dispersion <- 1 coefs <- object$coefficients aliased <- is.na(coefs)# needs care; also used in print method if(any(aliased)) coefs <- coefs[!aliased] covmat <- object$cov s.err <- sqrt(diag(covmat)) zvalue <- coefs/s.err pvalue <- 2 * pnorm(-abs(zvalue)) coef.table <- cbind("Estimate" = coefs, "Std. Error" = s.err, "z value" = zvalue, "Pr(>|z|)" = pvalue) ans <- c(object[c("call", "terms", "family", "iter", "control", "method", "residuals", "fitted.values", "w.r", "w.x")], ## MM: should rather keep more from 'object' ? ## currently, cannot even print the asympt.efficiency! list(deviance=NULL, df.residual=NULL, null.deviance=NULL, df.null= NULL, df= NULL, ## (because of 0 weights; hmm,...) aliased = aliased, coefficients = coef.table, dispersion = dispersion, cov.scaled = covmat)) if (correlation) { ans$correlation <- cov2cor(covmat) ans$symbolic.cor <- symbolic.cor } structure(ans, class = "summary.glmrob") } ## almost a copy of vcov.glm() [if that didn't have summmary.glm() explicitly] vcov.glmrob <- function (object, ...) { so <- summary(object, corr = FALSE, ...) ## so$dispersion * so$cov.unscaled ## changed from cov.unscaled to cov.scaled so$cov.scaled } print.glmrob <- function (x, digits = max(3, getOption("digits") - 3), ...) { cat("\nCall: ", deparse(x$call), "\n\n") if (length(coef(x))) { cat("Coefficients") if (is.character(co <- x$contrasts)) cat(" [contrasts: ", apply(cbind(names(co), co), 1, paste, collapse = "="), "]") cat(":\n") print.default(format(x$coefficients, digits = digits), print.gap = 2, quote = FALSE) } else cat("No coefficients\n\n") cat("\nNumber of observations:", length(x$residuals), "\nFitted by method ", sQuote(x$method), "\n") invisible(x) } print.summary.glmrob <- function (x, digits = max(3, getOption("digits") - 3), symbolic.cor = x$symbolic.cor, signif.stars = getOption("show.signif.stars"), ...) { cat("\nCall: ", deparse(x$call), "\n\n") if (length(cf <- coef(x))) { if(nsingular <- sum(x$aliased)) # glm has df[3] - df[1] cat("\nCoefficients: (", nsingular, " not defined because of singularities)\n", sep = "") else cat("\nCoefficients:\n") printCoefmat(cf, digits = digits, signif.stars = signif.stars, na.print = "NA", ...) 
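    ## the robustness weights summarized next are the per-observation products
    ## w.r * w.x, i.e., residual-based weights times weights on the design space: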
summarizeRobWeights(x$w.r * x$w.x, digits = digits, header = "Robustness weights w.r * w.x:", ...) } else cat("No coefficients\n\n") n <- length(x$residuals) cat("\nNumber of observations:", n, "\nFitted by method", sQuote(x$method)," (in", x$iter, "iterations)\n") cat("\n(Dispersion parameter for ", x$family$family, " family taken to be ", format(x$dispersion), ")\n\n",sep = "") if(any(!is.null(unlist(x[c("null.deviance", "deviance")])))) cat(apply(cbind(paste(format(c("Null", "Residual"), justify="right"), "deviance:"), format(unlist(x[c("null.deviance", "deviance")]), digits=max(5, digits + 1)), " on", format(unlist(x[c("df.null", "df.residual")])), " degrees of freedom\n"), 1L, paste, collapse=" "), "\n", sep = "") else cat("No deviance values available \n") correl <- x$correlation if (!is.null(correl)) { p <- NCOL(correl) if (p > 1) { cat("\nCorrelation of Coefficients:\n") if (isTRUE(symbolic.cor)) { print(symnum(correl, abbr.colnames=NULL)) } else { correl <- format(round(correl, 2), nsmall=2, digits=digits) correl[!lower.tri(correl)] <- "" print(correl[-1, -p, drop=FALSE], quote=FALSE) } } } printControl(x$control, digits = digits) cat("\n") invisible(x) } weights.glmrob <- function(object, type = c("prior", "robustness"), ...) { type <- match.arg(type) w <- if (type == "prior") { ## Issue warning only if called from toplevel. Otherwise the warning pop ## up at quite unexpected places, e.g., case.names(). if (is.null(object[["weights"]]) && identical(parent.frame(), .GlobalEnv)) warning("No weights defined for this object. Use type=\"robustness\" argument to get robustness weights.") object[["weights"]] } else object$w.r * object$w.x ## those also used summarizeRobWeights(x$w.r * x$w.x, ..) if (is.null(object$na.action)) w else naresid(object$na.action, w) } ## Stems from a copy of residuals.glm() in ## ~/R/D/r-devel/R/src/library/stats/R/glm.R residuals.glmrob <- function(object, type = c("deviance", "pearson", "working", "response", "partial"), ...) { type <- match.arg(type) y <- object$y r <- object$residuals mu <- object$fitted.values wts <- object$prior.weights # ok p <- length(object$coefficients) switch(type, deviance=, pearson=, response= if(is.null(y)) { mu.eta <- object$family$mu.eta eta <- object$linear.predictors ## we cannot use 'r <- ...$residuals' __ FIXME __ stop("need non-robust working residuals for this model type") y <- mu + r * mu.eta(eta) }) res <- switch(type, ## deviance = if(object$df.residual > 0) { deviance = if((nobs(object) - p) > 0) { d.res <- sqrt(pmax.int((object$family$dev.resids)(y, mu, wts), 0)) ifelse(y > mu, d.res, -d.res) } else rep.int(0, length(mu)), pearson = (y-mu)*sqrt(wts)/sqrt(object$family$variance(mu)), working = r, response = y - mu, partial = r ) if(!is.null(object$na.action)) res <- naresid(object$na.action, res) if (type == "partial") ## need to avoid doing naresid() twice. res <- res+predict(object, type="terms") res } robustbase/R/tolEllipse.R0000644000176200001440000001100112440116711015051 0ustar liggesusers#### This is from the R package #### #### rrcov : Scalable Robust Estimators with High Breakdown Point #### #### by Valentin Todorov ### This program is free software; you can redistribute it and/or modify ### it under the terms of the GNU General Public License as published by ### the Free Software Foundation; either version 2 of the License, or ### (at your option) any later version. 
### ### This program is distributed in the hope that it will be useful, ### but WITHOUT ANY WARRANTY; without even the implied warranty of ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ### GNU General Public License for more details. ### ### You should have received a copy of the GNU General Public License ### along with this program; if not, a copy is available at ### http://www.r-project.org/Licenses/ ## I would like to thank Peter Filzmoser for providing the initial code of ## this function. tolEllipsePlot <- function(x, m.cov = covMcd(x), cutoff = NULL, id.n = NULL, classic = FALSE, tol = 1e-07, xlab = "", ylab = "", main = "Tolerance ellipse (97.5%)", txt.leg = c("robust", "classical"), col.leg = c("red", "blue"), lty.leg = c("solid","dashed")) { ##@bdescr ## Tolerance Ellipse Plot: ## Plots the 97.5% tolerance ellipse of the bivariate data set (x). ## The ellipse is defined by those data points whose distance (dist) ## is equal to the squareroot of the 97.5% chisquare quantile with ## 2 degrees of freedom. ##@edescr ## ##@in x : [matrix] A data.frame or matrix, n > 2*p ##@in m.cov : [mcd object] An object of type mcd - its attributes ## center and cov will be used ##@in cutoff : [number] Distance needed to flag data points outside the ellipse ##@in outflag : [logical] Whether to print the labels of the outliers ##@in tol : [number] tolerance to be used for computing the inverse see 'solve'. ## defaults to 1e-7 ## MM: This is nothing else but a version cluster::ellipsoidPoints() !! -- FIXME ellips <- function(loc, cov) { ## calculates a 97,5% ellipsoid ## input: data set, location and covariance estimate, cutoff dist <- sqrt(qchisq(0.975, 2)) A <- solve(cov) eA <- eigen(A) ev <- eA$values lambda1 <- max(ev) lambda2 <- min(ev) eigvect <- eA$vectors[, order(ev)[2]] z <- seq(0, 2 * pi, by = 0.01) z1 <- dist/sqrt(lambda1) * cos(z) z2 <- dist/sqrt(lambda2) * sin(z) alfa <- atan(eigvect[2]/eigvect[1]) r <- matrix(c(cos(alfa), - sin(alfa), sin(alfa), cos(alfa)), ncol = 2) t(loc + t(cbind(z1, z2) %*% r)) # xmin <- min(x, z[, 1]) } ## parameters and preconditions if(is.data.frame(x)) x <- data.matrix(x) if(!is.matrix(x) || !is.numeric(x)) stop("x is not a numeric dataframe or matrix.") n <- dim(x)[1] p <- dim(x)[2] if(p != 2) stop("Dimension {= ncol(x)} must be 2!") if(!is.numeric(m.cov$center) || !is.numeric(m.cov$cov)) stop("argument 'm.cov' must have numeric components 'center' and 'cov'") x.loc <- m.cov$center x.cov <- n/(n - 1) * m.cov$cov xM <- colMeans(x) z1 <- ellips(loc = xM, cov = n/(n - 1) * cov.wt(x)$cov) z2 <- ellips(loc = x.loc, cov = x.cov) x1 <- c(min(x[, 1], z1[, 1], z2[, 1]), max(x[,1],z1[,1], z2[,1])) y1 <- c(min(x[, 2], z1[, 2], z2[, 2]), max(x[,2],z1[,2], z2[,2])) md <- sqrt(mahalanobis(x, xM, cov(x), tol=tol)) rd <- sqrt(mahalanobis(x,m.cov$center, m.cov$cov, tol=tol)) ## Note: the *calling* function may pass a 'missing' value if(missing(cutoff) || is.null(cutoff)) cutoff <- sqrt(qchisq(0.975, df = 2)) if(missing(id.n) || is.null(id.n)) id.n <- sum(rd > cutoff) ### (2,1) is wrong for 'classic' -- we *overplot*: ## if(classic) ## opr <- if(prod(par("mfrow"))== 1) par(mfrow=c(1,2), pty="m") else list() ## MM: this is *NOT* good : ## else par(mfrow = c(1, 1)) ## 1. 
Robust tolerance ## define the plot, plot a box, plot the "good" points, ## plot the outliers either as points or as numbers depending on outflag, ## plot the ellipse, write a title of the plot plot(x, xlim = x1, ylim = y1, xlab = xlab, ylab = ylab, main = main) box() xrange <- par("usr") xrange <- xrange[2] - xrange[1] if(id.n >= 1) { ind <- sort(rd, index.return=TRUE)$ix[(n-id.n+1):n] text(x[ind, 1] + xrange/50, x[ind, 2], ind) } points(z2, type = "l", lty=lty.leg[1], col=col.leg[1]) ## 2. Classical tolerance if(classic){ points(z1, type = "l", lty=lty.leg[2], col=col.leg[2]) legend("bottomright", txt.leg, lty = lty.leg, col = col.leg) ## par(opr) } invisible() } robustbase/R/ltsReg.R0000644000176200001440000007101313325654420014215 0ustar liggesusers#### This is originally from the R package #### #### rrcov : Scalable Robust Estimators with High Breakdown Point #### #### by Valentin Todorov ## I would like to thank Peter Rousseeuw and Katrien van Driessen for ## providing the initial code of this function. ### This program is free software; you can redistribute it and/or modify ### it under the terms of the GNU General Public License as published by ### the Free Software Foundation; either version 2 of the License, or ### (at your option) any later version. ### ### This program is distributed in the hope that it will be useful, ### but WITHOUT ANY WARRANTY; without even the implied warranty of ### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ### GNU General Public License for more details. ### ### You should have received a copy of the GNU General Public License ### along with this program; if not, a copy is available at ### http://www.r-project.org/Licenses/ ltsReg <- function(x, ...) UseMethod("ltsReg") ltsReg.formula <- function(formula, data, subset, weights, na.action, model = TRUE, x.ret = FALSE, y.ret = FALSE, contrasts = NULL, offset, ...) { cl <- match.call() ## method <- match.arg(method) ## keep only the arguments which should go into the model frame mf <- match.call(expand.dots = FALSE) m <- match(c("formula", "data", "subset", "weights", "na.action", "offset"), names(mf), 0) mf <- mf[c(1, m)] mf$drop.unused.levels <- TRUE mf[[1]] <- as.name("model.frame") mf <- eval.parent(mf) ## if (method == "model.frame") return(mf) mt <- attr(mf, "terms") y <- model.response(mf, "numeric") ## was model.extract(mf, "response") if (is.empty.model(mt)) { # "y ~ 0" : no coefficients x <- offset <- NULL fit <- list(method = "ltsReg for empty model", coefficients = numeric(0), residuals = y, fitted.values = 0 * y, lts.wt = 1 + 0 * y, rank = 0, intercept = FALSE, df.residual = length(y)) ## alpha = alpha from "..." class(fit) <- "lts" } else { w <- model.weights(mf) offset <- model.offset(mf) x <- model.matrix(mt, mf, contrasts) ## Check if there is an intercept in the model. ## A formula without intercept looks like this: Y ~ . -1 ## If so, remove the corresponding column and use intercept=TRUE ## in the call to ltsReg.default(); by default, intercept=FALSE. xint <- match("(Intercept)", colnames(x), nomatch = 0) if(xint) x <- x[, -xint, drop = FALSE] fit <- ltsReg.default(x, y, intercept = (xint > 0), ...) } ## 3) return the na.action info fit$na.action <- attr(mf, "na.action") fit$offset <- offset ## 4) return the contrasts used in fitting: possibly as saved earlier. fit$contrasts <- attr(x, "contrasts") fit$xlevels <- .getXlevels(mt, mf) fit$call <- cl fit$terms <- mt if(model) fit$model <- mf if(x.ret) fit$x <- x # or? 
if(xint == 0) x else x[, c(2:p,1), drop=FALSE] if(y.ret) fit$y <- y fit } ltsReg.default <- function (x, y, intercept = TRUE, alpha = control$ alpha, nsamp = control$ nsamp, adjust = control$ adjust, mcd = TRUE, qr.out = FALSE, yname = NULL, seed = control$ seed, trace = control$ trace, use.correction = control$ use.correction, wgtFUN = control$ wgtFUN, control = rrcov.control(), ...) { ## Analyze and validate the input parameters ... ## if a control object was supplied, take the option parameters from it, ## but if single parameters were passed (not defaults) they will override the ## control object. ### MM: FIXME: this sucks ('control' may contain *some* but not all parts!): if(!missing(control)) { defCtrl <- rrcov.control() # default control if(is.null(alpha) && control$alpha != defCtrl$alpha) alpha <- control$alpha if(nsamp == defCtrl$nsamp) nsamp <- control$nsamp if(identical(seed, defCtrl$seed)) seed <- control$seed if(use.correction == defCtrl$use.correction) use.correction <- control$use.correction if(adjust == defCtrl$adjust) adjust <- control$adjust } else defCtrl <- control ## == rrcov.control() ## For back compatibility, as some new args did not exist pre 2013-04, ## and callers of covMcd() may use a "too small" 'control' list: if(missing(wgtFUN)) getDefCtrl("wgtFUN", defCtrl) if(length(seed) > 0) { if(length(seed) < 3 || seed[1L] < 100) stop("invalid 'seed'. Must be compatible with .Random.seed !") if(exists(".Random.seed", envir=.GlobalEnv, inherits=FALSE)) { seed.keep <- get(".Random.seed", envir=.GlobalEnv, inherits=FALSE) on.exit(assign(".Random.seed", seed.keep, envir=.GlobalEnv)) } assign(".Random.seed", seed, envir=.GlobalEnv) } if(alpha < 1/2 || alpha > 1) stop("alpha not inside [1/2, 1]") ## FIXME: change this analogously to covMcd()'s and covComedian()'s ## quantiel <- qnorm(0.9875) if(is.character(wgtFUN)) { switch(wgtFUN, "01.original" = { cW <- qnorm(0.9875) wgtFUN <- function(r) as.numeric(abs(r) <= cW) }, stop("unknown 'wgtFUN' specification: ", wgtFUN)) } else if(!is.function(wgtFUN)) stop("'wgtFUN' must be a function or a string specifying one") ## vt::03.02.2006 - raw.cnp2 and cnp2 are vectors of size 2 and will ## contain the correction factors (concistency and finite sample) ## for the raw and reweighted estimates respectively. Set them initially to 1. 
## If use.correction is set to FALSE (default=TRUE), the finite sample correction ## factor will not be used (neither for the raw estimates nor for the reweighted) raw.cnp2 <- rep(1,2) cnp2 <- rep(1,2) ##cat("++++++ Entering ltsReg() ...\n") y <- data.matrix(y) if (!is.numeric(y)) stop("y is not a numeric") if (dim(y)[2] != 1) stop("y is not onedimensional") oneD <- (missing(x) || is.null(x) || NCOL(x) == 0) ## location model - no x if(oneD) { x <- matrix(1, nrow(y), 1) } else { ## x is present if(is.data.frame(x)) x <- data.matrix(x) else if (!is.matrix(x)) x <- matrix(x, length(x), 1, dimnames = list(names(x), deparse(substitute(x)))) } if (nrow(x) != nrow(y)) stop("Number of observations in x and y not equal") na.x <- !is.finite(rowSums(x)) na.y <- !is.finite(y) ok <- !(na.x | na.y) x <- x[ok, , drop = FALSE] y <- y[ok, , drop = FALSE] dx <- dim(x) n <- dx[1] if (n == 0) stop("All observations have missing values!") dimny <- dimnames(y) rownames <- dimny[[1]] yn <- if(!is.null(yname)) yname else if(!is.null(dimny[[2]])) dimny[[2]] has.yn <- !is.null(yn) if(!has.yn) yn <- "Y" storage.mode(y) <- "double" storage.mode(x) <- "double" if (!oneD) { is.const <- function(x) { c1 <- range(x) c1[1] == c1[2] } if (any(apply(x, 2, is.const))) stop("There is at least one constant column. Remove it and set intercept=TRUE") } ##cat("++++++ Prepare: Ready.\n") xn <- (dnx <- dimnames(x))[[2]] xn <- if(!is.null(xn)) xn else if (dx[2] > 1) paste("X", 1:dx[2], sep = "") else if (dx[2]) "X" ## else : p = 0 dimnames(x) <- list(dnx[[1]], xn) # also works if(is.null(dnx)) y <- as.vector(y) if(all(x == 1)) { ## includes 'oneD' and empty x (p = 0) if(qr.out) { warning("'qr.out = TRUE' for univariate location is disregarded") qr.out <- FALSE } h <- h.alpha.n(alpha, n, dx[2]) p <- 1 if (alpha == 1) { scale <- sqrt(drop(cov.wt(as.matrix(y))$cov)) center <- as.vector(mean(y)) ## xbest <- NULL } else { sh <- .fastmcd(as.matrix(y), as.integer(h), nsamp = 0, # (y *is* 1-dim.!) 
nmini = 300, kmini = 5) center <- as.double(sh$initmean) qalpha <- qchisq(h/n, 1) calphainvers <- pgamma(qalpha/2, 1/2 + 1)/(h/n) raw.cnp2[1] <- calpha <- 1/calphainvers raw.cnp2[2] <- correct <- LTScnp2(1, intercept = intercept, n, alpha) if(!use.correction) # do not use finite sample correction factor raw.cnp2[2] <- correct <- 1.0 scale <- sqrt(as.double(sh$initcovariance)) * sqrt(calpha) * correct ## xbest <- sort(as.vector(sh$best)) # fastmcd in the univariate case does not return inbest[] } resid <- y - center ans <- list(method = "Univariate location and scale estimation.", best = NULL, # xbest, coefficients = center, alpha = alpha, quan = h, raw.coefficients = center, raw.resid = resid/scale, raw.weights = rep.int(NA, length(na.y))) if(abs(scale) < 1e-07) { ans$raw.weights[ok] <- weights <- as.numeric(abs(resid) < 1e-07) ans$scale <- ans$raw.scale <- 0 ans$crit <- 0 ans$method <- paste(ans$method, "More than half of the data are equal!",sep="\n") } else { ans$raw.scale <- scale ans$raw.weights[ok] <- weights <- wgtFUN(resid/scale) sum.w <- sum(weights) reweighting <- cov.wt(as.matrix(y), wt = weights) ans$coefficients <- reweighting$center ans$scale <- sqrt(sum.w/(sum.w - 1) * drop(reweighting$cov)) resid <- y - ans$coefficients ans$crit <- sum(sort((y - center)^2, partial = h)[1:h]) if (sum.w != n) { qdelta.rew <- qchisq(sum.w/n, 1) cdeltainvers.rew <- pgamma(qdelta.rew/2, 1/2 + 1)/(sum.w/n) cdelta.rew <- sqrt(1/cdeltainvers.rew) correct.rew <- if(use.correction) LTScnp2.rew(1, intercept = intercept, n, alpha) else 1 cnp2 <- c(cdelta.rew, correct.rew) ans$scale <- ans$scale * cdelta.rew * correct.rew } weights <- wgtFUN(resid/ans$scale) } fitted <- ans$coefficients ans$resid <- resid/ans$scale ans$rsquared <- 0 ans$intercept <- intercept if(has.yn) names(ans$coefficients) <- names(ans$raw.coefficients) <- yn } ## end {all(x == 1)} -- else { ## ------------------ usual non-trivial case --------------------- if(mcd) ## need 'old x' later X <- x if (intercept) { ## intercept must be *last* (<- fortran code) {"uahh!"} x <- cbind(x, "Intercept" = 1) dx <- dim(x) xn <- colnames(x) } p <- dx[2] if (n <= 2 * p) stop("Need more than twice as many observations as variables.") ## VT:: 26.12.2004 ## Reorder the coefficients so that the intercept is at the beginning .. getCoef <- ## simple wrapper (because of above "intercept must be") if(p > 1 && intercept) function(cf) cf[c(p, 1:(p - 1))] else function(cf) cf ans <- list(alpha = alpha, raw.weights = rep.int(NA, length(na.y))) if(alpha == 1) { ## alpha == 1 ----------------------- ## old, suboptimal: z <- lsfit(x, y, intercept = FALSE) z <- lm.fit(x, y) qrx <- z$qr cf <- z$coef names(cf) <- xn ans$raw.coefficients <- getCoef(cf) resid <- z$residuals ans$quan <- h <- n s0 <- sqrt((1/(n - p)) * sum(resid^2)) ##cat("++++++ B - alpha == 1... 
- s0=",s0,"\n") if(abs(s0) < 1e-07) { fitted <- x %*% z$coef ans$raw.weights[ok] <- weights <- as.numeric(abs(resid) <= 1e-07) ans$scale <- ans$raw.scale <- 0 ans$coefficients <- ans$raw.coefficients } else { ans$raw.scale <- s0 ans$raw.resid <- resid / s0 ans$raw.weights[ok] <- weights <- wgtFUN(ans$raw.resid) sum.w <- sum(weights) ## old, suboptimal: z <- lsfit(x, y, wt = weights, intercept = FALSE) z <- lm.wfit(x, y, w = weights) ans$coefficients <- getCoef(z$coef) fitted <- x %*% z$coef ans$scale <- sqrt(sum(weights * resid^2)/(sum.w - 1)) if (sum.w != n) { qn.w <- qnorm((sum.w + n)/(2 * n)) cdelta.rew <- 1/sqrt(1 - (2 * n)/(sum.w/qn.w) * dnorm(qn.w)) ans$scale <- ans$scale * cdelta.rew } ans$resid <- resid/ans$scale weights <- wgtFUN(ans$resid) } names(ans$coefficients) <- getCoef(xn) s1 <- sum(resid^2) ans$crit <- s1 sh <- (if (intercept) y - mean(y) else y) ^ 2 ans$rsquared <- max(0, min(1, 1 - (s1/sh))) ans$method <- "Least Squares Regression." } ## end {alpha == 1} : "classical" else { ## alpha < 1 ----------------------------------------------- coefs <- rep(NA, p) names(coefs) <- xn qrx <- if(qr.out) qr(x) else qr(x)[c("rank", "pivot")] rk <- qrx$rank if (rk < p) stop("x is singular") ## else : h <- h.alpha.n(alpha, n, rk) z <- .fastlts(x, y, h, nsamp, intercept, adjust, trace=as.integer(trace)) if(z$objfct < 0) stop("no valid subsample found in LTS - set 'nsamp' or rather use lmrob.S()") ## vt:: lm.fit.qr == lm.fit(...,method=qr,...) cf <- lm.fit(x[z$inbest, , drop = FALSE], y[z$inbest])$coef if(any(ic <- is.na(cf))) stop(gettextf("NA coefficient (at %s) from \"best\" subset", paste(which(ic), collapse =","))) ans$best <- sort(z$inbest) fitted <- x %*% cf resid <- y - fitted piv <- 1:p coefs[piv] <- cf ## FIXME? why construct 'coefs' so complicatedly? use 'cf' ! ans$raw.coefficients <- getCoef(coefs) ans$quan <- h correct <- if(use.correction) LTScnp2(p, intercept = intercept, n, alpha) else 1 raw.cnp2[2] <- correct s0 <- sqrt(mean(sort(resid^2, partial = h)[1:h])) sh0 <- s0 qn.q <- qnorm((h + n)/ (2 * n)) s0 <- s0 / sqrt(1 - (2 * n)/(h / qn.q) * dnorm(qn.q)) * correct if (abs(s0) < 1e-07) { ans$raw.weights[ok] <- weights <- as.numeric(abs(resid) <= 1e-07) ans$scale <- ans$raw.scale <- 0 ans$coefficients <- ans$raw.coefficients } else { ans$raw.scale <- s0 ans$raw.resid <- resid/ans$raw.scale ans$raw.weights[ok] <- weights <- wgtFUN(resid/s0) sum.w <- sum(weights) ## old, suboptimal: z1 <- lsfit(x, y, wt = weights, intercept = FALSE) z1 <- lm.wfit(x, y, w = weights) ans$coefficients <- getCoef(z1$coef) fitted <- x %*% z1$coef resid <- z1$residuals ans$scale <- sqrt(sum(weights * resid^2)/(sum.w - 1)) if (sum.w == n) { cdelta.rew <- 1 correct.rew <- 1 } else { qn.w <- qnorm((sum.w + n)/(2 * n)) cnp2[1] <- cdelta.rew <- 1 / sqrt(1 - (2 * n)/(sum.w / qn.w) * dnorm(qn.w)) correct.rew <- if (use.correction) ## use finite sample correction LTScnp2.rew(p, intercept = intercept, n, alpha) else 1 cnp2[2] <- correct.rew ans$scale <- ans$scale * cdelta.rew * correct.rew } ans$resid <- resid/ans$scale weights <- wgtFUN(ans$resid) } ## unneeded: names(ans$coefficients) <- names(ans$raw.coefficients) ans$crit <- z$objfct if (intercept) { sh <- .fastmcd(as.matrix(y), as.integer(h), nsamp = 0, # (y *is* 1-dim.!) nmini = 300, kmini = 5) y <- as.vector(y) ## < ?? 
sh <- as.double(sh$adjustcov) iR2 <- (sh0/sh)^2 } else { s1 <- sum(sort(resid^2, partial = h)[1:h]) sh <- sum(sort(y^2, partial = h)[1:h]) iR2 <- s1/sh } ans$rsquared <- if(is.finite(iR2)) max(0, min(1, 1 - iR2)) else 0 attributes(resid) <- attributes(fitted) <- attributes(y) ans$method <- "Least Trimmed Squares Robust Regression." } ## end { alpha < 1 } ans$intercept <- intercept if (abs(s0) < 1e-07) ans$method <- paste(ans$method, "\nAn exact fit was found!") if (mcd) { ## compute robust distances {for diagnostics, eg. rdiag()plot} mcd <- covMcd(X, alpha = alpha, use.correction=use.correction) if ( -determinant(mcd$cov, logarithm = TRUE)$modulus > 50 * p) { ans$RD <- "singularity" } else { ans$RD <- rep.int(NA, length(na.y)) ans$RD[ok] <- sqrt(mahalanobis(X, mcd$center, mcd$cov)) names(ans$RD) <- rownames } } } ## end { nontrivial 'x' } ans$lts.wt <- rep.int(NA, length(na.y)) ans$lts.wt[ok] <- weights ans$residuals <- rep.int(NA, length(na.y)) ans$residuals[ok] <- resid ans$fitted.values <- rep.int(NA, length(na.y)) ans$fitted.values[ok] <- fitted names(ans$fitted.values) <- names(ans$residuals) <- names(ans$lts.wt) <- rownames if(has.yn) { ## non-sense otherwise: names(ans$scale) <- names(ans$raw.scale) <- yn names(ans$rsquared) <- names(ans$crit) <- yn } ans$Y <- y ans$X <- if(p > 1 && intercept) x[, c(p, 1:(p - 1))] else x dimnames(ans$X) <- list(rownames[ok], names(ans$coefficients)) if (qr.out) ans$qr <- qrx ans$raw.cnp2 <- raw.cnp2 ans$cnp2 <- cnp2 class(ans) <- "lts" ans$call <- match.call() ans } ## {ltsReg.default} summary.lts <- function (object, correlation = FALSE, ...) { z <- object r <- z$residuals f <- z$fitted int <- z$intercept w <- as.vector(z$lts.wt) n <- sum(w) Qr <- qr(w * z$X)# 'w * z$X': more efficient than t(t(object$X) %*% diag(w)) p <- Qr$rank p1 <- seq(length = p) ## even for p = 0 rdf <- n - p mss <- if(int) { m <- sum(w * f /sum(w)) sum(w * (f - m)^2) } else sum(w * f^2) rss <- sum(w * r^2) r <- sqrt(w) * r resvar <- rss/rdf R <- if (p > 0) chol2inv(Qr$qr[p1, p1, drop = FALSE]) else matrix(NA_real_,p,p) ## no need to reorder R anymore, since 'X' already has "intercept first" se <- sqrt(diag(R) * resvar) est <- z$coefficients tval <- est/se ans <- c(z[c("call", "terms")], ## not again attr(ans, "call") <- attr(z,"call") list(residuals = r, coefficients = { cbind("Estimate" = est, "Std. Error" = se, "t value" = tval, "Pr(>|t|)" = 2*pt(abs(tval), rdf, lower.tail = FALSE)) }, sigma = sqrt(resvar), df = c(p, rdf, NCOL(Qr$qr)))) df.int <- if(int) 1 else 0 if(p - df.int > 0) { ans$r.squared <- mss/(mss + rss) ans$adj.r.squared <- 1 - (1 - ans$r.squared) * ((n - df.int)/rdf) ans$fstatistic <- c(value = (mss/(p - df.int))/resvar, numdf = p - df.int, dendf = rdf) } else ans$r.squared <- ans$adj.r.squared <- 0 ans$cov.unscaled <- R dimnames(ans$cov.unscaled) <- dimnames(ans$coefficients)[c(1,1)] if (correlation) { ans$correlation <- (R * resvar)/outer(se, se) dimnames(ans$correlation) <- dimnames(ans$cov.unscaled) } class(ans) <- "summary.lts" ans } print.lts <- function (x, digits = max(3, getOption("digits") - 3), ...) { cat("\nCall:\n", deparse(x$call), "\n\n", sep = "") if (length(coef(x))) { cat("Coefficients:\n") print.default(format(coef(x), digits = digits), print.gap = 2, quote = FALSE) cat("\nScale estimate", format(x$scale, digits = digits) ,"\n\n") } else cat("No coefficients\n") invisible(x) } print.summary.lts <- function(x, digits = max(3, getOption("digits") - 3), signif.stars = getOption("show.signif.stars"), ...) ## signif.stars = FALSE, ...) 
## ^^^^^ (since they are not quite correct ?) { cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n\n", sep = "") resid <- x$residuals df <- x$df rdf <- df[2] cat("Residuals (from reweighted LS):\n") ## "cut & paste" from print.summary.lm(): if(rdf > 5) { nam <- c("Min", "1Q", "Median", "3Q", "Max") rq <- if(length(dim(resid)) == 2) structure(apply(t(resid), 1, quantile), dimnames = list(nam, dimnames(resid)[[2]])) else structure(quantile(resid), names = nam) print(rq, digits = digits, ...) } else if(rdf > 0) { print(resid, digits = digits, ...) } else { # rdf == 0 : perfect fit! cat("ALL", df[1], "residuals are 0: no residual degrees of freedom!\n") } if(NROW(x$coefficients)) { if (nsingular <- df[3] - df[1]) cat("\nCoefficients: (", nsingular, " not defined because of singularities)\n", sep = "") else cat("\nCoefficients:\n") printCoefmat(x$coefficients, digits = digits, signif.stars = signif.stars, ...) } else cat("\nNo coefficients\n") cat("\nResidual standard error:", format(signif(x$sigma, digits)), "on", rdf, "degrees of freedom\n") if(!is.null(x$fstatistic)) { cat("Multiple R-Squared:", formatC(x$r.squared, digits = digits)) cat(",\tAdjusted R-squared:",formatC(x$adj.r.squared,digits = digits), "\nF-statistic:", formatC(x$fstatistic[1], digits = digits), "on", x$fstatistic[2], "and", x$fstatistic[3], "DF, p-value:", format.pval(pf(x$fstatistic[1], x$fstatistic[2], x$fstatistic[3], lower.tail = FALSE), digits = digits), "\n") } correl <- x$correlation if(!is.null(correl)) { p <- NCOL(correl) if(p > 1) { cat("\nCorrelation of Coefficients:\n") correl <- format(round(correl, 2), nsmall = 2, digits = digits) correl[!lower.tri(correl)] <- "" print(correl[-1, -p, drop = FALSE], quote = FALSE) } } cat("\n") invisible(x) } ### --- Namespace hidden (but parsed once and for all) : ------------- ##' Compute Finite Sample Correction Factor for the "raw" LTSreg() scale LTScnp2 <- function(p, intercept = intercept, n, alpha) { stopifnot(0.5 <= alpha, alpha <= 1) if (intercept) p <- p - 1 stopifnot(p == as.integer(p), p >= 0) if (p == 0) { fp.500.n <- 1 - exp( 0.262024211897096) / n^ 0.604756680630497 fp.875.n <- 1 - exp(-0.351584646688712) / n^ 1.01646567502486 if ((0.5 <= alpha) && (alpha <= 0.875)) { fp.alpha.n <- fp.500.n + (fp.875.n - fp.500.n)/0.375 * (alpha - 0.5) fp.alpha.n <- sqrt(fp.alpha.n) } if ((0.875 < alpha) && (alpha < 1)) { fp.alpha.n <- fp.875.n + (1 - fp.875.n)/0.125 * (alpha - 0.875) fp.alpha.n <- sqrt(fp.alpha.n) } } else { ## p >= 1 if (p == 1) { if (intercept) { fp.500.n <- 1 - exp( 0.630869217886906 ) / n^ 0.650789250442946 fp.875.n <- 1 - exp( 0.565065391014791 ) / n^ 1.03044199012509 } else { fp.500.n <- 1 - exp(-0.0181777452315321) / n^ 0.697629772271099 fp.875.n <- 1 - exp(-0.310122738776431 ) / n^ 1.06241615923172 } } else { ## --- p > 1 --- if (intercept) { ## "alfaq" "betaq" "qwaarden" coefgqpkwad875 <- matrix(c(-0.458580153984614, 1.12236071104403, 3, -0.267178168108996, 1.1022478781154, 5), ncol = 2) coefeqpkwad500 <- matrix(c(-0.746945886714663, 0.56264937192689, 3, -0.535478048924724, 0.543323462033445, 5), ncol = 2) } else { ## "alfaq" "betaq" "qwaarden" coefgqpkwad875 <- matrix(c(-0.251778730491252, 0.883966931611758, 3, -0.146660023184295, 0.86292940340761, 5), ncol = 2) coefeqpkwad500 <- matrix(c(-0.487338281979106, 0.405511279418594, 3, -0.340762058011, 0.37972360544988, 5), ncol = 2) } y.500 <- log(- coefeqpkwad500[1, ] / p^ coefeqpkwad500[2, ]) y.875 <- log(- coefgqpkwad875[1, ] / p^ coefgqpkwad875[2, ]) A.500 <- cbind(1, - 
log(coefeqpkwad500[3, ] * p^2)) coeffic.500 <- solve(A.500, y.500) A.875 <- cbind(1, - log(coefgqpkwad875[3, ] * p^2)) coeffic.875 <- solve(A.875, y.875) fp.500.n <- 1 - exp(coeffic.500[1]) / n^ coeffic.500[2] fp.875.n <- 1 - exp(coeffic.875[1]) / n^ coeffic.875[2] } if(alpha <= 0.875) fp.alpha.n <- fp.500.n + (fp.875.n - fp.500.n)/0.375 * (alpha - 0.5) else ## 0.875 < alpha <= 1 fp.alpha.n <- fp.875.n + (1 - fp.875.n)/0.125 * (alpha - 0.875) }## else (p >= 1) return(1/fp.alpha.n) } ## LTScnp2 ##' Compute Finite Sample Correction Factor for the REWeighted LTSreg() scale LTScnp2.rew <- function(p, intercept = intercept, n, alpha) { stopifnot(0.5 <= alpha, alpha <= 1) if (intercept) p <- p - 1 stopifnot(p == as.integer(p), p >= 0) if (p == 0) { fp.500.n <- 1 - exp( 1.11098143415027) / n^ 1.5182890270453 fp.875.n <- 1 - exp(-0.66046776772861) / n^ 0.88939595831888 if(alpha <= 0.875) fp.alpha.n <- fp.500.n + (fp.875.n - fp.500.n)/0.375 * (alpha - 0.5) else ## 0.875 < alpha <= 1 fp.alpha.n <- fp.875.n + (1 - fp.875.n)/0.125 * (alpha - 0.875) ## MM: sqrt() {below} is ''different logic'' than below.. (??) fp.alpha.n <- sqrt(fp.alpha.n) } else { if (p == 1) { if (intercept) { fp.500.n <- 1 - exp(1.58609654199605 ) / n^ 1.46340162526468 fp.875.n <- 1 - exp(0.391653958727332) / n^ 1.03167487483316 } else { fp.500.n <- 1 - exp( 0.6329852387657) / n^ 1.40361879788014 fp.875.n <- 1 - exp(-0.642240988645469) / n^ 0.926325452943084 } } else { ## --- p > 1 --- if (intercept) { ## "alfaq" "betaq" "qwaarden" coefqpkwad875 <- matrix(c(-0.474174840843602, 1.39681715704956, 3, -0.276640353112907, 1.42543242287677, 5), ncol = 2) coefqpkwad500 <- matrix(c(-0.773365715932083, 2.02013996406346, 3, -0.337571678986723, 2.02037467454833, 5), ncol = 2) } else { ## "alfaq" "betaq" "qwaarden" coefqpkwad875 <- matrix(c(-0.267522855927958, 1.17559984533974, 3, -0.161200683014406, 1.21675019853961, 5), ncol = 2) coefqpkwad500 <- matrix(c(-0.417574780492848, 1.83958876341367, 3, -0.175753709374146, 1.8313809497999, 5), ncol = 2) } y.500 <- log( - coefqpkwad500[1, ] / p^ coefqpkwad500[2, ]) y.875 <- log( - coefqpkwad875[1, ] / p^ coefqpkwad875[2, ]) A.500 <- cbind(1, - log(coefqpkwad500[3, ] * p^2)) coeffic.500 <- solve(A.500, y.500) A.875 <- cbind(1, - log(coefqpkwad875[3, ] * p^2)) coeffic.875 <- solve(A.875, y.875) fp.500.n <- 1 - exp(coeffic.500[1]) / n^ coeffic.500[2] fp.875.n <- 1 - exp(coeffic.875[1]) / n^ coeffic.875[2] } if(alpha <= 0.875) fp.alpha.n <- fp.500.n + (fp.875.n - fp.500.n)/0.375 * (alpha - 0.5) else ## 0.875 < alpha <= 1 fp.alpha.n <- fp.875.n + (1 - fp.875.n)/0.125 * (alpha - 0.875) }## else (p >= 1) return(1/fp.alpha.n) } ## LTScnp2.rew .fastlts <- function(x, y, h.alph, nsamp, intercept, adjust, trace = 0) { dx <- dim(x) n <- dx[1] p <- dx[2] ## Parameters for partitioning --- *IDENTICAL* to those in ../src/rfltsreg.[fc] kmini <- 5 nmini <- 300 km10 <- 10*kmini nmaxi <- nmini*kmini ## vt::03.02.2006 - added options "best" and "exact" for nsamp if(!missing(nsamp)) { if(trace) cat("non-missing nsamp = ", nsamp, "\n") if(is.numeric(nsamp) && nsamp <= 0) { warning("Invalid number of trials nsamp=",nsamp,"! Using default.\n") nsamp <- -1 } else if(nsamp == "exact" || nsamp == "best") { myk <- p if(n > 2*nmini-1) { warning("'nsamp' options 'best' and 'exact' not allowed for n greater than ", 2*nmini-1,". Will use default.\n") nsamp <- -1 } else { ## FIXME: Add a test case for this ! 
nall <- choose(n, myk) if(nall > 5000 && nsamp == "best") { nsamp <- 5000 warning("Maximum 5000 subsets allowed for option 'best'.\n", "Computing 5000 subsets of size ",myk," out of ",n,"\n") } else { nsamp <- 0 #all subsamples if(nall > 5000) cat("Computing all ",nall," subsets of size ", myk, " out of ",n, "\n This may take a very long time!\n") } } } if(nsamp == -1) { ## still not defined - set it to the default nsamp <- rrcov.control()$nsamp } } nsamp <- as.integer(nsamp) ## y <- as.matrix(y) ## xy <- matrix(0, ncol = p + 1, nrow = n) xy <- cbind(x, y) storage.mode(xy) <- "double" # {keeping dim(.)} storage.mode(n) <- "integer" storage.mode(p) <- "integer" ; p1 <- p+1L # integer storage.mode(h.alph) <- "integer" ## Allocate temporary storage for the fortran implementation temp <- index1 <- index2 <- integer(n) weights <- aw2 <- aw <- residu <- yy <- nmahad <- ndist <- am <- am2 <- slutn <- double(n) .Fortran(rfltsreg, ## -> ../src/rfltsreg.f xy = xy, n, p, h.alph, # = nhalff nsamp, # = krep inbest = integer(h.alph), objfct = -1.,# double, if remains at -1 : have *nothing* found intercept = as.integer(intercept), intadjust = as.integer(adjust), nvad = as.integer(p1), datt = matrix(0., ncol = p1, nrow = n), weights, temp, index1, index2, aw2, aw, residu, yy, nmahad, ndist, am, am2, slutn, jmiss = integer(p1), ## integer jmiss(nvad) --> p+1 xmed = double(p1), ## double xmed(nvad) --> p+1 xmad = double(p1), ## double xmad(nvad) a = double(p1), ## double a(nvad) da = double(p1), ## double da(nvad) h = matrix(0., p, p1), ## double h(nvar,nvad) p*(p+1) hvec = double(p*(p1)), ## double hvec(nvar*nvad) p*(p+1) c = matrix(0., p, p1), ## double c(nvar,nvad) p*(p+1) cstock = matrix(0., 10, p*p),## double cstock(10,nvar*nvar) 10*p*p mstock = matrix(0., 10, p), ## double mstock(10,nvar) 10*p c1stock =matrix(0., km10, p*p),## double c1stock(km10,nvar*nvar) km10*p*p m1stock =matrix(0., km10, p),## double m1stock(km10,nvar) km10*p dath = matrix(0., nmaxi, p1),## double dath(nmaxi,nvad) nmaxi*(p+1) sd = double(p), ## double sd(nvar) p means = double(p), ## double means(nvar) p bmeans= double(p), ## double means(nvar) p i.trace= as.integer(trace))[ c("inbest", "objfct") ] } robustbase/R/lmrob.MM.R0000644000176200001440000015754213537200341014406 0ustar liggesusers## The "regularized" psi-function names: ## .R: the redescending ones: .Mpsi.R.names <- c('bisquare', 'lqq', 'welsh', 'optimal', 'hampel', 'ggw') ## .M: the monotone ones: .Mpsi.M.names <- c('huber') ## Note: there could be more: non-redescending, non-monotone {such as Cauchy score} .Mpsi.names <- c(R= .Mpsi.R.names, M= .Mpsi.M.names) ##' This allows synonyms as "Tukey" *and* partial matches such as "opt" : .regularize.Mpsi <- function(psi, redescending = TRUE) { stopifnot(is.character(psi), length(psi) == 1) psi <- tolower(psi) psi <- switch(psi, 'tukey'= , 'biweight'= "bisquare", ## otherwise keep psi) nms <- if(redescending) .Mpsi.R.names else .Mpsi.names if (is.na(i <- pmatch(psi, nms))) stop(gettextf("'psi' should be one of %s", pasteK(dQuote(nms))), domain = NA) nms[i] } .Mpsi.tuning.defaults <- list( 'huber' = 1.345 , 'bisquare' = 4.685061 , 'welsh' = 2.11 , 'ggw' = c(-0.5, 1.5, .95, NA) ## (min{slope}, b , eff, bp) , 'lqq' = c(-0.5, 1.5, .95, NA) ## (min{slope}, b/c, eff, bp) , 'optimal' = 1.060158 , 'hampel' = c(1.5, 3.5, 8) * 0.9016085 ## a, b, r ) .Mpsi.tuning.default <- function(psi) { if(is.null(p <- .Mpsi.tuning.defaults[[psi]])) stop(gettextf("invalid 'psi'=%s; possibly use .regularize.Mpsi(%s)", psi, "psi, redescending=FALSE"), 
domain=NA) p } .Mchi.tuning.defaults <- list( ## Here, psi must be redescending! -> 'huber' not possible 'bisquare' = 1.54764 , 'welsh' = 0.5773502 , 'ggw' = c(-0.5, 1.5, NA, .50) ## (min{slope}, b , eff, bp) , 'lqq' = c(-0.5, 1.5, NA, .50) ## (min{slope}, b/c, eff, bp) , 'optimal' = 0.4047 , 'hampel' = c(1.5, 3.5, 8) * 0.2119163 ## a, b, r ) .Mchi.tuning.default <- function(psi) { if(is.null(p <- .Mchi.tuning.defaults[[psi]])) stop(gettextf("invalid 'psi'=%s; possibly use .regularize.Mpsi(%s)", psi, "psi"), domain=NA) p } lmrob.control <- function(setting, seed = NULL, nResample = 500, tuning.chi = NULL, bb = 0.5, tuning.psi = NULL, max.it = 50, groups = 5, n.group = 400, k.fast.s = 1, best.r.s = 2, k.max = 200, maxit.scale = 200, k.m_s = 20, ## ^^^^^^^^^^^ had MAX_ITER_FIND_SCALE 200 in ../src/lmrob.c refine.tol = 1e-7, rel.tol = 1e-7, scale.tol = 1e-10, # new, was hardcoded to EPS_SCALE = 1e-10 in C code solve.tol = 1e-7, ## had ^^^^^^^^ TOL_INVERSE 1e-7 in ../src/lmrob.c trace.lev = 0, mts = 1000, subsampling = c("nonsingular", "simple"), compute.rd = FALSE, method = 'MM', psi = 'bisquare', numpoints = 10, cov = NULL, split.type = c("f", "fi", "fii"), fast.s.large.n = 2000, eps.outlier = function(nobs) 0.1 / nobs, eps.x = function(maxx) .Machine$double.eps^(.75)*maxx, compute.outlier.stats = method, warn.limit.reject = 0.5, warn.limit.meanrw = 0.5, ...) { p.ok <- missing(psi) # if(p.ok) psi does not need regularization if (!missing(setting)) { if (setting %in% c('KS2011', 'KS2014')) { if (missing(method)) method <- 'SMDM' psi <- if(p.ok) 'lqq' else .regularize.Mpsi(psi) ; p.ok <- TRUE if (missing(max.it)) max.it <- 500 if (missing(k.max)) k.max <- 2000 if (missing(cov) || is.null(cov)) cov <- '.vcov.w' if (setting == 'KS2014') { if (missing(best.r.s)) best.r.s <- 20 if (missing(k.fast.s)) k.fast.s <- 2 if (missing(nResample)) nResample <- 1000 } } else { warning("Unknown setting '", setting, "'. Using defaults.") } } else { if(p.ok && grepl('D', method)) psi <- 'lqq' if (missing(cov) || is.null(cov)) cov <- if(method %in% c('SM', 'MM')) ".vcov.avar1" else ".vcov.w" } if(!p.ok) psi <- .regularize.Mpsi(psi) subsampling <- match.arg(subsampling) ## in ggw, lqq: if tuning.{psi|chi} are non-standard, calculate coefficients: compute.const <- (psi %in% c('ggw', 'lqq')) if(is.null(tuning.chi)) tuning.chi <- .Mchi.tuning.default(psi) else ## wd like to compute.const *always* -- but slightly changes KS2011/14 !! if(compute.const) tuning.chi <- .psi.const(tuning.chi, psi) if(is.null(tuning.psi)) tuning.psi <- .Mpsi.tuning.default(psi) else ## wd like to compute.const *always* -- but slightly changes KS2011/14 !! 
if(compute.const) tuning.psi <- .psi.const(tuning.psi, psi) c(list(setting = if (missing(setting)) NULL else setting, seed = as.integer(seed), nResample=nResample, psi=psi, tuning.chi=tuning.chi, bb=bb, tuning.psi=tuning.psi, max.it=max.it, groups=groups, n.group=n.group, best.r.s=best.r.s, k.fast.s=k.fast.s, k.max=k.max, maxit.scale=maxit.scale, k.m_s=k.m_s, refine.tol=refine.tol, rel.tol=rel.tol, scale.tol = scale.tol, solve.tol=solve.tol, trace.lev=trace.lev, mts=mts, subsampling=subsampling, compute.rd=compute.rd, method=method, numpoints=numpoints, cov=cov, split.type = match.arg(split.type), fast.s.large.n=fast.s.large.n, eps.outlier = eps.outlier, eps.x = eps.x, compute.outlier.stats = sub("^MM$", "SM", compute.outlier.stats), warn.limit.reject = warn.limit.reject, warn.limit.meanrw = warn.limit.meanrw), list(...)) } ##' Modify a \code{\link{lmrob.control}} list to contain only parameters that ##' were actually used. Currently used for \code{\link{print}()}ing of lmrob ##' objects. ##' ##' @title Minimize lmrob control to non-redundant parts ##' @param control a list, typically the 'control' component of a ##' \code{\link{lmrob}()} call, or the result of \code{\link{lmrob.control}()}. ##' @return list: the (typically) modified \code{control} ##' @author Martin Maechler {from Manuel's original code} lmrob.control.minimal <- function(cl) { if(is.null(cl)) return(cl) p.MS <- c("k.m_s", "split.type") p.nonLrg <- c("groups", "n.group") p.fastS <- c(p.nonLrg, "refine.tol", "best.r.s", "k.fast.s") switch(sub("^(S|M-S).*", "\\1", cl$method), "S" = { # remove all M-S specific control pars cl[p.MS] <- NULL # if large_n is not used, remove corresp control pars if (length(residuals) <= cl$fast.s.large.n) cl[p.nonLrg] <- NULL }, "M-S" = # remove all fast S specific control pars cl[p.fastS] <- NULL, ## else: do not keep parameters used by initial ests. only cl[c("tuning.chi", "bb", "nResample", p.fastS, "k.max", p.MS, "mts", "subsampling")] <- NULL ) if (!grepl("D", meth <- cl$method)) { cl$numpoints <- NULL if(meth == 'SM') cl$method <- 'MM' } cl } lmrob.fit.MM <- function(x, y, control) ## defunct .Defunct("lmrob.fit(*, control) with control$method = 'SM'") ## .Deprecated() till robustbase 0.92-6 (2016-05-28) lmrob.fit <- function(x, y, control, init=NULL, mf=NULL) { if(!is.matrix(x)) x <- as.matrix(x) if(!missing(mf)) warning("'mf' is unused and deprecated") ## old notation: MM -> SM if (control$method == "MM") control$method <- "SM" ## Assumption: if(is.null(init)) method = "S..." else method = "..." ## --------- where "..." 
consists of letters {"M", "D"} est <- if (is.null(init)) { ## --- initial S estimator if ((M1 <- substr(control$method,1,1)) != 'S') { warning(gettextf("Initial estimator '%s' not supported; using S-estimator instead", M1), domain = NA) substr(control$method,1,1) <- 'S' } init <- lmrob.S(x, y, control = control) 'S' } else { stopifnot(is.list(init)) if (is.null(init$converged)) init$converged <- TRUE if (is.null(init$control)) { init$control <- control M <- init$control$method <- 'l' } else if(!length(M <- init$control$method) || !nzchar(M)) M <- "l" M } stopifnot(is.numeric(init$coef), length(init$coef) == ncol(x), is.numeric(init$scale), init$scale >= 0) if (est != 'S' && control$cov == '.vcov.avar1') { warning( ".vcov.avar1 can only be used when initial estimator is S; using .vcov.w instead") control$cov <- ".vcov.w" } trace.lev <- control$trace.lev if (init$converged) { ## --- loop through the other estimators; build up 'est' string method <- sub(paste0("^", est), '', control$method) if(trace.lev) { cat(sprintf("init converged (remaining method = \"%s\") -> coef=\n", method)) print(init$coef) } for (step in strsplit(method,'')[[1]]) { ## now we have either M or D steps est <- paste0(est, step) init <- switch(step, ## 'control' may differ from 'init$control' when both (init, control) are spec. ## D(AS)-Step D = lmrob..D..fit(init, x, control=control, method = init$control$method), ## M-Step M = lmrob..M..fit(x = x, y = y, obj = init, control=control, method = init$control$method), stop('only M and D are steps supported after "init" computation')) if(trace.lev) { cat(sprintf("step \"%s\" -> new coef=\n", step)); print(init$coef) } ## break if an estimator did not converge if (!init$converged) { warning(gettextf( "%s-step did NOT converge. Returning unconverged %s-estimate", step, est), domain = NA) break } } } ## << FIXME? qr(.) should be available from earlier if (is.null(init$qr)) init$qr <- qr(x * sqrt(init$rweights)) if (is.null(init$rank)) init$rank <- init$qr$rank control$method <- est ## ~= original 'method', but only with the steps executed. 
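    ## Illustration (comment only, added for clarity): with the default
    ## user-facing method "MM" (mapped to "SM" at the top of this function),
    ## 'init' is the lmrob.S() fit and the loop above performs a single
    ## M-step, so 'est' ends up as "SM"; for method "SMDM" (e.g. via
    ## setting = "KS2011" / "KS2014") the loop runs the steps "M", "D", "M"
    ## in turn.  'est' -- and hence the stored control$method -- records only
    ## the steps that were actually carried out, since the loop breaks as
    ## soon as a step fails to converge.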
init$control <- control ## --- covariance estimate init$cov <- if (init$scale == 0) { ## exact fit matrix(0, ncol(x), ncol(x), dimnames=list(colnames(x), colnames(x))) } else if (!init$converged || is.null(x)) { NA } else { if (is.null(control$cov) || control$cov == "none") NA else { lf.cov <- if (!is.function(control$cov)) get(control$cov, mode='function') else control$cov lf.cov(init, x=x) } } df <- NROW(y) - init$rank ## sum(init$r?weights)-init$rank init$degree.freedom <- init$df.residual <- df init }## end{lmrob.fit} globalVariables("r", add=TRUE) ## below and in other lmrob.E() expressions .vcov.w <- function(obj, x=obj$x, complete = FALSE, # <- differing from vcov.lmrob()s default scale=obj$scale, cov.hubercorr=ctrl$cov.hubercorr, cov.dfcorr=ctrl$cov.dfcorr, cov.resid=ctrl$cov.resid, cov.corrfact=ctrl$cov.corrfact, cov.xwx=ctrl$cov.xwx) { ## set defaults ctrl <- obj$control if (is.null(cov.hubercorr)) cov.hubercorr <- !grepl('D', ctrl$method) else if (!is.logical(cov.hubercorr)) stop(':.vcov.w: cov.hubercorr must be logical (or NULL)') valid.corrfact <- c('tau', 'empirical', 'asympt', 'hybrid', 'tauold') if (is.null(cov.corrfact)) { cov.corrfact <- if (cov.hubercorr) 'empirical' else 'tau' } else if(length(cov.corrfact) != 1 || is.na(match(cov.corrfact, valid.corrfact))) stop(":.vcov.w: cov.corrfact must be one of ", pasteK(dQuote(valid.corrfact))) valid.dfcorr <- c("mean", "none", "mn.vc", "varc", "mn.df") ## old: cov.dfcorr = -1 | 0 | 1 | 2 | 3 if (is.null(cov.dfcorr)) { cov.dfcorr <- if (cov.hubercorr || cov.corrfact %in% c('tau', 'hybrid')) "mn.vc" else "mean" } else if(length(cov.dfcorr) != 1 || is.na(match(cov.dfcorr, valid.dfcorr))) stop(":.vcov.w: cov.dfcorr must be one of ", pasteK(dQuote(valid.dfcorr))) valid.cov.resid <- c('final', 'initial', 'trick') if (is.null(cov.resid)) cov.resid <- 'final' ## do warn only for *specified* cov.resid: else if (cov.resid == 'final' && (class(obj)[1] == 'lmrob.S')) warning("ignoring cov.resid == 'final' since est != final") else if (length(cov.resid) != 1L || is.na(match(cov.resid, valid.cov.resid))) stop("cov.resid must be one of ", pasteK(dQuote(valid.cov.resid))) if (is.null(cov.xwx)) cov.xwx <- TRUE # == _THE_ typical case: not part of 'obj$control' else if (!is.logical(cov.xwx)) stop(':.vcov.w: cov.xwx must be logical (or NULL)') if (is.null(x)) x <- model.matrix(obj) ## set psi and c.psi psi <- ctrl$psi if (is.null(psi)) stop('parameter psi is not defined') c.psi <- if (cov.resid == 'initial') ctrl$tuning.chi else if (ctrl$method %in% c('S', 'SD')) ctrl$tuning.chi else ctrl$tuning.psi if (!is.numeric(c.psi)) stop("parameter 'tuning.psi' is not numeric") ## MM: lmrob(..., method = "S") triggers this wrongly if (is.null(scale)) { warning(":.vcov.w: scale missing, using D scale") scale <- lmrob..D..fit(obj)$scale } n <- NROW(x) ## --- calculations: matrix part ## weighted xtx.inv matrix w <- if (cov.xwx) obj$rweights else rep(1,n) ## use qr-decomposition from lm.wfit (this already includes the robustness weights) ## update qr decomposition if it is missing or we don't want the robustness weights if (!is.qr(obj$qr) || !cov.xwx) obj$qr <- qr(x * sqrt(w)) p <- if (is.null(obj$rank)) obj$qr$rank else obj$rank cinv <- if(is.qr(obj$qr)) tryCatch(tcrossprod(solve(qr.R(obj$qr))), error = function(e)e) if(inherits(cinv, 'error')) cinv <- matrix(NA,p,p) ## --- calculation: correction factor if (cov.corrfact == 'asympt') { ## asympt correction factor if(cov.hubercorr) warning("option 'cov.hubercorr' is ignored for cov.corrfact = \"asympt\"") ## 
precalculated default values if applicable corrfact <- if (psi == 'ggw') { if ( isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.95, NA)))) 1.052619 else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.95, NA)))) 1.0525888644 else if (isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.85, NA)))) 1.176479 else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.85, NA)))) 1.176464 else lmrob.E(psi(r)^2, ctrl) / lmrob.E(r*psi(r), ctrl)^2 ## MK: using r*psi(r) instead of psi'(r) is much more accurate ## when using Gauss-Hermite quadrature (= default in lmrob.E()) ## but integrate(.) is more accurate } else if (isTRUE(all.equal(c.psi, .Mpsi.tuning.default(psi)))) { switch(psi, bisquare = 1.0526317574, welsh = 1.0526704649, optimal = 1.0526419204, hampel = 1.0526016980, lqq = 1.0526365291, stop(':.vcov.w: unsupported psi function')) } else lmrob.E(psi(r)^2, ctrl) / lmrob.E(r*psi(r), ctrl)^2 ## r*psi(r): see above varcorr <- 1 } else { ## empirical, approx or hybrid correction factor rstand <- if (cov.resid == 'initial') { ## if the last estimator was a D or T estimator ## then use obj$init$init otherwise use obj$init ## that way for SMD we use the S residuals (and S scale) ## and for SMDM we use the M residuals (and D scale) lobj <- if (grepl('[DT]$',ctrl$method)) obj$init$init else obj$init resid(lobj) / lobj$scale } else if (cov.resid == 'trick') { ## residuals are in fact from earlier estimator, use its scale to standardize them obj$init$resid / obj$init$scale } else obj$resid / scale tau <- if (cov.corrfact %in% c('tau', 'hybrid', 'tauold')) { ## added hybrid here if (!is.null(obj$tau)) obj$tau else if (!is.null(obj$init$tau)) obj$init$tau else stop("(tau / hybrid / tauold): tau not found in 'obj'") } else rep(1,n) rstand <- rstand / tau r.psi <- Mpsi(rstand, c.psi, psi) r.psipr <- Mpsi(rstand, c.psi, psi, deriv = 1) if (any(is.na(r.psipr))) warning(":.vcov.w: Caution. Some psi'() are NA") ## mpp = E[ psi'(.) 
] mpp2 = mpp^2 mpp2 <- (mpp <- mean(r.psipr, na.rm=TRUE))^2 ## Huber's correction hcorr <- if (cov.hubercorr) { vpp <- sum((r.psipr - mpp)^2) / n # vpp := var[psi.prime] ## ~= var(r.psipr, na.rm=TRUE) ~= Var[ psi'( e_i / (sigma * tau_i) ) ] (1 + p/n * vpp/mpp2)^2 } else 1 ## sample size correction for var(r.psi^2) ## use tau if 'tau' correction factor, but only if it is available varcorr <- if (cov.corrfact == 'tau' && any(tau != 1)) 1 / mean(tau^2) else n / (n - p) ## changed from 1 / mean(tau) ## if hybrid: replace B^2 (= mpp2) by asymptotic value if (cov.corrfact == 'hybrid') { mpp2 <- if (psi == 'ggw') { if ( isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.95, NA)))) 0.7598857 else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.95, NA)))) 0.6817983 else if (isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.85, NA)))) 0.4811596 else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.85, NA)))) 0.411581 else lmrob.E(r*psi(r), ctrl)^2 ## more accurate than psi'(r) } else if (isTRUE(all.equal(c.psi, .Mpsi.tuning.default(psi)))) switch(psi, bisquare = 0.5742327, welsh = 0.5445068, optimal = 0.8598825, hampel = 0.6775217, lqq = 0.6883393, stop(':.vcov.w: unsupported psi for "hybrid" correction factor')) else lmrob.E(r*psi(r), ctrl)^2 ## more accurate than psi'(r) } ## A / B^2 * Huber.corr : corrfact <- mean({ if (cov.corrfact == 'tauold') 1 else tau^2 } * r.psi^2)/mpp2 * hcorr } ## simple sample size correction sscorr <- switch(cov.dfcorr, # old: codes in {-1, 0, 1,2,3 } "mean" = mean(w), # -1 "mn.vc" = mean(w) * varcorr, # 1 "none" = 1, # 0 "varc" = varcorr, # 2 "mn.df" = mean(w)^2 / (1 - p / sum(w)), # 3 stop("invalid 'cov.dfcorr': ", cov.dfcorr)) structure(scale^2 * sscorr * corrfact * .vcov.aliased(aliased = is.na(coef(obj)), vc=cinv, complete=complete), ## scale^2 * a/b2 * Huber's correction * Cinv -- with attributes weights = w, scale = scale, scorr = sscorr, corrfact = corrfact) }## end{.vcov.w} .vcov.avar1 <- function(obj, x=obj$x, complete = FALSE, # <- differing from vcov.lmrob()s default posdef.meth = c("posdefify", "orig")) { ## was .vcov.MM stopifnot(is.list(ctrl <- obj$control)) ## works only for MM & SM estimates: if (!is.null(ctrl$method) && !ctrl$method %in% c('SM', 'MM')) stop('.vcov.avar1() supports only SM or MM estimates') ## set psi and chi constants psi <- chi <- ctrl$psi if (is.null(psi)) stop('parameter psi is not defined') stopifnot(is.numeric(c.chi <- ctrl$tuning.chi), is.numeric(c.psi <- ctrl$tuning.psi)) ## need (r0, r, scale, x, c.psi,c.chi, bb) r0 <- obj$init$resid r <- resid(obj) scale <- obj$scale if (is.null(x)) x <- model.matrix(obj) bb <- 1/2 ## this is always 1/2 for S estimates by convention ### --- start code from .vcov.MM --- ## scaled residuals n <- length(r) stopifnot(is.matrix(x), n == nrow(x)) if(n != length(r0)) stop("initial estimate residuals length differs from final ones. 
Typically must refit w/ lmrob()") r.s <- r / scale # final scaled residuals r0.s <- r0 / scale # initial scaled residuals w <- Mpsi(r.s, cc = c.psi, psi = psi, deriv = 1) w0 <- Mchi(r0.s, cc = c.chi, psi = chi, deriv = 1) p <- ncol(x) # possibly p > rankMatrix(x) in singular/aliased case ## 'complete' handling for singular/aliased case if(is.na(complete)) { ## previous default: work with full rank-deficient 'x' } else { aliased <- is.na(coef(obj)) if(any(aliased)) x <- x[, !aliased] if(isTRUE(complete)) { ## nothing } else { ## isFALSE(complete) : p <- obj$rank } } ## FIXME for multivariate y : x.wx <- crossprod(x, x * w) if(inherits(A <- tryCatch(solve(x.wx) * scale, error=function(e)e), "error")) { warning("X'WX is almost singular. Consider using cov = \".vcov.w\"") A <- tryCatch(solve(x.wx, tol = 0) * scale, error=function(e)e) if(inherits(A, "error")) stop("X'WX is singular. Rather use cov = \".vcov.w\"") } a <- A %*% (crossprod(x, w * r.s) / mean(w0 * r0.s)) w <- Mpsi( r.s, cc = c.psi, psi = psi) ## 3) now the standard part (w, x, r0.s, n, A,a, c.chi, bb) w0 <- Mchi(r0.s, cc = c.chi, psi = chi) # rho() Xww <- crossprod(x, w * w0) u1 <- A %*% crossprod(x, x * w^2) %*% (n * A) u2 <- a %*% crossprod(Xww, A) u3 <- A %*% tcrossprod(Xww, a) u4 <- mean(w0^2 - bb^2) * tcrossprod(a) ## list(cov = matrix((u1 - u2 - u3 + u4)/n, p, p), ## wt = w / r.s, a = a) ### --- end code from .vcov.MM --- ret <- (u1 - u2 - u3 + u4)/n ## this might not be a positive definite matrix ## check eigenvalues (symmetric: ensure non-complex) ev <- eigen(ret, symmetric = TRUE) if (any(neg.ev <- ev$values < 0)) { ## there's a problem posdef.meth <- match.arg(posdef.meth) if(ctrl$trace.lev) message("fixing ", sum(neg.ev), " negative eigen([",p,"])values") Q <- ev$vectors switch(posdef.meth, "orig" = { ## remove negative eigenvalue: ## transform covariance matrix into eigenbasis levinv <- solve(Q) cov.eb <- levinv %*% ret %*% Q ## set vectors corresponding to negative ev to zero cov.eb[, neg.ev] <- 0 ## cov.eb[cov.eb < 1e-16] <- 0 ## and transform back ret <- Q %*% cov.eb %*% levinv }, "posdefify" = { ## Instead of using require("sfsmisc") and ## ret <- posdefify(ret, "someEVadd",eigen.m = ev,eps.ev = 0) lam <- ev$values lam[neg.ev] <- 0 o.diag <- diag(ret)# original one - for rescaling dn <- dimnames(ret)# to preserve ret <- Q %*% (lam * t(Q)) ## == Q %*% diag(lam) %*% t(Q) ## rescale to the original diagonal values ## D <- sqrt(o.diag/diag(ret)) where they are >= 0 : if(any(o.diag < 0)) warning(".vcov.avar1: negative diag() fixed up; consider 'cov=\".vcov.w.\"' instead") D <- sqrt(pmax.int(0, o.diag)/diag(ret)) ret <- D * ret * rep(D, each = nrow(Q)) ## == diag(D) %*% ret %*% diag(D) if(!is.null(dn)) dimnames(ret) <- dn }, stop("invalid 'posdef.meth': ", posdef.meth)) } if(isTRUE(complete)) ret <- .vcov.aliased(aliased, ret) attr(ret,"weights") <- w / r.s if(!any(neg.ev)) attr(ret,"eigen") <- ev ret }## end{.vcov.avar1} lmrob..M..fit <- function (x = obj$x, y = obj$y, beta.initial = obj$coef, scale = obj$scale, control = obj$control, obj, mf = obj$model, method = obj$control$method) #<- also when 'control' is not obj$control { c.psi <- .psi.conv.cc(control$psi, control$tuning.psi) ipsi <- .psi2ipsi(control$psi) stopifnot(is.matrix(x)) if(!missing(mf)) warning("'mf' is unused and deprecated") n <- nrow(x) p <- ncol(x) if (is.null(y) && !is.null(obj$model)) y <- model.response(obj$model, "numeric") stopifnot(length(y) == n, length(c.psi) > 0, c.psi >= 0, scale >= 0, length(beta.initial) == p) trace.lev <- 
as.integer(control$trace.lev) ret <- .C(R_lmrob_MM, x = as.double(x), y = as.double(y), n = as.integer(n), p = as.integer(p), beta.initial = as.double(beta.initial), scale = as.double(scale), coefficients = double(p), residuals = double(n), iter = as.integer(control$max.it), c.psi = as.double(c.psi), ipsi = as.integer(ipsi), loss = double(1), rel.tol = as.double(control$rel.tol), converged = logical(1), trace.lev = trace.lev, mts = as.integer(control$mts), ss = .convSs(control$subsampling) )[c("coefficients", "scale", "residuals", "loss", "converged", "iter")] ## FIXME?: Should rather warn *here* in case of non-convergence ret$fitted.values <- drop(x %*% ret$coefficients) names(ret$coefficients) <- colnames(x) names(ret$residuals) <- rownames(x) ret$rweights <- lmrob.rweights(ret$residuals, scale, control$tuning.psi, control$psi) ret$control <- control if (!missing(obj)) { ## "copy" from 'obj' to the return value 'ret' : if(trace.lev) cat("lmrob..MM..fit(*, obj) --> updating .. ") if (!grepl('M$', method)) { ## update method if it's not there already method <- paste0(method, 'M') } if (!is.null(obj$call)) { ret$call <- obj$call ret$call$method <- method } if (method %in% c('SM', 'MM')) { ret$init.S <- obj } else { ret$init <- obj[intersect(names(obj), c("coefficients", "scale", "residuals", "loss", "converged", "iter", "rweights", "fitted.values", "control", "ostats", "init.S", "init", "kappa", "tau"))] class(ret$init) <- 'lmrob' ret <- c(ret, obj[intersect(names(obj), c("df.residual", "degree.freedom", "xlevels", "terms", "model", "x", "y", "na.action", "contrasts", "MD"))]) } ret$qr <- qr(x * sqrt(ret$rweights)) ret$rank <- ret$qr$rank if(trace.lev) cat(" qr(x * rweights) -> rank=", ret$rank) ## if there is a covariance matrix estimate available in obj ## update it, if possible, else replace it by the default .vcov.w if (!is.null(obj$cov)) { if (!method %in% c('SM', 'MM') && ret$control$cov == '.vcov.avar1') ret$control$cov <- '.vcov.w' lf.cov <- if (!is.function(ret$control$cov)) get(ret$control$cov, mode='function') else ret$control$cov if(trace.lev) cat(", cov() matrix ") ret$cov <- lf.cov(ret, x=x) } if (!is.null(obj$assign)) ret$assign <- obj$assign if (method %in% control$compute.outlier.stats) { ## only true for last step in lmrob.fit() if(trace.lev) cat(", outlierStats() ") ret$ostats <- outlierStats(ret, x, control) } if(trace.lev) cat("\n") } class(ret) <- "lmrob" ret }## --- lmrob..M..fit ##' Compute S-estimator for linear model -- using "fast S" algorithm --> ../man/lmrob.S.Rd lmrob.S <- function (x, y, control, trace.lev = control$trace.lev, only.scale = FALSE, mf = NULL) { if (!is.matrix(x)) x <- as.matrix(x) n <- nrow(x) p <- ncol(x) if(!missing(mf)) warning("'mf' is unused and deprecated") nResample <- if(only.scale) 0L else as.integer(control$nResample) groups <- as.integer(control$groups) nGr <- as.integer(control$n.group) large_n <- (n > control$fast.s.large.n) if (large_n) { if (nGr <= p) stop("'control$n.group' must be larger than 'p' for 'large_n' algorithm") if (nGr * groups > n) stop("'groups * n.group' must be smaller than 'n' for 'large_n' algorithm") if (nGr <= p + 10) ## FIXME (be smarter ..) 
warning("'control$n.group' is not much larger than 'p', probably too small") } if (length(seed <- control$seed) > 0) { if (exists(".Random.seed", envir = .GlobalEnv, inherits = FALSE)) { seed.keep <- get(".Random.seed", envir = .GlobalEnv, inherits = FALSE) on.exit(assign(".Random.seed", seed.keep, envir = .GlobalEnv)) } assign(".Random.seed", seed, envir = .GlobalEnv) ## why not set.seed(seed) } bb <- as.double(control$bb) c.chi <- .psi.conv.cc(control$psi, control$tuning.chi) best.r <- as.integer(control$best.r.s) stopifnot(length(c.chi) > 0, c.chi >= 0, length(bb) > 0, length(best.r) > 0, best.r >= 1, length(y) == n, n > 0) b <- .C(R_lmrob_S, x = as.double(x), y = as.double(y), n = as.integer(n), p = as.integer(p), nResample = nResample, scale = if(only.scale) mad(y, center=0) # initial scale else double(1), coefficients = double(p), as.double(c.chi), .psi2ipsi(control$psi), bb, best_r = best.r, groups = groups, n.group = nGr, k.fast.s = as.integer(control$k.fast.s), k.iter = as.integer(control$k.max), maxit.scale = as.integer(control$maxit.scale), refine.tol = as.double(control$refine.tol), inv.tol = as.double(control$solve.tol), scale.tol = as.double(control$scale.tol), converged = logical(1), trace.lev = as.integer(trace.lev), mts = as.integer(control$mts), ss = .convSs(control$subsampling), fast.s.large.n = as.integer(if (large_n) control$fast.s.large.n else n+1L) ## avoids the use of NAOK = TRUE for control$fast.s.large.n == Inf )[if(only.scale) "scale" else c("y", # the residuals (on return) "coefficients", "scale", "k.iter", "converged")] scale <- b$scale if (scale < 0) stop("C function R_lmrob_S() exited prematurely") if (scale == 0) warning("S-estimated scale == 0: Probably exact fit; check your data") if(trace.lev) if(only.scale) cat(sprintf("lmrob.S(): scale = %g\n", scale)) else { cat(sprintf("lmrob.S(): scale = %g; coeff.=\n", scale)); print(b$coefficients) } if(only.scale) return(scale) ## --- ----- b$residuals <- setNames(b$y, rownames(x)) b$fitted.values <- y - b$y # y = fitted + res b$y <- NULL # rm'it names(b$coefficients) <- colnames(x) ## robustness weights b$rweights <- lmrob.rweights(b$residuals, scale, control$tuning.chi, control$psi) ## set method argument in control control$method <- 'S' b$control <- control ## add call if called from toplevel if (identical(parent.frame(), .GlobalEnv)) b$call <- match.call() class(b) <- 'lmrob.S' if ("S" %in% control$compute.outlier.stats) b$ostats <- outlierStats(b, x, control) b }## --- lmrob.S() lmrob..D..fit <- function(obj, x=obj$x, control = obj$control, mf = obj$model, method = obj$control$method) #<- also when 'control' is not obj$control { if (is.null(control)) stop('lmrob..D..fit: control is missing') if (!obj$converged) stop('lmrob..D..fit: prior estimator did not converge, stopping') if(!missing(mf)) warning("'mf' is unused and deprecated") if (is.null(x)) x <- model.matrix(obj) w <- obj$rweights if (is.null(w)) stop('lmrob..D..fit: robustness weights undefined') if (is.null(obj$residuals)) stop('lmrob..D..fit: residuals undefined') r <- obj$residuals psi <- control$psi if (is.null(psi)) stop('lmrob..D..fit: parameter psi is not defined') c.psi <- .psi.conv.cc(psi, if (method %in% c('S', 'SD')) control$tuning.chi else control$tuning.psi) if (!is.numeric(c.psi)) stop('lmrob..D..fit: parameter tuning.psi is not numeric') obj$init <- obj[names(obj)[na.omit(match( c("coefficients","scale", "residuals", "loss", "converged", "iter", "ostats", "rweights", "fitted.values", "control", "init.S", "init"), names(obj)))]] 
obj$init.S <- NULL if (is.null(obj$kappa)) obj$kappa <- lmrob.kappa(obj, control) kappa <- obj$kappa if (is.null(obj$tau)) obj$tau <- lmrob.tau(obj, x, control) tau <- obj$tau ## get starting value for root search (to keep breakdown point!) scale.1 <- sqrt(sum(w * r^2) / kappa / sum(tau^2*w)) ret <- .C(R_find_D_scale, r = as.double(r), kappa = as.double(kappa), tau = as.double(tau), length = as.integer(length(r)), scale = as.double(scale.1), c = as.double(c.psi), ipsi = .psi2ipsi(psi), type = 3L, ## dt1 as only remaining option rel.tol = as.double(control$rel.tol), k.max = as.integer(control$k.max), converged = logical(1))[c("converged", "scale")] obj$scale <- if(ret$converged) ret$scale else NA obj$converged <- ret$converged if (!grepl('D$', method)) { ## append "D" to method if it's not there already method <- method if (method == 'MM') method <- 'SM' method <- paste0(method, 'D') } ## update call if (!is.null(obj$call)) obj$call$method <- method obj$control <- control class(obj) <- "lmrob" ## if there is a covariance matrix estimate available in obj ## update it, if possible, else replace it by the default ## .vcov.w if (!is.null(obj$cov)) { if (control$cov == '.vcov.avar1') control$cov <- '.vcov.w' lf.cov <- if (!is.function(control$cov)) get(control$cov, mode='function') else control$cov obj$cov <- lf.cov(obj, x=x) } if (method %in% control$compute.outlier.stats) obj$ostats <- outlierStats(obj, x, control) obj }## --- lmrob..D..fit globalVariables(c("psi", "wgt", "r"), add=TRUE) ## <- lmrob.E( ) lmrob.kappa <- function(obj, control = obj$control) { if (is.null(control)) stop('control is missing') if (control$method %in% c('S', 'SD')) control$tuning.psi <- control$tuning.chi fun.min <- function(kappa) lmrob.E(psi(r)*r - kappa*wgt(r), control = control) uniroot(fun.min, c(0.1, 1))$root } ## "FIXME" How to get \hat{tau} for a simple *M* estimate here ?? 
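## Illustrative sketch (wrapped in if(FALSE), as elsewhere in this package,
## so it is never run at load/install time): lmrob.kappa() above returns the
## consistency constant kappa solving  E[ psi(r)*r - kappa * w(r) ] = 0
## under the standard normal, where w(r) = psi(r)/r is the weight function;
## kappa is then used in lmrob..D..fit() when computing the D scale.
if(FALSE) {
    ctrl <- lmrob.control(psi = "bisquare")   # default 95%-efficiency tuning
    lmrob.kappa(obj = list(), control = ctrl)
}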
## lmrob.tau() is used in lmrob..D..fit() lmrob.tau <- function(obj, x=obj$x, control = obj$control, h, fast = TRUE) { if(is.null(control)) stop("'control' is missing") if(missing(h)) h <- if (is.null(obj$qr)) .lmrob.hat(x, obj$rweights) else .lmrob.hat(wqr = obj$qr) ## speed up: use approximation if possible if (fast && !control$method %in% c('S', 'SD')) { c.psi <- control$tuning.psi tfact <- tcorr <- NA switch(control$psi, optimal = if (isTRUE(all.equal(c.psi, 1.060158))) { tfact <- 0.94735878 tcorr <- -0.09444537 }, bisquare = if (isTRUE(all.equal(c.psi, 4.685061))) { tfact <- 0.9473684 tcorr <- -0.0900833 }, welsh = if (isTRUE(all.equal(c.psi, 2.11))) { tfact <- 0.94732953 tcorr <- -0.07569506 }, ggw = if (isTRUE(all.equal(c.psi, c(-.5, 1.0, 0.95, NA)))) { tfact <- 0.9473787 tcorr <- -0.1143846 } else if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.95, NA)))) { tfact <- 0.94741036 tcorr <- -0.08424648 }, lqq = if (isTRUE(all.equal(c.psi, c(-.5, 1.5, 0.95, NA)))) { tfact <- 0.94736359 tcorr <- -0.08594805 }, hampel = if (isTRUE(all.equal(c.psi, c(1.35241275, 3.15562975, 7.212868)))) { tfact <- 0.94739770 tcorr <- -0.04103958 }, {}) if (!is.na(tfact)) return(sqrt(1 - tfact*h) * (tcorr*h + 1)) } ## else "non-fast" -- need to compute the integrals : ## kappa kappa <- if(is.null(obj$kappa)) lmrob.kappa(obj, control) else obj$kappa ## local variables ## n <- length(h) ## set psi and cpsi psi <- control$psi if (is.null(psi)) stop('parameter psi is not defined') cpsi <- if (control$method %in% c('S', 'SD')) control$tuning.chi else control$tuning.psi cpsi <- .psi.conv.cc(psi, cpsi)# has its test ipsi <- .psi2ipsi(psi) ## constant for stderr of u_{-i} part and other constants inta <- function(r) .Mpsi(r, cpsi, ipsi)^2 * dnorm(r) intb <- function(r) .Mpsi(r, cpsi, ipsi, deriv = 1) * dnorm(r) ## intc <- function(r) .Mpsi(r, cpsi, ipsi) * r * dnorm(r) # changed from psi/e to psi*e ta <- integrate(inta, -Inf,Inf)$value tb <- integrate(intb, -Inf,Inf)$value ## tE <- integrate(intc, -Inf,Inf)$value ## calculate tau for unique h hu <- unique(h) nu <- length(hu) ## Initialize tau vector tau <- numeric(length=nu) tc <- ta/tb^2 ## --- Gauss-Hermite integration gh <- ghq(control$numpoints) ghz <- gh$nodes ghw <- gh$weights ## Calulate each tau_i for (i in 1:nu) { ## stderr of u_{-i} part s <- sqrt(tc*(hu[i]-hu[i]^2)) tc2 <- hu[i]/tb ## function to be integrated fun <- function(w, v, sigma.i) { t <- (v - tc2*.Mpsi(v, cpsi, ipsi) + w*s)/sigma.i psi.t <- .Mpsi(t, cpsi, ipsi) (psi.t*t - kappa*psi.t/t) * dnorm(v)*dnorm(w) } ## integrate over w wint <- function(v, sigma.i) { ## sapply(v,function(v.j) integrate(fun,-Inf,Inf,v.j,sigma.i)$value) sapply(v, function(v.j) sum(fun(ghz, v.j, sigma.i)*ghw)) } ## integrate over v vint <- function(sigma.i) { ## integrate(wint,-Inf,Inf,sigma.i)$value sum(wint(ghz, sigma.i)*ghw) } ## find tau tau[i] <- uniroot(vint, c(if (hu[i] < 0.9) 3/20 else 1/16, 1.1))$root } tau[match(h, hu)] } lmrob.tau.fast.coefs <- function(cc, psi) { ## function that calculates the coefficients for 'fast' mode of lmrob.tau ctrl <- lmrob.control(tuning.psi = cc, psi = psi) levs <- seq(0, 0.8, length.out = 80) ## calculate taus taus <- lmrob.tau(list(), control=ctrl, h=levs, fast=FALSE) ## calculate asymptotic approximation of taus ta <- lmrob.E(psi(r)^2, ctrl, use.integrate = TRUE) tb <- lmrob.E(psi(r, 1), ctrl, use.integrate = TRUE) tfact <- 2 - ta/tb^2 taus.0 <- sqrt(1 - tfact * levs) ## calculate correction factor tcorr <- coef(lmrob(taus / taus.0 - 1 ~ levs - 1)) c(tfact = tfact, tcorr = tcorr) } 
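## Illustrative sketch (if(FALSE), not executed): compare the 'fast'
## closed-form approximation  sqrt(1 - tfact*h) * (tcorr*h + 1)  used in
## lmrob.tau() with the full numerical computation, and show how the
## hard-wired (tfact, tcorr) pairs can be re-derived.
if(FALSE) {
    ctrl <- lmrob.control(psi = "bisquare")        # tuning.psi = 4.685061
    h  <- seq(0.05, 0.6, by = 0.05)                # leverage ("hat") values
    t.fast  <- lmrob.tau(list(), control = ctrl, h = h, fast = TRUE)
    t.exact <- lmrob.tau(list(), control = ctrl, h = h, fast = FALSE)
    max(abs(t.fast - t.exact))                     # should be small
    ## the precomputed constants used in the 'fast' branch above :
    lmrob.tau.fast.coefs(cc = 4.685061, psi = "bisquare")
}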
lmrob.hatmatrix <- function(x, w = rep(1, NROW(x)), wqr = qr(sqrt(w) * x), names = FALSE) { H <- tcrossprod(qr.qy(wqr, diag(1, NROW(x), x$rank))) if(names && !is.null(rnms <- dimnames(wqr$qr)[[1L]])) dimnames(H) <- list(rnms,rnms) H } .lmrob.hat <- function(x, w = rep(1, NROW(x)), wqr = qr(sqrt(w) * x), names = TRUE) { if (missing(wqr) && !is.matrix(x)) x <- as.matrix(x) ## Faster than computing the whole hat matrix, and then diag(.) : ## == diag(lmrob.hatmatrix(x, w, ...)) h <- pmin(1, rowSums(qr.qy(wqr, diag(1, NROW(wqr$qr), wqr$rank))^2)) if(names && !is.null(rnms <- dimnames(wqr$qr)[[1L]])) names(h) <- rnms h } hatvalues.lmrob <- function(model, ...) { if (is.null(wqr <- model$qr)) .lmrob.hat(model$x, model$rweights) else .lmrob.hat(wqr = wqr) } ##' psi |--> ipsi \in \{0,1,...6} : integer codes used in C .psi2ipsi <- function(psi) { psi <- .regularize.Mpsi(psi, redescending=FALSE) i <- match(psi, c( 'huber', 'bisquare', 'welsh', 'optimal', ## 0 1 2 3 'hampel', 'ggw', 'lqq' ## 4 5 6 )) if(is.na(i)) stop("internal logic error in psi() function name: ", psi, " Please report!") i - 1L } ##' Given psi() fn (as string), possibly convert the tuning-constant vector cc ##' such that it "fits" to psi(). ##' ##' @param psi a string such as \code{"lqq"}. ##' @param cc numeric tuning-constant vector, for "ggw" and "lqq", ideally ##' with an \code{\link{attr}}ibute \code{"constants"} as from ##' \code{\link{lmrob.control}(.)$tuning.psi} or from ##' \code{\link{.psi.const}(psi, *)}. .psi.conv.cc <- function(psi, cc) { if (!is.character(psi) || length(psi) != 1) stop("argument 'psi' must be a string (denoting a psi function)") if(!is.numeric(cc)) stop("tuning constant 'cc' is not numeric") ## "FIXME": For (ggw, lqq) this is much related to .psi.const() below switch(tolower(psi), 'ggw' = { ## Input: 4 parameters, (minimal slope, b, efficiency, breakdown point) _or_ c(0, a,b,c, m.rho) ## Output 'k': either k in {1:6} or k = c(0, k[2:5]) ## prespecified 6 cases all treated in C ( ../src/lmrob.c ) via these codes: if ( isTRUE(all.equal(cc, c(-.5, 1 , 0.95, NA)))) return(1) else if (isTRUE(all.equal(cc, c(-.5, 1 , 0.85, NA)))) return(2) else if (isTRUE(all.equal(cc, c(-.5, 1. , NA, 0.5)))) return(3) else if (isTRUE(all.equal(cc, c(-.5, 1.5, 0.95, NA)))) return(4) else if (isTRUE(all.equal(cc, c(-.5, 1.5, 0.85, NA)))) return(5) else if (isTRUE(all.equal(cc, c(-.5, 1.5, NA, 0.5)))) return(6) else if (length(cc) == 5 && cc[1] == 0 || (length(cc <- attr(cc, 'constants')) == 5 && cc[1] == 0)) return(cc) else stop('Coefficients for ',psi,' function incorrectly specified.\n', 'Use c(minimal slope, b, efficiency, breakdown point) [6 hard-coded special cases]\n', ' or c(0, a,b,c, max_rho) as from .psi.const(',psi,', cc).') }, 'lqq' = { ## Input: 4 parameters, (minimal slope, b/c, efficiency, breakdown point) _or_ (b, c, s) [length 3] ## Output: k[1:3] = (b, c, s) if (isTRUE(all.equal(cc, c(-.5, 1.5, 0.95, NA)))) return(c(1.4734061, 0.9822707, 1.5)) else if (isTRUE(all.equal(cc, c(-.5, 1.5, NA, 0.5)))) return(c(0.4015457, 0.2676971, 1.5)) else if (length(cc) == 3 || length(cc <- attr(cc, 'constants')) == 3) return(cc) else stop('Coefficients for ',psi,' function incorrectly specified.\n', 'Use c(minimal slope, b, efficiency, breakdown point) [2 special cases]\n', ' or c(b, c, s) as from .psi.const(',psi,', cc).') }, 'hampel' = { ## just check length of coefficients if (length(cc) != 3) stop('Coef. for Hampel psi function not of length 3') }, { ## otherwise: should have length 1 if (length(cc) != 1) stop('Coef. 
for psi function ', psi,' not of length 1') }) return(cc) } ##' @title For GGW's psi(), find x with minimal slope, and the min.slope ##' @param a "scale" of GGW's psi ##' @param b exponent of GGW's psi ##' @param c "huber-cutoff" of GGW's psi ##' @param ... further arguments passed to optimize(), notably 'tol' ##' @return the return value of optimize(): list(minimum, objective) ##' @author Manuel Kohler and Martin Maechler .psi.ggw.mxs <- function(a, b, c, tol = .Machine$double.eps^0.25) { ipsi <- .psi2ipsi('ggw') ccc <- c(0, a, b, c, 1) ## == .psi.conv.cc('ggw', cc=c(0, a, b, c, 1)) optimize(.Mpsi, c(c, max(a+b+2*c, 0.5)), ccc=ccc, ipsi=ipsi, deriv = 1, tol = tol) } .psi.ggw.ms <- function(a, b, c, tol = .Machine$double.eps^0.25) ## find minimal slope .psi.ggw.mxs(a, b, c, tol=tol)[["objective"]] .psi.ggw.finda <- function(ms, b, c, tol = .Machine$double.eps^0.25, maxiter = 1000, ms.tol = tol / 64,...) ## find constant 'a' (reparametrized to 1/o scale). { val <- uniroot(function(a) .psi.ggw.ms(1/a, b, c, tol=ms.tol) - ms, c(200, if (b > 1.4) 1/400 else if (b > 1.3) 1/50 else 1/20), tol=tol, maxiter=maxiter) 1/val$root } .psi.ggw.eff <- function(a, b, c) ## calculate asymptotic efficiency { ipsi <- .psi2ipsi('ggw') ccc <- c(0, a, b, c, 1) lmrob.E(.Mpsi(r, ccc, ipsi, deriv=1), use.integrate = TRUE)^2 / lmrob.E(.Mpsi(r, ccc, ipsi) ^2, use.integrate = TRUE) } .psi.ggw.bp <- function(a, b, c, ...) { ## calculate kappa ipsi <- .psi2ipsi('ggw') abc <- c(0, a, b, c) nc <- integrate(.Mpsi, 0, Inf, ccc = c(abc, 1), ipsi=ipsi, ...)$value lmrob.E(.Mchi(r, ccc = c(abc, nc), ipsi), use.integrate = TRUE) } .psi.ggw.findc <- function(ms, b, eff = NA, bp = NA, subdivisions = 100L, rel.tol = .Machine$double.eps^0.25, abs.tol = rel.tol, tol = .Machine$double.eps^0.25, ms.tol = tol/64, maxiter = 1000) { ## find c by eff for bp c. <- if (!is.na(eff)) { if (!is.na(bp)) warning('tuning constants for ggw psi: both eff and bp specified, ignoring bp') ## find c by matching eff tryCatch(uniroot(function(x) .psi.ggw.eff(.psi.ggw.finda(ms, b, x, ms.tol=ms.tol), b, x) - eff, c(0.15, if (b > 1.61) 1.4 else 1.9), tol=tol, maxiter=maxiter)$root, error=function(e)e) } else { if (is.na(bp)) stop("neither breakdown point 'bp' nor efficiency 'eff' specified") ## find c by matching bp tryCatch(uniroot(function(x) .psi.ggw.bp(.psi.ggw.finda(ms, b, x, ms.tol=ms.tol), b, x) - bp, c(0.08, if (ms < -0.4) 0.6 else 0.4), tol=tol, maxiter=maxiter)$root, error=function(e)e) } if (inherits(c., 'error')) stop(gettextf('unable to find constants for "ggw" psi function: %s', c.$message), domain=NA) a <- .psi.ggw.finda(ms, b, c., ms.tol=ms.tol) nc <- integrate(.Mpsi, 0, Inf, ccc= c(0, a, b, c., 1), ipsi = .psi2ipsi('ggw'))$value ## return c(0, a, b, c., nc) } lmrob.efficiency <- function(psi, cc, ccc = .psi.conv.cc(psi, cc=cc), ...) { ipsi <- .psi2ipsi(psi) integrate(function(x) .Mpsi(x, ccc=ccc, ipsi=ipsi, deriv=1)*dnorm(x), -Inf, Inf, ...)$value^2 / integrate(function(x) .Mpsi(x, ccc=ccc, ipsi=ipsi)^2 *dnorm(x), -Inf, Inf, ...)$value } lmrob.bp <- function(psi, cc, ccc = .psi.conv.cc(psi, cc=cc), ...) { ipsi <- .psi2ipsi(psi) integrate(function(x) .Mchi(x, ccc=ccc, ipsi=ipsi)*dnorm(x), -Inf, Inf, ...)$value } ##' @title Find tuning constant 'c' for "lqq" psi function ---> ../man/psiFindc.Rd ##' @param cc numeric vector = c(min_slope, b/c, eff, bp) ; ##' typically 'eff' or 'bp' are NA and will be computed ##' .... 
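##' (The remaining arguments 'interval', 'subdivisions', 'rel.tol', 'abs.tol',
##' 'tol' and 'maxiter' are search bounds and tolerances passed on to the
##' uniroot() and integrate() calls inside the function below.)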
##' @return constants for c function: (b, c, s) == (b/c * c, c, s = 1 - min_slope) .psi.lqq.findc <- function(ms, b.c, eff = NA, bp = NA, interval = c(0.1, 4), subdivisions = 100L, rel.tol = .Machine$double.eps^0.25, abs.tol = rel.tol, tol = .Machine$double.eps^0.25, maxiter = 1000) { ## b.c == b/c bcs <- function(cc) c(b.c*cc, cc, 1-ms) t.fun <- if (!is.na(eff)) { ## 'eff' specified if (!is.na(bp)) warning("tuning constants for \"lqq\" psi: both 'eff' and 'bp' specified, ignoring 'bp'") ## find c by b, s and eff function(c) lmrob.efficiency('lqq', bcs(c), subdivisions=subdivisions, rel.tol=rel.tol, abs.tol=abs.tol) - eff } else { if (is.na(bp)) stop('Error: neither breakdown point nor efficiency specified') ## breakdown point 'bp' specified function(c) lmrob.bp('lqq', bcs(c), subdivisions=subdivisions, rel.tol=rel.tol, abs.tol=abs.tol) - bp } c. <- tryCatch(uniroot(t.fun, interval=interval, tol=tol, maxiter=maxiter)$root, error=function(e)e) if (inherits(c., 'error')) stop(gettextf('unable to find constants for "lqq" psi function: %s', c.$message), domain=NA) else bcs(c.) } ##' For ("ggw", "lqq"), if cc is not one of the predefined ones, ##' compute the tuning constants numerically, from the given specs (eff / bp). ##' Much related to .psi.conv.cc() above .psi.const <- function(cc, psi) { switch(psi, "ggw" = { ## only calculate for non-standard coefficients if (isTRUE(all.equal(cc, c(-.5, 1, 0.95, NA))) || isTRUE(all.equal(cc, c(-.5, 1, 0.85, NA))) || isTRUE(all.equal(cc, c(-.5, 1, NA, 0.5))) || isTRUE(all.equal(cc, c(-.5, 1.5, 0.95, NA))) || isTRUE(all.equal(cc, c(-.5, 1.5, 0.85, NA))) || isTRUE(all.equal(cc, c(-.5, 1.5, NA, 0.5)))) { ## treated with in C code: in ../src/lmrob.c, functions *_ggw() } else attr(cc, 'constants') <- .psi.ggw.findc(ms=cc[[1]], b=cc[[2]], eff=cc[[3]], bp=cc[[4]]) }, "lqq" = { ## use pre-computed values for (the two) "standard" coefficients: attr(cc, 'constants') <- ## b.c :== b/c if (isTRUE(all.equal(cc, c(-.5, 1.5, 0.95, NA)))) c(1.4734061, 0.9822707, 1.5) # as in .psi.conv.cc() {FIXME? only in 1 place} else if (isTRUE(all.equal(cc, c(-.5, 1.5, NA, 0.5)))) c(0.4015457, 0.2676971, 1.5) else .psi.lqq.findc(ms=cc[[1]], b.c=cc[[2]], eff=cc[[3]], bp=cc[[4]]) }, stop("method for psi function ", psi, " not implemented")) cc } Mpsi <- function(x, cc, psi, deriv=0) { x[] <- .Call(R_psifun, x, .psi.conv.cc(psi, cc), .psi2ipsi(psi), deriv) x } .Mpsi <- function(x, ccc, ipsi, deriv=0) .Call(R_psifun, x, ccc, ipsi, deriv) Mchi <- function(x, cc, psi, deriv=0) { x[] <- .Call(R_chifun, x, .psi.conv.cc(psi, cc), .psi2ipsi(psi), deriv) x } .Mchi <- function(x, ccc, ipsi, deriv=0) .Call(R_chifun, x, ccc, ipsi, deriv) Mwgt <- function(x, cc, psi) { x[] <- .Call(R_wgtfun, x, .psi.conv.cc(psi, cc), .psi2ipsi(psi)) x } .Mwgt <- function(x, ccc, ipsi) .Call(R_wgtfun, x, ccc, ipsi) ## only for nlrob() -- and to use instead of MASS:::psi.huber etc: ## returns a *function* a la psi.huber() : .Mwgt.psi1 <- function(psi, cc = .Mpsi.tuning.default(psi)) { ipsi <- .psi2ipsi(psi) ccc <- .psi.conv.cc(psi, cc) ## return function *closure* : function(x, deriv = 0) if(deriv) .Mpsi(x, ccc, ipsi, deriv=deriv) else .Mwgt(x, ccc, ipsi) } ##' The normalizing constant for rho(.) <--> rho~(.) 
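##' MrhoInf(cc, psi) computes rho(Inf), i.e., the constant by which rho() is
##' divided to obtain the standardized rho~() = chi() with rho~(Inf) = 1
##' (meaningful for redescending psi); .MrhoInf() is the low-level variant
##' taking the integer code 'ipsi' and already converted constants 'ccc'.
##' E.g. (added illustration), for the default bisquare psi with c = 4.685061,
##' rho(Inf) = c^2/6, so MrhoInf(4.685061, "bisquare") is approximately 3.66.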
MrhoInf <- function(cc, psi) { cc <- .psi.conv.cc(psi, cc) .Call(R_rho_inf, cc, .psi2ipsi(psi)) } .MrhoInf <- function(ccc, ipsi) .Call(R_rho_inf, ccc, ipsi) lmrob.rweights <- function(resid, scale, cc, psi, eps = 16 * .Machine$double.eps) { stopifnot(is.numeric(scale), length(scale) == 1L, scale >= 0) if (scale == 0) { ## exact fit m <- max(ar <- abs(resid), na.rm=TRUE) if(m == 0) numeric(seq_len(ar)) else as.numeric(ar < eps * m)# 1 iff res ~= 0 } else Mwgt(resid / scale, cc, psi) } lmrob.E <- function(expr, control, dfun = dnorm, use.integrate = FALSE, obj, ...) { expr <- substitute(expr) if (missing(control) && !missing(obj)) control <- obj$control lenvir <- if (!missing(control)) { psi <- control$psi if (is.null(psi)) stop('parameter psi is not defined') c.psi <- control[[if (control$method %in% c('S', 'SD')) "tuning.chi" else "tuning.psi"]] if (!is.numeric(c.psi)) stop('tuning parameter (chi/psi) is not numeric') list(psi = function(r, deriv = 0) Mpsi(r, c.psi, psi, deriv), chi = function(r, deriv = 0) Mchi(r, c.psi, psi, deriv), ## change? wgt = function(r) Mwgt(r, c.psi, psi)) ## change? } else list() pf <- parent.frame() FF <- function(r) eval(expr, envir = c(list(r = r), lenvir), enclos = pf) * dfun(r) if (isTRUE(use.integrate)) { integrate(FF, -Inf,Inf, ...)$value ## This would be a bit more accurate .. *AND* faster notably for larger 'numpoints': ## } else if(use.integrate == "GQr") { ## require("Gqr")# from R-forge [part of lme4 project] ## ## initialize Gauss-Hermite Integration ## GH <- GaussQuad(if(is.null(control$numpoints)) 13 else control$numpoints, ## "Hermite") ## ## integrate ## F. <- function(r) eval(expr, envir = c(list(r = r), lenvir), enclos = pf) ## sum(GH$weights * F.(GH$knots)) } else { ## initialize Gauss-Hermite Integration gh <- ghq(if(is.null(control$numpoints)) 13 else control$numpoints) ## integrate sum(gh$weights * FF(gh$nodes)) } } ghq <- function(n = 1, modify = TRUE) { ## Adapted from gauss.quad in statmod package ## which itself has been adapted from Netlib routine gaussq.f ## Gordon Smyth, Walter and Eliza Hall Institute n <- as.integer(n) if(n<0) stop("need non-negative number of nodes") if(n==0) return(list(nodes=numeric(0), weights=numeric(0))) ## i <- seq_len(n) # 1 .. n i1 <- seq_len(n-1L) muzero <- sqrt(pi) ## a <- numeric(n) b <- sqrt(i1/2) A <- numeric(n*n) ## A[(n+1)*(i-1)+1] <- a # already 0 A[(n+1)*(i1-1)+2] <- b A[(n+1)*i1] <- b dim(A) <- c(n,n) vd <- eigen(A,symmetric=TRUE) n..1 <- n:1L w <- vd$vectors[1, n..1] w <- muzero * w^2 x <- vd$values[n..1] # = rev(..) list(nodes=x, weights= if (modify) w*exp(x^2) else w) } ##' (non)singular subsampling - code to be passed to C, as `ss` in ../src/lmrob.c .convSs <- function(ss) switch(ss, "simple"= 0L, "nonsingular"= 1L, stop(gettextf("unknown setting for 'subsampling': %s", ss), domain=NA)) outlierStats <- function(object, x = object$x, control = object$control, epsw = control$eps.outlier, epsx = control$eps.x, warn.limit.reject = control$warn.limit.reject, warn.limit.meanrw = control$warn.limit.meanrw ) { ## look at all the factors in the model and count ## for each level how many observations were rejected. ## Issue a warning if there is any level where more than ## warn.limit.reject observations were rejected or ## the mean robustness weights was <= warn.limit.meanrw rw <- object$rweights ## ^^^^^^^^^^^^^^^ not weights(..., type="robustness") as we ## don't want naresid() padding here. 
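    ## 'epsw' -- the threshold below which a robustness weight counts as "rejected" --
    ## and 'epsx' -- the threshold above which a design matrix entry counts as
    ## non-zero -- may each be given as a numeric(1) or as a function of
    ## nobs(object) and of max(abs(x)), respectively; their defaults are the
    ## 'eps.outlier' and 'eps.x' components set up by lmrob.control().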
if (is.function(epsw)) epsw <- epsw(nobs(object, use.fallback = TRUE)) if (!is.numeric(epsw) || length(epsw) != 1) stop("'epsw' must be numeric(1) or a function of nobs(obj.) which returns a numeric(1)") rj <- abs(rw) < epsw if (NROW(x) != length(rw)) stop("number of rows in 'x' and length of 'object$rweights' must be the same") if (is.function(epsx)) epsx <- epsx(max(abs(x))) if (!is.numeric(epsx) || length(epsx) != 1) stop("'epsx' must be numeric(1) or a function of max(abs(x)) which returns a numeric(1)") xnz <- abs(x) > epsx cc <- function(idx) { nnz <- sum(idx) ## <- if this is zero, 'Ratio' and 'Mean.RobWeight' will be NaN Fr <- sum(rj[idx]) c(N.nonzero = nnz, N.rejected = Fr, Ratio = Fr / nnz, Mean.RobWeight = mean(rw[idx])) } report <- t(apply(cbind(Overall=TRUE, xnz[, colSums(xnz) < NROW(xnz)]), 2, cc)) shout <- FALSE # should we "shout"? -- scalar logical, never NA lbr <- rep.int(FALSE, nrow(report)) if (!is.null(warn.limit.reject)) { lbr <- report[, "Ratio"] >= warn.limit.reject shout <- any(lbr & !is.na(lbr)) } if (!is.null(warn.limit.meanrw)) { lbr <- lbr | report[, "Mean.RobWeight"] <= warn.limit.meanrw shout <- shout || any(lbr & !is.na(lbr)) } if (shout) { nbr <- rownames(report)[lbr] attr(report, "warning") <- paste("Possible local breakdown of", paste0("'", nbr, "'", collapse=", ")) warning("Detected possible local breakdown of ", control$method, "-estimate in ", if (length(nbr) > 1) paste(length(nbr), "coefficients") else "coefficient", " ", paste0("'", nbr, "'", collapse=", "), ".", if ("KS2014" %in% control$setting) "" else "\nUse lmrob argument 'setting=\"KS2014\"' to avoid this problem." ) } report } robustbase/MD50000644000176200001440000003625514124314172012745 0ustar liggesusers66887f2a4a84d1b8823efaadb4d2e817 *ChangeLog 67a7444597a31fb8d508056bc99331b4 *DESCRIPTION 45248e48803a2ce6540a83eafeac7433 *NAMESPACE 9f45d305486ad91f13f6dc08247b73a8 *R/AAA.R 08efe13e20dfbf515725d8753d228ee8 *R/BYlogreg.R 7478e0bb64ef0b7c0d7ab1b778496418 *R/MTestimador2.R 05c9151dad5fb4bf5953fba77aaacf51 *R/OGK.R 4481cfaf01b87a16269d156e056b0a9f *R/adjbox.R 3c3888392a6105b40a1fb9e8291a4be2 *R/adjoutlyingness.R 959de6a22a12a273711cc6975c9dad24 *R/anova-glmrob.R 7372a74eb2fe5b4c97fc7315e924c234 *R/anova.lmrob.R 186ce151b871686bd85921f1843ed90e *R/biweight-funs.R abd567be7414be168c4d18995c5d67b3 *R/classPC.R b8872e7f5f158e3f742c71a8ae030ed0 *R/comedian.R 810d7224c354f66c9e8a00bd325e145a *R/covMcd.R 6deaedfb710eeaa98ddb990382b51a74 *R/covPlot.R 31276c72c47df1cb6aaadc1d91f38d5a *R/detmcd.R 79316c1ca445f99587e3b7d583656aed *R/glmrob.R 47e68c210ff891cecec23aa54369ec8f *R/glmrobMqle-DQD.R 24d0de58576935e718ea701a10b2d4af *R/glmrobMqle.R 3f50418b4f9fd47918b8881bab58b6bb *R/glmrobPredict.R 926c7fc062e97f0fe1c96aed62029f15 *R/huber.R b456cca632bb6de69f1ed8a4278e44ca *R/lmrob.M.S.R 7fd6f04272b008c290e17884b19fbdb8 *R/lmrob.MM.R 5b6e2f997a12271f4dbfaff0bcd4a3d1 *R/lmrob.R 8130354e92350a040360b85bd36995bf *R/lmrobPredict.R ee8e9a359407bfcce26eb0f77457f0f6 *R/ltsPlot.R 4d645592c866bbaa3ad30760d0ff20cd *R/ltsReg.R 7d6b027d7279b4f58bc8dcd3fa8c53d1 *R/mc.R 621fe5b6d8087287f219f347b5fd5439 *R/nlregrob.R 5efd5210edae7aa92157d91ac3a9ff40 *R/nlrob.R 544e1b62b77ca840ea441f126a88ac9b *R/plot.lmrob.R 53800cd013755518211c5a47d48a345f *R/psi-rho-funs.R 50b2b1aa36b2b2a4e6fa1c607c11bfe9 *R/qnsn.R 1d34823c1885d395c7ec8af83fcf57b8 *R/rrcov.control.R 0ee6d9e98d6746d0a2159988002c87c7 *R/tolEllipse.R 1a2ee76ea750e0d5fa8b9229a25dc598 *TODO 0582c4acc7ab91a7864946907f2bd2dd *build/partial.rdb c790775f55ad8e758bd117265ba22adf 
*build/vignette.rds ae030dea61f2f374479a49afe8077a6d *data/Animals2.tab 19e4eaf0977f829715ae36606a377bb8 *data/CrohnD.rda 238aac3ea05d306a86708f1bb39dcdc7 *data/NOxEmissions.rda 37e841b5fc4a056e5f931d43c9eb4603 *data/SiegelsEx.R ab736be6db06a387200ce0f6cdedc697 *data/aircraft.rda 017c2bbdd22e598584d8045afde64ffd *data/airmay.rda 20bdc21492ae84b462c7080ee6aaaad6 *data/alcohol.rda bdf56c95aa99517460a387f5fa1da399 *data/ambientNOxCH.rda 48a56104b0c39bf40fd10c8b19131eca *data/biomassTill.rda 3cf0efaac9b7ef93f8603bbf2d6cb337 *data/bushfire.rda 6e659a95f595d45c911308bc253c661a *data/carrots.rda 090b86c587b70c48367bb030fe2f9d15 *data/cloud.rda 4c30ccfadda6ce5afb38e46b07da22e8 *data/coleman.rda 1961f8041c73e1bf90c5847c108c5ae1 *data/condroz.rda d4d34970ff2fa110fbc21e3f2b227704 *data/cushny.R 03cfc37554676f5b36f9e269c0bd7244 *data/delivery.rda 7baef2ba826e98bb5cf01bce930ca49e *data/education.rda d737cb0f2d23c29f2a26e96276aaee13 *data/epilepsy.rda 7104e0d903ad8fc993b13257772413f4 *data/exAM.tab e941fedb2c9e4c6f71c7997f226a8d82 *data/foodstamp.rda d173f6f42e6ec9addb714375ae4b4483 *data/hbk.rda 2cf47a5fbdfc0f13ace274e431afe686 *data/heart.rda 46364afa47910f07eda003121c6205d5 *data/kootenay.tab b7ea6d1b22c131ee080418d729dc0742 *data/lactic.rda 692c41d4efee538cddea9859f6d230d3 *data/los.rda 453aeaf5b65b5d166f0b62d9b62e77a7 *data/milk.rda 8b64e711004e709cb46ccef1d935bf16 *data/pension.rda bb1fb728be30d2c32afc8881b0c5802d *data/phosphor.rda 1010341878a68422ebf3d09b2e053c5b *data/pilot.rda 735465ec97480fb1a6fe586e85792f98 *data/possumDiv.rda 60f04bab62d0679fcaf5231d7baa2e92 *data/pulpfiber.rda 5d86b54bde8121b6dfd3c801dddf2c5b *data/radarImage.rda 3075d6647227e039ec8fbae2d2a88b9c *data/salinity.rda 3a57947173594810a0e44798404afac7 *data/starsCYG.rda c44feedbebdeaf0fcd6419986b201800 *data/steamUse.rda a7a3edcf24050d5ff10e4b2367e16cd9 *data/telef.rda b8244eaef97350532d9124998430d396 *data/toxicity.rda 0b9904a2e9bf30bdfed52ca1f238257b *data/vaso.rda 40b3c269b89c039779bba44dd63d4a3b *data/wagnerGrowth.rda 2108ebd3782f664ce37eeb36d4d2bac4 *data/wood.rda 5735d59a0fd8350221e71c757aff3022 *demo/00Index 4eaa659c5287d2433f0c497b91501654 *demo/determinMCD.R d2aaf14df328163a55f468468fd0f6dc *inst/CITATION 5d05206ddf06ffd03093afc086a6400e *inst/Copyrights 5a9335652040a5d1a11ff0764da43f67 *inst/NEWS.Rd da7f5aa078e39b61fc11c70f79acda31 *inst/doc/aggr_results.Rdata fd283291fb16f251cb6d0899d4e9310b *inst/doc/asymptotic.max.bias.Rdata eba4ff0f00afa90a6de4faeb8fbeb118 *inst/doc/error.distributions.R e87e964dd949e0d5d3ac6677cf8ce850 *inst/doc/estimating.functions.R 84e4b173d0410d358ae5ed7d782d8c28 *inst/doc/fastMcd-kmini.R c8992760bbc781798df7d3dee8839d56 *inst/doc/fastMcd-kmini.Rnw 8a48fc7f7c19ea13885035a05470d746 *inst/doc/fastMcd-kmini.pdf 2eb6dae5393fc8f1f3613ea7eacf362f *inst/doc/graphics.functions.R aa4176684274cf23ab72b699568976de *inst/doc/lmrob_simulation.R ad9971a72d917743efbdb264b7057d97 *inst/doc/lmrob_simulation.Rnw 70087e7171e3223c42e5414ed0828001 *inst/doc/lmrob_simulation.pdf f52efc8694f09fdff19c6f54fe501d45 *inst/doc/psi_functions.R 5dc951a423ecf0cea599f84e69eb1055 *inst/doc/psi_functions.Rnw 74a810bf56830dd048e3b73c1865549b *inst/doc/psi_functions.pdf 8fbff7afee995686ab14f490bfdd3bd7 *inst/doc/simulation.functions.R faa9ecb70de3e867cb0b0cb8f56f3074 *inst/doc/simulation.init.R e571b79e7316389c9251508b0a64775f *inst/external/d1k27.rda 2697fe55d9d2cd8b788e735f21fce763 *inst/include/robustbase.h 4d5298481597000e4fbabcda334a975f *inst/po/de/LC_MESSAGES/R-robustbase.mo 6ef699e2c1dc80f8f76c3ad814a43594 
*inst/po/de/LC_MESSAGES/robustbase.mo 0c1f0887d5c9f0c101a53be477913c14 *inst/po/en@quot/LC_MESSAGES/R-robustbase.mo e77162459322771f5573e520ee55eb4c *inst/po/en@quot/LC_MESSAGES/robustbase.mo cab4f7f139761c02c5281de692c2541e *inst/xtraR/ex-funs.R 130761ff8fdad503b3eabb72b95676ab *inst/xtraR/lmrob-trace_lev.R b65b9ee10513815ae1ce3d469496299f *inst/xtraR/m-s_fns.R a9bcb5636de95b43522e387217b7ed4a *inst/xtraR/mcnaive.R e36c6c8c36b8ad9ff096d5cf2af8e5a6 *inst/xtraR/platform-sessionInfo.R 87049deac986592f8a86dc25370a2832 *inst/xtraR/plot-psiFun.R df9ad021e5fe4f589da91f5ee859ec35 *inst/xtraR/subsample-fns.R d59c9d987e5930b5518e5f115a12fc93 *inst/xtraR/test_LTS.R 1faee3e2b0c2c41380c3e25d3f5fc067 *inst/xtraR/test_MCD.R cc9b23ec76ba93fbd5c15656bffca0ad *man/Animals2.Rd 2c6b72764352e084191b0c1d09d1e075 *man/BYlogreg.Rd 64236af69ca22e65d23c6fded4f74428 *man/CrohnD.Rd 13ea7f6a276459c00d98ad0b52cd150f *man/M.psi.Rd 6824d0737bc148c4db2525b346dbb22a *man/NOxEmissions.Rd 08467d483cced3e914773083f6d5ce3a *man/Qn.Rd 083dcbb9a8612332ac5e46ecf5815782 *man/SiegelsEx.Rd c3f637a0a5930223093b8ec3ad839b87 *man/Sn.Rd a6290198f8565ffb5c386e0b2905f29f *man/adjOutlyingness.Rd 360fd8e7b1c72e10c2925ebb8d8f9a0c *man/adjbox.Rd e5ab4ece8120c2eec58539399edfac64 *man/adjboxStats.Rd 01186d1727fe4e272216d4c47c6ec907 *man/aircraft.Rd adc69c47692a6374530016dd06284aab *man/airmay.Rd 64c8c4a203a83f12f93a4799bc3fbfde *man/alcohol.Rd 7ea04b818be06098d3d5695068c3aa69 *man/ambientNOxCH.Rd 66453a19f77de93edbc0bbe04c5cf097 *man/anova.glmrob.Rd cbd3b489c97a7f8a2af5f6f5d061da76 *man/anova.lmrob.Rd ee08a50d99ae4fded27606eec359ea0b *man/biomassTill.Rd 28a095234e74bb3a25b9d5b97245e6d0 *man/bushfire.Rd 162c39b3fe90cadbf7ef351522e3a282 *man/carrots.Rd 072f4e54832ae70552419daa0539f165 *man/chgDefaults-methods.Rd 2aa4b0be1d4af9f052af28d677c7cc08 *man/classPC.Rd 81bd9b7ce5691a0603f3cf4c7c6bd7eb *man/cloud.Rd f99a65e293aca936bae4d50b059fc857 *man/colMedians.Rd 1c06929304084ebd0f599ef857aa8714 *man/coleman.Rd 5e60414598e3a152c9894b47d1d29128 *man/condroz.Rd 6ebef31bebcc045b5484160facc4c439 *man/covComed.Rd d6040abc4784c9ed2698ea79185ab670 *man/covMcd.Rd 5cf99856fcd52c766bb062890465eab9 *man/covOGK.Rd 58a839ee1a490ad81e9f460aae800dd0 *man/covPlot.Rd 7a4c3b20b2d890cda14449a844ec1954 *man/cushny.Rd ffc71a09096c7ff759123bdb52811331 *man/delivery.Rd 951faf63d07a05ff53ca1d58ea8a65b3 *man/education.Rd 4c4b144e1b46f1587eb78d9131ee9454 *man/epilepsy.Rd 98afee28d8bfdbcccd61dd43e23bc1ef *man/estimethod.Rd 217b4a7355c2af12eeed03d3a1dbfaf1 *man/exAM.Rd 584abcc49b18120f1421ed11eecf0859 *man/foodstamp.Rd 5d06f4a85eae6ab0ec6eed5d0d3d9acf *man/fullRank.Rd 9e691245c9e0ba04ff72965a52165f89 *man/functionX-class.Rd cfa1f98cd587600bd8351e2342f7f2d3 *man/functionXal-class.Rd 84dcfeafa47c81c575ca84f161615663 *man/glmrob.Rd 93e97c9b0bc9b0fbfacb666b1dfba939 *man/glmrob.control.Rd 84768b70921a2bbc585b95b8c4ed16c3 *man/h.alpha.n.Rd 3c1636c9c383c9211383efa380286309 *man/hbk.Rd f7215a5e99b27a7c233a9165944ec9d3 *man/heart.Rd a95b1266c928eaec27f8a07005c7b922 *man/huberM.Rd 0e5741ffbae2c8ab72d6c806a9875a23 *man/kootenay.Rd b3904bd9a90c4a1744c7a10287729796 *man/lactic.Rd 34a6eb5be483a909147651b19b681a87 *man/lmrob..D..fit.Rd d8bfc631978504b52b8c960873201ee2 *man/lmrob..M..fit.Rd 1c18b310fb1c008792bd5e78b1ceda52 *man/lmrob.M.S.Rd 5af3b480d40e288cb4bad813d08a0089 *man/lmrob.Rd b1117eef380ac38b510ac3945f60e430 *man/lmrob.S.Rd 2a4cc24063f1744473724ff594019a60 *man/lmrob.control.Rd 823296c1001fb79b67740695fabf0577 *man/lmrob.fit.Rd 0ff3ff25d0c6407c0ca8a31c4135b827 *man/lmrob.lar.Rd 
d622e40aaa83ee46cd18eb6358781768 *man/los.Rd b67da02f6a6e8c88a465f9a6a1ab1933 *man/ltsPlot.Rd e9f2e9950c217f3a8f5e3419b9bfcfa3 *man/ltsReg.Rd 23a3ba4a5d60f2710c35524753f86551 *man/mc.Rd 7f90043269895933a5037812df80e98e *man/milk.Rd 35a4e8718b06d2eacda6e04a62b0a7dd *man/nlrob-algos.Rd 4f524b3a96b0e8a77c9ff7386b446f35 *man/nlrob.Rd 946021063d04345859980aa2415aa698 *man/nlrob.control.Rd 637c345a708230a67f888f30b112bb33 *man/outlierStats.Rd 73ac7b614b68fde4b56425dcd14c3a7e *man/pension.Rd 8cd4b8129699cba4317d4811937a9556 *man/phosphor.Rd 59bf70cca397d8ccaab9963f939462db *man/pilot.Rd 2cad8d62257bac1749e3c2f84871b2d5 *man/plot-methods.Rd 526daa81051c7dc411213912c5177f93 *man/plot.lmrob.Rd 9d897884dcdf746556c3d17c6f38dd9d *man/possumDiv.Rd 666a83e065be3fa28732922ac7bd0e2b *man/predict.glmrob.Rd b029d6d4e196a3617062e86120d20703 *man/predict.lmrob.Rd e8b57d5568e0ea910048d168849a0a8c *man/print.lmrob.Rd ec23a0d599e8694219f01f037dfa2826 *man/psiFindc.Rd df4376b1f8f500c99501d230a8ce225f *man/psiFunc.Rd 525bd5bf9d6fcfbfba6aef15f78ee9dc *man/psi_func-class.Rd b51db5a87f42e8420d22fdc796aefce4 *man/pulpfiber.Rd b162f832ac1b1721feee4d0eda5e1b5b *man/r6pack.Rd 5f49f210129f8b69df88605fea9102de *man/radarImage.Rd b3767d3ecc4211f6d8a11e6e89594e0f *man/rankMM.Rd fa74cc0d6c0aedc3cf0ba3965253b17c *man/residuals.glmrob.Rd 8d1fe97485ffc542a81a1ddf35001741 *man/robustbase-internal.Rd 1ffeeeda93ef6545e9e4f3e5ea7c3040 *man/rrcov.control.Rd 362bbf4ab0688eeff8703da6f0d8e2e7 *man/salinity.Rd 1fec7004e56dfaeb1b8db9ec7a0cf7c0 *man/scaleTau2.Rd f23358f6a5f3ba91ad590a1f97398c7c *man/sigma.Rd edb9e05b5fe937a216f49b05858768e8 *man/smoothWgt.Rd 47ec76a4330aaba272533c17baacff82 *man/splitFrame.Rd 2fb66f97bca59867ac54b8be9c7bc214 *man/starsCYG.Rd 4526219d6f49e21ae4e58ca17be1891e *man/steamUse.Rd d7189923c3153490b9abb2d43a52e440 *man/summarizeRobWeights.Rd be2fbf543226e5b43fcd4a3e1ffe8f9f *man/summary.glmrob.Rd ae03d2d8c3d04a3f4e6ce0aa9c0b3cf7 *man/summary.lmrob.Rd 8219765216d02b84cdbece813ef82fe1 *man/summary.lts.Rd 48c1775d93b2517b44554f48b0090c54 *man/summary.mcd.Rd 047c1d860a5d9d936abc2fb9de819554 *man/summary.nlrob.Rd e8d1c797428107b3174396b0d75b40b3 *man/telef.Rd 61eed38c560d7fca1d5a9127b8baf70e *man/tolEllipsePlot.Rd 2cf58f13cbf2bf1324c6d7aa3e410af5 *man/toxicity.Rd 426909afe46975b17f2409fb50e17f41 *man/tukeyPsi1.Rd ed7afcd97d343cc453602f724b0a0df6 *man/vaso.Rd 64cf61e38da1d48c16c348cd89a5df83 *man/wagnerGrowth.Rd 971d8b3f3dca2ffec5980cb423226f2f *man/weights.lmrob.Rd 281444c1e74efb2422bc9b31c6d46dca *man/wgt.himedian.Rd d85c77f6ee7bfc5f5d6fc78ef947f7e3 *man/wood.Rd 4fc726bc844a65fd1148f249a8be4b3f *po/R-de.po 8c146ac6ceea57474e788be58e1b1268 *po/R-robustbase.pot 7f1bf272d5c2a3081a2da8cb88fd65ab *po/de.po e0bf6b14177155339da0fad0eb818064 *po/robustbase.pot b5ec8eb59d50008ef39073a7446433e1 *po/update-me.sh 6670f8885ea9588ef2905cb7b368a500 *src/Makevars 174d300e4520673a0114f4d6eeba98a5 *src/R-rng4ftn.c 37cbb87bdffad7231d34bdec470100b2 *src/eigen.f f027a8f63fd9aadceeb42354e926590f *src/init.c 5ae62783a6300dfaeff6fbafc45843b9 *src/lmrob.c a0ec4bda77a7f2031b2947dcb7d02b39 *src/mc.c ca3f9ca757c04ac615bff592c0f7ac25 *src/monitor.c 4a412a7e96341e9429a76dd2ffa8cea9 *src/qn_sn.c 6d2330779d4d08c3a60aefaeeed9e942 *src/rf-common.f 8e8d600eec1544c97dff077bcf1f5bbd *src/rffastmcd.f c142a095ec08ea082ea84133dfa39ca2 *src/rfltsreg.f 3bda168ffa6d97a853c1a9458466c030 *src/rllarsbi.f 93f5113f34542949020302b4d70e9c14 *src/rob-utils.c c407492c61e48893a5234823e3747d25 *src/robustbase.h 364118eacaee8ff4915c5f50097ad460 *src/rowMedians.c 
bc28fcadc56c4269604e622f460a42a1 *src/rowMedians_TYPE-template.h d2a3478b9dbb06d56cbf729dc0fe9681 *src/templates-types.h a3a2fe76cc1fbb676f92d2da764fcaa1 *src/templates-types_undef.h 178577cceffdc422ed73a07d4f89d085 *src/wgt_himed.c 0d3d1bebd878a77ed6d2e17a34b3a265 *src/wgt_himed_templ.h 5d18a48b53c3942cd8ba54b71aa86d5c *tests/LTS-specials.R 53111611c3e4dbfe6fdd55f89db16ba0 *tests/MCD-specials.R d0f66667fb798a04e30148e6c355e8cd *tests/MCD-specials.Rout.save 9695a33ca8982b07a63265750c1ea7ba *tests/MT-tst.R b5cf1ea84e3aa394e45c2a5a209b0905 *tests/NAcoef.R d00dcbac5e56a1b43065ca9a3633a8b6 *tests/NAcoef.Rout.save 2c7b55a37d3d91bb7ac967daa654c0cb *tests/OGK-ex.R 2061968598e36f8ef9af0f6b3afea782 *tests/OGK-ex.Rout.save 95abceee1da97f2efc59a83579d3fd0e *tests/Qn-Sn-plots.R ee5551e06a97b2fa119de6e770e5e3a8 *tests/Rsquared.R 58387af397fbfad9e6585c9fe48a680b *tests/Rsquared.Rout.save 71ce3f6b1a05667b2f4222d6f23a2a1d *tests/binom-ni-small.R 06ecf1e2d46b910a4aa0d648c48ffffd *tests/binom-ni-small.Rout.save f6db581dcd9c17bbe848995b33c58ebe *tests/binom-no-x.R 8b6f16685db3b472f480f71639dcb552 *tests/comedian-tst.R 4fdc17e44096a893d3f2536213577153 *tests/exact-fit-categorical.R b2272fd2825490cf7ddeb2aa14f6940c *tests/glmrob-1.R 860395b194cbcf95736cf42be73f53f8 *tests/glmrob-specials.R 00e92a1b77c27acc29837e01d5dc3ef3 *tests/huber-etc.R 509afaffe676dba8daa7acf6d87423da *tests/huber-etc.Rout.save 3ad3b0de91401054067559a4c852b8ca *tests/large-values.R dd1939914e8b52bb0c781fe58095bc6f *tests/lmrob-data.R 84d45d449493aa949dd2d5c9ae0e64a8 *tests/lmrob-ex12.R 6caa9c579fedb116e9345465d8bc21e3 *tests/lmrob-methods.R ba916df3922fe2ede537197211d5ec5b *tests/lmrob-methods.Rout.save 014e3868bc91234440d8d4da80b4e394 *tests/lmrob-psifns.R 7b75cb19ec1680fab600d33e264b38e2 *tests/lmrob-psifns.Rout.save 0f1cbd30b2149227618ed2f7963cd4a0 *tests/m-s-estimator.R fc2e03a5567789ba9cf6dda9f2c17173 *tests/mc-etc.R adbe5546a8a06c4b23b3d029216e7077 *tests/mc-strict.R 3ad1362c7f97f69f4d17605e6a737d29 *tests/nlregrob-tst.R 98cc8750b25902ac138908d7a45a8fac *tests/nlrob-tst.R 065488497fd670ac470f821dc6ab10a2 *tests/poisson-ex.R a2ccec3324ea3363e9fc2dcbb0aeab9e *tests/psi-rho-etc.R 39e466468e228fad715bc725f30eb678 *tests/psi-rho-etc.Rout.save a9b4b4ba78cc408071e294b4969bf99b *tests/small-sample.R 76445671c74a4ba3fc5e6ff008d902b4 *tests/small-sample.Rout.save 52017cb52ed82c7653b3b50ff0c1a8fe *tests/subsample.R 91a7dbe2f427b417675bf21252fbd4ff *tests/tlts.R 2f36514f3f511d80bc2f08c296161193 *tests/tlts.Rout.save 75be342dc39166432ab40d1d73f1e1cf *tests/tmcd.R 49ffd56e4f0501dad12c3c0949ca0c14 *tests/weights.R a974a5bac33a1f4c825fd68756da3a03 *tests/weights.Rout.save c0c81b768815c0d6b240a0dadb065ea5 *tests/wgt-himed-xtra.R 8eca80b4303ef72bfaccc3ede681c0d6 *tests/wgt-himed.R 56f1be5e2e78bef6d128f3317fe64b9b *tests/wgt-himed.Rout.save c8992760bbc781798df7d3dee8839d56 *vignettes/fastMcd-kmini.Rnw ad9971a72d917743efbdb264b7057d97 *vignettes/lmrob_simulation.Rnw 5dc951a423ecf0cea599f84e69eb1055 *vignettes/psi_functions.Rnw f15d124e194c8c441ba8bc0833143243 *vignettes/robustbase.bib robustbase/inst/0000755000176200001440000000000014124272431013400 5ustar liggesusersrobustbase/inst/Copyrights0000644000176200001440000000243410353034413015455 0ustar liggesusersThe C code for Qn() and Sn(), src/qnsn.c is based on Fortran code that has been be available from http://www.agoras.ua.ac.be/ (Antwerp Group On Robust & Applied Statistics) (-> Programs -> Robust) by {at the time} Peter Rousseeuw (rousse@wins.uia.ac.be) Christophe Croux (croux@wins.uia.ac.be) 
Department of Mathematics and Computing
Universitaire Instelling Antwerpen
Universiteitsplein 1
B-2610 Wilrijk (Antwerp)
Belgium

and carries the note

    This file contains Fortran functions for two new robust estimators of
    scale denoted as Qn and Sn, described in Rousseeuw and Croux (1993).
    These estimators have a high breakdown point and a bounded influence
    function.  The implementation given here is very fast (running in
    O(n log n) time) and needs little storage space.

    Rousseeuw, P.J. and Croux, C. (1993) "Alternatives to the Median
    Absolute Deviation", Journal of the American Statistical Association,
    Vol. 88, 1273-1283.

    This software may be used and copied freely for scientific and/or
    non-commercial purposes, provided reference is made to the
    above-mentioned paper.

whereas Martin Maechler got explicit permission from P. Rousseeuw to license
it under the GNU Public Licence.
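A minimal usage sketch of the two scale estimators mentioned above (added for
illustration; not part of the original Copyrights file), assuming the
robustbase package is installed:

    library(robustbase)
    set.seed(7)
    x <- c(rnorm(95), rnorm(5, mean = 10))   # 5% gross outliers
    c(sd = sd(x), mad = mad(x), Sn = Sn(x), Qn = Qn(x))
    ## sd() is inflated by the outliers, while mad(), Sn() and Qn() remain
    ## close to 1, the scale of the clean majority of the sample.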
----------------------------------------------------------------------------
robustbase/inst/doc/0000755000176200001440000000000014124272532014147 5ustar liggesusersrobustbase/inst/doc/fastMcd-kmini.pdf0000644000176200001440000020614014124272533017334 0ustar liggesusers
[fastMcd-kmini.pdf: built PDF of the fastMcd-kmini vignette; binary PDF stream omitted]
robustbase/inst/doc/psi_functions.pdf0000644000176200001440000114612214124272534017536 0ustar liggesusers
[psi_functions.pdf: built PDF of the psi_functions vignette; binary PDF stream omitted]
)ּ>:i)nJQJ\uAI8,j Xo/j]H-XO<;]ZSg'ߟS}d&972M!KDŽ=O?t`}|Z˟Əέz{pJF2 AفO#)6@q|v >=|XLh^om-ǑAd[풾CXⅇa}zc)ʇwz}c(vh]RN&p[Ó9]`z||'3/=$15 Ĭk>pLd &g֖(A2$E hK@phq̈+;%I+)B} T> kU6Fq&䍳)ɬ|[7}7X<#O#١e|<:ui3!uQ5L(Azs2Bfd C34Ekxz.}2נ~mTllc}1yZ&-Zmc?'< ijI8\f]cKpQ3.!jC M.8@`%d-0x%ct$, 1|rڒ%f0Lq$fgJCG_p[Η]r,Mh3ɭe9Pm!dnAS ȲA, ` UംWgQZ`_j;Zw zd+K8QjMS z5 ]rsYq7EmO,rg-XQ`.`oxg⧢]$6/Rp[/qv>~6)6xI 5c!+%+״KV+%l4Sraa%59tyMM*(t/>NlS q\ʾT1 ij3u%6V2iq梹$4\WͲ:W%/ F ==]..1sI4ƦR#ţI8e ~.$I_^0?t~f[(j<*GitNϹ I>uf^sN on'ϓ5>Myܧ52`Axܭ1Nt 02Ԍw: ('z~nחtZMmC4V2*iႪgy:PyoS_UJ}Xiqj|?vV1鴪i7Ǽ3kz1tԱ|.m;S0TsNu:}BV:`@i@q0]@g=1Lyh?4q-YONSrOSn=:p 3-NGnF@GrX{Lw{˳i#XGm (د |]V/4|>e>R1J^@0VSosssF[BvYd3ۖ`4U؟,hQA:Xd2Uغd6zYbǻ٨ªf62!:lsUgB0Fw'aOBD55 }wFI͌K &ۦE).O ?ڒuda0ضЌF]ݱ/fF p؏aԆ0l-uCV݊mÜ݊Osx+=FsY]=,C;vf8Y[HhhO'ggB}:c/{fE{Llx@ZtIyqRrJĴJִF}3}=o@i8SAk۹K: W5pwu..4fh/\XNf]i纄pe89 7_pv =.Dϩ;@Sw:f8sꧥ.ԝ'9ug$sKs*GiӽN.ݭ9ulsn.5jh9uoF[..1i8p4#<ጶO i84]Bi8pgx>i46bzT2iQ\ F䀍2Gt }t4У4"ӈvYO.DDӈsȋ.1SG4<2HJ,>%69A' Lq)>wNEJtķtO#fbONy$WZ^~wjje_uĕu9PGYݢ<8Ḱk(>qOGJ1C25OtU)%EAy]{%/ةƖ{`p'# ̃:3#6).AmYxwY:Fp|=޶6w3"6;rɟ|gl7)be󟎬-3 vx2߳2mJP肛%yF{*D:jK6k'R%0va;7ޓ'z ]bOn}O$]t {R2פֿ.0Hk;yOGKd柸C=tyOG)s"N$xN>yLU.1H 2C]bӑ^w\9X3Hddz"!t瞒2JY=ʴ4^e83ge82pe{FWS*X.4t /LSJ)7ϔr`J3eZ$esSr[Z$eKlrӽ)kCrIvk't)Y/SB_'u SN#1K.PODmm~(/p[F f9ݞH˘uwsX֩DzfY7%=Y_vOMǙShxJpt\SZicI=3ԗ29z|yW -`3yT M%J<[˜b%znO=Kx]H iC5SyJ2p2%"wvNT|殄24r3l3M]ݷ̫)/-43'3ervۮ)ۻKx]eidgS{$\{%ϔ^9딿wSq7NS|iOU#s&Β[ɜ>x:Kx:Kx:K,tL<%L<%L<%Z>$<$2Og3Ogɳj<3t!a,!)=%=%=%L>%L>$Wڛ~Y,_L>$7OgI8ffΒ٧%LYIR}:K}:KM6p]%L@%N I*5w[%IU'ھUҶυs&Β{8 p]eX,(*;1$'1us-z[*2܏_Y$I'l_ Ir@}do&N $-KBqɭ!CqER9Iޛ\RT6$m8<"VZN,i!I"`~هik"ͪ,SJδJv{{slj@;ۦtJ]Hs?G>:r*4sUUxW=z~ToծcJ'U乯j@U_mw޴W-yԞc_i{ʦ~-禮©j^(Ϧ }kyKoYro*PI^Ԗ^G|]mY.)0{V t dMpKU (p9jUe+igP뾵\PZ I[i's\?u몫P_uU \~?0풶l>)%tIݛ^U (pi}VMK>N۹j>4MpQJ[%i'M`ynVM4:-5>VtjAɢ ב|c}zj&PP vkzΫ&\ևcZ(TW7:WEKvɵ>ziz@;ן<'j\zmY$imܫPo۹1LkT侯ieoy7>U༺Jچi9c+}_޵M@ߪ-tSV_56侯㴤kSJz65n%.{SVt9 ʱSW5ǭmj[ӯ)K>UUo~j@I\$*WeւxU c-o-/ ftkX`1jkZP_~Xc-pί 0x6kA㪬A\5 X mZPs~_"Bj~]6XPX lWEeׁ ΜqzZPx~UPx~ZpЯ @EUkC:P_ym.ւ[˯ZPz~U_)WE׷y6kAU~kX *ϧusN>Lb-2]o-o?jm<W596k{n\זX\*\yB:LQ~X]^ז#~ ׇZ_m.ւU @U @vb-ȃZ~m.ւU @u>W?֪z! zUQ~~UQ~~kX W A%nBٞO AaHg5!@1qQKO/4]t| 6O>7KaN_|ZhOן*O ǚ^(X>d IF p{W ,WtUڙLWI2_ey-eKUa]~5k|W,hT٩ן|,4p4̿ݿ&8}ǯ۩P%,ծ1跼> ?~EHSdyjXY|v߭׸Ě4g 1q :QU>ץh{Bt2~?$[ѠQr0ZV=kN%>*f^AyM?R]|>yR ē{D'eCר0+'eEHeO*qQUѐISee,Z:U^4 ݋&e/h&`!R@4)]&TB IYChRV0ѤLE *IT q\7RG.PU4b^JI%Y^AJliRVTiRVx +SѤIYV&e.mtTB*ZIYL7<)+c<_X M)DCy^2HJc4ѠX ɇՈ(eu"(AJH{ԛ(ŚļRIW11 ~Ěh/D -ߔ(+Up&D@wCXRTM2!V>Z'm\vœԤ'R''e O*5 TB6~d8)}0ŒhHV'$(1HJq (YI%E`OJQ%QR eFn"J #DIV.GI%+rB >rGvIҠ!I P7vAR=Y@RF t+@R Bh(C\ARo& DJPsJh(b#$IRv@AR t$I*Ȏ<+6 _O$)d]Bh,luЀ١'TBx̑J`K  *TBA"~9q p 'ąY)"xcr K;28đ:Z2Ӊ[?2q̑2֒ VHI,V=Z'ԉPWnq $9R%HJ8bF n#Uj)Ń"E ҵPJ)FN:jDBk;E =ycAOĕL)h=Bƒ RقHQh)R҈Et%i0"u@"VHrxbk0!u$~Yq|2w ѣBl!ѣ#"Q dG1/Q'?k࣠㣠 oE|Y]%|o GaD¶F]\o#zvqNE®= I/b=Ezd}YѣRReG%_V$= *pRG!5EW3vZQaxxPbW,3< [ ӣB($EzTjN7}%MU%N%QؗRtbH($q:=J(ڳiGa[mHr.(^ =*#| > XGey>*E|,,nRT(ԏ*?k~%|T}68> ~=5K(T+Xl9U'|r$?~?ӣT=e{УAuG~X zT:=*(|GLGp~%xVuɈ>QfuU-w[Q/!TQϡ0QOy[QO*쨚JP;HiAҏ(E;gG#g_QjSN R,k.:_?Ԧ %xG!D= F﹢#nѣ,"Q׭@G]eU5Ga`&Ѣ 56BLǟ0ڂK+󎏺Ɏy |MGhmďHQ"E~>ZGݬ(DGGeeQuyQ(=&xQw@f3>  E_}&(TGgE> t> !HB/vN.ۈ@נGW+Rԅu(4W#q|uQOsY'KUHBw)L||3 Å?-(KGVP˼> |vG ⁉Nk~ZG!7> ?|ZQpB+}cף"69MQJK(aG29> vN|G-QeG(E> ࣪W ~TXGYa 5^>>QJ(S`NSHvzP$|P$|TG0-z*NQrzapA$G.ȓQv5ӣ5G>Qp|s1sz߂(Ƿ.z<<:=>5`QGYi_B!_jGȉ?> oĄ2[%|YGʉ-> f⣬tSGD|6GAG> ^쎏BJS.ᣚG> \.w2SG Q0ӊ$z̴"/ 3xMGJ+I|㺍Q $|O EzTSR`K1OB5*C6QQ zMRruUR+dQ + B-BUa|? }hW$)? 
YEu;%5QGKp?*juEDI#B*!(@( keETj[pI~e^Sk55L  R|H2 a=H%TbdL+w/ >SvtV~RaE"A$( eF RY)d35%^LM", R))D"IOuN R>GM- RI_$H$A$HͽEܣHͽNeR&@j RP"?yG#(ڙ5l?jGq(e^SN3ޘ(C~ԘΏQc΋5GQcyq~@Nt,R lMQc=t|X3e*G2G@)%QT ~m?l !Av&TeqXIKoȍ,u2 R 3Av[ܩA*ldzlKftl @ʶb28h9)"2(2'Aғ eD$Ky(2 R ENy&H#P R E)MXDHbEdCx95PYk."A*!>I2A@:D2)SiULQ=r":@W e4N]aӄ̏2QFQ(9ď2]wOseQ"LďJzdD~qIO;řC '\"2Qvv!`R-::@M/j0=E$)EG=Ax9"<ʸG۾(sP@vNa<)+@dG FQm?I2z(5:E~m]ʂV?7G/b(=39*!7E)9KE237UeWW̕Tgryfw"9*]Iu%E)TD21Q^JJp̭Fpy]C)<䜸Gkz2 ;5 >>.SD2tg|2MX)L[r)i"fTBJ"<@)LS|PzghT*AR4*r4*ǿQ<;4*Vq 6E>[FY(R6*Z$[\E6*aFDlT*N j #5ʂ[[F4WPJq!5fJy'HoFR}كJvHQ6Š( F-lTRW`,HGlI^Cl2liPO.bDr~ e FAaX&~m2LQI_2! ųEB$!3jta:K5;$dF-:Lg ^ }83j0ee:K2%tL' Q|ޛNf,!3jg:K3%OTpIۿQ5-^Z$N2 }| L' Q>YB,@3jg:KkHČZ$tΒ{bF29Mgٚ.f,!3jmH:PŌZ$Β}-1 ̨EN&3jT? Q$-'3jm:K:NŌZ$ybF-M' QnYbnYpǪ̂YBf""3j<[ɌZtΒR1I݆QnYrmm:Ku!!3jcH}bF-Mgɳ̨YBo") %3jT!!3jdCr |4!!3jCrKN' Q$M'3jw:K6JŌZ$eSČZ$uS$HҦH}5IČZ$yZ$ukQ$m߫mɌZ$mSHH(jmHm5KȌZ$yo1AݾQ$mj mjQ oZQn Q$mT̨ErZQ̨E7-@̨ElZQ$m:PgɽbF 3I^gF-gi3f QԙQZ3I~:JHA$dF-lΌZ$mm'Q$-'3j<̨YBf")0uf" f"H̨EԙQ̨ERVșQM'3jU pf"yVMQ̨EWMQ3IQynVMQ$oM3jUisf"I&̨E򬚀3f g$3I}[Nf"I@3jܫ"̨YBf"o%ת:3jHҪ83jБ:K֟H:3j<(3jU3jܫ̨Y¬EW5QY1&Qj3I]UgF- R2Ό%dF-jΌZ$uUH83j<̨YBf")*f" 3j?a"yVeՙQ̨EOu 3jHҪ:3j<(3j"(%3jSHjrf"yVM̨I f"ɛ f" f"I(%3jܛ f,!3jbF-ʵ!1f"HZČZ$["f,)7%̨ElʪQ$mT̨Ero:Q>W@̨ErmT̨YBf"IzʦH򦬊H}ֶdF-ݛ f$3jM3j<3jHʦHꦬH6NŌZ$Ϧ5K-{SVɌeKIuf"65@̨E65@̨E7eŲER*f"I f"y6=@̨Y򜛲*f"ב*f,ǦHʦHڦ>P۵bFM1I1ɳmzQ$ozQF+gF-[̨ErojQ$ f"6ęYPMY3jl>VgF-̨EXH63cuf"|Ό%cuf"|ΌZ$ՙQd:3jl>VgF-ӶXH63cuf"|ΌZ$ŲEX5Kcuf"|Ό%ƌZibF-̨YR63buf,!3j.VgF-goXH63buf"\ΌZ$ՙQn.V1ӺXHXHV3buf"\ΌZ$ŲEX5Kbuf"\Ό%yu:3jl.VgF-̨EXŌZՙQds:3jl.VgF͒U3jl.VgF-̨EXŌZՙQds:3jl.V' Iƍ3#}xȤADgRQD9)aamX1hAXt5쎐JQ3"?qNN Ȝj2'2HL)ybG R!dy di3"W8CT!2dy. rp 3_ CLRBRCQ!C2CH+%l p[6X1EveOdB" Q]dL-<+\+>dLIB>ȓIlSwO&!A<$+x2V]D!d$?3O&͢XvAhދq1IX|MOF`'GM'Mʰ BUey9d? o4\&crE!M&ef'&|V?&cUogًS-R #{j '˥ &3*eIr#j a2VUM nBFM1ʥDURg;X]C>g Id𮺘 _lȒIOd HJH [$cE! 
geԛWHf*+ QP1kIYFAG3Hl,%g`ԛ $ gq*d|/`$PmGAEH&zyɌ6d,R̠^Ottezfe{Kte5d,V,+ N2Y2*KQPpd,UVKAU5r&3p&Js`2+͠IplB zX`1_dV;L I)`2V:XdZ9$x)@1 EnCHvLO0K} &.qXy2dXw۾'i2*^^4dGB6HY|^PƊj s2zXbRd\ʰ B_0~a Йs$ $3 L2jR(|QƊaQ$$$B2nP**p (cU0 EB.aAʰ1畻<s-Zy2 bΐ'c#x^a8/O3N h{`sHIwҏX .Xy q2v~dDk #g͋Zy2VxœIf5ƓTDē"'3*8OjVAL(coc{?^ӈ2нQAQQ;aKD[e0ۊ(c JW$ _`^fDePJA@& ]2Ȗegw1X^dJP^$ru eOٲrdPaE`E<O- cy-{,@ԧ (X2(Ձ(c Q53*TTk!2'd20Csy]'<\^("HHo:NO_D҇2(jH%"ʘOEB {#ev+2HDP9^+gX,  1`drt l,kePY2f=eȕb|20'ff+ =H%25BAʯ0( f,3o;Xv[E`g[\`Pˠ5RXⰨ1E%rDA*;"7' ,dzɘߴf`@X22@"$2V\mZ$ YM, 5920޺^Sd:9E`HđYܕB+25{%cˠB @R+zMkvFe hX_, [ Lc52\؋3WXXdtst $T+% B,1˘Z@M!E`H9A>7@%ij,DB`YbJGkefI.T1n#,i*#"LYfj2{,32-3>er2ch-3Feb9YfA'ˌqd1,2cF,3挃eƼr̘{2c;Xfsˌ2T8Wf&+eƪ`r9X&7ʌO\B:WfΕ+sej\b;WfΕ_X38Vf;ce.XS9Vff;2cOt7+3Vʌױ2cvǝ+3zqe6\18WfhΕsev\8Xfh9˄\)9WfhSΕWpeB+ Lhn .2V&TʄXP5+h`eBe L 72V&4ʄX+z`eB2MaedX8=V&CHpeX8V&;#Q`eX8YV&N_ce)α2Xqt81:Vf*+3NSʌseXqv8++3NΕ'nʌSseݹ2qw8;WfĕV Kse5¹2b\ap̰|8WfXGĕ seƹ2aq̰8Vf{+3lB ceeɱ2Xar̰b9VfX+3a {ceMͱ2\09VfX+> QeЩ2ÐTaltL#*3LNfM ӧ28T:TfX*3̰NZ sceױ2*Xa9v̰.;VfX+3Ԏl kseEܹ2j\0;Vf+36| ;ce+2]XRp:8Vfx&+3Ε /seĹ2Ûb\mq8Wfxm,3<; e‹`p4Y&QAUc,N@˄+2 L8-.@˄.2 L-.@ˌR[ behQq2ehpZ& jehpZ&\ 7oehp;[& se1lp^[& 'xeQlp[&?>2L8, Dpe<| 2a`BLD*X&,DDe""2}`LDqY=, pDHIe"2`^ [*/AxM@e"&2P LS&~)A@DQ0e" )2Lf LD<S&)S*gDU0e"F+2L LăT&b*qeDpZ@e"-2T T&*wD^@e"/2T L:U&B +ሁDXc`e"12X$V&B,+aPD8g`e"32Xu98Dj`e"H52Xv LV&f+aDxn`e<72XL V&+AǁDr`e"92\RV&+x`w`e";2\!LW&Bу+D{e"4>2>`LW+hDF@pe"k 2Y\>pL$(Xs+iTDDpe<"2tXHLnV&;+) 4̒'I'~ ݧHl :DLe"&2hdH LY&Rz,i?AԠ DPe"2h1-3e")2hH L$W9[&ү-)Z4`xWe",20lH*sZ&҂.kAD \e"M.3J瀙H L$`{AD_e"502>t0 Ld!]&2.37Dce"+22cwuLdW^&20/YLDgf"#435ꐙH, L$dS1  DlfƠZ3exDD.&}A`Om51!C(0IUU%1((؊1MM&Mom!d."ώM0zeD(\gkhċXf5x1H$!y1fgIK-ŋA1GbbP%^x1"ó$O;/J- ϒWS !E;JM&p1t8;sm(NQÏ1΋Aċh|9bPCRoxG߮zţo'\}Gc:_N$A$8 K& p1ۂ1 b,[ b b"^ *V )#^Ð:0/0 ` rlN_/0ƒn/0u.OAe{3/X] Xԙ1HhD)݇8RO#f Rup1c쐱0cp8f Jf43c`-a}*Cv$y"d %bc1Ve+d@ 녑8؜3cn3^LcgLZ1Hb,|âY%"uqae'ƌp18 6#l 2`3ũH(ac,׈ bcPsƱ1n@]8"! u:NQZ1HPbSQcPfO$QcpDʇ)ˏ95& ui5} h 8y<cYN0N#GCcPqTdAc`jF1S; 5W#ck0c+73=1Ң3cPeČt㠺X$XŌ 3cQb Jx1cpdԨ 3%&0'4ܚ@&1> ;&H6`@;Dc| vLnoc~Ń3}c&cpcb|L灏=1>&4DŽv8>fh)c#|Ї3t& 1z9>fhgc:?f(Ώ c:?f(Ώ 1Cv~Гc;>fh펏cqBp|8E8>f43#Dqqx̄Aft3>!c1ar|̰T9>fX3,^V1c1öas|L3x> kc1äavt~LX&3ΏN #1LaJu0:@fd 3̶] d2Êaiv̰F;@fX 3o d2aw̰;AfX 3N' d 2pe8Af; 3|"N~' ߊd 2?q8Af 3EN RdpMA&WA Wd @&i w[d%p@&\{ _dEp#:@&q_dc]pi?&ܞ hc}p?& Gmc™p?} c±p>>&ԁ 'vcp>&掏 zcpA&A d" 2*w/=nSaY:\ƮĦ +d7tQ&.S(P~)s|L"j>qfN2~ #M8@H% d$2  D&d(29hA kdd:29A ɂAF #9, AF"#ɱd DF"#HPd(2C"FF: ##rHBad02Ueadd02]0F&G(2fq4EF"#FȈad2#sxQ=8EF"#HȈfEF"#Hd;Rd")2{<#2bL&DF"# ȈfDF"# \Bd`!2BAڌȈBdq!2"^/EF<"#BL3Ȉ"Sdĕ)2"gh4E&!2l6EF"# xVd)2u;EFН"# O92{LN#d2 CF!kGȆCF"QȨP@d,(2Y] :DF"sdF2%,ȨdTX2j.U AF vAF DQ"(d DQ["Ȩ6dԟ2!Q(Ze@F QȨ}dT2c,Ȩ!d32lu7JBFm!Z ;BF"GQ( Rd )2zD"EFǑ"IUI6%DfqM3" ܄r'CF!#ʐ#͆)CF! jhbdc12-[&.GF[#ˑɍ_R0FFq#\̑Q@()gGF# чLdI2$\ΐ6HF$'蓓dtI2zgIFo$N'ȓdI2$},hdI2$w ɸI]I&߉$ {w9Z$br.GUTITITIfTIfTIfTIfTITIbrldIdIdIdIdIdIdIdJ2ˤJ2ˠJ2yR%bR%*ɘTITITIfTIƤJ2ˤJ2ŤJ2ŤJ2ˤJ2ˤJ2ˤJ2ˤJ23J2vǧ8*DOj=՝.5<1Ӫӟ;Ve26_ ܞ}K= ;;D=}-':vd.U7=zi/r#b՝7Y n7*:#tj؜~wfcUƴ٪7ٕk+WDlm^Y%1]IU6&*鐶.6M]Y,Ncz9)PCMǔ.Fá;6O_y[:Cw6 tx|+/: i?%ty6nE:W*}>M86n>0J;g𹳻zĥr:2]yQ}p/oӻbnC~=1gb3ַm4?vwo65ָ nhoQw]?l#> stream x˯-uqa؀G]6pKfgDdfD[mYd(")>lV޵E5}wfFfdĊX?}ʇ _|'/??C+Jy~g'Cy~轿ϿG맟mouR>}[i{> _hVo>s]Up:0G?իy6?vٷmk􏿄z\WYZOqz/lo^uus+܏y/-͏'63`ޟq&6OkOóoǸǟç_t1ǟ὜|1>~vwvյY—)|}C0Q[I|<_>v^y%85ot||\Uq5xX[f/Og?]NrWj+qV{Ϣ;' uBkz;cnovMɷڎ̞v͆ķoUʇf/81a:Y{[ۿ|[kz? 
^뫥bytΣWc}pXxxׇ;4o7k\LcxfϘ|Fm|k|+|5ߓ_ݯr>ǁA9lZW8˳+e/mjp݈3Qڞ.;㸮Bwms8<Vʧz魶?_^Uͧ[{^͔D>|~Ao5YQF*@{恪̡#%C>7`luK?IҶ([ Lrcv9;ssϞC}_|oGV?Kgx˘q@g*}l?s5׸G(%7>6 ?ƏsӟZߎ~ W|9Ͼrv?G;۾}|(uK/أfOu+rq7EGxw츍3]ӼOJ{D[;?aVȏidk~p{D5ۼ/cz|Jo qC!}94xms9+L.aCsrV59f뾶OLk+s|ۜ{՟bɨ >߶9&!aesNsGV9ok8,iR癡h&꺞mM eUݣփ6׍W0Jo}Ęme\&7~w.^^s\w/Bܮke/=6|s+'˺[Kv^9]=Xg^ƪŠ-&mL컖f^S=+J|y$X#>Y OCa>COm gŘӕ!:9V/Gu Ƙʧ= 1pt(ac4Y;GIks|2Wz :9Op=-c k\mǺ^x:[ߵryѐ{\k-;O/GȎEZ[?ut;2k-ϛ#3߼h{M2Vil.}ҥxc.wl{;3Q܍6Ŵhއmj(D+w>vqn"ws; ˾s-W1/)A]`DWzb01'_f&ڹ_/3Ϸw/?wv\?EqnF"8O_g/{ʦ}\~cm;_}O?;4&>xAh~}?o}q>ǹ gx;{?GA~9bVևFy ˏ[{ՊōkpĴM]>ΑoPi܀?//8cv)}l/v_C!H1}y7F~M/p}z/|T[VM^~<:f/~y_Ɏ޷g۱ӵZk(6 џp)r\e͕6B5*&ڶӟazia/j2l= xMSП#|kFR&o69WWUUG:>>zt@<˶] yZM?wV5r;/fjxu㭷Ւew{E2Wسz0m sNzuc"1{(Gdo_m_q^q?ouWkc/%q crkw|kuΥ5Og>_O)Ǟ_^}올>߫nc5^  z%x\ŪNc겮=l[&{Zz24_7/sҼ>^YWeWlE?gr[Tވ\Pۃ}Gwǟ2^?=Yz=z^o׷,~򞼞"m_q%?)Ͼ:+9~oǟj`v㎧|j\ WG~/?[Egq)^f,3N}|">}5RQ-!瓿|skǗo~٭ˏsGXѥ85{)VɷV½~oL(@ƇvL?Tl7?7~\ sa%He0`JǗ2kSp2@9ࢦR6iGU)XBA89}"<2mUc e>i^9}N3;c\8qݚ/>Wur{ ?*);-[_uNwUl~~yxbf{o\(sv5]7s 2R+_y`1դ45.pηs٬t==< [^0_6 ;t|7N=qIAl-;q62Svnw^QکRj@ّbX2W85"V'AO9|I柧oa=_4kH9 ΦJGT-T܀t|76Gje>/㊞ }۳CMǞ y'ǸKtɳ9lM|a7`*p˱'B \m z9:n6G}>t6ǡ*[+4Cǎ7J6gsgǹ]gv s*B38Uka+o<, znc|ޑO# 9~j 9 ]_㧩 LmyvtnCA<9G7u5V`nR搏=h5cpoqmہn19_`o<;{= y|u2摺~:912E[q8LuGT8 fEŅ52p_jh͜m*ԦRErΉ{蝯M gjSGiErzCE';u *Cb*'Fi fSᓞ.nm?t8|E-o4Ҵ 0n{ԫhT e:D|(ET= 42G-?-j(3l^X_p4UjN0:˚Jç~P=-STӳb:׋SVё?;Ttmq+&CR:M+iry8V*{*sXKS|J/2_P 3'/Gir8N)ѳ.YNE;QTn~&iw ͆Hk7S+RL yz.fw=5LD6Ado8|6`:g=ÛθaG%9lCT G^ߋA/Ϙ;M܁))_5 Upvlj<"} a4ۤD-pWWt׌4i>)[[STvǜ_:0Ό"Ցpf :h:|Q)2a[+s`o[!8lVٕ ~2 MlpIfUlʠr3DŽ7gvLtPa~cO>P9c[Jfl8Q< g̤4kbK S>66c m|lGpz|3jRwS>\:C6gmҹ넽>w|37yuS*GvtNmZ>ߩ5 ؤX{yz-}Oؤ.5jpG*]P8W`SbcGX[x4`ݻبMܠبjΉW;(_a}89\<v/pr4X7~0LS)wtsaZ"*6lYգؾG$R_1)NcýyV8v8F7vfJWTv78?yu\VOe ?7FWTbkfVt1f4]|N~ѺxYbK/Tl hreREPftVŏ.5<;pa0-9NnV*!C;l{b Y*FZu5R>;HŰZ9iJiO£`X-tV.zJii{s~oEWiV8;ϽyɉٟO9cpmۡc 0L"aҩ4Ϗ(yro>U!T!JG}S2˩tMeu>*}J+35A R ~gЁ}T|/}{z`]yg Lkc9' x/yi ip{:igtN=s?|:=:2{1ǜeΉ \'Sl >eJ<@S.PS 6YVp8 \ؔW0]*!4WBtY-><4Nsկ¬_t.e_;7؆e9 ݥˢa: ݥvzK.Kd\hc b|4K`ɺSjh newa ]6 œ{_׺1Mέqptv 8.=v'ekfM)oyhC*vxeV\nRmѫr+ nnIv[Rp֠VcmI\<ܒ1:Ʋ5r vg۸(]Ƶk0EG鲝;sʺ'HV_ eo0#c{W7}K^煘u7*.;p*yey<,\K\x1߻pp.Uzo(%ߛp.7/4,(P vJMk(3*W_<1NkYz;j9e81뒯v ]|6j&_i8G/MS[>[n.<O -k_?m rύ 闛c5ap*rP'q vNix$^9_ͩTQ>vo>, kK ;KSKÛc z-`Ƴ)Y^e.MsS~zU!giz"giz"giz\c3nviڃ9?}S4 ]{/~.J?J3@HD0J3ЀKؒ`80̀YA e4%0>;1J3AY3c*FF}394=Vp<12yJ3qfc ƐNE|YâA|6аSCxE,JkwkX3/b-qs+%CЗ;2/"Q| qgT.5=碥(/"{,1e_6xٯcMlX}3 ogc!ɨіHɩ\K$%C쌶lXz#/#2xRٰf;2;qݖPWK)CėQ v.f$*CKYUBhʡ0ӈzmD2Cg[tѵ FGF2`\tԎm#?}S{G 7 >vDgq#3BzKsÒ}?H%;3ZBTXKb_0 f# qxO; aUF3]4əf08g*#*a0\JWD3&G쓆 HgJ"rTL d4ǹd U30K3f 0UbNÎ3sr裙3fPcsb"3|}5b՗|$}Yb!3ag世ܧ_{?(4sjf%ˊy/X̝<;YՎDjQKT_yad_1#/e ./g/5"$ؘŠuJgt8^mɘcFmYrjȻc.&c2o/K^K˱2xK "ILC9呖Zņ] ZFzҮ%a_h%M쥑J,pXnq&EJ&vgm2%}d^;S-#퓩0uj(%{IMmBeB2LcA'fF2+3ێTLm3:g`erTe s,Vif6xXxי\SQi$O+LY9Pksܐ^)(4ScIonݑݰ;6%M!T2&G|b vf@Gv:(4rѕbSQi7캍O/i p3U}J1t(4SZ׷sIoǥLJe#g:^1\9(44R2RV~-KkLAY`*0 ^v; 4^R*aJ2U-M})0ef9]E6:J646Ͳ ) ,15%\BU&׷TRLZTb-,OPы/;kJD]vпkgVhD˳GK';ѐ^]c 0z SQi ipk)+dǼGI+ w7'{A7;(^"E *J0oJ0ED\1jŎ.2LfT\5O(aSzU)FvRE;Web׃UʢWebWo0f~8:4 ׎n A.HC !dpJCbW~顼\*/,F/EĖ+VP~Wοvy3LSRњ.T;]*mc${vLr;.Edl\cԀESw^͍J/E9^zERzaz)ʸK1 KYS:(BK٥\J.=" f#yg,()ECA4n`[s>_(PRf eČFY14Wq,=i[ڣ*p<S[yRFԩM-t5z;,c̠h2v% &20OޕX E >˫!o}s=b2zh>S>賨oOzK'1zr']4V6)\zΖ_v4NRZ4FBޕ/O,uZ=<-m6.g n68>Y JݥTd1Q:F*0CѤ~mSy#Th uإPd\r5T*R%_{m_G==_Z6Pn ӒT:Ke۝R@H\x7RfɥUMBMWFb8J0#SC2 U)ƽd*2:oF-8<Ѭ}I!*G?:/m(:/A-l:jAiC%[&Ew1h-NKhGBV%_vʭKR*N)ݿÔ[/A)נTVx5c?>lX:7^2u] u7{ߥhE 7;&S~Ovolxx7L\1m(Od}H૰~ܗb/9C6]]mzRSǔJ׹ = ѸUǩE-NVC=U';|JgJaQw\:LT*TmuB+NaNB(;}TН>S*s6w8گ*e8“QuԓjCq[S۴צ &^¡(rҙ\̳\wx`aUt{QŎqהБ; #~}=h658^S*3Ql^ӆP+:G'|䲙+æf^~h } hueu E,AA.SX)e {l{PSMh4n_uIQ7/v.g E.U>Sgt*5LPqJa<8g 
^4Ltjy; ҟN)r"|}Qw9L4f<`cЁKOԱ]!)}OKrBQW׎Mʮsj@/nv@};?w_z*Oihn/[>0KQRo")8>g*"ynq.KGto;Ra"΋+^?NÝvnĴ6)B7;'}l)򗞧m]:p-rB/w wĸoE:!E]Al"_)"rީ%*BpY KY|ѳMR(@"|(uPSYPJ Gug{Q ſRߧP^Ǯ46d<8I:y^vZN$=Oy {U;p#r?/Mp#GpR6AR|P2o.H#^;%)`ijs:'9l8Ÿ9IȨB<'nd7gwIuNݡ;it; *&fیO)zQ" fAiBkߥV&G"D5܎Dk8tnR.orFdvPp#j9wrH[WJ_% ]))ެ=$7wQOOAi3H!ƂogsZV4FCHì;aHq*Mz3'w`I@5r# cóꜹ@N;젘G E& p;}?ӖЌu; aE>S=fC9 x 2? xDvnURP^tf"LکT*÷:[?z5Hj*.Vq"瀾sE X?xE;68]ɚͅtE E#$eH]a(T4f9Kijh[Ns( mۋ3صõޫnE>R(URÝÉE:UR4[cu;x? Ot8y5ē{??~CZ"pITd3HhFYsu?IHAsY95'RʆC>CuZs _h"VNjB$G Hw6?I8x',7) }LA>R(JwtDNCrX&)zW8}ae{Ɔ 6cQÙ;vi"fʊ|PÆ;';N3.R*pZ8t>^(n_JslbfhȜ/&I;͛44aGd<Gh⾩ϮTnNR"+ItQ$a<#w )ŊP4b}N$ 4{"M<.)B9C E3voC [`['+έz&x!Rt7zNNJ8YDף")݊갧9bǣ 7)K+HrP46It+C2.z[iq'4q!?e%ߺ⤮f[r6R| U>ÿUlOc{v8ɏ>~v|ـ.+ fs(vO_r;[ݙm(\RVY+d'˷2_[Vo1hv[<| 1s|÷[}n3< nOۿ@O?+2v<1nLk-Km}-l;_u(oEEeG hfb66v:3AjiS63_y8YQ ㋣`,|<l XcTб\[q- Su"E XEnTIIncTDt%yض[Q1jIԱ &-fwpTJok0XTEconLS1pXԱ`h;Vl2c,*ncTjc,M.cT69|~`Dcs[Ա YeQ1JW)}Q c}u! "cMొ$rcv[1 jzȲ48M1QiJǺOT|sDcycDp!3^ŒlcT|}"Ud(cY:c*(n[5fEAeP;!Auc޶1Vc=~Gొ( HClYU51Lx0F!xZI2nzz%Iƞ =V.M|OMς+L)>b>\$SaVT(w< b#oa )5;Vs1w;҉D#`΂Ib:F,h\G+XaaJD+c1.uyeb]vƶWBM$z&A*80 T1yDG{YdRT <&V r\i04yLqOǸjXGLV 1M9yw$Ǹ7L15"5ylwILACNf etp`v1Ә1I1c4 {5rO)b1",@-="sldW7c4=$=C5-=(J1w{=vFMb_`i w;&%I,7 AC4 RӀ1s1U;M;&z`I0xju1Xfy Q/ 1lI; e[0I0xl?c;cTS#Ǹ(=<1[n:?oǐlƙcǸ4&køjnc\|fQ!cTQ0 *1GBa'$4ǎ([1oՏLC$*'}4I;ԧ=Cc~Jz81Vz iZf!]A1zEyu&PW8c1pe6qTk#%| :c2J15Æ1Tfa/ǐcznn{ 6^f~P6:(bb1#5=S=vDQDj f"fQ1f!71BqQV^mDUBqS nh`aFT1 =!|Ybbcp A#u+L˄P/aǸ.UAoc ;լct q >"XEѓ䆋:ForqA1T1.}ѭ 4u\Q@W)ˆh :FwYCFA1n*L1t9 %@R19t1Ṉ̀11GֽcI hX3rNKǸcctj3q޷ f8 &=ׂ86ɼҽɸ1jƸdpct&w47VQKqcčq44*[ÌΘqcrR 7VQ{W,ܘ\ۺb**inr#ܜ5D׫`w|ݧn,_UqckB7n;]<ÿۑ$O)'S1h2p4p6ml'p{*nXe9H1F7Y%TJR<3S樨tǸ5kdc ЙYqW&68maA1FzEXmgKn<xc=^vycܣ6-K.UZUlIuu1|/Ƹ?ü1Fhn3oFoc4*]xcܯsobQa䎏,Mo>mƍ{ θX7V1(qcB%XPY1F(i3r ãcT"XEJ S&'ıꖚLbc wD,.*sDchU%br"P**#S1~NjJYmFхv 9VQ+bnԙE+*ScDcE1t-Q8p8O*frͽMfA~VB1 7Fg]QՊ6=e>`c๗ 6?5G1*#/)]Fz -#ln b`c9VG^RXW T{}0m"'eF*,;#J8Ij:6Va!>%ژ2l8,u)ANق7ƘPS~%-7J`xcTL2o~pdޘVuvƨhoXn얀c ~V昉7v7ZSԄ)'L1*<(טaX*;1*JU\u)WB1*X78aQȢG?xc 7Ql*3 7F7P1h4o8ycT oК]cȂkƨ-%XI7oJ&E쵩U⽣#7FEԼ1pWQPXj5nqڵ2n+X'mJ&XӳpcUõ3nqcDbH1*տ$N%\WP0L1F&G 1qʼZvon1`2ƨ'XEvQq+cr3ƨ,Wh]xc6F^R쉘')i 7F3 !(h¼1;R%]cKP̿qJ]LCıZ:cTqƔ8F*9 Qds?ka9VQ_#cTLsYZv9FŤ+1Ǩ%Q2(%XΚ!(ẖ;N,cTvJnR(~zkm[T̓Zb1&71ǨgX0AocT@scL=rLy<>ܤ Vd)')6/&ƨqW5 QmcTǨ?%ҕyIk97FX+*vde$7FEyc '4J2xZ F31&e%ҴtdǨ7pF 8j3d71XZ. QQ1oA ƨȰ3o8bxcTԗ`cf 8FEcTtP1*[Qq"p>0ǨMQ0N1*A{lQ1L1*8ƘXZ1 &7FO1&.ycTTּ1*Z 8FE5HzQQ5a2I%Ջ7p3 8V1K1*ujZ5qccTT;1**mrfT 9FEUč()G)Q݊tP醊7FX5Ƙ)jVɩ&Ռoq^bcTT1*~1OS1*Q0N1*斉7V3N1*8k?_ƨ7vGo 9F5!ǨcbEyrX^dQ([R-U1*F 9F +!Ǩc(CȄzFQ1FK*X1 *qnX)ǨE3qIj"QQbA(x%:FEsj؛9F%(d^T&c t^NCǨ4tfERzY :o+rwAlQ1L1fP6uTx0uLyoǨh6} =(rBD~l>?5bcT4?k EÀcT24? kZ@?F[|LA.4LzMEf >FANdc>e EVcT6+t\ŕab =(Bb{P3"{1*{P3eLhH2lUbc™M78L;=dPI&)k*揭g=?g EcTD,2~AB g E#cEwcœuLÏ*&=(#r@g ElLh5AnANUny`M [ȨT+ͽV5el%eA2*yMWeOLOKQ=4=`?a E dJukك")SH [W|P;hp|M {P1ES( = \~J u0b;T:]yM&ك2Aw L {P5]_e٪Akʭ'+fE9A" ٪OyM4+Jv") Vawe?=< zL{P[ ׭5#  t9e]J:@cwo.Q7]@&\ ,q"]Q;@W1!@WBu E9o. [Kh$w'@Õ+n.6#3Qx9@&@#՜PqzB0FP 4 4|CĤ@FB TQd*v"S|n$pQ:@csJ$H" ԯ`$@x;`aoG&Pw {Ѐu;d ԕxq3z2 ϸI5 &P?$*WQ>faL O rգ|2pK$ u3H *j@Tmbu/3lnc$#`TŠ0@$fA{5J0p-̑ܫ C7hb;d&f2g t>3—fQCLa1 D@%1Xu Dň%AOmC!F@뭱D *Ao"U7Eċ0@!.!(80:@T# mC Z Cc(Pkx$Ah Uk7)ڈP d(]! v T7L Y| ClG0w[e&a$&R{e&$&HL ߻GbQ}mC0]0 'UK"S#*Kܛ#, d{R($, ]-CcbDb}Q1wGT ^X Q%hQT_JXh,K<+ap@w"`$@T:"$EZ؊Qj.kPj0H څ@l& c"#,qQZQQ0%1 :@T)"! c @:bN~b@T c+Uة);Ti*Ȩ ( P *9QP (S[!D 2tB U#$3n@ D *DAD *5D&@TL "U\e D'! 
tC0` @T4$ 2"(S.#h2H0M چԠnVýDPD"qʈD4R)2ѰP; "XC p-ԧ@k ]7@T4BBM"CJD DL0TꦊDE@Td} D#!h>EUjnE3Foh$ `P-h/G@j"ЃW(DEw5eV*MOE}*lQw[Qv^a@VETAzPw,@] *} t?0@C*5 D$QT ݃!eGЎ;r`O,~~ tx)A#Q!@TTT 0*ni*PJYGQ %_ b@TTT :@` *AavTkH@JU``{ 0:QQS2"0P-xЁn%G;@7j;@4-{˥Rݦgpp+F\ *g.p an>@\Z:U =aP2k7SX תg,K++qY Fo7zIX .I}*aUMc`6JX WZ !CW#, ]@",Ɛ*A؝ m,н Xڪ@!·WE۫EAQ(~{U*j @·WEVEۋ".ЃUQ0|{(=(=(^oWEܫEAQժ0{]-]-]-Ex ^b:ЃWWr>=vRIzPS/5AqūEߞW2\)WWr|wɫE9[խ.y(բ\M?խDͫ[W5RS/5AqѫEqѫEqѫ[幛WZUZK/ZZZZ׽Jń==.ZZSZW{Ap9Tk_-k_-Wr=>-3Z <(  .~+sˍ ZO*.8A Ov@pVz*h'hQ Zd'hU'; @AbP*ȅ*S/ PЪ\Ov@Ve Zd(hUC]h RЪ\OIA?ܤUOf@VENU9iVz*UO4XAҞiV? ZVe2*XЪ({U'S hAr=A ZQM ZQ*\Ъ'[ pAb\Ъ'[UO@V>5=pA"g\OyAО:jVeĠU9AbbЪ'3 À J}2X*sˍ Z#VU8̀@J V> Z 02h Zd3hU̠E13hU'|2(fJ{ Zd4hQL Z5hUƓԠUiOFAПiPԠUٟ-x꧉ Zd$6hQ!ؠ[ lТ(sG nЭ7hQSGMpЭ'8(o-d$8V(HrТ'C AEif[e<Z@-d:ZQ*vЪ';U8iUiU-;hU9-AҞV?ZVe~XxzXU9̀I*S'5?hƓUiOf@V?AZVe *&J{n BҟL -B?!tBBvB( A}}}$(}-Dݾ@@>@~@ݾ@`)BJP"+BKP:Β!ε`-BKP!d;0B2LPx!nDk2)BLP8"^Є4!BL P[".ل6!BMP".9(BhNP:"S;)BNP:Ǔ"=)BOP"?)B P$E("1 I0eCr2"9B1HPF]$G(#3$  eH2R$@BL 8IPI(V$-I$ eL$IЪjA1IhQLZ[uUCnE(Ui+4JhUFXF Ju7;frWZnYB˳1Khy~f -8K1|-f -, `KK {r5L(&t ݯaB[0 KM(C&tAP¤ eaЄ!pB8 lLP?&N(ǥ ee҄2R4{ Pj&M(9&!IФ eh҄24hB~4 QMP&N("]&Ig ePmЄ26iBL4 M=M(À&‰p Eq҄6hB4 nNP@'Mh ȩ_#x2:YB,NPZʘd ݦI2vaB|0 OP'L(&tۃl eFA„2 aBiP&/$O('94 e*E2"qBBIH̶OPfR(s)yD e}D e D eD ANAAA&BnBBBBCCCDbDDTv"qBY"qBY"qBY"qBY#qBQ%#iBYH#iBYl#pBY#qBY#qBY#qBY#qBY$yBYB$yBYf$yBY$yBY$yBY$yBY$yBY%yBQ<%qBY_%pB.~ㄠ'`< A1`)T׿8!(ƹ' ^p&A/hBP&\Mr/+F 1FG6MiB̺1Nȍ1NㄠX[*'w8!'=Aߘ&T~ӄ}7M޽ʲf͵A[Έ""S'e5ULdYX~?s )1jR( -RR)sBbޢMRRAU2UpD(@ --VBJyaPhYPH?j`5BJDB v** s/` )ZV2(k*j'(YzZjsBw ͺkڠE )(wѠznRc(^!pBcoB mޢOHkR}L )Of`qQHA}R )$->ZM -9I!%`6BK/}NRh)RHPLYl**` )eRBJ gJxJ(d%酢FRR(l~(PB*3t@!+|PL:c@!%נ $L!%:ئ!L!% IBJ`fFZ6c2B jYsH!}`Rh$6K6!6J80IFNL!m)Y։)J/F}oBBt4nEj~6Qh;,FY(MQHI$B)($(3 )uL mFB mqBiǤBiǤf)#=M dS62>))q$eB(B[')qeBz5` mb mwIShjSh{eB h83M!}aGئ/I02 Ǥ(/vBWDqdQh (, (B .I1I Rh5 )4hHUHX1)4>-b Ǥ~@ !T+W)5$E kn! {'$jȤP=#Bø\LAxBc)4}%BQ*)4ݭ6嫂"fɤ0Shlmw1Sh 86ԚO)4eOSh5M1 Ի)4VP ?*4 FZ2T*4THkoL!u_Ma0؍ R3# (Ce RMqiSh_)Li ÔMaѦРZ9qeOh&OhpD{zB?4P T=qdO%xB U,sB3Ҫ380(eN%pBa0ǜ>cO%BǠx1(4BǠJZ(4?A ocZ( o|j *% 5PK&B-oMQ(A!u8V'ߌAj >*'< OH1'STPKjszB-1'" P.O!P.!}X(2$-zfWO P/m-U% 31'Ԓ~9P19 iK)8ZBɴ%L fbN%LuGR5P3m 5әڙPK-'iLPK 8Z#$ Ph0(9PB VRR; Ř':PxBJj=Vi< ʎ'dK;PxB! )uґ@C&k sBL"GZ" )M*Q(zD²G( 8 u]@!%ܠPxBc )E6X1Rل)d ;m -660` )-6%A i: RH wzB q|- A/B"v:"Οge!{i4jZh=mf:"}h!-xB/YZO :"xB@Hꀄc,9 ZvR! i<n%&T}ҤyI_4- }I\Z9TGSh0,i-bX׹ iOZ sBߊc +EqR'@-+M:BįOv4S m7?pTíl\!N&B(pQpTíAuW=&[[ϺB܁Ҕ?WVh[? Vc 3a-VHt` i7<Nu/:XkUgRMZ+Q7 WH {dMk+t-3چp TP[VHR+J67+<.BZA0[XDBoG]8MX:Bמ֞X+Ĭ:*^`D I%P*٪V٠ iv0z.H[Mr4X!aInuao0pԆ ?+ƌQAp1ΥTEp])tQuC ?Mqc.g). 
XL!ui?)&d)4ռzRƣ.Ph,w3IBcY)Ph/Php]фԯQoքԯτ&tFsEr0hBZP4!FQBSPo\k愴`'М#ԋ''ԓONh&''4ON%PK>9L>9|rB-z "Z ''ԓON%PONZo cf,{B`OhV% ʅ=Yݰ'4+ fĞа TEYg0Y1&4k5Ƅf=ǘЬ(dIhV, I(ŧHB)PEJ+P ]R %Y(r-+rQR"^@E(ED3B)3J)2PʕaR #gF|jG(8BJ6PJaR #pF(0B)PJ;P aR #F(0B)߇r?PQPz  [k!P:!P:$"P,#P5#P>3ČPG$PPO%[Y%P:bk&P:k&P:t'P:}(P:(7)P)PW*PʈPdEJVuEJWEJYJ#Z4JC[4Jc\4J!wJ^ !J_J`Ja4'Jc49J#d 4KJCeBi!Bi܌!B!B"5!6BiE !vBii!Bi!YBi!Bi !VBi !Bi!Bi?!Bic!VBi#(B驎"(B{#0B#^q3B&"0BiJ#0Bin#0Bi#F0Bi#0Bُ#8B#8B!#98BE#q8Bi#@B$@B$1 CB$a@B$T!Q22E(c%Q2zbE()Q2F(C.a<E(2Q2NF(#7f2F(;a2G?a2"F(cDa2jdFȳHQ2E(#MQ2E(QQ2>E(#VV2E(ZQ2E(_Q2E(ca2\fF(ga2F3la2F(pa2.F(#uf2tF(ya2F(~a<E(cQ2JE(ӆQ2E(SQ2E(ӏQ2!E(SQ2iE(ӘQ2H(S2H(ӡ2AjH(C2H(ê<G(q2G(q2]G(2H(c2H(22I(cő2zI(ɖ2I(SΑ2 I(ґ2QJ(Sס2mJ(ۡ2J(3ࡄ<&I(䑄2mI(E eb=PC y=Pf# e~>Pf# eߒP&# e?P&# e+@$l$Pv`BuLb ecB,lU% !P7ʎXBKȫ"B eD(l&ф"P[DhBYN{4 eՆ5,&ф#PDrhBY M(KF y I0l* &m&$PhBٮbM,ϩ )HB5FRR&s MH O@44!%'IUO@ -w+愖*1/'00pBJDhB š3@URdR(li/ʠ'$BBg)BوB獏BmӈB:DI(tx(t2)H!U4)$BBhF fB *IL` )s-ƢBRZ PKV(nDnHɢPK+gTdQHx(Ԓ;45(t](DQH$$B3A:\P(tQHIB >O8 m әX:n* ,-J/Ԃ5S'{,'O[ȣEb^%O[rL -0/Ufb^(Ixy %/4B31/4B31/$Lf`^h&慒ɚ[^&/4B3r0yyJ^h&f/4B3y" /4-B39r L ļrtBI ļL?ɟJ^h&fb^h&GNyH3 14C39zC % 14'Jbh&&fI-"/_~u__^/_|YϘ_VOoN.z1vpuW^_ICzc 0=Ix?IHI$$+$j7}}h'!}Y ~×_~j_~o,'_V#6G|o?Nr߿>u~}'[?[va73{z+\?P]~3ӫx}}׸JsJJx]׻}_|-l{Ͽ~o]󾯯z{=ů??sf:^W2-_|o5p?^wS |Z?_zaK_׫9G} mӺ׻}l?{eͯ>Կ!czTug~zw>owW=^߯W߾o׀1wɻ/_?~SFgq}Qg׃ 0Nܶzo~Sg*LZ^57\ִ9nNq>o~m_zgcw_{|~o={~鏷wFۺt; o_nuX6!Mƽ߀z!~'f4/ߞ:z&W.]ǑGrܙkv?ǯO۲?{+߯{/;w}7ћ??ޮU~usڗ_vƺ9_սuA:7H[O]3+/_QG{3.aD&endstream endobj 121 0 obj << /Filter /FlateDecode /Length 40163 >> stream xԽM-ugO ? y3sE~٦ KhV%(m)iZyTٲ6jPw̌Ȉ>?Z>G+OOGi]}>}'syyG/oWv׺-ގ>?eَ᫱,o/|ޗ%g}.7_ N z{O_?ۭ>!vo_ȟ}uE?d۹sguoN+|}>۰wo՛]n]igk,k~缺s=yXc{_X~k]8_ ޏery_ޛ@_MYяpoٷNוw䊝?1x淣eo?Ӡ}^h(~^}^tw5w|Wwe~O?OU~}-_ٗy/oE}٧DOޏk~Mu9_q>tYߏ|FwEG(u~0J=.`Ķks6"}xKwFK_FW{۽W=:[=)boC\+.>麱koro.8_C㺟o=s=/[\׽^ߞȴ$o#6mo݄޾='K, 3cWXkgj[7-xonW|t'7}?8ݖ>}vW$wC|gWXDosxKMi}Śz=׷_Oze5޽~8l'>4g~[yͳi>~UFQLgOc,Э7eC6y~ ?G86y~=Ի[Tr|:~lMbDѭyp۸m_tc{_=oݧշoFҮo~>-7-|ȿS[׷V?_=^ڧϣ=Ӆ_ӱ '=V؇NoYxV9!{%st|_+alpPO_ 8\ju+[@}ՎZt Ǻe9އ~_g}ykG'>:V~;w;?f['k#>8>}]φaS>?ѧ>W3Mi6^?!~DG'\*a M[W ~]nD)X_+1J=]Q?_f|マ5K2>~ݟ~C7KmN7lƟ||1_8>l|77?yߎ~|[P5>;W}?ܠieG:GOߌ/o5>q> syߟ1ރ6nviHsSu8?~{zm~ߊUޛׇNo?COo|;~8ԿPÏ6>Nߝ m?>Ѹlݟ~En|7Cw>O?Z˻E*=7X6ۇ_X?8_͏>l?ʿ~];?:]>᦮ Ⱦx֭S_k{7ʏi[rO٧?n/W_x|}%MOמpo|Ǐ~  x(0^]pV6]9&ۻPrSWK/{IYCJ7!,8n_}.!H?_7({RʂŲ~Prߺ!!l~U e}пscU(eq¿PZ ʉ }>/gEReARGK͛[o֠+XO>SÍ1(XH/ϿI4O-l|u}ohٟWjNG]P9*w_t7BT8}=pcOgOu&QPpP+:w@>l7tnCuR~J]po y1gνonl9xh~8w|o#cmtRi n.+TV.*k>{{jqŦʁQ91t&Qm ;]Wv0%V+G_@9qTeu5)F{^,ny_YפYg_/~ߕξf<*\xyZh q7+>F퀓#[omÙۏv_7G/(kʉJCOW㸥WdjdՃJP}"A]}vx~?~W(t%q>*l:pQNwL+N~hx]:7 lj(K^ ' 5 lx?NO\`藊.zժ=+':8]y| ^ʮl>7]?7[_iʞ=J䃸~ue=J7pzL\nw%ˁ oR>eu; :ܫyJ}J:+K6lٟBgM5=N_ u{yWiO}W3y~i,}¼5s9 ﳑͳ óqU:SгOtp?{ WcrWr-qQƁriqӑVNrco+>LO: 3NlwuS {^ Nnn҅}Z׎[7K}3_vȀWV/| "} i .k}SXq86 _.g=ܕI /6w$YW߰M1vj8u "CJ[hV~t|ZǞ84 &:)&=!aZSLk@_BcaN1Q_i-6}=lSX+q%ҫ\G3#E_{=}@OX8hqƯ40B5fפ]Ylp~7~W!38Nun630cAĺ&ֽllKiзSDŽ&F7)^97X貸nX nqazA+1{;=Dbզݧ$|wKwGeVkw64v0cɖVs&3F2noG QWy{6u&(;gac/X+Gڳw1ÉkyO++~i5ҸĂV.>GW_ܻl3LX]n70﷭z+85[uq}4RẠ5 zW l`]\]H~Z`~\r}3Qi?am:o/a-#te]8Z˧9iE2bt{ 3nRg~EH9hvin-V}]hƺg#O(^nvdЍ?/?t-O1۽'@˓`zPv;]1NL F=3lV-TGL~Mht!dm˹pzۛGct-Z>JdpPޕ'{]E7кy[w߬no~G4_ !F5;Օ KW?a_aW 5w]+ocpjWszv𻾲.npҕS{>)jʁ)PKMTIWѕsE脄3>~|Gagf]ЕB7GZFX!sqM f izJ|`=2Gh7zF.ɗP_{|Nhc :r8AtĆ~A栙֕WowQa_޳.2}5S1,S_R0|r-ՈKYdf Z8ti9ݕP1yHibvpo݊{ h]u.V?xۼa .p. 
Inʎ+l85n8}ho`_/W`P}ܤY;wخӐi?h;}pzij#*`pvekX+ʺp{Ҹ$p> Q⺽s6ޡAKYߏ žޘz۶s8`_:whp.݀q; ]XZ&cʆ?+ )/޵L nL]i!>~mLtVpHprхNzaQsoZw A {G`tnn]8%~Wn5>pj[Z6b`#ҭ-ѳ?S?3]bagllBX|V7Y>\ɅJW~7)8R= 0iΣi5.'3OԳ ]"P.&a\1w˱E<Ԃ VTvPlyVLs=^do[w6Rpr^¯otuwzA?>e(6"[?++yJa ,H?#~|'8E[^m!0#w{kϭCcҋ>C أ[퍶-d84qSG]S+s03+MFu {+K. v!ܻU?rJ4X޶tvqJ!Ţxqf7wGe@S;tK.XR8ryiAoi-45{i6z TwS/)^ҽj\Pԏ}vpeimͮfKۖHĞ{z]BNm+0w'vatסeft]y [~zLQ+J܏MfȮ~I[;v9c\Z~4]˴+iӎS7 P;MiozLX~^vj8F'x_2 8v.ep7mJlpc[Z\Q5w+W;^21ctz'Xtnof{>̪zE-hNo:ẈWCfX5;=cHQޙc`l1~Gy|P:|J5b힆㤿tae1Tp\Nsq_:/009~v6)8cˎi9ψǽkYN3+{N S?k0kΕW: JdSq:s=5m+S/ׇsna+ӲNu꥝3vivX:-vLD<1X*wecIlyxiG;&"t}ɿXrUK8}^َR]t,sm,?Kp_ W]X2;oKƒ]tҲ?W_ktZ~?֭Jo鴶e 1ǼFWps_+m:+x%ʴsXcyw lL?syAۼWW;#<o=*ұ'>APT[4zVsN;=sOvEǖҥxi sϝ)I3\ ;y ǼK/;H!lvtNqݴX{vM鴯7;kt26F"6?1MFr6"dG1bmhrXy[u ';]۫_;?6i tlɎlbs#s}# sW>i~Hi1M>7\ap¢r81={twGd.ӱ7}޿q:|np08&o}g:nȓOwL>6 Uߓ58>^g/ . nቹ/Mpc+_!oZޣ@[JSW7-'T+LGU ̟ntf i9bY7MX6-Y~O-ol򛖃+/l4}g; ݦPt 7_ v2p7MfW6zڕM_#5fi:[c7MlW7϶ v[7o4]`y;M}MŌZ,:nߴ\ձ;+˻+[/>#Tb}B ?p!?vXUQێhO #+\k8XOzMG,TݎZRw;c2+𚎨. ȬX/zM+x+mgXwcDKU XW.eX =bɺ)N,:huTcPHRB}C6KUx\~Bו|dۺ(-C2X/o|!Rו[ΐh>G v!֖aA|zpcg,3Xsa p B1AhrJv)c+qNq |:LGf( mt3 8L+M2 xW|#+Lp18Iw ݥ#+ڝA';*7e|_] Ef@pW/A]97զ5*@+ ]tC/.FLuW8A_:b)coG&~āwgNc֯~wzLGx{W1!g; 89>0KV}W.gD~D[ xǟq:MG@`naIf) md.(ǴSM2A92䈀&@9ɢ+kg;&J #+J2ٿRG9D2MlҥJO ]q+Ŵ\5ɝ&+/UBML1.(4rm둸p3${rO @Pi% uE9HpSQLLG]M9s1WZSWcZOYңJL2$.(ɴѦdi%tEk7I_]Qi%uEI<pSrY~#,0H(Ǵ=Ju ;).RqG\ |TV]WcZyW;s-3/j)ɯ+1Dpk$ vE9O3/s9lz% fzc8)2%Ha_)+J3lˮ(ʹ22v6=6vffg ٖN삳Lk}/J3>}f4LE 3L3ujei32b&33w63k4tt yMݕK#,JS0̄δb: 6I8e'#]S3mv2PQVֵyS^6yLNm)+J3OV8u0*+|S>:r+gN9fBg^;r+Ni1~+ϴ컲NG%_Ae>?=p袕cV](ʹjġר/S@z"]Pi1芲LA saJE]ÔE;;`G6Fگ[;M}©1"{FaԳb/c;v/no'!Elx>.7)rKtg>q㟊. /8创\Ţyʅsܬ+wg`s޿Vw8k]o1LqՈje}Pb瓺rCq:վiÅZV.64g+ݮ!k/GMY.Y;I@9M0;3:#hBL]:0u&E{&}l9]ᩜ4`ݹj=+U'}g/M} %'˛ %qZ",i0/ +/uZ4`l0hclζQv2_#Gf;5^ 7X*e&45G4nYRa`p`.]^[4޹l¾l:L56, w=d)4Q$Cnn |[4cF4\jqbU#[6maN`'0Nwm0Ƃs톆PM5 4`[6N}XaI85c)6]{rCgԟQ2 7S?t`prZ4cV ];Jpq\ҭڕS}p݅[sj[UsmAN ~#KdA`mezɺiF~5Kc[[v©+~P_'Jh/W.Q/ >^El>6w[&Pjhps濝 5ݙ`rh柔xW]dg;m=P@>x40 uKIXNV|tfaQ*BC&@)feߔn~ӇB)n v~Y MV8<N{+\:?Mhޟ ;|v)zRѠiPr1PaE%p{jVzhh>.:兢AHTTaEygt rTVqP?*S Z7Ъ.ZQBzA$:zIge W]-GCQJE6Vl>psBOC@,S4ʰ98U:+mBWCqO[ (lCDBr+ƭ@WC-k}(9V*t>#[9\Ey(>JY40RZ<J }'{vT>n:k(\ʺCPh6>8Tߚ2 Je?ӳPnX=1R*Z4g|mq?Y(gTnk5=KQ+֎P6Cm ?Kgbgϓ'RP/"~*g5=3gQS= ~*'_P矷'RֵڝJxi!}9ʶxʖN6y(wR+J8~(Q4ݶPbt(g4OC9ly<_aOa 0cuڴzZ=* m ; tnY N Q͵pyO^<1-6HK8=p׳}>VO9G.#-ߧn)l볱HU_gO%\%0Ǖ{nhab,_]OEB[=Ǘ $-\Rؗw^Ymp.Xkj-5[Wq.6hJjRоhy֔nA^!\k@ |(^e3E[VSvW |Sq|-5[8k.϶"CsY|&(x(/=g[8"gfl, ׳훅)lk;k–/_uʮjp<; χr<pDٓ2'-_˘pnePYh/\5EK#-DM/ulŸse{0[rbIv/$,P6ʳo e|A1K%|y Uo}:n T:wUGNs?_}$ߖ>6p/x3m]l)QK*뗥0{ѷ *kMr[Ufb>:}d>pokTroMmnoQIb1_<*1"*X`[X3%`ͥj&` @EGafr3BR3g ԄU+ `X`/^,"3A 0ȩ70& [bJ@[1e7K)y},0&6 hJ},n `B#֦*`LM԰`ݷD _D htA)n!`̝H% F [:]=]qHԟf;`$ XUVz,ڥ+6kkiH6 ¸c_wd@$k[^_LЁM0S' ƛagS6|¿Wr&jѿj_[E Na1/lJU/*- ABD"Ӗ _0 f\Y/x4:$ #-A'_xe.$ .]_xh*1'/ʆߐqMB5=;JL;iP_o%  7PPҿNMGҿ"ѳ t4C)upҿ.IWlȤ=_p' @"_i[P/x)!"_)"!` _)z&$&JWÊ|MT g"! 
g)jW,E~R,E|&$ULM2e"1HC|iG/td{Uڄy 5 Rkciӯ}V/{ [||,v%,.Wx*Wە O 2NJױ$*_GbuU&6_%뼒,+oW򿊔TNVˆ%>;a\ckQ5G;aa_"CU_ӇH _f_pZN% ۭ~yjӿkåQof1Z<ߌ:+aF`Fa^V*e./ӋC^C\fԥPh[Sf,v^/P/۔/z/)M:#TEm % k sIB0Kҳ8YHҁM:|m//ՙ B/ ZLP2l_:ese)lkd6/2/2L3G05ÿP}Km0뼳& rM2P+/q y j_ZkZ:_j=JQѿd_>(Ji#A3X<}IҽW/KQ9?qyrrl׵_ѿՐ/8+ӿH_V$ ?_IΜuQ̧J_pJ B,B,=d!"ܗgUr+$ *3Xb}:_wF^K|FڸK= b*=/ݷ Bm\ Bm\{@ҿPmB)m=_b\[v*5hU#AwW_rY}_\hA Kג/zybcLp-ӿPK)_L_RLj|(W+XcSHנm%kϽ;=De;Y/B5:rB)W_9%#H_Yp2l4 uSJY2-_,` 9{|qf_qg>N, k& F\:_,{%w6s4}J3Kһ &fd_TNG,_Tj wnn%JEE oKYNmPym**0'sFQ1 J3 P2Ztn7ÈP4.GErѿ曣z7Y/=5?+ѿ$T˳D_fib_T0kEE:k&cǗF5j3i4QPB%_{E48ű4$`qJ qoLkhK=_8љQT3?0`rbn}GK_%iMd!_/N:81&'߹C+k)/_ߑޓ`jىmWV]n_{ɩ{; VqUkF+װ;EčGU-/4 EGU.(xJwɟ+`T)E-$qhAuQh `,3FQ0Uu=u J/`Nu&⮪䆀Ѥլh e|%)*6DŽV Qcr]`%+>Fk 610 0mWF : `\k&%Kkd3>RX?XC)`1l(`c0T&֟G$We ƕT6 I&!jI0$EcvTYOlfXpR4.fX`}$g%`\@r "&XP.0כjX`AQ+W"aY7/$KNS%ht6fՋ7+Jeb{,&W ]_@w1p:_-{&fQ }}gʚ_c3&_ܰQYoX ݏ\/n& ح0L/ұ5+ :I+p _ޮ?1Eyw[_J _{x0+B0L /7&N/w l+@'W` )򿸋X!M\WF'7+r\qSј)iLi:_t\FeE>S˜EN=_MB8/%+{zI n>R1=3/_*Ȗ6D{5 $W 6ѿyI r_ŭg6k5י%` ]_i M޸ ӿ_O ~$M/k4 q\qߗ#A_$KSro7G_Ď$$t-r;J=+ }%+'6_6 5;C_th7+-ئ"/b]+XF ^)w:6_t 7+"/ dH8 y+b//: 8,U ߕ1d~i ~#D/z:+͊"w_z'rܯN#sӈiWYZ/}N;L\ɛE2~^#sD.;L h} &^EG h}גl0Lϴ8 :Aw)2=ص4ק~ s9e-> 4T)KWAsȢ* '+uPnfJ/:uM",k2E>Qe"O/b&Q ۝䯠\7P/%)]Dű'/X_q{t VQ ).Sƌ_tkfW A p ]j# |XaxfQ0AA/>0Y/*~,~'&q&q)#Eܯ@_sd, r[nJAد@x +cؿ%)JK]U/~ E+aE!6ERnC(/C(%U$QiWd\2Q9j_VrO=h"*M~15qh,1Cs=4oŁw,EtR,eU]_ou5~LMyb~Q1sLƁ`)sH (1>N/*>_JN/!zM>"W K_jE"鬴zXHNʡ; $g: N\CP,a:}&KtBT g$W: x> $z:*yJ ,O: tNK: rA^Yt!z濷z(fAI8ZctQ`A~Y9/:)=~r?F$"謴jv*Ί"C|F)!SotR{(ۗ-7:+0=bP勖P;G'YP⋇eDC9h aa):)vN[b JWΚୈP V|*GuTzXVA;H'I9laPN;H'U'Mx+WMV.;H'%jOeNY|*wMKDЃz>sމtvQ_ dzRLbʠm/ MABx-{Ei)u@;zM QBK1h"EZ+P ,t5wK1IaMuGڀn!YՓE'hPӿ6k>LB ( T}2 (m  1@6FXJ0 N4pMeO6ϑ\$)@T|;)@XD&겉;l KTAQ%'vyāc;0@H׏t;{w`.NCann>͓{OjMrnGH ݒ tg ) PA tɱI ]݋.!>0@ Z 2ceUft2K}`0@anc=40@W%(1@^lCR UkKSb-D*VaM+\t`\&t2gq+O:| Wr0 (FaPcxfPGbL^"*I$r-E!Sb;`Sb % CXb kݻAK (È4hnAQMPJ |  7eA@ *"[r D1Jv`c T m8Vb  9m8 K[M6hV0+r`h9@4 s1@q-2h<ǖ nR0:Dr"VA˵8@F4prF?OKWh %2R_ Xz q r\ (F2_p$ YzE&Ff04$aܕ$ (OЀ.4Dab( V /D*$*cZ$ALИTh$A37 ӱR&aי F2 @ CvOi`$ Fnz`hNP(5H@P4b' h:M_5H@,sR E='Q@0NG}FTHа<*EkϦ ;% 5FZelnj*( Q6ciDa>( ,FԱdkJ( ^EQ@e( ,KDaF2  T(E⍌W4 eª0(2dګP@pѲdV1d%>7]A@m=Ei(ubj%* bo% &|\?tрP%m%0 n+ ¾jM& J r0 $誔c€kli@,}Kq*< V^a2p@T8 j+P ђ&ȘPR]@4D Dn_slMd?3^e :Hx#ev7 'siB2+&yji@y[4 I@_$/PIL"U5"eUMeD0AqvAABNBBR(A@U@@U@@U١@@U@@Y 8@UC8@Uf8@U8@c*iQ *{Q *Q * *Q *Q Q*Q*Q * R *R ,R >R PR,bR,tR,R, KI˩&Z`ka;b_A@P|C B%A@޷H L XLjbbssZb$9@P˓ A1q 5B m&Ƶ H^Slcyud9@M$b #pchH Pk( ˿fIj*=¾DFAy`d4& Q@0|xBLBFAђ9Y@ wJ^Ӷ'j%Y@P4% ֣hNG.dA1A0 2HNP;D0 V#b۞< ^z$K2y@P1~ePU< j8NP30 ~A5A10 F^R[8&QHXÀs€`zk#2a@PNɥ=b!Q]Y@5AH, fИD^6 Y@NC1 Qfa!+6 {6, V1bؓY@X>% 5,, R"K+Y@Xh Hp,$ ]KQ#À"a@XݬP^k@0 (ME#Y€J24 Vt$ g€fj€BÕs€XGLײfrino€h0v9`@7n0 (D&.8E$0`"~*@C$\x0 4@&fIh$fzI IMW$PqDy_#hH(ِ͜"P<ڻ "А4Mh^dH93E!)jE# E Lh栛"\YD".֩D!4WQ4"̑P 4$M!  
4PfVH hHWiF24s^Z@CB hӊP&f4$- ȉ-Z@CB h詴2qLh -!4*-!4$4t6-!~^l -!-s$ae hHh)H[@CB hHhe" hг4$̮АZ@-"A@R`ȁ 28vo02DP28Dv02$ Lde"2O@Nle"(@;Ci9 de"Sȉ Lde"(@llN LV@NleB(@;i9 L,%22N3Le"(A@N,eB (ջDԀ2).Je"(G+$2Y|D*@n>@@2P&D8P&ҁ2]P`v9UR}yF#AH duAU"'ȉLe (({DPCAS%2UNc Lexϕ DPCA,T '2Je"((n3 eR\L DPCA$wH % 1 % )I(HIBAL RP$$ &$ %d$$ % )I(z RPd$P P$ % )I(HIBA=PP{zad\(eqK+V-%,+[ZAHbdYAH3Q'?왨ZT{*ԋ&E,R-*x[RA9"aD-B_ha_L+A,,+ 9ZAHbkYA/pK*V.ta*F- LGO"$tTcezAAh69#BAlZ tXqPw B {%!C(V z+.~JAHɦ0 $q( Qn)aXg;DBR BK B[!$D(7Cc7'YPOBA}h„+,/L(3|SWo/H( y(C( BAw[ b+AHH ր0!- $QJ$p-Qq, $UbIa(-DP\_D@"ķe4HBB&  $!WB@%@W!f(B&ǩ#!IS0D8j Augs>=>B?;k%~ AH(@ȱb $($ CB:\X|e AHv;W Vԗ;Yd A m!\B0Qb;?7 F0h  =!A^@H8BPD$!A}x>;E$B au(&D7'^ DDHS/7q㴐WJtPE5G'c BߘNPca9#:AHh* B0 ,o6:AD B ,9AH  5r|JAH$> D*h#DET̓`! BB/TS!? ,jMQAHypK*׌x Tm(a>?< \UhP hOJ ZlK#JAo"kQ+AAHE  Z ~ 2| BKPk+ Z}aFA|8 qrINfDkNZt LC C'OLkG« ª UukG« Buu)K aCa=O&@Pu~m9A||EB\NJ 2@q Br5 Wbvb|M@O9r2=JN_PV+1t :AHH B{nL8A^NPU # 8ma!Ÿ ZWqc@$/ nL 4ZԐpMԕ L!bC# ({4\#&Aߊb.#hD;BBFB(;deBk? IaůvU|"$XDV>sYbm*$0DкsЎ wKq4D$ +"A}JL_HHnڔ I+@I ꭹ5~u)#Aԋ%BE EA}MHڄk|6 ̿!C$N$ :Br%Huu1 xӉO'=}h !D$hqDx~_+F>ڦ][,J#BӖD#3O4/J#O4-Vf-T$0f桢I YPaR|A] ,_H#w ’-W0nz%6eyNqFA$# X5F&Fш!|,($ 9-$(WJ0\3RHq *nHn0r* LemQڄaX]OHD AX5VIZ~;AƬ-" @>%q$gkA:YO B@7RݒDU$AL3ND*C A/-N T}[r /! ANXB%A"U@l|3룁 _C :k bA\]@.0䶃 /  b Anrk@[Dh2v yf-8@nrC>ܠ4F 7L-*![ An[r;B޶6 5 e/ ([ABPD Ay3""(oXDFBPH[# Ay$!($&L@Pާ {9Ay' ( }U@P {]Ay?L (] x{s@y. (e?) ABPJHʞ Aٻ!"({@HeE=-"7FDP؈^)AC%(ecPRJJPgI >/*A+&%({Τe쁓tR'OJPu%(eV]R{RJPvaJ nN*A]R;UJPvJ nY1Au+&(we%d)A,%(eז}R\JЅe/BG_HPv Bၐ| $(A BY|!$( cnSAEFP>OpS!A$HH %#A~d$ᕕ =2G`FLH 6#A4BY#=#A~g$ȏᡑ ?`4䇐FHf O#A~(*%ȏMѪ ?~GVJZ `)A~`l% =6FJK r+A~n%ܭ ?V<@JX 8+Ak`%ŷ kB<2HGO  #Aa$#5y4 A!F(|@oHyy 0y<@<@`Sy:y O0i<>'g Aa!A,y O()'<-BX Ac!d,y* O 9&z@uo3i} 5yA Wb Y` >4 yA^@Wk0 Œց&y aKK恼Dy ba ]Xbց`t /anX恼|y /aˀR!恼y 8bHX%ցu bk2*"$v>FLբ~5@HX!!B.D!ADhU WZv "Ri $q*"I4!EeCnI!P0 AoC!PJE^B8(! $Qىya!.ry+.!y .rM,I⁐"W⁐ķԇŏ:w!CIT~ҁ"IU:P_ ׸ZRKBtnց@HԓBՂp $,āDE!%`cLD!I\E}hBۄL@=: HBxaK!$x $DA$y>21v<ZE䁐!$D&$V;U}pZL8DāP!T @HvۋāLLi)h<8NNāD*$"C_>w!)E'R<G䁐De-I<.@Ŗqu[@WTW!YR$W!Kd᪼0t[@`KG> 8ĥC>ИҞDKW 84ādc!]&I?B)@cҟ"SU8Vp1Hv& H֡4&Sꋅ1cL,Li[@c2B:P-R=' DhL)&E4x`ړB67i@CBpo@CBpHo@C6PD6?0 4$Z[ie"nh mi m2Iё 4$6Pp%@*6А@e#(@h@CBpq! 4$āZBH:АP*eS@CBL$9 mā8АD8Pвād+!c_o4$ā8АT8АTﺒ24$pbB hH ]g" j$ !!!!L( !!4$ՇJfL$ ! !h( *BАPRBАBADBP&XDBP&2CLde"#(ANle"#(]W"#rb#(A@FAܿFP&BW91IR"&(1ANe"&(1A DL3A3A dbb&(1A,u%b29d2=W"'ȉx50Aի[3D ҁ,. d9 2@o@G@o9]&r }@ .^uAuad HM  K3X> {@Yg 2i_ 9  LEՙdJV3dLeR\dJ:3@dJdJew 3@d=.3@Ne2zuy%/J91I]tᶀDP&/Jr"(:3+}u$9Lf_ޕd坉9L/J/J91tas@+9\Jʤ*t&s@̾+Y|uWs@ dseb(Ww%`dw!eb(q@TAln)9|gb(]7P&\Nej82YNG+9L91IqcLI}:[J_d_kTPe0ir@82z*91t2{nȉ9Lʩ&~_Ndv;@rss@rb(r:X20eb(2Y`Sr2)bP&Les@ Rr))5r@JӞ䀔$$9 %䀔ӞR 9 %`8 %)=p@LHr*`?FHI9FHvjPO~ݗח@vYc/IԪx1fq(1XZ۵<ˌU+)>n[S}X'EIWv-StoaBem| .&߱u_׋K^Ջmahr{S%ڑr՗>{qx45[^} _RM KX5zf+8ÏVr+#w=;Z [[Z]Zekm_ c-jy_7ݗYk'|ݭVKinWW:0a%^qڏ/6/N_M?kgE>֓<{g`O߼uc튅~kk6 Ns?߽z)9]˺_>᱕Ĺ1eǥ/Wm7]c|nD=Gޮgx#fu#/9m[qFvyQlǍ_sG.WK]/_?`:y7xq۩Ծi2 ҩ߮Kv^Lv򍆏|x/rïKu+R&a8ngD9拭Ο/ߕ!,%Py}7>{7qLSv:_K7]Lq^m#z87?oխq1| gS? e*r$1~ Ǵ+Jq浝V?C@ i<̷ ΄G {kTcnV /y[IEj_kE`eEZU8x6oHW!mg1v@_ק_⮢|o(߸4U? 
q7KPìe[nlȼ~%_ޔ鏌[xl4o֊tה۾skn|jfnolzyK٢LCղ?Xa4/<XH`^ jVi+GR]aog?/vL]ݭgjgdou=xhahw*>O= Z엨9\prAk;wouݫq.[ _Jt_v |2.֟1تw7V"SSKKDkTѵ]J_Aq8FSЩvWL GCc'8`4ǟgi<1G9}L}lգCPvR)|}ױ 9o=ʹC7>~7{GhuI\k|_թ|#'M7eȾx.t6NYOф;~3\4^7ֻ"X-z=sZ8NZ1[-]nvR e~.z0v]'Sp<>WP_-+ޭmޯn?y~[:vl']g=}c!ڮX2E^eo4ܱw}FvxnmsJc|XoA\|66Ϡ֮hن؊OY 7Po!/sw/khm;M}4nU믇S'9Ԛ 3nߏ' endstream endobj 122 0 obj << /Filter /FlateDecode /Length 5387 >> stream x\Iq5W]|z>_ bk#ٖlϸ7E jם@ Tn99$_xl(7_7n?Ǘke7ҏJZ9<9HȍTr:nc˃?ߊQG^owgY n8g#qf!齎N{3\{c>(mN [Hp= ?IG!|_:un睚Fhq%z?o…ci_ʋ/#rS}C1 ↟,0 ۭUp.H/&m#^'*)`eN2fvrϞof6G6Ù#{V lRt&iBHyRK؀0=\zylR9MIB&u?|ƎxEvBh8+>U{| -St:2,& 4>H\bq8s,2 |6spkߞa9iqH+ *oC2t/;5y3N8>*6Xp͌HIwwf3ȴǟWhI=LX8,-Q$G-IAo \Mk^@(qybl"gC%%[$| ]fJl*-'Q6i@ Y"#3t fL dHsJ+% uMCmF;_$fݮOv0J+7NIk 33_/E؈rcR+Y~y_w5 JoQy;BTG=}g?ß7GJwQP|} q⻨Hq0f/OU(\I@n `0I]pCl՝IT47^iXrp?XBY?exyN8w2~\$E #+W(G `C bV蒉lִm8 Gx^ؖU!sFt3Uh6X7 ƣ]T`-=J1 PbA _D2|BG,A$s sH燙*R^aa/e9;. ܃8_I5]gy r!< Q'f:AvU *yA \i"ǚ'Mk`r_Yr+_rX;"VKk9nxy,3~}^l sE$)g`^f<ڜ(ʔTfC7F[ÚTRu}iXሯzE_4b!}S-:'T^ҹ E%L*v( d9W4+ Lj1ZtwP "{}RsJȉF-L6uO:V:`Ъ.-袞 oYSO.DdN%4qxip6TNE_'╗C%Q $[a:U!Y'mZ7亳hm:(@VI-Wvwe ! ʛ0»`D* %e3 eF7c_A8הU:fYE:~VާGxsJ$a0>:mp}ٌnrgpn'V8=C!E.-h7y52<2Rk&M4Y6Tu#jMsSSG\&5C8JE:Hl|A2edN5 K؂ DFbIb._Lr-ŏ*DR)%ҺC,xja)Kn;%$Fs^OO}]PΑ8Xq.:;op4f֔͟& 6$1 ۠]Y(yk77hih6x HDxЗ?'Pf648cXsAM (}0dԙ4ALH.39pİaZID֯a6 'Q|>0uWmsR$~`&͝Yq?'~pY?1Z*rnl'2Fi8V@TĤT&-q]j]Wuŷ ẄʰAFsd> "l고,L&CjXUY 70m-uZ uRFE.(s'!R>ྮg1c4@>1AL8[YEp @ͬyã7Hcl(e+yf ࠝ,(ӲWkBelNb+g1IpYeQ}UIBE3c@AW]ډ0̈*ŒxQ@i_Լ%`[osFERm+lwA'atT1ӱGXYl/hTC~rbO2#NAaiwm1Lc)>7}}9w`$Tgldp|2H9Fا %Wwn!غoFkE`moC4zU0{Տ5|icG dw3ԙƇDDa]4$$Cn,؞ʏ1I3B-nm']vͦhkPhқt"z1zmHp_c7="qrQ A&X{QhLT3qS6!<9?ȽӭDx\rXF=^eעhj~I[vFGV1ї7Gx I@BB3;:R޳eWkXr/j)DV؏c40VhW5^ɱm)uI7pK}V.Rk$7:~M4|5+ҕ.?M([痔 !SgxzlnC }Obzڋ@1.w\Zcyw "% sR Q>,gJw}4Ƽz-XZ`3;R}"^$Z>BV( \-} 4,SGuB%E &?2ב_d|6ad:G.QoPEc%LWZ,״|{qQuU'l~[RF0MK$aieۭMRU_}d38/,@. (R4J\М>J%jO 2Ǚ#f&G:JFú=aЮ:S0;%`YM> stream xW T׶DlRQq5FEQQpZ@&eD>4 DfATDMHL&sL|Oi)m0_{Pν}%cz02uvsZc?u4X& ! bn>4{sJYݐ~v?AM_F.F&;n P1ajI3Ts5a>:_?T+B}41ѳ#"δrM&,Jr P-QkT;52Br Մ0 32)y0 "ދ}|5K7/_zDS7a2;3Y,gV0hf3`gf3c\I|ƞ̸2S(>LƊyw(ZDƌ endzh{<;ʿ35;k>Cz8CAw{W$_xepo,Xd~q$CTVqxVeF>_8D^{lְ8'Ez8=h9!S|cKu&=E2|Ydga6.IXQ)AzY5Zb.Zʥ qSbOڒwɀG DG]lA,=Th V'7GxAoŸKh#e4;;e)v^qz9`MvdLq ,*Ѣ %#.1bOܿ6?}|Ud.)r+aBE<8B/ 0^Phvx]p<1Sp/H&>$DX27:?q5md fWis-o2@7+<;Jl#Èi8es"2^=}-m8[M37u6uŋ#y}6N΄XG=gwR^d&]5Rҭ2q ̢Ñ|HiKQCVxgn"Xy}؀UjMㄕX NHoA%U**zKEIqžԴY~@}#SŊT}^8t2  jP!X \EeCr^#EM/`\>M](zd:҃pJP5/L'doK dWQ8c[FEx a M mF>aѻհFrƷ)3@Kx^uSCWn֋5g )ޮFXO rF8H B;s4n]R܋~ Uu  Z W+9Ɉn` 4*Bh~~t +—FwWLV~@el ?BeNZ(&p %_ K\4u%7QȘ9YF-DɌUJ=zWTF64/0w^3Cad2eOB.ih,7OBd?$NqqCGd 1~@}BwB?W@bo<'||zg)p/ygEv6*֥O D6B'Bi3uh>Z%&@6>UH޾upp|S[mZ6]+튃0N][^RQ=f".󨮳dc;eSV{Q "UhœdxbvDjĤGkcRnubnt ٫̀RNby/ ;U=fST^@ /eRK# ` UsF6hS ~)DElNC E:q5.o&D=^f&KJkmYeP1MR:bAwMz$6, ʫonh +>ZF#L6'pkzY);iRm ]<Z|Q4gԕ/5ӔC,3v5t Bwy'$a$ %CtI aqcoRA[g6~}j^O,uQ0:aZ^) UWF nCS 6Hz=W2_PMb!9sԐX0wk'uFCbkC'<~92bnJ+~/-8Hjfwb'[Fm ziRCcNҵ<߻Ԍ#-?ZN=hY? \ʁ]ڄZ1{P*GR+y2"+Üo4\lY 2a)3lpk6Lѥ87ׂog>tɖ?# 6o'y'^ez4wP6#&b 6yٰM4 ]@``/I8_*h-iKg vgڮMޥSE"HgҚ|0zD<2gN%JLy"M,cxRT2 *h_&4 %t 75zW\{SWoMoy#sl;-q-ʓdw}~4O!fڤ^F٫j_|v.>pFvmz:5%!]MB$\NKA揍􉠁 z}0dh'C\asmݎ#NjN%֬Bp%8x%lKפk{S pYP]N*BZϿtdƱEvoXL: s "3h׭u,R(O <eNa(&8QkvW@ςW꫋5Qr鐚(lر.aɺ4]p+F2r*ÄY`Kl^|mĬz{Jx6D-dȣZ -u3 [9cG|6# yEj +zLPF>A=wpا\SxhxVPJ9@lf(%X9[ɱDYe K%/>endstream endobj 124 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2975 >> stream xVypUq 4ģ{P@*]$ "DP dL&oz3L2! 
AG!E**[:pkĵjvv{{t !^b.w-=B1Ϙy=_aL´IwVx; NG@:L GQWjs/Y|#-IT%ErqnF*h} F/\є-}aN@^Pɚ7_+H/JUXR!׈/I.(deZD%UKT"iID%UIKqGJѮT޻l˳hs~bD[lb+C>PIM?d~;Mj|;2[.o/hS7h6CTPQ:IPʵ]GՇ _OuoBy/c,#o&DM0qؑ0!74axbujR,ˡJ$Ej2uM|hTuTRbq_8ByY!]WZJysw|9 ]Ef\˛| 8RBn?Qsh:mN \{-8i qo02=E@qNXkn! !{fƭ xѨo7XP(!1 J̬ 4Fߖ=7jy6/8_d^ܷ#FyGbsح6 qҢ9il= 5n0Lu}>:Lw/$\ đ?Ql xABz+nސ[ٹSq+`5brf*d~g ъƯN ФQzE" X=vۥwPx K`4R*bʹuy7P*MZ)J Y&i-嫿 %h&Ѽ}pƣp 9%ϖ%~k6/y5K}hxr "gEVG X[6Dˎ"ͻz8 f`虪O>C". +7mt{%cay#z` Q9j.mEo@/KB=rx)<{ÛјHm8 A{eB(̫!})ftRd1Y<2ɯuto;жi;E8/3,ƑM)]JOz<5mjK`4'C+vendstream endobj 125 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 8222 >> stream xy XSㄕTP{SjZyGdd"3D d%a@ (@NZkVկZ>i~> 眽Zz#P@x&ila~xM;־w&I ZM0@`kZ!(@8(8*tO86s̞d3}ڴ96v]mֺx6A=ãl ;ujDD׀)Ad;fggOeA6\>{<xHY[:vh<%V?!ó~:oMzD#mGu~G0F>Θ_:5$LnW :5lJ23b@!O]YEmsuP1t СS0@oWi9j(b@tOVjj`?ݨ !8jUoC\:>E9,0R%s y^">,.d=Z%q堆2RpwyaCֿET/<ġqhڄ![@_Ն5A? '}ɡ ٳ`>`az6E@WD>AQf6*"ڇ,^#OKii5nxC2Z4EIY>QnqukH34>ڡGZ 嚇BEx!0غmЏsޖQIhgE ʊd\%^]z҂K7/.;D| fQK6sPݦfhfrvF BPi8*"BX]>οօ_[+\HEmHmt;z4J/otc"WҠNi_z7H.}NoNfals2ve>}>@[X]Xlq?ב"_xH`+:sRNMIa#rg'ؗ'r"i"ІOS'fb`!TR)R&*zOUSd`O Ue:i+iW8v\s4)ihi|4#8MJ>CZvZAETw{ d8vv-pƛWT$lwyW>+Qg3Γ*G2Cg)F!VQ gKaIIXR'MdwX*],eJ4m5ե橌s 5U۔aQ5A2'>]9 XMBe)ydciwt!8yD/]!=%^L_INC2#rz7y^9WEWՔURKrt->tbbbMݡ[8Ev- LwV*c`8UmĬd =`OWi *Veejt~X,Y~ _bbl+Sl8@&1 (j[Dl dg=jD(]ZUPƈnbX#f.1QDRȍRPz?|DފiDIJ%"rՕ᥄mηEF](ؓ*eJ[XeHL#]X\wFX |Є.UJ|<^9hjHBk%,1UEV[N, d?lևqzY4V*M!x&$xߧ O~?ǯ8uhAJ.pʪ/`,q=}eSW~%DlNA6&vLSpW_j ߨ>ֹJE΁Vˢޮj~jr@L<_JA]H@ϿGC1lv$ 2׋26>DDk` bIʁnzFq=Θ!ANLk{tt,J}f#ı?R`|!#/:aWE)$u1*@H4MsG Oh58F}F Qlt61O$ _+*2UBVA$U(SԪ49?UvZ LdJct5=#QP/t8Kݘ (Gt&^ 5 h~`aEu"txL: }>#HXIITʓh[ }_xSpI%ҪU ;=CBd<Y7OC*4@NNCA({hK𛳧J,٢wtdv=!Z/fC\%FC(^Q][XVUe[Fp_+q t :PM}L"%oJ?,hQbUt"4Fp3 حm:sAeMGUjC!_d(-dg“ju]^WQ = $% 1,AEqE)0.]Z!iߘ^b< J"&:?(48#YʹֻjLY=g~$>6|q'~.,q3@~n׾Rtd_6/o-k?q@a.2YK'oyvIZ'좱-ϞFEC6(A櫇Y8k~1b>'=w|0]܂#J K%;ߖtI\IK$h"sBXehȮĿ&L}4i)nρ\Y*iJ"wp pGv>3[ Q/ay}',:å::aD]B }_5D,ʣtM-1q'u{,r?A=Fſذ7 X--sVM 1nOT(쀤G7.\Jl>G_X^}#_bcg zT>$<Л%l]hy48$XbїבQ!5tN`}oHW[HJiR fʊÎ.e$c^eŗ 8kuJ8piePTEEEl=tG%sYmXe``XX``eXmmee-^$sʑ]Q,sJ "4Mey* ! q#9wQ)%A H(oc%iХѠз2vtb*xqQUҭ}ЬjV( \jQ֑ѩYM9@ '-RiR2`L|.'|{(DZM\+U "=-D!J`T1,9Im&xb0yUȧ,Vו*5^'d\^@ehqy{*;"n\ΩƄ1J(HG<9LIK[_tI!uZoB]afM/v}?â)~Rû0ͶeW"Iwuү {+lG}/;,\Y'KQz0.F˱rI# MeuV&[zOGuT1] Q#_?VdԸp =sm_c_IJP)O6Cs缻}ʌF0ZE5hq){cVyaܿ2qȣ+P>["w.x6D)t=goUd֌SZ~.P=F`Z>&(78KvVTadHR%`/#2;,մ|<,eaYICA08iV"ۧYn>;hw%nF23Z̎cDo‰cQ=1wg$*\noHo`B|_sSh myǝy bQ9O0%ǛC[2y}r'>6?CR"E#bîL$d I1|L6#-j:t~WVT3ds EXxӉZDp++q>ɻf]$_{$6Ze= }a1Deυqm "4j ĥ%/^V@v05{/Q?my?͔9~do `  w!+[IZc{e|u'X]ѾaH/qbH3 x/R@GHNNU=QwONbèpqK.v׾P|q+.se?o:OO;nJƩՑMh lc,qCG<]wFV.<&gu33֕ED:k[Z3unN;hcW=DˬGΪ-6Jc=VtėG~$HOo¹9Bf<2Ŀ|-g{l QMp|VT}@~.fp&GA=[#MS -4iyE$AO~Qd%vjM*ppj.S4 2{:!Np#I ;:ٌl2lޔd9}E7wD)$`l|+#h!xJ#| 톌4ʪWxH,'1'G)l|4q~vںx_5O@곜E{KϚƓf@E:A0=:W#\y}8zӣ7^,ooT^JXٽG$PByr-D>h%k6pX06iWH08Xϼǯo.?>i]7gik8+ \eu֖5nmZimr0?>ƓCd݌4 NuNv.F7^uGJGJG۫ӌ v C42珜XFwgv`c Uᡅ9\pQTf4QmEVm+{ꘒb8SFxj$[! I"+-i_&,b<Y&Zt-Bj{mcn)?𠻫rHfJ~4!nEZ|=4SS PFI~w x^_Öݹf<rOҳYX6Ox9q3~X2ovt1{B~e6m jȟo6왚6٧3 3[1%75Qb(^Ȝ~|c5Tt2D!4cMaPxzvPHM;Կk*flr\˶3%NBVyK6 )A՜21 "@SXHuba> stream xYTgqd-!hĶj4G+j* 0 } Cg(ƠX5QSdwMs7=7ٽgf{}+1|dXi]=3V#dfK.KCV$Ǧ%DGB6Ef&Gf$lON r|FΞ>/%-YsB2CŦǦeƄIelL :輡/Ri!Rbbx c ʔUS֤gdʎDDol=-aIc<1w^Ͽ3fΚ`La22012elclg``dbffad1V12^gccfdS6Vcq98:z|؄2 r'&M|0M 5r1)$Q3|~x^oy ځ8FhuR^zBk*3UprTK0P*Caw\GA_ NDH% 䃉l6S+4am|(-Ph^"z$4JHY vz|s=Lׇ6X Qcx֦hJCq6&4TR \ gm"K_TX嬶WRh8]/T &'# 3Dɂh2'X ,Vv D5"@#^OTz5hR{;Vf#rAGA'}6>8۠4}Ϲ6i+lFr y$KlejAɣImKPk' FS4ۻW9(:šb'f59 Vj @X^6F+:ʿY h=>⦻)<s{d5b5ZD-C)ގ={?eٟO/+ .өA*'Xqukqp(:Τ%?LC$ڍq *%(z9=v@,y,r -QWAK2J$ˋ . 
zG 9FҢQ5MZ 1|eN\LFmªF3ѬBOKQ߀ )Թ ­rnKӣy)IZ=@lݷ & ׀eB(pkh+ڎP&iE_`zl3``}V8=~ ]<ڻe@L{ WX>o0Q+O}@,z={-5T @lsrF5@U-:dƭؼlOzd;ءB_9y ^SrtVn{,B] XyRm" CP]-<ҳuhM\X);/nnău71_ia#]eG._7z h xe-(뒀VɜEiٗP.&]+fPzQ R#YVU܁}l r6pB&'4;w60A7v+W FsnhޕgQo u`'_6-s唈gXY~vM,E&j|R:;zzdw?+jq}L2\/Zt |8%]nA3ͅ`ĵ+Zdi,1 Gpnk4"5FC6>GA~m|*A_!=ׇ|N$ъd"\d%O#W]\A /}_T]B(#NF̋gd,4vo׽@\t?/J]v {ygN7W;;UeUE^\*2o RQ9Co 7އ8* P(0E<:<: 5~f]qHX,9Hm: 8ڡ:m7㨀M`s:%Wx-8|c8//#-#ClSZde) h~L)=iuRl-IZ x迪py}h^JO~ Q &k5_p}Ǥ[F?{BuzCJs|sAmsp:߫r̓į]2w'F Dat:V \?rxJ>FΐrNroǡy~= ;UCHue钽4J)ڐ,z ,8--PLGFҽ턴oNU)H =N{(`o,y GD;hۈ. _@L Lג3T ǍJV+=9skeQ]ұFh_cH@ 'PGuM*Xkkmj5БEow{|RO=KWb_^`0 ג_Dȕ#Qmݞַ_~U_}j}2eL,(j5d"|k}OpTEt%h<34Y.F)sؙ{']("qCΏ/~aڲ/br z!EcgbHs&l<: .5O;n ?o{$\UH9L^ ?i8)+ jQW E+Vs1j'},nXlVod?;iNI*+%?rA IwPBRsb[r}we$b6 ePiz&=yX;eT 7(7/2Swu,?FU9څ3yqlizz^:ԑ[V^P2vh8l06:/^:ON(9Ӄi2}9ocme@\+UiI%2pVFy8) nzK%:99/9# ˨81=UyZyvJ_bE_{m:XWKp{rbښjS2UXB9Sqqg`rG\%E^uᮕqߊ:87YQ.)TCj0ʨl3ڗʶeS܊BwmYFLoZ&w; ^/c}ݐI˧ZHs"oBO][seB1_{!co R3ʢ FtW78 Zv^! TWbS+$9Ƅ"aj׮H+5>/ ]8n\e;>xb($%wn|ddŶ U̜dؒuoL$-==]̮Ta,naK[X81l7؍5Pķy$ mtRac"h*5hsψ䀓*g v/Kd6D\[8@4+@T$^`J`Ni)3S%\*-$RLj3bkw إs-*:)tE}) rA m%v4HOka? x`-|]VkPr0@\Y]󆣢sHI6ATǃb{jK[swEDF'֋DB<9̯ҭΆ\k ȒKOVZK*ӄ)܇vNv {rJNNm iچOo9Va$!Jxa [X밄-7m)w|FK BVK\pi!aCzĊz2̙_WWLWB4qMG,3< Vjqr_ ۸?ZO BkRJѓ 41:1:-X!+w\{f-4aY/5!H\v*<~ N}ULI+zf$+joJߔsI~3L䗗4}ޅ)5t#ecNŢEz ;*AR5o%N i`GЯ<z (SyΔoZ=XoލNNWv(gB3Y("#*OI<|(TXmG~?dPte: CηΙ|?)8GfY";۪2YL_kdz'y]65zZf{EЫy" h M|veA+ztπTg F,?ߐmՁƴʛ*Z%Ƞ#$h l?~vᒙ[p~dzKy#h6^[z*au7 +nhMb֭qGHq6佀{Sϵ~8}bޓ١e^ʑJ$aV{SSED8c.4gn}ƔinL~%$o8=}2^KKw2W⿹֠3H&\"!1*+Wi9J$KTO6"o2,{cr0 ;e _}gwl*VTxv"@W2i:V*V306f#OQ!߹7%+:Җ̃71x 0cjKqZMBsn(! 58[({?1i&Ew}ĩS2k ^lvl#+&[ދ78Ep*:EYdAԣ'9/yoHX\WVQY!f rVL'-zcD˶ggk?h'MZ/=G[xI!996اIS_i3kB& )Q zapV-zS"(?g&},h= Dq"-{09]#WG .^ H}r)YSh4NiꎈG*PRp:1XЦhI ҹc}sH" 1tLMcRI/S0z}B`Ұu[ cd|a8's2݈< Tm2RbX 3[FJ:"/ZK(Z |Jah󇮗gVџML}Cm7}ϩAx)3m-*).v* E\A9ͅ(%JӒtȼhl2+$^3CgTW+̕t.%гWgɴ "n6,$`'H*A͕Ֆj VPæ#]#&G _33endstream endobj 127 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1396 >> stream xuLW8hD۪ҖLM JƐL(fTJmimAZ[mkqDp3Y\tK?.2q&KyW^]vX]w}DrA$dk\c.d&Y`[1KJ1oӛYg2 SKyhGЙ4T580qDUoP˫k܂yIQeRxT[I@!.S ^QXoXFH49*u+Wrm}Fèu~qJo1oN"ZUQ+jJ?Pf{`T:.(e `V+ZhfRrHAUlܗ PD%q@vIH7UH&f%r#y|;* ՚,Nv$?b3b( lJbW4@0h0z04 qGF1`w t ~BB;S *R9  ;Cl +HqB#SDDdydW]a?({fV8m_4CTӸRc4 ~)*4_nFhTŇQkmDRT?k4!MqUˑE^W] y=h1?D@#ɬEx)^uHfKx80 -9G63BN fP v~e:8Qv, hZp*#{?44\OP*# 1?_\4$\('Wܼaʋ!H}R;{t`_IkG~%.$:Q\E!_׳~wz,0>fNE-pr6["{ H+"Ms*pOY}V&)*/շy!wt@9.sv[z!\~yDN8+ؔ `hѴaZ*SzjXz:w%Qdendstream endobj 128 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1606 >> stream xTmPw5ѻzfk*7ԪLϳ\E!dC!/ $ț$$(`Ajlm;79o7s7{w/p7<r`8/+)+6~ί_oPpe تo>69~#*T3,U:D1i4ڶc+E[wP(MurU&-rZij#Mk~aEEU*, Mt#U)mzjZES{- )-O%Vh2uBR1 [QɍN\a VJױw]JQ/yq O.y]"K?.]{ ϟo>,L,z}ݽ'xN]Vh:Y(FkE/HdgK3>swzH@ ^rD͡ƃh'|ٟK_U5M_y>?pN*) N'?z59:> 4'ΜM\|h4-mik:pؾח 2@${z:z$҃w9P8QOU"}|^%F>r\4؎:O\*vZ  xSDQъ37nN\#i1WO 8{\6e oVT.ܖ&Gf/LN0lyz]g>CC ,I=afGX=gZkB8{C/ ;k >*V_h?߾@Ҩ'#?Hg|iY`,3S^iS-UH֞BϥW,^f03ᛟ3O({˵x<|u& *k]YeѕTEIѱEb/&R'l;0е}9Ib>5B=DyL\7#3U@z!l:83iX5WH$tָ$k4VPTX#˾+ڀhÖܩq{`S36̗ H3L+9 .Ors1??*endstream endobj 129 0 obj << /Filter /FlateDecode /Length 207 >> stream x] <o0ejpыQ_A1dGۤniͱ ) Q=) mdH~zh6{W. ~p7hNmKC~ p/Cu W1Z9V: c׭eZT5UH` uQj9.-HeP=/Yvp?j> stream xUmL[U樓^֮×&I{Ma/0jfL٦Dq@vK }uBKi֖Z``bnc%3Q-/]4999'y~_+,p/ohem!kGfch|M6zVo3bC'7R6앪+14ք5c)܇X`+bxKAG=Q)qiMs)Nѹ{,)̅.LݼbX?yO5~tp@򩐙u0h ^-hFtgXn"~}`dC Df%\e9B8J"T;-ÀWw$$yMp9/_r?פF[p=ݓi k0v1Dzf)S6Q1W;l65 (v_İ gl@>/ 0Krw-!ʝ@6xI+*7~($`\)[~we" Ҧ> `dtbbnm@D\쐟/?>keÑ/3a/p|).M(%?;nO/~cHr*y\z3teuu~>AT\0^ګKZ­V |jC I*P˩wU;yDh3/. 
B\1&̭}:av ^+8N%Kid[T*mendstream endobj 131 0 obj << /Filter /FlateDecode /Length 181 >> stream x]1 E{N caИ"L 8XUS;jWᅉtoO8[DõtUXT`pS j)ڎnq 0*7#ZHN0cI錐$eIDIsM$>rbD,]Z˃ų_:endstream endobj 132 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1660 >> stream x]T}Pw%@V ԯ$[-p ^WҴ~ + Ā#E|FHM@@KNydô]9^n|szEyeeYug$V#x?V<0~<%$ZgB{0lX^Z0L .J\W/X!-a],f=l'gY/c{^&6,y$ca&٧Fr0IS*9V-t:NOg6?-'5nps=^B}ni???{$ K;Ϊ#5V(N 5TXp`/%wr O"DwǧSyF0\@C.ݭ/>u}*>z {9NB K{;RBTK|eynI ejv$lMuay`oaZ)*4skje7p~vY/A`%vMä܍K b3ڂJ! 37OxT۠ ]R@,V|@~LC&p4ydVaM-p^ƚm\4[r61Bc,^K6E[]*=wQ w㴣4!l$,T{Z"FyYVϼTr0f;> stream xEmLSw^[M@hod/;h#!f)BY"ܶ )EaP8c"–(ٰCA058ܖ|I6#;i9ɓ|CSQM1Eon6*a-$G )" bRm)MH"`/<ߓxކ/P"6~Vdjh6jnχ`Hxp_A^|vmG@<'S};W|5 U5 1ç[zůX^3s\ᖑ^qW~is`F_~^koQҮheg{^楟-pdnpzNARPfRLIW> stream xWXT׶>#bĚsH%F `XEHgdޑCG JPb"Fc%wI垡8K}o6{{06"h˼?q$#maVp~s0yegy-#Oz FhB(H’WH*~>2[γps-m~2_ ?txyӗd{|\.:G"Y,K;[eZ,\m9VHɼ/oi0As%{CCearݞ^{||\g=g .ZջKMƧHl"gbBl&VŽF VsUjb aO8 u#"Fˉ(œ &"񄐘@L$baJ 'x c"X *)b^6&^&v3&emp5f^X<"dD#G<;yG1i1,R,Ί d̲1 |$WЩuZE7V,Ѝؓ#(0'pa ȢjT EQ;"'9|\d^u5u&_l=EDP)ZJ+*@{J˗]snU>2ODEf$-d ;Ԍ4jv+f^F0+9H--ic/ Ѳ,y4;8Btnlx(($QEfD6 Jдt]"x?Y|*q +~ҞM {\pWʤ ($/Q2>ky!o|p@iM@$ohU xqr$ Vcd?rzaث}>;S4HF CrtEO`E*@u,B !(udJ@?>- XuE>9SzGEGI; ZA%WcA$n,n? ~RF4 @M}kVBr;'h B"R_6|DIUǞ;%ŇKLRpƤ,7_נz ɒeF(3L[PUYNl<,+0 YUN 1 _} 'i}F1o2/?Y.s ~W&-C0xĐ}@̻n\V.I$azk軦b+Di!12?$":`p:^ T>cq'|@|XD$n9v(ҁ1Ati$UF@%aI%bB'ijRI6`-5 ]E'*%dʵ]X!C|W G |WHuMe+&w,UPY_kQ>"oVgV|>RwYnB+F|%np( S^+9vOTtqdi`:) 62r)`m2h `~Bfk25dlO67 *\!g ta3jnEq,I"_fT`.f.tKPu40pO}H᪆79(8#%OO^_NadB&+sKQ.y?[ciEߵ4\Vp\wOa[E؈AͥuL6l^Ved"J)% }"XQ@݃ -䵦s?^(״㉎{SW'Gw;@"u]<}tm_0_mn00{.ji/]uW;[=hA^!*]3bSp|>N^PvTO?Nhӹ@]}ڼtlLއ}T%Y9TCȠ|4׼9l穃,©V]9!&O5~FSxgE-i'Xk~ ϖWf'g) owxa`a%YKU]xOkƜv,!}+L#-m:B%M'E$gc "䀦,87:!)ESI9:#&[\QWo-npCk> stream xY XSWھ1z]~Ku .,a @BB d9IIXvPPqkөjZiGkj۱?' ̌3rη_]<56o!>I{>"(S|L_:b[xp? b`w0g{]k'K7,MT4Ԟ>{%|]Nb_T.x{[9#4ӰQ? E,>aO 9Mnvݏ޼tn#('zs-I_`w植S)lFS5b׳S_: t W)* '>hPo[SfywgL@ ZI3r{܉%BQoǿg*X#+&-v2vb/a'ּyo|}jeJ&^9Pފ`v"u (RFSyP15U Z| "[ajHMz~PSiUfCyM!c]7=~ݗ:r3 Wi%|PnEEeOJJS 2%L=/B4FZ?-· MR@W9a2k<;v_<i8Tg-PQI`<݋fE0h(CK'h?O䰻\0Bts+ό}ٓWr^J w]W0j֖6OU#_,Xty;p`0/8;G3x4ϲü R5 y_ 3eI Åu%#U_);p.xŻВTp@nαiȬjېK! ~^۞z6d;荍p.-ךR0:,MNho!0FǫXM7kt:$3Ez*9K5zW]TBG8!>| ?&j&{`2_QHuι,OJU3K9 lE`xq$8rfWj@t*[+jVi%oֳFSF[/}r{Ebvϸ$O L&QXL:1v>"=[p{&>Ƶ.EsHUA{p)eSGj#?ڎ7&Z] ^ם,tK oVP([@VHx\b1ȞHG%LKa.wa$Ұ/ ?]SԅE|QTaudغ#f\>8wȳ?v /aÛHRÎ*+'ワתJ4M<  yFQDA.6gӦL'ԬX$;% Dwtjp>3f`7zWb۩K/:x` ֲIz̺Õul#, ):~sq%.|uN4Cn-[`9$=uࡃp8ܦc5L9!CJJ@ З1%Y&y= 7cs~p\k~YWP+vH'˗h^T8Uʦg*t-¤`#ϗ8O1˛]z7D*zN%vc)=v. U*%ȡ~zZ[R)HrP'e~aTX֚@95@ QHJ/XAu{Ė_t],=FrZS 824FqQ#MoL'/n,AG|C b@ʄ70>6f[:mLW}*0;3DSk-zm(fܫR yZgm.4bN|xsKH|al[N9 . UKZ8B]r%eֺRg!Ort.HC4٪Ŋxo5WC<./D',&8CI~UNO_ G#rxPJQ梮,sHsPTv%E>+\^AJٗkYI7E>mv )11Ҳ#^Blf}a]s d$p :guyp~54IZ@[[{UFzVC`2# )Wˤ4ǂ6`a02 :JYVTj` xp?·pNYJ@&%/t ~[={Sbc3ZJ @ot?;{0N߃h}ϺN,@꧔`m7nKº;tx~ef-jBCVU tBYgUt&5XJPძt!6..1:љ|'&jޟ>}hSqa1(bq<7y زJ,E ~|*kҐ^ yo? 
mD(~¡p3 g }E }uhfD>s5}o^J[FVBf-QIuZH)ꏴW:fѷN|@(4B}CP׵YGM]ʢ615 ?_8um* ,У#TZCiPyRvi$$uǡh?YL^Z B2)l҇ Bo8D6r崞܀~T/7a-8,c :U^T;jJj؍Fy%S3:Zl;~S2Ԏ٫RVl˝zo7p|Npr:}gdG.ٸ@'>y> ѷ:\U"Jd48ߺRZ;nwpMnKZb4%z$sFԩhX -g˩c&fKBX]g Λg-ֲP4ds~8PeB2I-]Ɯ g5k6z|/ _dLǗEN}xӫ'B^\h0USW1ML-uF"3<p]t6z4@l5Mjazbҫp m0xx'ߟ?Wq Fo.}ѩrń(^}watpD^ͻ9}ALp= do"C1(Vdjf?(.QE2lpk^SQW(32p.,?O<&F^n+1qmхCx 3 ̹qjN/)󲁔Nޭu~D)cj2VG&kRԌVSE2!`ݱG!YRp5 B+ft{ƛ`tʇoS2BmԔĢ/6jCv)+|텙Vjdn'W޺TFQ}PuAր,Q,ړL/'#Lٟ(\F唋+!y8) ?,Ժ eӥERPI9%Y> stream xmL[eǟ*&&F`fPa E`XJycpf4aP$#.+edkmF~o?9 r!aSFPZׅH\yZ #,c[^ @K!gTĚzCyI,iV;Q& BѨ7VM U%W7'V}""cވ C(d䉼7zq$P: @qBEEb)pÐi'06WFԒCZ)|d!]^# m%s %ji'M=M|idۂ*W_fkb"\C7F>kWo-J:Z-XP(m[#:ԃ̘ɺsS rΨ*;WfRaz@Fl5>.†n!l*V ŻmNwg>=ӻӿ\lH)8TO׷}zR`'ݲ80e.WhriH{(?bd(o9}s]d(&"''"T7{Yvdu+[X7^17S`ԟamAk\)'pendstream endobj 137 0 obj << /Filter /FlateDecode /Length 31812 >> stream x&Ir&_a>=UG$ ô|X0.W~#"vOdVDNLϒ(AHۿg꭪̌?^݅oURϟ~WKzkw~??I뽿Z}oU)}}+Z~]W7r59ۯKPל~K\]+ 9kT|k&T8ZyO?3\+ʽ˨3ܰz ٯ<2w}rhvqxgzzcPs oVt6-~_3 lꥏVD؞-$%y])G;1{\=lwǧ~Gw-Fzfdn܏}> ?_c^7:2I^9^'_w5cw|re $*w}"Ẏ㯟o>*N1GZG G~;3:GVҐ9zw-&/zBLr|H/[5KnY^r\sڳ Ol(W76G˗'pFp؏W9M=j}D{R{_Wcr)SMcQjW2ܯ42>dkWW࿦j;??;9Hk2UѶ/Z_̯]>(+*'YWYVk*n+`ksj+YsJП8ϝd?W/Y֪Gt06n%̅_UbSԿ|Dy]9oxn)0ϒͳ:}l|/D$ߙ'ϖϛfT~sImʲS eKP U(OV'Lx-vY~Dª%b*OdV_t*E7WNLWN r,7TSO}w~o],TW.j|?¼mȊ=yؼ)ܱ䫵O?ݖHObC!2R "J#XC3 .dg6& ^ӷO8btl`b}ܸeU$.]11dS7ooHf{<)~~?lÉV.Q/Qj#_s&ƽ|w.ĹXur$n2 Ǵt9t /']2-a|!tƁ),ǓMrgсYS8~u*o?G>r9б{Tjmj}aٖR39ȾP|}n&ߢ}ܰ}+`L[3|Pwr+{ཋlc; "CI"e,mο{Q]j~9˸|V a1gm他ާL$>9OLX [Psw}‰~нK>XiyMK*}pBUv[`d8h-#ϵIvdd9kռNZ[T]O2wS~/ <>ga0Ҿbm?R#VC۲s*02c~°>fXQɰroC6*!ѝaq]T⾪M?hT*U%᏾Uz9 +Vhi ^||>c7+?lwuSLaS[YуH":FDݝKNw1Cob^1k|KO男,uejO.7$nr[fqG_i9Dz0_- k5y\foq!9plny*Kq_ZIƬ's(mi 4阽& m!Ӓy'Wͪ>gzm{o)WC^{"z߻%`Fճ|Hj6iuGwQ>q͞?U*:kÌE"܏6bG#Tt"^h-Xf?%r/Y>uw~Gާm6u>{[>*GYXԯAYtHssO+R O7ao\Cy s->K{#?m-ET?H P}*_ GkK<@ ￷INkaEI hy8~"N8ҍ؅7 cAΝ+#xۅ~|\kO6Q;#[n&Gtk}f/=g@9q&ܠAs'{*iҟ%o sl̫/ *M7Z~Kw[c}]>>o.y!M9}{jߟ~mKN 6mH>g9Bbۿ4Q>?C~=^6ӛCPOtg|]6lx 9ݡأ-}ǭӯ.y>Vk=BzUk>ߺMk"dŪX1k8VYљx!+}" }#$!m,&G{'B[*Tg#?I(9;^͠ R?fI$ d -Z- iD,Ga#Fݕ*aeNhVcn<b?Uԙ詔TUIY?^Y`w'mVol04f;m7Nr]Ψaww {z |yђȿMV_і0h[釳Nww(r|×#}t#j&;DӮvEQx.6GQig#Xß-[W"1.( K|jNW MVzu߯D~}t2m{}^򥴗Z(ɉ56rr< VQ#T-)7~<^ uWf^;=v4@ şɂCܹjs\GFmJr"1^{8sɾuP4gK;<臜Y΃쐕04>0L>)l9,5Jߟ/W ?Laho1+TYYڻ\l-'Ve٭^=/~-KtnkiJT;BjD2L*?L1J2I0B@2jE#0[]MSNH8W2dj"=x& oHyGr}0@L Cq za6br!v7ʡȤ2v^p޷d)Ҋܔ.Lo!D&Ul{KS12St1!%/3~-O& sr>[.. 
ȍÌ"r@PFc1Dg˼dhq QDS&wf*hk2 D ZlDn|&A.\ yJ.6 OmPbSH7.J6t.8- `2.LHD.P?夅'#ȷ+RL\rdvx<*Ԁܬ,yDKDUK}^(E\)Щ.@$VSkڪڸdɪ v]ypj,4|$6`KNrd\eA|ۮevx2;aKB1x&bsz 6P-IʁeWH)BGi%B*UTN3&8C&lr&R3JqT(xe1G yȢ*hAw>@K׺Zbl9K$ f1n\X TmҠZ}PSq4FE&fאub@lfȭ) ('f H"k р@!1\M\'0`xC?V$C3e c޸uJ'h[Jēa^+4'fh%ϖY![ ն/Lzhf:̊dL݆[9 (grrmNc IHI0wHu U62PV圗jHiu-FX$UrNy!^nXW'\c8\GIO/ G ):d)CN,)zp4llOy I$a\L> y)ڋ-Yt/0MΊTUhFSK̪P`NW2y_Yh0a`bj^ǘy#PPp 114*L->VyW)7TĻH$v .9 )ѸM},;|2oGL҂6D~p*Ə@G>=~%(h >>QEl5 {-@KΡ*H3#_4`23lNAE > ϖ59e`/69L7&fbN j+2(uΟeNIw؁# c(&HΟGb 7ʝGLؤ't3r&[54 u0`T~"켦=|lp7ٲ_|v:yp[a8h$"rU}2, ]ǹ6E{O5"wS4Ą%gˇ51e~`,9 p%f %Y0OeZblKLĂz]]E% c`v\NC.f~c\NT2Nj1 |(t453y332^B9dA̎(cx(3T͊KOE-n .f!TUij 2DK̸T\sRW Ij1:TI܌ɺN`SH6d''!@]B5 !RfgC3OڪzQz0а>Y]lKɈ'7D>EƎqɷډ[=t>ωgކdjF\z/:cMŃʹpfRUfeTR`Ε8!Z|:HYJ[˺.x㠊_!bH}EW$ϰg\9o8fRqU>q 5X3edt#RR`Ӹp΁5Dψ?QPG^ X_MtNy 4@/CVCO.Q6<^Id`W$eU&}H!.!C}pƝ2w<|WiXU)8Vxj\,\ECR*9^+2m]ÚrJ,'|܌sW9tpp69y2ΏꞔJ<6ȔGwN8ڋrĺ㜗-41p]p.#Gڬ׈<| tm#E Rl̻H4e%O3:>"$w'EU$[T>9\{#R{LZvW;i'{[/uexJ c5j_/ mͩvl.֤fuFÂhT?5D*&",=XzPV?ï\O|ri4 )r*m!=rd }Kܹ1 Ǜ3uZq&g=9FӲqJx8c|[nH&ƁLlV5J:fTr8FI3ܖ(=Ma6nm%E~R5Je&.NM}zX}5]te8Cn_k rtqhTbKY0^8֟[PFI3Ó/8հSNeBT(kP"ScdpL@Cl;iT2@͚ܷNMF s";kL.wh#V>lNϝ5D-擲~2<1"HޥRfl4@@ߨ'=̥9pjU.zEhd޵s2Y=Mps RAV'"4U[ pAGh}סF$unP~1RI"N/SP bk4* AM"r`\O{4 QC=BuI+B%}Ě$uA%]#u>Gnm5g?XӫЖitDGpNe=L;Dp/=Ep}L-kQwK33RVϼV4Nf:ħrDG+AݯjYLW M,IpW-UfM= n2z"CWXDT;*ܺJ1Ws{+>" 6Դt{PZW[gQ覿1fRb~/xN%"=_ҵvw~U{R;aNYm:? gU K9R?ib6aNN3X1yȿO#iԝ|Gh;ct~T#>ccb0}M w11`=~Q0u$R 9{ h{)l"< |:0?L%Pǧ2ɱ~pDPh5`IJ-[ zl8w Ѩ6FL>~4:||mc2bdajhW+y2y1bňLQvUbp3gbqyΤ)*ky2o* ¥ufK,Rm]JD=n^m=!(,FN"Q?s b?>gD~)㺽mp>m6 >+cAvJ/:R0L?oK=xu>N|D$ X> E0X@}>R1;cO4cvJ+}"ƽD,`}Ghd ̎`陓'4';bTکTҩ݁0;patůt-WAVLW^\ >]0MKwM "Xw X{avDKwPFKwPהˆP.2Dh ,*,]AM]u;YYhGu).|Z,<-(Mj ;;[@(YzI>l;#賮gW vA8"p;](`Kp.{9;^۠GͳƢv_=FKw@A|vY%`7(#Xsdedt-4ηmҝPe.? >BQP^wEA b;([pQvh@_C`t'XN0)RRatBnˢᠢ;ߥҕSd>kҝYk9v}b 2t *U,,e<۝ =/Tp(;읝wTeƿs jttV wSA;h=PsREk˧*(+Wmh:'ݷ*k*c`0dtwL2A3ݩf@Kw:7%RPt>j ķfͷtK%閔\6 4<ݣ4o}6nZwŸnZ*J =*1o%65oƆ(Ʀ|+Q>ͥC!NdJ4]'+3SBN,kJO#O 1}EAI%[N7-,.}V *sVAn&WP؂8t鳲}jIDåOnAep󩺺6qvl;W >ihIDucg t' 6'IY >̂IvDOBPН3-KjA4\Sdg4ֿ3KQUjLpAty h!; Ҏ'R'1] >hIpDåO|A]SW|A}d>&ѕo/`%YpQpK/kW[B-_=L1%">4T(0XXb5 jyhaU9a E%LB K)/_l:6]!I5VdW S J >\IAp}W ,]"I5qĪ^ ZT c|xF_^#@fQ*Q+.)k]*DG"{ AҧhH ʊ 볻#Q1u>Ij:a%Lh)sRc .DON>Uj^8S]%ԥ @S&YXAt22Fwf=Car4A4Pl UGwm8ҲY])*S;.+cߥzjHS'Ν, *wkު+,(SWW &T0 sr,.[U]+)ζR$]5u$]uu$]uZI;JU*+IJTV شN]+IW= VIjVJUZ_JU4]ZIc Ss5 ;:[Y5Jpź妻h󪭚+d }ʂ4Mv^Q]yqAzrj᪔XU|* *v.* *UU5FVjnժ.k.p aҧ[2]- x <%尔]yAz*] Z] <>]WsUb'U ո]x b]6TtWNB (A>]5Sw]ө`O7кʽjk A;'H[aBAd]^ ԯ8e-$VAt?xA,cQ\5!Vʧ =$Vs A4T4XgEA|]{HF7|; uϘ&9O OSX; p=H!@g "d,."!v!:R  KZߘIA:(n\SPiUi)q9 SU}Pi9{dc8۸L VM9G!Ď6'H94RZq /5+RF &bs1ߎ9Gc:zHiE:wY)͡Y#t_D!%I h\7rb+en'8D\ihX>8?D͘UCp {Q6'f&@~kP Qᤶuq3D,\5\LHdl Vm2UG:!{rRҡ bhGڿ@iI~5.J>,q {?\tB쪤Ž`XdsȈBS0qZ7՞SBfr  ?e8I-QdS(aO%QZ< @,'{8 t;d7*RmIkIDn~#k|">Y^~ɣ͎P^NqnZfGnO@Eo NZ6[Nlp\@|}H@n'U /RrdRk^.رkXi*k*p7[i3q8VaCt Vd솲u"*$bu Vx)]=^ ~Ҋb^h-\%Dlk٫`+6˽[%|٩r{ӧm{skVPI9ii&Ƞ҃䐫RAn:q4L"zn&7rQ>z͠4ZZN  QvlGv ҳ-sᝋFK+Ddji-Z ;6e8\W-}ݸhQC,UnInQv[Jj;Xk*DL@@ NK*0; T{JtLQ ~'v"-(cv]Z*F\5N'FK+I]h.|HJT8Z抹"WFU ~ "6smp8+`{c&gpYMʊا.с]4.12]*'z䍓KzpDåL8i"xYå)w*rnxiE e?x"*/pehT_̩(|)&TYk*Bɓ-oh֩?j ?rՀ"o ^|) 3Wxuр"7Ϧ@Ak]&bY 6"8"&C׈iEAɳe?NFL+bjkhTͤsjT/85q~҇ͬ>:PtUM%Tg! 
]4\u*R ¯N ,'< A<tED >;+H'WiI= MDWvu1LduJ2 *ߔ6PERT"ɘe#5aTdIfw0DpEAo*0U$R^v G.މC4`H 2%}=o*rӀh4 '=Ӏ6PME72.шi@FLSL DMED hR5+Hl)53 :\=5h'DѠGeJ 4 }^Ԃ FM=R҅h4 `J oAOn DaQπGRΡjPR8j_ܝx=F釖Nrh`'^s&[zx=RO'{zdơ=j)H;$a<4^ZD8|G&MrHn逰CuH^،#PR IflSZ^QqY#7z$j̀S^6}h)#>_ x VfԎ0IXH3sAɗkR=W K:̀Հ4TdD3 >̀Z):Z5;.f>H;vv"@9{jP4Su#= 0PaY|v ƪ!35˩>f6v@>wRP$vF->G;W|t;ɟƹ*fZn nW>UDuhF3AC:3Yw*L |QԴGc @cZ;DᣒPV^>՛iMh!)@|n>Uvl(  􌏣@;<]cjC臹@,>he|43>**QΒ=E]{,t@cGj: g|xmk|6>**Qλ06>G=EZo46>Mmh!y9>|COfq>t^Ehu!x|4hjx8~3@[DatM @;UDZBrTS @D>J x$LJzثWPSG+%P~H~\Eh 9)"`E )VEh")VE{44O:>q8m>ašV #%31u׌5v -oS|z ڈ2s9zOEn>I=F6OA"?_ӂÄrdi S>ɚ|=۠|*x@c7yҲ^U W Wxy ѳǍb4 P}"~>>ʀUD3?OT! P> &k_F\1oP!2%FLjФ̮K˺׸D%ZC7Aj,߻]Po,à&i#)rXNcȎe~cӆTlO&Zvbk~z`L`HE0eZq0.i-h<`Y d{ev,0mue|<`B,+diahF|!˄$$`l̄$ XI-SHe$P=LHed]XmװN&$Y? I0!MՌLY2܄$ 6b3!d_4anwmsi7`)vӞpœe|x$`I΄$ Xփ8$ 3!I1&$IkI$`aB I |#P#3!IoBL虐RLQHPmr.꣐F1!`%FE 0mG!,lR"]w:y )2(QiA j`5pwo S0mjhb,c01XƂcb,וj8I0ELLRΌ|9Re~&$ I5ӄd64!ҚL(ov a}69Qcr2 \;PLWd񄱿z&'I J' jI:)iL<`Q ('yI"<`ڱ$s0%iOc}"Xb6 gXiQY"XֈM&o.4"j+ iD`ܔBXĈ# I"0ELJ%p3$ƪ&$r|XKHiV<`nӦF<`q<` $X;$%e8ML)hnRe]&w6<`YK(.M#$/1Is#}$n&&2B&&GI"0E5!ab,T"0m,k#$v Fv!qifװN;14:0r/C4Fy@{i:Q%wH920Rid#bs֒Oz*@R0lrWY9r< Ic`.wHV31 G31 gbLdװqɮTҔv)2z&')lrW7AQikSۣeOa41I MLReMېLnS$'X@! y]<.wa;, rv{ Hޢ/]<.w.w -X@ ˝G!C yDym<22^͝G#\y6wh9dn`)r, cR"3X@9Xd !C桤 z*2`p2pM!TR2=b HC2, C, 3j H F+!6, VCnQM7G rNnSvr$uے, 6, 6CT ک.QQ;X@:m̨, \0αbdZh:d Ph8qHn`fdi"1Da PQ( A S # .F&F0HLc C6212xf Cg/;+(LWv´藩!6O؎m0A`Q#] o'̋(L]f"+յ=" HC<:,vds2۟A{!B0ġ]aԱ/0֋& TEzфQM=ޢ >h‡h4`4a&UaY´aPS2.UxIR*v Iۏ=$ahCX,a |?c C 4,aC^,ahF&  *EFe,aF91vF0100DHZt_ $0VE}vQ NIȏe$aӕ50|{xšnKtQ1q "C60 ,aHأB,04%{K~PvY fcTJ&,UxH@x4ф{ɰhP}jTbFVvфA?#MX"& [6MXCV-Є(]4au,!M;c30k'nK)Om'6CVM7IآH7IC׶IҊE6$l$l,nM67f {(Kzjdi# h=Lx#L[0V(qً#l0p=J#L;# jnAT #EPFa*7G떑 k93^/sC3!C20F+BfD.oEv \F*!FFh~OFO0C:PCX a6ƽt´ ol aqC8(bLE!l3-(6QbC428 мхmM&4Ɩ/ ^P6 & C Iia@.w}Z0l/0=@f4Šv|aowپ,sZy0L}FE0=sԳ<EV\$az,li]^|a]ML- +(Nh!S +WZ~Sca$ +hDoB0L;&$ ᓙ.!|aU1)0l6aXA% I°'8 ?(Ϫ{ưr]k2ư.!cFE0u]!ӂf LL]hٮiFǛs-B$ Sgqme N+'\a31EN2H ]cӹ(U1Lh nZtcXFяY-t:撳Zt,ʫjӱ # (/2AoFNǽdZ]FMNB49NG^r6l=E -Yt5(2jLnSTnӾxv2L}<XӺ+xZÂk 20C84*krN=eOQP6j^aeh(,z"da76uJbNgˋM2F(qȪAk jaFQo7L+O)4jdtalb.lm07ٮHIlא/,I0M}39тs2rcc10bϞ-Lq%hJKL1{b-LMLi鯌-,I'i|et11-LlaHHa/$ ӼEdaF֢be;m0̈́,Ls$K4{XHaQL# TKc!#YfSLc iװe`'4˓rYXMZErMNiFY+,0]& ˰MN҅i:+Yی.L+LN҅)ry0M d hNE&A1 LJim R,,a)xMJ:3&fӟžEzeaG븄u,8yuaGXg3}# L=ɍ,#$ L=#3u:S##6.^daaGStu!YX@Xg-4 MBğ,, ,3LBsH:daaGXg~hyda),, *v kM=ROy ȌCedaaG!ydaɇ,,6MBCr# Mda`GꡤF:daaG7u55XϷdai$ b5SSM՜>dͩ)S 155X郤|ndaSXd+:uH93C3NdD33O<# ՝:uCQ,, ,, ,#$ H9 `v`daaC,,TTdf$v?aYX@aYX@X~$G,, 0,#$ H9ң`daɇ`dai$ Rjda野YX@aYX@aYGHR=rƪdv,#$ H9da`da1c,, :da)F\50,, 0,, PT zjda!`da!YX@LH;l֘x5&aCؚ/ 1kL<!l֘xDkL<0N-eF@Xd֙xu&aɃXsc9_@Xg6 L<:CSAGؠ/ %ju `>C_@Xkz}qC_@XkvJ}aCQ٣/VoyJެ!S7y~w7q՛8zjM<2)Vs jNb5'G+yH.}܇ZL?H`Y(uHX݉C!0SGJfu H;zjjrjj2;Zv 8ZzjSZyV}i[uu }֭/ 0[_@ʩVusu H>lk~֯#)jҢZ>0__@a X`eCQa_@a XǾ}i_@aXӾSvv <f5 Hf5 H> VkvJ>aX㾀k0sCu_@aX뾀 `>u_@aZ뾀 }a뾀 }aタCM}_@aX>}_@ꡦ־/ 0W}_@a}_־#zֿ/ S|Ca}99=|(j~-=H; AV? 
C`7{!%mCa&~ɧ諉Ca&~99vn|ڡCG9X#}<mH;!#0aX'?石N~aGq_@m:pZMMm:pN~a'=]2N/ ң{UyR_p_@F N~1|:Gv HA/ 㔝_@J N~1x:$GE]ctu+R/#_@ZN~N~(1xZ_@)9;$G5]Btu;_@jL[2b{u Ha/ EgStvv @9պdu H>L vvu S6ɇlv9f Hf H>kvHnm̀#0V?X?^]}<;`5s5H}@;EFF)F9yzy)N)ftHVrXGaɯjȈV#=H9yF5ݍ<2yZydFM#)#=ZCGjT##rv?CGj4v?St̀#=ZCGJv?h ɇ63ZG!#9ZGzv?X?h pk|(j !#u\rG{u7t5HvnVl!_:K@C4tȗF4tȗ!_>ȗ A4ȗ!_G4\H?nE6wWhh5b0IMq(%&؁JlL w"B_A _Dȣ0$؁@&] R-36ʞ=68 T2-lW#BObRO[ ;PɴEDXA f &> $Q&$*23Pٴp*Db*jqnTD.+PɴcR1kR+p|Rh3oUp`tS|-Ւ@)ǪtS|M7hln)TJu䔺Ǻ@X2ND#P#۟Q PFF (L>G amLZeJƤe R.TZ `-DðJ0!7o<飰jI0I(X;E]R_RJ`QN~OuDxD"KP X?-Gz5PF3tP>4I'QLZLD S-$6A ({bdKH 5me8Zfd޹;@P~z3.O 3h &v3͂wVs v"tNi|F(.JR \`)P6ҤN8ya)dlS E]bruMtMh_o5RIX~JYgvrI.O!Ljh>LAj|C& o.\5bS.jt vrhV}OTEq%}ʡY>\ (4ݧ"kL0*.%Bs1<&}:Rh55i!>ٱE/.OžuUD=mO6{7=2MĐF]L V)4-%~znsv)4;6iE=K"(4^BsOwvo|F$r&r&}HM4S͖%z-WS`7~&)&z#%SEbPlS60L7}]Bit.c)RƏJtKlVEMS + {Zn(TS]|cU"zJlL,j.ڤl7n?O*Ӿp bfeb\u/~J'BjaKfo$KP&FAu]j%>]@yo>Pe}F{k*\Ijj*ٌoX^S5  T}?ʍd@TN(b"ӎRfz#tV.x}k'ȫe:͚=5TLu U[{+0)R/Q"yh7ޝ+CaHZh2Tf3r+/_ٶ$b*;%z⟜芤(- l5e F`<6èݗ1Ǣv_R0)MHfdq< @7rHrQ艄QgL$'K$&zQ+#&%ȴ_1Mn1.*3`f=Syޘz?*YGy^Za4fy_5fy[5E孹4Y@vfyc͹.Yμsg7;w6ºy,Yl`7Vw&m"w=K]jL&ygO&y1쥅Ͼd7d7ѩ$oj $YmIޚn0[N [9E٦]o,.o$Lb[F7$L 0[ o]fxy5(ģo3!ɔ$=k<wc 7 LoBWwcUn'~nTR#Cٓ&3QA09$ݺUFIZ 5Z}dzAsSfv (sߠ,\Obf7vIF%=ܚ[>safsff&ι( L6Ծ혜(2R.+rZ35s\SslE2kMyle 6 uk'%|K^i7{2{HlA]J"=QdHemͭ{ɱ8ؓ2}(r/EQ/EQ/EPpm GGp9 Gу|(ZQ _8h#_8A (93ǩgb( NX((((3(19(99׫99̜+'z|΋K/~w'z|/~Ǘ'z|/~Ƿf7r VOek!&A6h7°=؉c~ Dx N@"\Ib\G 'xDxᣂG*<"XlF /`D€F 4`DԀadF 8`Da 0`aEF@XD "NCPDAŷk#$"쏐HB" "L0b"¬ (~N8DX>a Cu4"̥p0!¤ #+"̮pCe6"L`0ނ!œ /"L`0!, (~b(DXaKBu f(DX# 5"| b ¯ O7"| A}sDAũ3;" @mzDqŸ??DZC"~!A"(~0!?DlC??DQ"~P !/>D|Ї@"}0!?Ї >DlѢJ> &CĜ`=D\ ]"{!^"({0YC`H=Dh 6C`=D.!bv"{H!b">{!졄AS=DyGCDB"5 yd%!җHhB" y'!ҠHphbpF 2 R#!n1!wJpC7DpC7DpCt$7%Іkm'Іm* UuІ4AR6W|hC=-*ڐ:Ջ6$lhC*U2!)~ mh1a!)xCRzghCR|LCcڐBRasRhCRR).cP`CR>PO 6$oC SBN7kHqXCRb:XCR4֐_2a I^jH`]PCR| 5i8B IEԐ ƾ:PCR.A ~mjH:R@xZ j{|( 5Ġ"ne:HCR(Ґ_g! )CJHCR|C iH/" [0CZn ̐?3$b0C B fH/ 콇R!) 7¼gPg0CeHMeH>S}% Io(CRju9w CR|2oU I Cj CR|24: CR\< ]|b/!)ۅ1;"x|9!)709cH?CN|3!)~_bH_+>!)|0 1$NB If0$S@CR<0$o! Z?10TuxVaH+| 0$ŧ`' {ӕR)~ aH> aHaH\Ks04o04>x!rBRz^ <`$x* ^H߱G"x!)>EJ%_tr>.4;ߓB&t#.?.٢ IBR \p!)~E\h^R\H/OJ k.$RB I{PBsF \+R)WK%:l! 
-$hBR[HF>oݖvl!)c^M` I9XpT[d!uX)+Yw7.YHɻ`BRރ" A,X:A:BR IGWHP\!){BqԺ]0WHR7\\!)'BqKqT޻P\!)wBKq̥,9+4{:BRޭ I9x/+$e^WH}*K_ݏ_m0~+$ϢpL^fGO`i߫`4BR Vhvu``*s-X!)}+$'\BRJqT`b>Un*GBQLQ UH{UTH_ʥ|_ZBcP~W*$ŧ`Be]U+\2*9r|^kB6*QPRPr BB6*UPr r BB*QPPr E nzo_޴M5rs<ݜzirЋsMPN,!^WMrUeJg}/z2UTxޮRJnvroV)忈JvzJ)^on?_vEԡ!]=d/j{u]t<"ёͧ>Awtq ykⵣRW\||ϧ~έ{~2ڃfkjļ7 99veUww\ ޮV͉r\v_+swO'ovI+ݾowr-,n6yl5}s[v_&3v#ݩ8f_v\.i;9-Zqc{&z*?dtR?ގ%xt}H[`INj69qGOmoƆ?AW-(}53_Ni<ׁ-1OinWy7e^6\~~Yjg'Rqݾ*_#ힵ=T޿Iڕە9CV=}Ջ?ߠ01T'e18~?NY#3WS*>'ܞ}4kZ jka>@endstream endobj 138 0 obj << /Filter /FlateDecode /Length 37608 >> stream x[Hzg 6XkH˖ 8@ j V.zgtȚiiZ|T-I.Kb?lÆۇ?_"Q|׿SIRzmǑ?|_'C^Pk}=_%q{[Rzyj{=T1//\z][ιz۶S~]i PY>|G/_yr/ޏRW4o˟[S;vmW;^~/_://y۶qߎ۶zo؄T3kJ[9oq~4\h9h֬Õ\[Ѯ-Y/.۹|w>C>eW9eڎGȮ׸o[3߳yݼTRk/'yUM߮T^-SJ~}܅Z.ϛKj% =Kz1ڱ:5&-~\9<'BMԛnzU>{oHP_j!<}3C߯;j/đ߫ǥt_wz7~3f{ov>ԣnm{y?wmR7kNhOf'ыp>mmY9{cˡD75yڿ/WzGM+J.{Ek>D^|JɯW ?HOc<_͗_R|;_J+`S}wׇ{}XFmOC}mhI[?|Ϳ%iۮhc|~R oߩ8'@0>Nn甮ט5}u8+Ay`!3:s|?[C=eoj޿G7?R>~ZǕs?x}ggpz?JcAE˾>>7]wP~'޳A?k<[C6W wAS#>hP^SZۃ.!1]_vǵ%~9/%tv ڇ>1m| %_8v~=_{N.|Iy5!Px;#Q_ W?ql~=Κizr8m2."[W?gs>޾A;FM叏x|bE}]FwG޾8ἮnT۾,ȯ~|fÆuz︷;is\O]osRz~wWn˽kv7Twxgӻ]߈X1?ږZ_d뱧-e;*,M,)Tݣskx?|](glM7BI}x}nRzGۏ~jfnڃ=d6#~ad񷟵[sP^Vivϙ.|z'9}qG/\?heu̟XcEs\~>yZa?VΜ>=[wl^tǃZuxtڎ2ex(꽻1w_?Ǹyg_Q}/W>KoIRڶR޻OO.̰>;} w}P}8XрWg7YV{T;h0_znmHߧ5'}}=.H}1M$?uW3s?<~@䍜 Uc=*}I}Sj7l N֟.Ƚ׌rEǣϞWx~ܭO g|g7)~:]sҧn=uuq}Ğw؟ ,d˧N׮RG󛵓%F۷n;(pn7[WߟH#Fƒ~GSKGy=Wnl:{/x{I;t~S^|ʛ~^7=#)e#,B^cy/n/H?ZW~zі{_R_3dvFx{E_s/? L_zZ}mT6K3{&p붬WXkY.z?&:Ya2Zi҇ۏ씅SDܬsۏ7|w×,q`a'}?vwh'uۚ>vo,c' _#Ozn?A$Bm^k}xm/zO}]8\?2J=p:r73 3S&w}$痚F9RpL-5V%s%1h 󙠊uA1,+׶{}0eZAW4D5}I`ֲdx}4y+aO?W{k/ b\){cK>ďӨ?o<>hPj_6I/ǿ;?~7?r߾1s:W38zǿ~_Ϗ~;?~7?~;?Eq47pۻq̏?wr3w[J}ܴ }rx̏dA q~wgѴ;E'r&}gggNǥG~'t*xAtcs#z̏n̰f}D޻.O퇿^_\~7wˁ//ĄwDʷ\ηo,Fkr67'c?kB[_G#t #gD-y~-zM!>1}[pw(xgv_ʻoR^-! ''V"O \6O+[w]}{}s櫬ު?v HwW>߸~*xюg޸ys,X͟kn FŽA <>ܕ^~A~NSpU5}x˷oJۖQ} "ܔy ٮ?S2R_ݳdN>o#4=fmpC|#6ʻt޲1ÄIVU[~]zOX3ٝFϾ}nX| ?s} MJH\O~}T/w U{yK±7{{z` ǐ4O[׼~}/Td`KoUƼXrӭiBD 0Э&u]iL>^~wvfL~?,8̏ü[Fʟ,\xoWztdjȒpSԦYT_} gď%qbO,K~Gq?3VKj~uvu6/^ӥ}O; ̸gWO6wI'wzqQ?1㿟AV[߽y[4?3a ݻS\B߽oR;m*;V逷~ rB*t_}?>c?mDdX+z* =+V#W=|H+{ߕ@9&[]%rB@e$Q(GnՇhl?p]A~;җ(8w_J1+EJf "( R0Yҁڨ+Wqhۇ89Ru`GC):w_ m:SΝxUP†ՍH!ԙ]jcs 75ܥt́scF׹]; VC =C9j\3t5}vkѝ8Oy!?~yf69K-h*MFnCΐAwM~}8u%!ʡx"OB`vA=:53N:ٟŁ|`ԑϢ` }s>s" '{aaq kx#Kc<(ݳ4|*JID,:L70YuyL8[{WVG˵u>[Rq+{@?el8yв=/o{.]9K"JZpy\qxYVs< /JDI':77' d*9"('8u.} Vrٽյ"B{?dӕ K@(\V}9biV߾Rnl̽}.x҂ .'wN!Rlx1[\X^8!׋(P9u%Q8E&ΫwrRs;fr)o¸.rz$(܂~5]VE>=>iaOXe{sB $%xj}$2*'|>7RXKpy7,-^KS '3Nw%cTG.В %߭xNv`mX1]Pv\w [[hvV`4H+'g sڰs#vہEφGӿgXt6L8y5EJ%`ƹ@prЕ0Üh+!|_,q|we;=fggݱ=]~9ŜPr@IK7rx0l΅N 3֮sÂT}~b'-XHׅCKXҧ+~`ʇI&ږ>.?KT}/ Ga:(}h:ʰg`/{kŒB8qfQ H~3cI3aw4"` ϩX M4}s,aW0Lx8w^bi!ZȄEi uQZ MӺg>,G O5jv(rXjO9Y9pt ut*  ~8A3Ki}Y !f}]3.Iwߤaѷ{[$֤xG5"1iچnŽ[>_eT.K34N4Vӣ⛊[cկ ˁ}F8p;'O̓[ivmvl3SGnxL:4R+.$/BOZ,wAs.cLD8[qz 2JGtC9o_p SuԞ)JLٻ^ʜj{+1텮Ү$p`eJӍٛ7(ލ>5{Yj4XºIv!L-8 <f[#2\TlqyO3MBV*0YKGunlVJsڽEKR5rgۅq ~A^wv`:ka JDetR.%4ZIw(8{)u6Y6N\ehM><s&J?xnwQ8ׯ3StKKY߆#+u岼ugty3}S{c!7stE#w3>uyrX=֑Ծu=^3#y61 g-c0wu{3yMUYk׌?fx]Sْ1˝ ^em+[g|Z7z79n}0sn']@mcY%D/\HDS֨jj)뚥N*eS~`"Êt^?VYб^ekgOj,hc]X]=t,aӢ4M//Jc LG41u .˾.Af{Kq tYuדee^OzH;X>.Vvei46cݔf kܸԋ.esK/fsTGe]o׺עMXcq(ZlDm>:~_o?6Z;FZwWtى»n+h/]wWtû^׽3{G/F/[y81>뭬Բ >Ůp휁XL :%]A/b LNutiMOjZ\4U:%],7I/b݁w n4G UQÎt4~t>QpH4G u sY[lخF'ba5Zi/a6l!{0-G0?³N0Q6-O3fKrNSgKtNchKrN)lvT[t][tf) y1¹N7W,rbV#gF,VEG:mmtڿ.鴑ù!鴵Hna,L]uL鿝N@;%.vM: K:pcUpg*Gx!5=':VJ(ޗvQ:=4Q:8pc~x/59JqtDK+rx<}yJq D+WC(e9JK+r]ozu,M+` }{IXY4ܐVWevId\pxv^p֯mʀzIõڧ8{I0}uvnR{q /{zI 2m4ngb04q0uW(.Q:kqױ2>S:]t8呺ǥpܣh.MQ:JG@(Q]tD0 ֨F`#^$E̘V%`\/;-;+ QMŜ?"4 6QźtyHӌax)KbƏxƤ]KIIENJ)&5V #+12]t03)^3` D1Ꮸ=:)aA DD>tFQQ}Vꊜ#*'zb8#z爌*cq6 xk|U\K 
C)qZuE,/^</EXWb"џދ3"n"`۷SE(\*K\W.E@]iS[)u_ :G_V- @2x/7 %,2x/ ̋Ȁ_FEbW&]t?"ٕP20KĤ#d3KP]-sl&5t2 #ڈ]tvPlil7?a'ޖpY#jl@?nr[ZGe;GBf4|iKpZ-`/%k d-"ːlyd"27%[+(uBcPۜQyAFSf^iWvyNd3J#K9L+LN&V32=e$2{+y-$34rSafGjWV9L]gZi2i+[μȧU*sۻJwRgёR9/]L7Yb9ϑ@drKrT:r N3y.pcy lvNwEI#9L*.v*t])(J)KAS8죵+ώ;|^0!ikCiPNebPְZOeȡ;QaZ\QhDEVI~6wXݥT4PGQ.n G{crI菔Bw)͑pVbK̒PG5@BkӎDE.jp7PQM=H:A{^_msW0rJPtuXj `䒠R;&ST-9UQւ]JET:R  lG.]2Dՠ֮Cs^딢Wj-Cf/"#1'.&HXߚR](H/8jsއOQ=P[ tp*7gGz=veWEN/rT#*MI4PpXu|LpRJu,N\K)*rHLU7v.(E GνaJBoAL¦sM{~e)_Rt5ǩgD݋tI(zgּx:NT+U9tԤ^zDm!QMUW;}Ցp{qjkxuΩpOKMwYW.B?Q37v)t#Ȅ0ܞv]V77Oo XO/oo>Z6=f۔ ~ V7KaI^8öɠ.j{N+rBQ-䗔CNSEL.izFt՞J%r::v*T4]9M캟٧V| )ݘ*.L)_Bg r G c49MQ^O39/P9MWNSZw$si(rBѨgߙ4;'9MWxP+r.JtU4I(g9MWENE)򚮊,YhME^UtQ:MWnStH>Λrm*򙮊|"颜rBiVxTStQ6"KCtQ.9MWti*xTV"Ϧ79MW:#]tUGIn'/2ޔ*骜ˀ*"o*M/Mq MUt*¾oJ(ǣgyXYّ~o:IUtQ{)oe UX ޗpXR}'٣XTGy8ˀCrncJ~,z<|,swRr'gr1Ǣ{ӯXgӯ;O1֫:Nӂ/ޒܨGGjs!rFB{aY/h6<_ SBO^:qSiz_&'GGij~#׫?=l |/S+7ST/P5 Z_OႿxvSJˀS)-7eov令_T~)c}n}PY&*R yMU|#~UWჯ_]n*|>VHmȏ*jWp?}UX"VEGX"`Cֽr+c}*UU1&n,1<2`2P~3 鷆˜ߊj0[CYh 짆FƽoRM#+YW6)Q"KȑS,1B+_F=/47E]Xd| 9,vbqhШfYB.Xb%HC^]8,i,dYr YB1Z;9jgGI-#K5G*ohLH YӃcKgYBM,!+8pd04D G oY6,έp &cFK3idLRkbJMEsɩӿa^Om[MΦ%Wm:hd )XzL#D[cK`,!H4ӳ*YBU_`d cȸFu0T4_ZnWkyOjFq(?':ֺ2T",`%SM?j|%~EƵQ%dadܙ`4FJ,jYB 3GU Qȸ5L02.,1Rg,5ꕚFP8[aHB*4GE$GƅN.YBJHy6đnd |95EM$K\aE$KUNɸ[Qj#u!ɸ1LHq-!ɸ3Re@#$~BL} 4Si&EHXH2|`"ҩ:oI;df I[,$YbmGH2#C ,1dW{Qc#K.W$V* $.w%$ #T@2*V HdT?vr6dTL HFAfIImTTL2*ƘIFELR)pE*l=d J6L~VN2jlbɊl}0Ok֧K"A M$[dT4>H5!UwH2*BI~:bQL2*Ro&_#$t1,od˛j&>%&f-d[HfqR %8f6sӘi(Yb`E(LӺ%jndzAva H.!8 l!8Q:,d$hE%PAj#8md Q_I)Xɚk@$T[0i_5%`lBq$Ya=ڛI%vbq١*f (4ȆqZ e+%)3ɸ2NL2Ғ.&Wrzf%P3KD$X-1ɸ4'F'6` Rq J^3CkR!JX+BɸP2.P2n+ ͽ-LPɸQ1%T2n|CE%0Qɸ3RKT2ZԓL%ǠqFJ=JT0>%*S TǞyHPi(M醒ŀd Q .2J JFC"&YBz3hј-&-Ez[$Fd4S&&4IF+*T,dΩwJH *4 D=v'OƼQ “("?dx2l' X3Et'+cx2i3 {ͤ1PaYsdHx=.OL3ve@MdX'Gx2Ԋ5Ex2M 9'ޘ.PV3vPVfɘOt6 uu!R!8-z||2g>'U*mP5 sS|= >羣擡&a}nTdXkN >';a}FOvla >_& 񯦆OWV'CTw%Pؤ;kheGhke,|L2͋2 Êe5ˀ2յPv=X1E"@#bF@ZfIbQPvD !V7ek2tdʘةPzd-Bmʎ8 s2DaOD BA(;"0 e6 eQgʢ$ 3,wP_-,zPV :yΘ2 &@G(;q2eg (cU2֛3 ynFA2lFq(fqBm(CM`&3ʠo0ʎ'2( `fePl)[~dH٪Rܐ)});V^S6{:8eNx6T6K aѕ숀)]rʎ2tm3 },^)98e2Swzw)k[lN>sX=Y0e`+”`SvD1@:|SƌH2&ޙSհ(8JeSvDgp0ҫapX )Ì)äbQeG A09Vـ? 
U4`e6oV6ʠiXpep`e V&v#*m^&VwV*Ò-XAaeGrFÀȻa+cx52֓D^;s;F,Ye fh.)N[@oϴ Yet?A%V9Ц]UF:Ye4 ?@녅,+MDe,蕯O2A|}b畦 fMuhp 4L]s8٬2n4UFWYe4bUCR}h5iVY&Kl fݭ̬fU&'yfUjYesiF1,慉UF#V}& V0V&O#XjYe YeUFP fef槗ڬ2+DfV Pe p)JJvˌ*˨veTY3lR@hT9tW/5fmUFSYe+2v8xbjښPe:Oigm+A$bkeT͵mfe(kUF5f1Ŭ7r)9N%Vmž9deV<<%Pe417 UKL*SNQQO2oyNaֈmR"yN1E1* Iw`g&Sƈ%rq`[.Nt)C TP*@Aecq1J+NaX@T1oP0^2tTƈ/ut0GK)KK1M2%TYdq4-L*E2FK%RYfR]Het+,# V1}%Z]?:`esxg(t aX| +˨u:P*(*XYe ͬ VYF2ɜb=Ye&VY*J/Ye e*':R 2?}vUN1̾ӡ*M<=Kp.;OOjL*ϩwzJԨye14ĻfQewz8"Xe 1Bɬ28*c\*/ƨ"~(Ĩ̲BP%NUF^)71TfPeN9 Xr*v'*C*s&]:-2lzUQLݾA0LaZayUkS-/%qbnXe y^ae;4RRתȰ2*z VQ(ۘ6pmhetLSnOQۏX2*VL9>иL"*uf|Aʨ#'\ F,1  WWHߑJwAes029PBohe$ Aie7t QZ1b#e-ئQ1KK2VaFR\ ʨ(²dBfIrjzqem02F4 @E[t2kd^d,^ʨFv"w}{#^^敱ܐxeLPG6,JexeT#L#:xe9]fWFG2•1d>ʘqeT v",'E \cN&)-ʘ'+6,æVFA#ie OIVqE+bNʟ0iܰ2[ifX+c202\y5 1KV2wZ+y {Y1-I]Ͱ2*2 VƐ2- +Uaew2:MLF2*ٿk|QѢǼ fX2f_~ W|,O)w2*f Wƴ,2NBʨLSMnS.* X2wQ2q!0~X@K2*ne XFŽV2&ib`-Z , hL4%)}E Pq˨,pi%`yvYXFE] G˨&b4XF$,`bUuP(%bYoDӐeTL"1 YFEce[2*`A˘ T3N]7m@e4:Z[nhy40ZWAQ1U2&x(rB0HL2*bexC%(fr"Tk93˨hW"fu1˨+)͇pAfQ̲ fXbQ1O2%fQ,?9D)˨"&`YJ2*',chgD%0heTL]SH  ,c^I^Q>\ BEW6CWcԄ+r•QaD+`Q%ZCրKŤ/~鸢Q VF,u鸂Qu\ʨ&X-q 5#|'Xأ VF0LS(YON2G,}#Q-ʦiETFE=ͨ2*U+R6'Tї*bPe3?PeTLAZBe|@.ʨ%T*b̘PeL )D+b*Zqq Ƣ̅Q1M2*YgՀ47|sO7v|.F6VFT+UFC&)MwB2l:Xe3#`eTdN1MHh:(h\6xeTLm,#2<ʨ'^?:ʨFG^^qevѬ2O4׬2*IUFX9^}`.UF',;`QoVӊP3-T]He4JTF%Y&-X%C#IeTn2*Tj&RMO r"QIV2MA*c2w&ݔ3 TFEIeR$crB S rB`iJM)nAUl JM^'Snܥ^o)e3,(e7EE{nSn"(ঔP/]K StU0]QnB*a(ݔ(eBJM؟-i:SnJѼ(ǣRvS'RvSvOStU6]QnJySSnJ?l(e7etQ7]qSϦ8(e7Ir>q=:)e7IEqs(50A))罧R*NiEϦ_N;JKʳ6.NbJMq8tQ5PD))kc |"JMvRvSα=z)e7ϖR*ݔz?(e7>)e7%?ݔ 0lUD))rTXSngE)[QnJySQnJ}L)[SnxZOER6QnBPc!`JM Sn訢݄触ݔXR*OW|tTSVEUSnJ}\U즔G?5\~jJMɏu)e7>ʕTpV&uM?2~궕DFn"CŖAsI,EQ'AQP)dTXgR6>(ëZQ **D*?-|Uv4bZL[)ATT **mU6@(#(TXX6ȣS EN=,_C შ8c|K8 a|7>#zP{x#შ^1oATĂR R`7]AT g QQ=B6 HV ժ^BT|ء(Xˤ+bEo(t(̌:UsQ1ubSsQ-* ( D ?g^6 *zRQ1%A&AM\&>H%2|P H/A͎j 9A';evvPs~d5{&;3A-JvP;N;`=nTT[ma>A;ęBQ>j%ApdxPk u.Ã~ sEA1;-OfAQvPk1<"D7A.&A( d~t`:AP v (* T7R!( Ԯ? .e#"tB(g| BPt 4\A"\S!N0!(A)4B)D+Ž2B])MFA1!q0 !(1CBA1&@!XED@!E5BU%|eJ Vk<*3zjEdBhB!Eؑ@e5!j o! s N)A  "B-A1V]yPzd)BP1Ev`h Ш!_bȍ9B08BOJ tbr1B(cb1BP/c (:@BMȅ B cn d /"QAB:(B WPYv0AJ:0BQtPXFg2E "D4Ne$NG~ C\ ejhy I4HABP  1Hnyb:HB$!(ZL$AG1H[| jG0$Ԅx !( $=]#:ZGn!(AG[A!ZKGv} j@$4qjf"ĠJ!(צڑct`&250BPP;bQ?x`T]`O!\Q c#|&HA1J./1&5B`A | P.? BPX[)BIլ!E5!ĺU1zf0Kv0j(MPۣX0ov@S@XGK2D]PE"_CCSCç1^0XZ2ChqChS#F7 !tkS3-o!4FB6cg-nBc X +ozjWW!4V!"oBGT)@@i0㦠%;o.cF֞@ц凩] B,T}ZQ-<`nviCS "Į2N4e#r#ֈA+YvÙyb 1Qfj2Ca*.gЌ Ј Z@5bLA![zf1\T`Cbr#"1B_Fku.s"Ķz)Bl\ЈY *Ҽi@m h[^E8 Us -p )rv.| W"9G[Y9`"~"ܟEPLYE=0?P=0?Eh֚Y 36gw ^h1?S0gʣ ׽ gT76?\2~*TgY|ol}AhEPY..XErP}!`,&L@E(I9x>p=Zj"{KYDLA>Psr`|,Bv!99>p,!g}Nr"9ܞ=P ,B왂=>sr {`b"{ ^")ֳg0H=P+%szg ,~_g{ B"EY9;P<#}xFzxFJ`3g$HH,?Xoó_69K3DϮm{<2RJ dG5k' :e7X $‘d !="nABgB &xLA Dv~Ai}hT;LlE6Bxm6"TE # oyE2xDcL,}Ar>lhwQ=D"H Olz`Vps"ᙸq3mi6&Ã$t/ [%gbUϴJ0B w%'AJO<$}]Z$$CAKa&dH2ƄHVi=D"^x#%覧" ;, u&eHH≗_Z[7Ho3xă7V %IOA nF.xd[AK$Bz( 8tL~`>!-K<ӡĔxȶU! 
NKb $Rxăd AbYCd'QxWx6ăOR] ABEx& A^,BF):x6ƃz -[/,| !ŃD/V|=#Y&O4-i<1#:xb!Q!O0RhĴOxQOG H=9%fq<Óބﰨ3D%ۉU >,FوuLj!/EQGE8WLO"tqÚ tԚ>P=>i<(?ǃxf\`^q<`FEtǃyz{`^q<([bcDU7X8/ Yiy<3]`y Aa]7Sp:=yO } Yrvȃv$H<ún=(n vϜPǃZ&I@ =`<3kٮy0ЕX=*<< ADAHn܉M'wk݄xr7!N'wfC<OS'ē{ٖxjO[}sK<.6 !7ēxO ⩒A9wƓx?5lO'$ǓtxrAj<9O1H'G$ȓɑ xBz<9x!=ΐOoH'g<Ǔs YpDb<9m"1Ha'gV$Ɠs-|x$-za@a^a4$ A"!GFxP3 w4D>D񠅌ExJgߘx)l!AҕpϴѸi\-PilA#TOJgd=Hp$:d% p"U$YGh5xy=Iǵ)$oD4DDmS$RaDx[<xlJM"-Ht9;i00]sNS7x-$a;HNL|8xlJH&]$"gI/A"mL%H 9L"YI״SA sOcyU7xڔʓ,$\3&fH2ǃNJ`Mx,<$Y37 )xd3WUKz-)Q̡3xƤ }MC+9S03&W]dՌʏ4!3&]3NUlϘlt^I\EarogLg()1?o.x*S3/'x*韍7OǓɋ3$/gH^ϐ8'/SɋSɋ ɋ(q<߾w~zk5FĢw 5?i+ Et)p}[z;X;)S%}uD$(>^E\xE~\ *5*߿w-Ow?lާ|ƋG7 C O7+`z_q_盟{7]do°)]w_|T.RE~H+Ճ 7vzs<ҷcBeڶ?cQ_[?ǭ|DK/}|E 7E~# E}ulW ?o}e|T.~ŸNX'50ӏv/~vU]?zk˧Wpkr(7־}W1o: 8>fimɇb8ZqU0,k\u)헸m}>g}Gwǹ]u_}eD>Ϸ@o?x:~_"Nx>c~o>Wa|#{it?l>$>¿~>߆ Wx*}Dq}~.>/g롾O1Dh8;/t;϶Å56_u,"?}9ѹ[/pcj8Qdy,xGwا}q<|ϣQux/)~ oї/>%_K_=/q6oP!endstream endobj 139 0 obj << /Filter /FlateDecode /Length 21801 >> stream x]-ɑc7t;ЍY@a 3=dlGTGfD>=M{lA^UUYoz/7o_W_/?W~=Fk/G+/_}Qx=u_Oʣ'l0?_^WN}\⯿sTӤ}'G/__? [JgIerΏBop+zD9t"my`Ԙ^?ѫ?zKzex@skyӉֿwkF{z5{~ 9YzsoMKvϿf҅ܳ}LIʸ ȵl156CkWtq]}/}%R??||*]A[YsN{=>;?=)x]9[D׆zpm^붏_S˝qRzxپR~n4wsL}ǺsyGtQB_򥺨O._xP~L.y4\gG ?©ݢܲgIr6L;'Rq\?>O~WO;I]#okr׻~^@W j/ۛa s\b_4}~m{Ãy^~[sWګ{,G$xoĿ{unFg{m0lo7^p3)mSga+Z$fGט栌FΔ$߃_]=oec#C| }]2xRw퐹?~zUy/kQnM}LT_ԻҫG *eiknNmF!ׯ>{r8m<{om3&9)G ,>׻^~nO2eʇd |^OÞTnwFo|>C;>LIDKrl 8;=:#q|;{ᖽ}|7%`zw`#Iͧq乨 ~fMh`f;=Nr%h[JB6WR'sܕ/cVؽgu)Lim{ MĔ<:m~!vߔk1#e_o*r!+_N9ͼq$Y7B6)!@N7uT;u{޾߳YcL#Oj2X ?;Ӥ W4ikXW4ǘ3ğ.޻_@v L*!::?- c|y7W+Ay1 Ihw#U\h%}x|uzAzs(_~c8_ɍW@v͹A/?ٮjc 7~ڂן9Qjė_aBCG),p)n! )LXd^2=׹זDwoCR?Oݥvz /~}c746ʤ[CO+o"TB/٬]D*Z9A4(! Mr>ֱB'(-o PD@\19KKrC܈IL( S4\V %r(ztͳDžI%r5p7(7R2 J_%8$TZ%HJ MQncB]rhӫ|%IK*FR JyB?R*.vRmǍte7^AonUݮV|Z/|i̭nπ:qSP[JqaXpdj^QJAaV5c:+h8v (2pcsm{.fʒr#} wP{X)rn( mqWQg [WJOBclRKUIJs.ZJB: JA{j=TeR \J~d9g.cHv=qY0HI%IIkn2P2GMϐ^7sv r h;SbeHZV9C̹WkX߯Bwoᆖ}x0(I,D{ISNڍW"d쾖b5xsPnyI<z"#) Lۦ'kP(6|&XBY@,'^YV._a)3;U&􌠗)+ ۝ : tpEAʭpABcwtmN eps0ݘ%~EAq)+;i:tBC? u *`_z@Nڍo FX/6 7n++i}!Љ֐o!PKlؘnrvx, bF~'zܻڋhN);զ.B=u3N(x/HX?iaM- scwӀMiz):uǔ!S'8h =nTp4Iq = k՛) 0ҫ|Y 1>/N N<8xKq\裸2Sz򠤟Ê(c-JU7% ň7LSt@%nT֊-{q(?ti} :N+1]uR!Fզ*횃?%x8#rSpFT1CBp ѨMg$T;6=S$ǦK td]+Q4: $ Y4t xfLiKA-(E]_ݐSrz!!ӓ^W:a75A:$z:,bbM ~DHBICBAޯpH t r>/ `_jY#vH%1864TƱk֐@[ώc7D e p}I(RpH6M5 Ák˃oh/YD u xI/{tYCXaQ Z!2&\i8.ګ(RtHn(=j !"qcc%%ӕ320Ȏx`d ! ƍt/()m]ʭ!}DF[ pFډ c)US:Dn9-Gg8!$`ȕ O  N˲cyj@^IPz2Nݨ6y))`T \ ,s yr>lt3HlqP~cAc6x# q;~d^2c_]ϸ%)6|򤖋'H啕GR/ݘ`isӠ̔|HI4gBtBFJa1)xo*!&0\ )$q/`AU"2>q(]P,MARc#jMd:  ;製i@F:/BV&=qYqܦop&Bʭ]ظjo7³B 7Ex8 Yi15+ ELP>J Yz4مҤ&S;BCÑ`VxKoGQ{0BR]$Hl!9 шt+jR@ OƲ24pN>KԬ=)U:' Jgr!gebtզ><ɍJC^Q ,;DX"{$MykE >)?3Y#"sGi[NOGtU:G}(tz鷥kF1DP3)UykoI입K:% Jv36}+#; ?噥f#r9.~.%lOa`T+_v -m-tF(k4{O? 
uR}/l͹Q=-cP|ooWi4lp7]U|ܳ;糕Km0kOyrd?4|H |Ѥ[q0ڒGVzndMُH̶цwFwQ=׈_[Y07c"y؀h$;ƴQ$8FXn+5L B"&9诘Iwn $;+I__Ez2-+γq3 W $+v)"yoi 3>A' : !)T)ep\J̲5t +C?>v=ƗV;*9Z )ՒT1IJ5 K>Ųa3p6j܌kw#Uvwwn;e[Uo SoN륣Sի1l_^p_pp \Z.Rs-\mnz'F_mu7;\nt7ḊC]Fo3r_ӿr^|GbԽyD{R>2M}qlY\ Fa-̞~ A£{wsQAFQIFm2RFeͷ0'3;{f*3Q6m^s?_=DԚALԚe 3QQfƸe&&2pOQk ;=6f5wiҍ>Ƀs67OK|=a8/Iל":iGuxk6H!0X4M,+.`"bncvH1X ޳K&,!\}9# ߙSL)&uI$R$]vS3\)b.*iB/5g+_xʗbdt .-_Ifmf\ij9UAE6,ZVDx[w,5n.X%]JJJS+|=؜xd-%]pIh-Yx^Re*wi |>&OymR/K6Ic} pvH89QDA`w 0G!=r Q/5vFƨuB 9ޖ}5 :sᰅ-Q/*~#|`nz_P6>0]w{hkoG[;Nz!]p`f :AY2[(x ^hkj|ʊV(Qໄ|:!]_K(ZeKG[Q>RZKIR~ - )_B~ uzJzg8jKV&U[` yxMN/KMbF:;-4]qyɺ-*az ]fl-1P4D/./}3<ܕzWK豱xya2,k@$ˆ]+f!0\s}[nYsEzY!lp_fx]B1B1&tǬsX™FɨU1y Qpǯ̘r)C8X9^ɬW쁨u1sllm1-DCɳ!-D%DCHJ0"S쀨i!p<:ΑcHF!EǐEڱ3=j`)e>$C$JngɀLB ';s BrspllprŻڛ r zLB~ jH ڮQ˓Q[ A -l=zܒcuܒBP&<-x;dr$ԣbr뿁>+$!Zx$D YacrAqR~ i< CRX GM#!:x$D pXg7?CcKpst 7,!kH]CK[>nI Ѿ+_GK{ JzH]I#wnƈ %3wnx^ahߑ컂톐|E;wKQ`X3 Ѿ+9E\p*9w$`톜0hTŶhX %wkCQkZ}W`톀2;2솜KC=7G Ѿ+X$慜cG?:] 谋Yh8! kC<-t@.A]rGhߑo7IXz CL.)wnlm];}Wo7R}GB?[}W`h,vFs7e,t J$CP.-wtѾ+pwCT:pRF :}W96vD,3s hߑRhq o75d XrEt B  LѾ#!w9w[D4s0vCNc7|D4Z}WƶhߕZ5w9wn}Xrڢ}GB9nJ]J;}GBH]2.s;;}W`토JgH] ;}WPfj`.!UjwJ}GB XRQ!*s74-B9ܔ{GBt 톙.Jt CV{WP'Z{WZ]z %Zp Czt ;{Wڈ].Gѽ+/1tuF .P[b2 D 1eġ]Ap wN?5Eχ!$ m5siaѽ=~%wՏ|_ڛn hޯЮGm4j<ۥuk?{o+̉`JJ%n2Gi  i"}T.< QkaɧD 5&3c? (~ъ%]S>@AU +$?h|9ʱ fHM n_uNuAGRNOV 4pJDkr.h 7(ŏ9MUx>9d7/X3w.TF,s;^6ؼ0zDVZ}S:G`XaV ؜$yW-YeI!s s^V$\K\JqK^/9-(]llo:(`CM%ؼW6 ƚk^P ֜߾*\Y$Xs8Uгyvrt5a AY&BSd9GeR` G.tRe7?.Xkγ b3x&5yK  /)\s=ל瑕.\Gsy[5/X%p*MϹ "Ryr.aaȞk2k^-yy  [׼+yP`q=K"\J~Pb9j5z~B5/ :V::۔VZ9ۄЛ-P󂐯`x[ȷ5/"j&h֯pM2ZݭP sY΅77# ֜)9zmk߿1v`5@9WptaX A {AN=ќ[ay~h^&e3ټe F6(GR9ȊOB6畤_%bQ҅lŅlkGd.hs8mQC~T9Una*ʨ`ls؊2*DTls V9ĩsam,ͱ,T!6w(Âaʰ`hs~bhs^*W Y0e 旅E6'9Vc*\lXs,a" :9`ʰ`Xs`= .GTej>P+aǚC392* -k¯kLŚ5>caͱ(\曱XX69=ͱ|(\X?s+G(j`s {Q-`s,Z%.l@ 6dP09mM% ^+ !ts>I B7Wki1fpe+_R9V;Vş3nNMmsa;[.lsoTa;ΕQʨ`lsiQZ598IQ7C091lFm*6TXPrecAɅm7Fv#hspƮhs,UlhsX/5́³ͱ:VE #3P%8Bo8ǫS 8Oznή*B'H ytsEe.|" W9"r~-ع *ݜMxJ7)2ݜ]٭saͳFM7E7ݼ([}͋`ݼ;Wy(U*6|ˍnAtEQ?F7Wto޹+|Ѝn ]@7OVʲ昘$Qx Q(: F7uF7] _NGٸ76nJ]p+wۭlsuW9!cE/͡Tylls^'l, @6ۜ 9Aa[6IT|&PpyS5,m@jk.\séإUy_֍lΫ>.d66^KKyPVVF,B7yP0^+b:E8A+lzA S<(I"W18NyP]<@σ?!hA;Ez+U )KN_vʐ%hM )[>:2ЃΖw5 9h9h'A;Ez+S)YS)sKzvSe{徏+=(N_J_vJZ?k$SZ ,? 
Ihԣ* +BHJ:n2҃ҎITIhoB;E)覊JJ߄vB9[G2/#ү*0=(覊L*RF:>J\mbNU_6e*)WMGەЦ @ B=(WMGӕڔЦ_6eUQA_J= (N+ P=(-TE{^G*P#PzP@JQqd WJ:Rū%#PzP(c=([בf=Ni睚襊ZJ>4@qAIG\J4@AGuWrtREeTAG#PWj:^:|U)AG?U{P(+>eؽ2#_U{POaRك2\#P2{P (+&(z=U^@{PRLX~4]!^J{PQ;ڃD@YAG"vkB=۽"mJ;{AAG?Ur{P(=(vvS{EA͒ڠqB:6ŸIܽ"$# {(+7(Y t8nֈOAGT#YU{PIqvJ>ɪݽ"#P{PƑ*=(l`ރҏMA^{Pʑ(=(Ht${J?ߝ䣟*=(hb߃5k߃24@AG# P{PQ~tTe{E Agq+%iҎ|Ua^a|(>(HttSe,@^,|P(>( 8rL[)>(#fƇJCˍnj(>(q5Rcei+ĔxAN!%-f|Pznj(:>)fJ^~|PJL q6]AI1 0|Pz#%LPA10|PQ'8v3c&`DʔʈIQ僒b&`\ Y+J2J::b"`yc>(qlRcjyg>()FJV̚6UTAI1P|ZL :y4Ag/=JwRb`~>(Ϫ 6!轒 A9lV{%6RϦf5Wa>(f5Wa2>|<(jpxƫJ:(Ǫ$ rxFJ?8(Hkc5Wc5}P^Ijx2϶c5}PU)^(jrxƺ>(jrxƼJ??(j}b5~P ^~PjX b5~Pj8rX8,VtqΖb5~PjprXEA9," kQȾWaf?(j}hj?(jrX ar?(jaS/X b5~PUA8,VX b5V@1O%|cSQiT Mo8Ҧp ߨVGT:DH޼) yE(?V7/0ٍQ7ytf|gͣ[]%Ll7߲S x3Vwy/U J(¼U Ea\dRt~% M8Mf71ͅE^#?fQ.txo;VR:ޛO=wxv{~(t 6I\5I|(pRtw6 7!5yr1k(' ;f7%ϕ,MNGty{s{smv{s({ JvX R+an'$-)i #ՈN ' uoA`TG8śb-( )v@t;?.,b vB>ڪn'HI)pB=N&( | | #@Cs;!=PNХ[Gc^g>ף*{ v?8Ab[s~vBhN RH̓ýp;A Pc3aV h򷷠n')sp<m'H #do;!ǘlm'Hufv68l-(p )dm;A e K0ֶr-FvˆA8N' !C4F v18Uf+vBAN-(W )vAj-FecjoANȱP #fvS {4m'*K}1+4R vBA(F{ JvN 5?Ch;aƠlm'>Bcg;ǘl-8}.G[cAcZhl'xS e;Ġll' {c\6Pa0B\6H 5e#d;aƸl|l' * #vAc;!lhl' Ec;!`lyĸlXl'Xb;AN1/T$c4 F ˆvBq`NP0* )T)NhQcCL6R ~vU[w vkɆ^QbL6ZɊޟxB(!+4ܵfvBy-E^c\-z BpA0bV[1S2c\|%z %f n{B[o!Ũ[1*/ kjz #J iЏڄY/Ũa& )Fe%Y{Ǩk' 9J ˆy" -J¯v_«cPVtR [쁊vP"j/LS̔Y3%V;Ax^1& 1&+ ªB9n0MaWLj ŒO^( 1&+ 9dS{ǜPN6J ʊ8+`j/䣱ByB Lj/H򨽐bR8j/FaQ{!ǬPa^GkEtܗC2YBbV% )tAO{li'| 94U^H!(w -FvCb!0+dM{!xJ֪ƚΡh@kz>ᦷinz{ƛ޾j*oz;ƛ.loz;ƛޮ3]nzۆᦗKnmpzFކaomqzFFFEFޅJ޵ F F5JeFֺ M M M M"MBMbMMMMMMM"MBMbMzMMMMM!M""MB#Mb$M%M&M'M(M)M +M*,MB-MZ.Mj/M0M1MkYwݙwmwwwww5wŝwUwæHӻHZh]9h]]h]0ӫJ0ӫqaWLzř^53*gz^.4δp.̴x.]U)0ӫtaW3O̴W<1[ybLo剙3'fZ'f)O̴S<1^ybL/剙v3m3'f+OR%<1KybL;剙3'fڔ'fz)O̴WiS<1NybLo剙v3'fZ?>1[ybL;剙^3'fz+ORwOLCU@QZ4Y״0ӝ:2͘EF00W7gW6-t%3r iL4p hq249rM[9Ӆ B5BAudJw|0-Oa,raL#'DkӦ jMF:dѦљtٴBSEM+)ZT(~Z+-4twaPN00,NwN.>N3N 8]PܡdNrZ֜mp+MZ!K06q`)[%NTe([5- +B. !N2p7qo0 B.o q qatm!NR=A!NFNE[ ]!2 zARhYB.xJ+… B tz~3ߺ:tAA@NO66q.EǽdAyxn`6撽*m+𓜛R@ҺZXs_(=OY3S 6pU|KXf? 
F8'=+pO@U.KQHΙq'].TV4%M+h[ӳ*mzJ%ۦM+&Z1!Jq(P⦧7o7"˱7CҺ>}h/_i ,i<3Uvi./)nz]p($nM^i,pt?N}L=⦹@QҶRRML b'mIOti'?iOB}ҦM;Iޟ'm:|I_yҦMOp'm OtF6li7l^`\.rLlz 6^`]/lz 6<^uh^ijz5LŚ^FbMoe/jzY 5jz,*/XU`UPӫaWWABME^6m 6m5m 5m4m5 ^U"ƙޅ$&F)FE+Fޅ-F^/1%4e6Fޥ8F:F%=JE?FޅAFCFFFEHFޅJFLFNFQFUSFޕUFWFZU\u^޵`b5eFugF޵iFkJnFUpFޕrFtFwFUyFޕ{Lޕ}ƜUƜޕƜՈ9*9jsz=.\U?ӫr1WbNZMcNj^:mE :FtzՖ.?}BSTi<[yBN/ :'t O*OSi<[yBNo ^:'tڔ't)OWk?q@x_=ԛ uCΎBFx]C.u+_{}YS"i>㽽e ˤukz%rZz>Cӓ$fendstream endobj 140 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2063 >> stream x{TgXWc,;%XcZ"h۲ee`-쾻u/"݀+Z9 'mli6A-Utm(Vsss E;$4g U0pڣ,zsHΣgŕg>0L\v"jq~¯@No*94rD]~ B=Ihc40#Bߋx9^j*lг`CNЪ] aԧ۩#q#h Zw1 7 ءZ}uߦ}@8.Z](JoGuv%BOѷV e}|->~+q_tS4Q M99&SSm IzQa>L vhCYٔ*}9^BLaP~W, ï؁i4W|*CN975[5c7㗪(ipLnN fCWg .}P@tyP}{ n}R=I׏<{ BbPlȝkzO\Ğ0=qb(֛r \ޮĔxkUƺ~أ(cBS񝇉x>f|o 3nщ"B)NWI4)vm}]x0å/,^a5=^cqNjI/?M¾vaڧv=m*I6msMFp(F]͋c@l˨˛wo.؅/8 h9[s4tPJQӇ #(IVVGQ9NvNn+tVz%aiLendstream endobj 141 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 473 >> stream xcd`ab`dddw441H3a!;'O/VY~'YSzxyyXVp^ ={ #c~iKs~AeQfzFFcnjQfrbobIFjnb ZRaQRR`_^^[_nPYZZTWqr-(-I-ROI-+(d```T``b`bdd9Нv>wSGO#=S;j~[ղdE X?aF\c~6M?_n6tsLӕ]Z/[[vӖ朖jv}r[}5WmOqϝX\Z\^?]nJӲrGogs]n?N` pq\8cendstream endobj 142 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 2460 >> stream xVyPWa`U$S{ (`,5@B@+03 3\Ja[ ( 1QCV9ncqSv{~=DQ`63_>R̉[.cr⁀>n*W7-yI$,0PV&%$Oz}}īW?*U&Jd0*Q*Q?)$*[ԆDJ;33s$5}\KJoK8rJ.Ig\9**R&*eA J-ٲ/6,N=%5kj_xxJl'"DEAJEe"x#‰"b!XLKxXgBC|%P ;-u o;8k]p1LEy'7o|N[f bN:,덪I +ʤ߉|~mq$dt>02]wl`3^>ܒ, 'Xpݠm'FAr1(Fw®^x~O#k8Vِb@{15`;qzxvG\xH(k{;=c|?ZszeED|kB4%3e_)aM\= hh2ڻ3^Y!UYƧߏ=r;}^mST̒j46ut*l{ɼJ-|,tB\,d@yCG|Ԏ&2m{PHLӫ4Tg«#c;dL 1<^Fni]Ywn`r]tfS# Eb̦@+HdkZMz ![b'#gH7á'}Sf!2<}1cr-ֶ6k`-}@u^ƒxf/l6V^xooTEu%4 {Nܖ^+&d Hg=:͡.@>rK9_hk brce'ey i*t<2Ҥ3(tGSPbdz? !}nddd>tYu7xt 7fԇ Ϊb]]YE)P?C~W; Cܮȩ~}ۈx9!=!\gLVچZOH]QJIAVkIP?/H̔ؔ9o fdq,B7΋}m9V~xf+O;k]]1h < H*|B?>v@uʵ]]lЂ (-3&+ RQ^7· F:`$*ʢzXRTps&22E >4,g#VO2F;A-²/Z?.D#Mz!̰Ӥ~ƋDNS' $xodUANO' T&>5 c]1!g8EcBn1:@u`& <߮@B`Ҫr6%(ɓ6aRrstA[ʌ Ҕqj1aByeXFo,K0;[V0PrK-hˡ*.H~fSx@pӄ}7͑d~Ђ08ԻG>mF)__} Spw.u k`S~xv JO?~M["IW(i>NMM&E&݁#t̢B4L\~:J)BZ-(_\KuX.\l+>wU[]'rؖeV?P^-qK:'&F> >!- @E)8,׍55\f^"쥈{~uI>&fiEԩKj` ךDX@O,`;]\]'Z]Ŀ(endstream endobj 143 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3093 >> stream xVyXSw>1sPJz q]Q;@ZWԨ%HBX",IdUDeU `[ө˵t23N;޹'`;3sy/o{$J"|8 S$14)S[Q|LS~(W):N& >J$鵑jN\/P-]djmt6248FӄG/勇džF9oit7/NNN^(VjUrNM STU.KԅkUaĮ%aߍnׅ.{P[YvʋAvQ>^j=S~?6@m7hJNM( 5JQNT2uW2W/y6Ϥ+w69]ts}HϡXv)rplXIǥkGA><EP&^A(sWtKn:hu^(yzvSejT[^wm ɚlg5UaJp?MP$ +RcXxNX2x_zo/qG&(/&KW:R41ˁPDp^%SwװJ-q xH@F,:MGn#Tm6qxtq&>bʶ;wsn_ݣȇ#D/Fg8JE s?^g_˛#)><m-- ֈ.p.wI>b߃.*NBMu:+7T3EFǗlN`mhH9cpu}I9ׅN`1|Fm$': MXC(htA[<EF4ڟ'- (:_0Ȇ̖/L(Q,(ӉZfjѼ ;}nO8P&KƟ& 2tO:4-T]9,%5Stn-8cX*,Pq&CDPAط{_7F/ of)>L%xGIAj~f-xB~IA9<Ǿ_~eF!^l 7 G{N5Q4Y#&=O3os}enz5w8'02aK!cD23p^/#qH'|+97x}oyD\"l9't67w:@NU!8NK*(0±,nն nBV}12T]y/} iaSю}~~I2Բj!9kc9k UT/z">_g8,/ z9ZpyZ>W_RjipJ#${ꅵ[㴖V˸G%r8M/tW|}O*S[q+7&]xf ̓M}F2h42MLQ } d:7P foاX\[ Lw`&.-Ȣĉ/)eeF;?wA6v_&TZ*KJed!͏E8n.m(sovS[endstream endobj 144 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 513 >> stream xcd`ab`ddd v541~H3a!3#,)5|=<<<,+~ }O=Z19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMUTB10001030gZ}?˺D׳}]3zqʴI}݋9˱͝=wr뤦^QgWL<ss|7slݕU')=%rv\&ͩnjG{Ţ7޽@-Mrպr#T*8~~]н䃭~T6-˻peڽy.w6?ٻ &Wr|ZtROY {g*_~'Mg̵[|> <<@2eendstream endobj 145 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 335 >> stream xcd`ab`dddu 21~H3a!.,)M|=<<<,k*={3#cnas~AeQfzFFcnjQfrbobIFjnb ZRaQRR`_^^[_nPYZZTWrp-(-I-ROI-c```4d`b`bdd{7m}ClΕVNHa_}r?Yu*r )lov›݇(oJ`W鞱FlYN^;q&{r\,y8WO7rtendstream endobj 146 0 obj << /Filter 
/FlateDecode /Subtype /Type1C /Length 3281 >> stream xV Tw*J$WQ*d'i}C!@DInF+$,+]tݲvÞ9frrfΙ{]1|!\6:of'`_Ny@} ҞYn*?ѨuǐB@ P&""5)aS|}M+0Y4H+eA!ݬ kS"53g&&&z˔*uӥQH&y\_WHɔrΟ26A#WKT{ 1xM,,h|}xDdB>f; b=HL"6[`]bK›XN> b%XMEhƒ4H#7^ÉwD6 {.\,px\6xRKVRӨ,꣟Fl#GF~:ٵQ`])Vދ὞3|aϥ K f9(6tϻNt[A [H jU xxeEA~]•DpVh4lF9)Ή'I1S~ m̪ gtY./m.h. a:TkW (Z]N9=bex't/u{Gpsk%` bxM\k@HS̍ޖG.!):o3wK#C p &ͅ8EA<[q.dLa>%7b8 ՆeSiYzHKe,E1v߲%qU zESƅ *&3 R>LctsP)p$Y!@zhg ss[YyT[o]?hKD/]V ]ԉț614,,9'T;p~/8.[µ*0/i:JGt\ozRqHKZk ؖ(Bz5Z;ɽl^fKi$!WvS$|N)tq+%/c5#w<Z|tΕFe)+H¶8W`ɫ>9m@[.[8Pc״$s@]ϑ#5D~iȄ }{3hXbKO[ ZZ af؝Z;6~Ukc!Kى)Cŕ'Ts\Y4OQEZiA n q m.ԫy=ǀ!@/xO82FWu\R0C/Ӑ ׬o)>ɎCWAwh"dWR솰긖V%8m掚ӕ'EITe:ұWt*Gs< G~rDs oӗ#%/v포CR}te.T|?*]gL~y~?ЭYU^9.# ?;`MS -G/peFn 6p:v`V y"HƯiw5Džrbl eGI ,DN1Nt\!'3I/MV|p_C.WgZ l~8KvR$?e G)p{yYp{l 3lڏP_GB+hAǡ@=vU}jtcAH$Sڼ K#,&DV-]cz ue$z*vATY-ypIOp לjfsF GL1$Yni~1$ TLǪ%R;P_4͒'ݰ`S.\뗼0! ^!XM e?n6NG*6NMNZ ;Hku4(W3ywx+(>$Oruv9l]_Th11UjG]uu3{@dXyg Kp}p~=C w_o2[pksL%0r͹%.{wlr ]4(xrMCz UJ@>[A&q:MC = >ljʌ2{3F)T*,-gN\ O \͜GU0\KjWѰ|dgA6,sF^>۸S{#bcc- R#GtEP6!6a}9 $*eu| j t??Ắ?ЇK7zXvt`XMZ޹},z-_]7 w)NݠP./A?Ý&IxnE` 9as-8hbĴ`R3AkfR ,!uiLBԘsKhm4+r.up<}\x_ =VG6L㛗;8$vr϶֬ʺƺ,WOtaz;ޓpkZ~%8V,:7wr^VZ5scj2 QO ȃ\ȥn'!/)`HIg4+r9myO+WZdp..>_ȘLF(dR|Q~J w~;sq>\n NQ+˗a՟{gt$:9du8%?Ga7_Reiyn=U: -eW>uF |?Y/ZqDےlZ~Ow҃9Zsg$&SNRyYʹ4Xl6g+~J[B%p c ͕"nO;1|^ϬfW2Q7Iendstream endobj 147 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 407 >> stream xcd`ab`ddds4~H3a!7,)4nn?Xg~O$ȘW_PYQ`hii`d``ZXX('gT*hdX뗗%i(gd(((%*&s JKR|SRYfq1޾}kN=7KDUW]Ww1G͌+&uϐl+#ќۨ7˵ߥP's~Urscߊߌi!yI9s=Ew+[yi Nʾ|u<< [Nendstream endobj 148 0 obj << /Type /XRef /Length 164 /Filter /FlateDecode /DecodeParms << /Columns 5 /Predictor 12 >> /W [ 1 3 1 ] /Info 3 0 R /Root 2 0 R /Size 149 /ID [<1a2487cbacddc60c5d8610ecf742795b><49dc3bf3aba7c61e942c1c5af2c68e9a>] >> stream xcb&F~0 $8JR}f; 7ڣ!LͰm8 dmL# X[ bOآ "*A3d="HF H.D]*b  Ddء% # D2Em,L`r%?2 endstream endobj startxref 314011 %%EOF robustbase/inst/doc/aggr_results.Rdata0000644000176200001440000167431512321016053017635 0ustar liggesusersXIM1bBEńHƄ0`E@b΢(DPtsD]s^wg陞0it\\\<\/O>kmpolbeݷ~G~?T_O^6 L,aW_F$~09!>s؄qM= c/>m =wPjMC=#X֭'U-#yT /ӆ_ˌ};.?C_\ u MtkSO5>rTHv+P}x@㰡6i7OԔ>GG,%!) w<'C㥖L{Q=jo4 Ih0Wq)0cyL a" 9}u@SQJ|0R41QHXOcg%a&rġSҒxⰶ-uן~fhadgDg/g%Q]/r?._3ӏUSh,bzu'{+N׫hr1OjąV EBIzYF|ICmEM:=&2u5u5O*JJ̓#gk0b iii# Mmתi9O_?>u(2_#yO#%AqJ[':cMϲ=/9f6I?gsLz&I?gsLʤ39?/ BOvƿ6sV1gvsv1g?rDE z:Mk)PUJ7kWL/i>k%i~rq4tre?/.aRKri*HHKK+(KJϕKy׿tQvr*bR3ha"_=ljizCzN ibbF%)m4apOla 졞?%.BBAB!ф'N!F(F8PPPPPPPP󷜿-krr?]}e@doՅZP=bȠC׼ Aʃ "SY1݌vP!i#P5rkv!+%@/voB7,=iNyA:,waM7j/Y^Y{P!.mJ{@dmcVUO}GfʥW?h/bӬ&x~{w>,;>$׎x嬊94Y1Y!(rcnmjoBŴG}jBYM(dd>Qz#Y!yV({pb4|Qr@pԄA~PyQ8b Nm d?C;rAZ^ʾ'+Sv1pqY>e?__ 6/Ȟ' y.HOtXDCeT(̷O<SWO4Y~TkGVHP*;YU{xVe#j>!|-!g)MB/O|?U,l v9Py;d9oO|5!?j2"&UP`W_r!T_`*NvTTS} rV޸%buYiݦ/q\>ӠbӬmzUs#||.(3+: ODY;sPcH%z7a֭.PNY#*V:Gk߾si؅,17h0?"f"'Y9K+*W}b ,/X4kw*Tiؒ5|<#tNdd|X2Jjki+q /x~Nm{(gJ-qdn>0 9Pp픣r^ZPiw@KweM,y*:掳 ]5[f(!+Uv*;Tvꌁ7̷Kz'ϧ;@MKMD$}YO}2hHcvǟj+tDV2 Q_Uvi/ȊGM,V91Gi5APmaO8h/ooe{AM'p#Adn͚~ E_5JC;OS.[/`"/ձ+=L:f!Cy')"vcPݞrՆ 7<\麹(J)<)wP8T3r%?^)r$ktS.G*'\9GX5wh˾DV *uSBͬgVxlFV8 Xd{5j_~eW. 
/ 7/޻Wd%Z̒=StPxd5IP9P|r+EP],jðwC22ZmnPΘKȊ9Co 0HMdybnxao _`P 3) 5)CCgG`grwX*5xjY/+SBVz蘽 *Kd.ߧas'doBbG?7y߯ׄ/@LԋfP=JKO}yWndeQK~|ٶ8뛷&Tb&iްcPx?/\#fCM]s[Ʋ;?3d0#av֥V0YS/ꦛ9-]{7rT3} Oثqߖv`PqS8>EOOgZgUKBg5P)m̭5䊿q /hrP9:0gP?ٲ3r3j7uߗ$O`%=O{3]=GvD<>dz%Y/)qPeZr6#YϜ.=s`}P5h#qK RRS9+U[QUλ?o۝s?%4"ԛ̜z^3\1 f5HE^'z ɴJHK8_, 7O=ѣ(,2xQcZ$|7ZLz5f}F:;@v֋8_-;ꥐCsS6,1_U~*^n?̅B7f3 VKGhI|;k*1vOnE~o~RJXn( sV<|&X|Ї`]3r=ļi~whf)ŰTF)8u[qbo{3$5<٬80; ѱ(gʱŃ=7 FI@e($pw8Y}Ku@@HpI|`Usu`=>2DYmw`/o**oΖĩ6!pcYv a:4Yrq|`?qOc_|lr(^2\qοs\P l x2b~Y"7<'2avd: t׺Xr6v2i>ea[N[vt-Efk ZHFvqr+>!}?y} KwZCPXK;!nƀ#;YiHJ~gLuʧT8iZ]h 0l88MlBە>w*H-^\^ q3]tKFp t, " LrbGe4@2y:$V#g殈Hqv9]A䚳Ҽn -=i !2 #!}"QH[c2^}Ao/SR32>^ Q,*CB\T Hw[[?k*wn><:Z$?iopEkCQad+y]%]e,?Q^!u쇐2ɶd~ߩl4$Mx6igWtUr/6lVI6#5]sj_`EzV''S0 eae6 Ar|Bv1PlZcG+2ʿFz#'>}g6پ@buضLIzlt4+rY 8-8qH Ʒϼ>8g j=_O}# yl=vv;ݐ>^nl . ȋN-aϑ+l_oɹ{S>g~jG~P-<"|=ezo+_5FKgwrAܴBo4*\HL .?fP9?,ߣj\žHiOY~P /Q_!ŪK?Q*,]i{29?ue,ٯs9JdZ#=?4!)!jW$P(e7||w쐄GmC;+{}}oO7ye^Lno3=/?H4"t.wyorwy{- 3T9}S:e_wzȑo}O!eGzؕ;!\Lu[н>&C`hjB]Q෤`íWA&Y g}Y1<#=bɵ5c!|Nm9\m%1"JB8#jev_[8ӗ)vK3=8 ҮA "<1#GG?%x5]/J_ۭӗќ-9LVc5 }yHoJնpe;y辧ol/BS>7MY`:ӓ6i]k0_ YܱP -pzNJs]g*A9|tTPoQbdA^po]ȆҮ7y%"6"egFU"ӭ!Oe!l_ytE\s_D'ײ~`z<26=,>vȵvdjѥFٙ^v2l\2z 9Ѱ5کt|]?yqFc/6Ce|pzHMzGo`m-+@v6 #beڶ?h7fF.i/2b tf9Oĕn8%P4>t̻"4\qtlk4:e34{lҸީCr @6??\2iOĝaZY /Mwo:.ׁlnܬ *:&V 9ݱoדs1woNqm4lϿSr;A0ı.;v5ڑl[l}FlVdITTd{'~01ףV$7Bm$Bٺ{Ac?a{kĐ˴JJ!nr"=ro q$NadMB Z4% [?ZGxIƘmW-/;ކF{x7<%'hăom@®@Ɇ-3#$#2κvVCFώ3_{2ASTe!2iwp)do!#%*5swG&7UUO=,D9x⢹ARuuQ'3bW+dkk#$߷,ၮ(ΩrhvyqȤ:RO~Cf.lvYLiq83{je{߸tH:h3dx6 [i Vjv@mkLӺ0n12Um>N?p_8qiK/Mz?4zC;V,TVjC򫯭/+ _+PhEF7[!5ڀ dHݛo#%.#+r_CPQyn( 7/ . E5:P\$N\(^Sg $y܀$gcɑݟ /kL2{9dA3,Qw֌dM2}a a̴իT!ʂِ\rn2|H15}׼q dV‚A׆  }5t؅i,L)"3,d,AszJhGY7mKAjJL=dO;1 26-g ogn2)eVOJ0g+=PL8>(a'U'?\:?v*RR0e iHҎi* IF/=X>O¢&Z!Y_N{̊ }QdҀEwr5zh+4*5X.R#b%2%7V}`_0 $H It$8ģrp=T_8CƴmPkzt\d}}G{t[|adC]S>WfƽvDw6l|dӪĞCwXӡ>OPe>[ *BkKBа{!OzXf:8]E0m&镵n>D6iހl|zҤBd _ʺ.@ ^l]ĂWLw; _}TT}B.tfMHB*ݟP~e Zm|-p>VJgmK >߾Ou w^ҸiZHyfA?v]߮څ ty Shv/ V]XuAqO_ W9|[ l0UP|Yuy}2VkLGַlxh0Ȧ}D#~De1P~xX:UuB)׸(%ez8pzEd/A&M~ 7zX"m}l9zhd9ˁw"[3vΕC6׊. ZHm,VnShoZ^O8,"A7ȔiB)g/"t.Eo6S=4[{""1V $7Dj): b~D ͳeSo&W@\熮TaB#O ӭlVL߭mLцg2Qc edկ#SY ӎڼ 9R]^HIq'ib4!;R;vYǔ96 ˗ܩe(K_kYYuKR8WAw,&VAOR( %;$T`((?"WH}adfj2ռJ"WCG&~/LVo~R7JɒVWL6 hPxX22M%)u$_sg-Hue7Mfb97$}[},2[-hldq/+MȬEKE!~#4hZ5d.#3,NԎ>yZ!HKt(f.k%N>-c^ULz<ָH4'Gҹlaφ"s6/^dťT]Χ"SԌKA'Nj7VOIcw-83Ҽ^@z[$2n9cdo7Sg!dí[yOӗF.1am,)r- )nʖwGP?r@ŻClBvW VݻSl-һhEU١Hkψ:#P 2įps}Vutp2#5O۲ x@ƨ}݃t,&ݏt?a;gvsd٪.!+lD/N iOHF}>G1Y<Ïz Z>DfNy[q, L]OW*D[,|lvOsd3q2P,z,?|>R#ţl8LP=~(uUˌ_εȐo")dX_ 9dlS:dge; .Qp5wgnȔ.Cз>C b}ADQu.( :ĻB!Y?Z#w82nӒ0diIQCduL!Sd|ߺ"sޭo7ݸS`IO5@c{W8bAÀuD8 +xSf%\N8n< 2PH#F˥ Yw i&XvxX^&=(qHT ^ `_~T!'~abzh)=+]w#ùg=E?7oY!SOEtʗLF~Zx[tpUOI7ROlWC39R4E-i{iuH?KGF72Xd ǑZKDvz<<"!,+o൑۽ "V#^;95 jk;~c5`ŠSϐo>?g.9}{LdT/ ,P!2ˡtkӧzu/=$ ]L̻kLڕu$}2I!|۠ȄQ VL݈Ot}u<2.;37H6nQHJ/! _H.nV|*92REȔ= Jd 27I( .6G& 9dr%_ 98HHsaC4M\Rv=dOS4NA)<~2.LE%4Y\Af -99 Sb[wwGn.ۑLLOm<b]s[HxhK;>9oA&.){"yj$KRgiHLo^u kOgc~zahSl?ZFgR2Ѵ7H㫄裃t3"\5QkᖷD ;NӞxdY(1--UtɈ~+&Ry$DW)_ֳwgB& *s{ZL|dCV8ֻg!YV7Lg#Əx`Sh_ ;ε AfCH]]WST$BqS;)|?d[ d7>aq9r$·o7I" Z#YdrB&INn1?f >p鰅7־7qqsâ9޺qJjRd  sFW@bqİArdnQdىȌY!ip86 2fGAԀhvށyYm02)K ꋤaB^!N)AFZ|@dV !/Ȩe/##/0X'"^H xvX]Muxڎ*qi'dʗ*H֕4{cjIE4Ј C&6ZC֚ V X_9>% XTN zٍWvt㡼մ4ʑIoFN^7Av/H֌I{NCƭ~F>%[d^KNHVZg֯}?ɀA]u!1R|02GI-s2:udNżȄwfRY-?1ZvޅG&λ>)x2?ːP%rh0gQJ~\=W uy5/{O?"_{G8`5YSoq)יޟt?rs{F/r.84~_Ag޺*G9~ 1ƁP_v_^cCc. 
?~/.3Uh}SR^y⸏g{-5UE vV?/ڹ|㮴#g>Z=]="ǗX{sS唳8i߮XLo{ÙOѹ{*wًln굝8]!q\ya_vڵ&߯/q+)M^< *띎[zF_kG،+8GJWz 12[]>ZO˙^= Ï#g*rNtF]η^5uAˑ-?n 匳 Q:zϣ_M5= ^$#rN ~r ?lVݦ׆Vepů*>wS C==v;S|"N Nkz4zMOZ?t`q>a3} 1}\ l]NϜg^O'^؞kũglܗ4kq>8%.w\$MAu '*qգ\ j9Xz;_~3n)kEt~7ڧZ [5sO߽V n];U 匁O@92oM``ܥ8X/{%w=󪍃 }OMWE&rAA~UcDԟ2?  *2aNѵQΆ=s3n@qN'$;C9lb[htN6=}]\||:4 uoQ9䷮_}Ar)),,sr3>Tf% Ԟ;.aT/j.z\mx*GF' :W\= ' \9VլlVÓZBT-4 Nq  l 3]cYA6Cq҂2qՋ3`OWC>Q.dFXGhyU!367 D܀l.]-L+mgIhv 9)W oyMy{-U28 Zo~<> ]u3/Cns}ԉ`˵I.%#C쀹Ǡ +ˎA˰GseяreWws `h_{ٯ9}m15d_ ? \.xo?7ѶX-! .ꋯ+wZʼnԮfpٷ.7p^@jZ}Ǣ< BPwnF.l nOKzw0no <ǮNf.IരKB5&p#]ЦGNRnZb-wv̀GbDB[rugK|\I{zq$gLg>7#b}RB#g1ǟe ?{x?|Z/'CC^Jnžv&=FwDwKs‰o^ DH?'5ϟպ z-\$tg%`k"OC}sCE/gq ;'O.aDb_ j H>~6>>3~[y 4m̸ot/q= ]wھ>Ϻ=H۝==GǑϻWg \H퉾#czL ۃ])  o[A8gcW~z!hP~^gT/PGE?jGAY<zD|jT_P:}?#c{RO}"Vc*'͟S?LmXs=s4q?GS&jU!'O2ԾiPBr'zGۓ/OO1OZNZnf4 ۉ^Oӡ&OMn~+#ϋ'[byL?J/bDi>=hvg&a?Ek?-7_W1ꗞEEҋ?Pi?DꇖK>EO.9{La/)CZnzԟE$z$~I("777ھ$ ިL=%~5C{X~CT~,Gjz~Oz QZ_4q駩SRy>@cjssi܀~G퐒K?~ǗU<^SXt[mZӑf-O%j831oyBZX$nq/5mo/%/}zwGoz'1zL%rbMDG Gkt3FP 'kݱ&mw1C}Fg=%Qn̟5ghH/CqE&)MOZ$A;/Qf\NtZ>g~q?_F)i?W)qKX.;~W_Kr|13c~..T.Z/'Ĭ7z%>h1̇'>81HG!3N7bϰbğLuƛy&{Gr_HOzً3ەiGCP/d΋1rtzc7|˜z$rH}Mc~q|S4#AHǓz#/No7N @kXįHχt_8AIF4Jz_4ߘ? 1d\CC50'rD}?끖2ytJ}&u$ 'wfF!Zh91DvcH<@|WiE1Gaqg*=X!uty8}T>rΗq|0|ɧ WLǷTމ<1?QۋC1si;R8C~*ӞyOs/ATKɘhxh~'iH: VD;A= 3߅+ZX?N?'|<8Ү0 8R//;C~JGꅶ;./O_!4?Ӹ: &~q73^8˘a3M y;Geίu< s~9ÌxY^'֫džcu및('3>03dΓ0Mexo0όyhZ>J|:0wz1yNqs}qcb}1Df\6C1.d;q*L}aQaȍO2-yrS Gq1C#)|;t\72獱 ;3쨷u&>-?,9OȔ[8/d<#c<[ud~h!i2J*',3AʓA}&)o.ʹ#<HzK$^L?::./A3gv!yPH!KYlH%HZwwid3f$WsH?Xz6k֝P.5hE&Lbi?J_,N,gi\~;TW-H30 g3#=A4/]I\Gg:GI ]_4c4Nq L,z"?GDP,TS"G,NDŽ$L"Kҟ?? Y2YDoh{x+'bt]G\:~-;ZB{h~?%c+O`7+oi{o,ǢIyIkk~p 񧴽踙'qcc_ oB qgu+b43zo'3_9$f7sFvBRz+"|\'̫PHܙG>+$)$"bE_)&qT1ߢ:b ~,mo(AI2J/?, 8YHGO;L!)W IAIY@<NL?ʐgw9A4~%c ٧H^rA2id2O&zEh='>C%ğ8#9ݳxOʛOJ?#A,"]4NF+'i:r1-zRd:㧚L&O='\AǕxH9|O+!~AC 'KXD_H,ҞoѸ77N%!~]Zo餜8%E3OK|<^ȸ(ݯL)7Dހnq>8zL=oc:?OZ/2~k8LG夤t3O$*7&ٳ_' ?f\Nc4.24_B}8~qywrD.{G} ݞۓ#~4>cĽ `\ŌIyp_ig,7Z~\G#vKc{ho/[q1">\~4tGkqc_"͏izFcjLBڇފ~ĥ>K<$$u%Y$N%oU>H|SM*TUM柪\U$έ ('b9w0c,!QJ/#[Fr+h.'U7ML2WOZꞝxA9Gʈ_*!Pwi\D[1R?DoK:i YB/d^R!DdޮYd=CJ'zNʓN1ĥt^ޏq 4&2-Wq iAI|O-鸆_}"H1n'_dTDqP*!zZBb1+ r0 I80xƝiyhEi=d82KrVdm“3Yo"zI3HLYń4NzUN_Njt[I+߬$zXNQA$RQN22M-Myn!Y)$u!;GL'D Q(q7c~)@(_~7Ɠ?t_OA%q3={u z?Oߛ 4n={ޟ=~??J杂ɼ}@欤ϞvB/Oi\MBڟVğ3Wzi}oxܸ-|1Aq ͏'2cގoc8A3"Ki}i4 gJ@t1/?K R^Zr1+c;ccgw@ۑ3} zcIj8p|σ?o_ =~;%D?hjy%R=Sz|OciVnii8K9~OyIw+wZ<2CjϵjFۑI޻}4CU}oDzs?%m(0נD )2͟)s^'z✗|<~wJx&'< sޘggTh܅$M:OI2EO䢤8ߴ/~,D|_Γ}&zW2@3(s}OP?ߍvDR=bC@풾|o] 2f͟^!=@?=-?JwSi~m?jL;fԿtz|?Jۉqr3~b~B셎hS?\W~ܟGY?11'b|G~vD;t|N*u!wz{wɘ)uЏR=ƺ0oِcGw]1Oy9?8gcQ?JD~GarQ{zF27bzPyiL=]=IZ/&>Q0'7>W>iyThAubJIh~?`Z =y:?F(tݏJ7/T>lO߄#{^}=N ɗNz@`O4~s,c4ߓ~["~[A4D8F;c*7`t]<O35b$矫Or0CFC#q}]jWTqU>{t_1]t?{~T^臭t7P̛ӸT?hS}~3(cR~~wە&~/J~yHAg{4O%'}$_IhT'Ej[ux&[; Qe~;/~ s#X_t"?>vjWr%xi~d_ s {74"qf,/o^2։{t_Q?J^=~/1/I>Oz7NH p?X_~P{*==`1,0PGq4O:+ vB~ (}n? ?zoC(ۣcGK_~ÿxq3}5ksYɒˏkcu& h&#ޥȟJ9q.kICmrw^ sW62I>2|4thHKHQHoIO֟k1nrX[XWkxam '~'v_g}=@?ѫ<#aC=c{TrCF{JJ0NIsJ*YΫ9j.UT?zԟOH>F>@_~=@(UunC4ÆZh˴Mϰ~F%tOgǏjL#|jdS~ko '5 Eu~#zsiHnxɏNwPZC0z;ldO(5?k`9jlҋ4aǿ2 *̿{܇<OQ #v;BMÆѿ$>QSXVB~Ish2_+=?_~? Z2GLI48&qL:f w/&VwEN;s895뀦D?ahbyQEÆRg!y_axT#X I?cZF~<;Ns2#]fk;yCyi$1}08Nu$@gY/L&29LzIτޟ29Lz Ws&=EAqw?c??c? 
?~OnChQOٽ c-E v_IfreP hq E N##I(N(A(I(E(M(C(K([r59Nnظ@mAPta2ԿR{/j וֹ%9!Ț1F/fD _!`:Aߠ:kf@V1M!PLDPԟŎ'>l$t6dI;ڛZ@}aCӑM6PV½x#&}znP%NXWY:k]dղP~bhUXքlX}4kjގ ,G6g _/6eCġ}ay?n#躻!lwxYw{PΕӔ6bTSHn_J]% '>_tYiコ,R1ښ~B lXOItoFe\ŚÑ+h[4`fbHEx9^u/7&8(9QZ)q\){{ѪqȖJſ|_rTdKک"l?[-.dOGhMI{:ZT}]-o#NL߁l*(, K M<KV b@|6;oXdO_RKuW؈omck 6 ӝ]}͕/[-bTz@SS,\xAאm[- m*z*м9J/|\Q*`wdl1|{%g\ *P7U vD%rٛ"aJhdS.MBSؔjhΰjMrPIJ;Oc멼s _R*qqTa9|eY#<ߟX׬N![r%C]^\{ن"P\4pA!_M[ a?WS,eJcQ&qXPo5t !:Pob83˳zJ3|1> ,hBzIri?yߦCCH 6I 4fm?ڥG,jB\neؿ-1sZ8ҿZ7y_mVǮOHoɮ0WZj6uÑ:EUٹкr8+ %(I-RSHcҶAJrr/iҠIϧF]HX;+% ·V\wb=@y; k2yw~B**,&Jts^6vv+Q9қ'iZj@W|OcSeS+[?,xNˊ+qxx}Ś-㻓Gzs5ZH$]#i^-4/X5b'~}y,F:ߎ1NC \C o_Y-<BZw 0w15;>(EeCڹ N;V]Q*:Oiu>}k'_zn`NS32e5mEz ͵p}GOqH_ OA>TK^. ~V DKWIp~G̯JOD:jDGޛb Vo&אַc΁oVq0Lbٷ#F߅m|g B7[okRg?o+Ex}%t0'Y dPS1ɺaf-6F/;;Csa# ͬeX;(? AxSIzCfgI(T4{rR߰)|]1ιJOd._wMz\vٔWc*%s^'q'/[uXwj颮`]S7n*߁$% |U!{!]Ď@"komu6H<{bVg=2U\0 $0I/cy>_!{҅RT8$4kP m983®ywH{6˚0`Pqc72uNߐVyfAzG&C!̞sѸ7A[ׁf7ߩQ :!y xa@8ϯ3"C_!C{V+}J^"}>L7:vܪg6LͫTYCЦO%OV#>w  .}A7]ym?N5݃r7'ڎ,yqV, 3"j#3|YvJ2_oPR@&[8  y"2 Ek ]!ދ9o^Ct!Px#hgd4IWVя;˗GC74(ϴݷ>%;^@wLؐ5Vk"_{=K( %>cjŔ2O WGڢElU|KJGlZu]IXJ;%Ncϖ3Dxm#dnL7ո7!vHVճFփ~" uj8dl9!q&Hv3L2# :R=g93/?5{rVSE>\)$~u;a4$Z42uD mɳ}WXVB)[ ėE.Jv}X%tMM)HOVS[ "&qrW-$e:iH/M]=gyc4'15ve,~ Rz? o<۟a#.&iPfm"G@"Z:F'2 a͜U5޿-wyCp翘7NC9n0|Ȫqtՠ 9ғʬ[tOn8{˾wVoԊKCY:ҰYۿ)Y/7c%dS΍/Hadi܂1=4;y vhzne.A؈özݻ -i!|\lTUH >l{Wxtmnqqˁw!W9_lp{\eUe!ME QlA7qdJ Ohp6geQN;/B87Mܾ{:Sd䗱=c`<&+wڟH {4([đOˡY-tX(=!I7#k)EI_d#)s{JSqq?d}͏bK-LG6}ٲ}- [wX Wψ/M^B{dַo-hS<"4 ~)e"۹KY@,pm~E@Kx>_>%VCow,axӷ% ϕ:AC6N7dݐRghE6f_CuG+<>S+z nzW81s2urhx9JEE-ly5azdSQUFU#4- ޥv#lrucKT=fh3l+4.. (aS\iя '~л_,cݪFHo)!5c}E JHoIOn#ۧs/.@z߷H8y6W߲"ۡet 'v|X^7B>^o˚v?0D\S|fCӊ#'7-Sb?ҴwQ4TYӾ2}8s^Mg:%={Oۜ%u-?&:PdHkhι;ߐ= YA,ݱ}4TLC "д+?y !,[B]| |& l\4Eox$2m} RDp2v @tݥ;jk=2!İC"#󄆵v89#,Ꮖ 慂 qJ#l. e#O ,V>#*~s{uGFdSq"}wW;g CvYU-!2l}+R/!7dl x7 :EȐ8%מ_/KSA|1cdh?\ CǞ.өz/P;1h_mWy" 4 {?&Y-ʀM衯yk7ෛ{F>HKjz Y*rG!= H@ԥsu1Tqh5nG"dMR1#.o#:\ PgۉjuRV6YlyRh5t'#K4ؼr#]E9'mz3j(,>rzdl@=d*5ڃ_x B>Pjᄂ yO (]ђ潭mlh2I0`wͽ!˧C6nY9 OYd=YA~KI~>D6I9*c7N6%}WC}V_s_= vzNS=4<^lt}'ƮޛOF6}MvٸqKjfݰ '4N["=omȦbAצ++kSueAJ}ϊ9F3xV;}TcȎy@v93l\' (A^##;>5[PB׬}7v}vľP,_ |2 Zԇf Mc$ ?OS;lZ( |Sc>-4.D6;/{g&dbi֭oZbSjy܌.',7攖SQk\'' wˑŮ-Yiu=${r_nL^lo||kE®,ndz9/]{Ƕ>Gn{ڕ]*ٟU~s=lWp/%G6خ{L1.0inJf?wPo=ѸD( R BǞp\zCf"E^V.8nlj̏lܱQI3E9޿nJ_V.W#RLrG?cs= [~תx4s`[~{ȏVvWK޾NuruHgcه%EzxWwXZH̐~5Yw& 'sNy)-MH]' n: .3(Rʸ`⠔SY<:"ǩ.(-!Pby%2@t6ydpȩ.\`'kY=Bod7d؆#K4z9pD]T'_cc7ANAJrχW3"?!u)_2m֒]HVʅՐ[EdMh<[^s6'tit`M aeH\zV~x F A K_6nf ʐ~U:?>!}'tlQ| sYs"% UA+h#CdO`?' PTF+wJvByvGD.KHxb{bΡwPUW*k!mZ]Ց ^t"CbwW!C-L SJغx (F*aEWSANB]2Af=g9萆N4_t涑wa)dp. ă C}׽_3jL!kRU!uriêŧꭦ VJ @7 h/~H*B}| uO6EkYoL'deg#&l~=y.pLvy4(P[ >y%(O_:ń5rm!vȂfqBW1^BfȂ0+'&"K^9,0"h<\dq~ gg>EB@7?"Wok&.%ZL~Ȥᳵg^\2<깱^5/%3dAsE!ƤӆD Y13C<h:P]d":rr%=^鵿t72w6#`dO7#Df\Zm̶?.Bx=2gqZsȞqE1r/d+6kAX>1 u3VƊHǝJm^o j\Ga {k/;g=,:mڎa<`vODz 7,چ0yhHgwi[?!aZ|B IgJ"/Q;:RJ2ۅ7ej>A A L7¡d~ לth{OweښS#w+!CIuڟg%qA@!1a(2TDp0wx{ 7afu\ ]'Mewa/X Kg#"~{9[n 2dl(2!WvJd({2ȤD#=;_vS]RT/`γb=al)>H`mdF}Ȃͅ!O2=DZǷĺ"K^?j)d{ bA[1Ppv0չP&r7"-ۏҜ$M} Ğ.7qO!d|e6ȤG_G&LA&OZⱒKx--dQőYi{WA,V?MYo?7+HQɒLƼ]2zSk{5ޏHEN=I0m2o)wٱғ#\RҲY R'+~@\<,7UEBJJqe'!'Z) z|/Fz_1I4PzqUdQ!xrH+pY[qecGe!d\kN44YxЁn6l7mtY dʫ֌@ƦV6KB,WvA&.}qRs8dުd܉ڥ4x /SqYGH|čB︃ }CV *+EQ’},%_>c쫾M"mfϑlcU2@g }4U{¦YE,Xwi9ҧV#3QJ%_!zq4ɪWs!f5$!_Yyd ;72pqcuJm"#CMU_@~y#[鯺DdUNFzKouk aYp(pS~:pXX2,eI;Q L> T I FF} Qv6QS:s25A g >‚:[v+ m̜/wBȕGz3 41*HoѳLB^W1g#籒}d#2L^m3@C鹭|},һS@aHϠ,>i ߤ.Rq#(tBhiVܟcU& 'pkgpCG`Q)i@![ -X0Ά͏vx/ _EO.@zO]887z KJ૱~yY; s(.v5 |殹@D=l/-HÐYpauǴ8'Hz5WBeۧoY}*lV䙊3=12`vp)y2:z+\rB)D=A~s~slS6!$"JX 8#g 2倫d@@{>!&*. z}$H~`|&iXaO*@F:? 
6!ȨNJu6"Cs͑@F=3-2D7#_i2v|TDdؔXW=&rrخ& ak]'_5>Y4$L5sonNFzqzHםU\BwDKt*Dz*Qt_Ho(Lsn[zs8>UeYTq>X}5Z^K0n2nn- R+WԕgOEz4p-DG:x#G]x^fyAve۩앟 Cz8(J|'@z :.O劑ٵc*W'geު}u6t!}KEcMZ}~EոoJG4/ʪ*t DE%s dp׍c }nqG>[m^ ]JKvVJvaԏ'JESnqO==fŽ 1=e4s Ȼvg>Kn,{븠yFy#)o:O,=}zO7ޫko<$\sry_]۳OJ_:CvԴOs<mqʿZǢ>?s2v{zw5{Oy|>A! !;{Op {{qf\Gn[)g/^!d-.W,XŇی X)#.bSoj{ׯ¯SF^ci5}Lo1n+T,9ډVDK,/5<=9{[c"?oЛqE{S[S\`ƚ^_O`9ryyrZ pw9SU5eĒ+zsBɎ;9Xzu_sЯ]{MwZϽt! 8dO9[[O8o.n_zg^,C X>B/>/yͺYNwY{R7{?eSBUN;*7/isۭCя=ЌU\~/xo[罨0Yk8ӟo98bM$yѼ7? ?N:8XfuMVqA:ko(Ej%ٗO\{I/ug'RιPGKəP˳kd)T,``AkLJ{8ioD$4&,xсYB *>G z:';ST gGd1g]9pGՓ7E9XҾHRzChwjI;{2yt"HQ7tzxz0yw7X?TOQ?yz7ry#}$K3O^GO*/OTOD^ЏtwXoTs\T޻?o?9b ԿS\5A7[/X9ߪ# !Kɘr3%I墤Jw䘒sN&iSD)3_HyvlzLc??? O*gwD߱~{z@zdI*gO"?_ǡ>Zom?;0!׿i阰^h:C/H~ycFOYOLOzLSߘ(T>Nz7j_xڛ˔/VGxT9bȷXN*/}>G[z+oS=Im޴>hk*%)ii]1hDsIj:uZ|sssSE \7wڄB y:Qv}y?;pa%ۦ  y\ytrLyC9J lt r}T^D H{sP@gj^(`;YAk sXUMX `\(( *vw؊Ԧx78@>nQIq6eُFq4zI8\PI~b"|"IҀ$ZH7 *ȹ-C"[f4|xc,?O$ō0^}N|ϹQ~')z\"h$30gÑǹyDPB6я|2:;k rC/%-G =͍hOZ~=ی;ZI@Kx-Ώ#7AhE6[x6qY:"$xK(wn_ \\\7 U|Ks, -9v爥H쏊&t|~FI}4'S?zOG˅Ҩ]QS~.zM?+Q4/.DS]Po-ުc?u4,;b' -ڙLt5ɿEI"Wd%I%W-'Iu79\tikH˟x=OϝD$G_h?|`Ø$uBNf8riKm0Ƕi?B?Yz>$_Kq,$}`;t=ҮI:GSNv4_П~"vYKGH?$~6ƻ}TGR[ag*-7 bhMuʉ%Sȗ>?~K ?Yt/sЖӺZ_B$$-cH O-ᱮ|'hB|Yt#i>O@i?5_>Fk?Eu*E*?lZO4_FG*~ǡ8Z14x/I?LѯҼCI\I%%ZH|3*:AJ*G^~pxu(~PBˁS-Y \{n擒v) 7 V˳Wf/BB=~o)_$G-M gqD!;S)~e I/OˣO|[-f|ytcg>Q)q[!e6Յw|Tހ?ݲheQb@Kh=AE"Z?ETiHZ?%o-MJiSz]5n) GۣPn)AB/~ ⻘\@㣀Y>g.~Z@+]Hw |w_՟b:.q:E빘/@ zQB F{7ufL#%[DyQ/xGp ʧNji|)"Dmr6p^ܜLq_i 7mw:/z viUCPx˧O"t_H KWL>NhBڏBEt_۠s4Α4Ϧv>fTէlԟ8> 4i?evKGi\HW/'ڗOC}H1_Lf,g.3ڝO=>??YTiO)9@(Eu+3ϊcA>R28)ꗖ[BBE-y+A B?BVDۡW!7~t<P=(":N|Ji~_J; Ow+WJǓԆp1!ρMzz!1W <?Ki{q])l~/@(ut\JȥO(:\@ 4^Qu_.y4~ h߂ޔ|ډM+<FXPPqyg9ӎtgנ3TWz ?|n ՗6Rێ[gx>lC9Mg q gk|4f@dg$[ٺ.Χlq|`/u_WaQhh_S #n4/ɥ|}{ ,R?|%t'B^M{ hӼ4/qs.>:}>ܟQ4_8σt|_?@YqE1< 6[ț6ߍ]J[VGp}=~,EOwlmޗ֏7q>γ| Wh>0MR7f&;gxʧawq76w>%);>8#[~whx[+wGGYy*Kc<Lviom/y!^yWnx Ƒ!!0674^zO'[d}<D?ܮR~Oi\僟iNWȫDyIV^ʓxU^?~PO~N迬Tv:_ 6q8{ ooxU$;15/';{t={^޷6H+ws/8屷}cw p?{/sz7w_ tߛg~d14[ ﱂ}=wz^>,mG%xp=_z{rºv:9 >{{| d~ze%=QxOޯBN])NöCmUcc@yGߠ_xg ?CP\ZzqBz,Bm5a=WCzP>_N? MbK C۟9v]::%.E~ ;FՀނ~/ڣ-uJWډ]~(y?P}zx`9<~Rda7~]Chg{n 5Gy'z^\Wg??WX=`cCcHgmh\%c:~סB\ץ ySm켄訸[Ά^ (^P?+؋vC޳kY~~ ~JכA]e:g:Ov? (Wjw?l=b6{XonaQP)q`m}'p= {77l݅z^vܳ@:~_~7X 'l~Xwq7u]. avc3lc}zd~ uƥx> (;/aƓu棿Q2!b^@^߰<: zQ_7YYh4ldoXz+R[: ((mwx~Ǚl{q2NkAȟ[0ϣ~  bsQ xqv?e'Q.BCAXA'N]n+'ΐ,֕|vᗆ(S? 8/l]q`#k/;^?φ5eg,0ּ>==aYWΧ@v= fϧ`'\:7ּ|_֛I:ȣX:q';`@뷂<sGCddyDžs_QkVȳ&m?(j'C}=<qcO'Ni0ym$Q艹p vCBc\WuyfIy(k=Txwv_~&T'!n0q7uxEC4^AW1~B}Bތ"T15υ^yf/A;oQv{x w<5?laQi% /gyteܧuq8- q~uhe͛'|qL<~ЖR<GYqe b0Ef Y8{ ga9غa~ O`<-G=Im'\wQ;nn[GYv%;6(iO;?ßy)v+jt s)ac^UEiվ}W]wjվ}Wvc2"6m6-2W`l-abhn N޶فUS-v-ft-XT3ܭm!@g3 7IMY ˶hEas_w0c] ㌟.137l:lK똴_wH6cmuyΓl݃go?Y6n\HzXnmz*ʬ]j{(޳4y=k YKRu55uC 7FFMᰉ}nSpi8?@ jv'}ŽAt,t7sgȶh8 |G~觽22ZZz1ٶ; ~l똵PanFlѵf?㏂¬O{eCeԓ̡_ SB?a;؁?~Dl/+ẋ)6S Cc#H7wm44߾C GCuh416Շ|ۣ5~VUV[semi/_M"g&Fo;uT٩cvB^XLϑ⺻oHEw)jﯷfZf[X765n6w',Tk)TG{eCeF`6&lbdM5QI2"^f{PĆy/?`h/} 5BlƿolPMiq?܏$ߏo7-EK~:s?OG22 9[kapl21 HjOq2Oױ{T J@RĈp'RO?ÈO`]kRn0oDKyP O[hlqs >j9×pzg@Rwni!)Uz_ꢍ_F0&2"sHHJoO_fHk Ew3l}K_!j#kcw'gZ!E _I׽"@R'`I~,q=ʫ6nwׅݼq. 
!|9gؼ%皣B{WrP )x5FjeKF0Ս ֩d@Ro!ޗ|86f-÷Y V=z%5W50B-NZFWwlXO:}uC|yC {zQ+dխPE+fhkms?=\:k]ȂOUD Ur\wnu֤`fm~HRtd!֒JC%IYhhR~킐|RIV`^h0#}L ^켉;Fo,&Rc\s&X M :Yf "edB׾+ g>)Hd:t=XXp9)_&9CR6i{[Z S.BHe[vcdv~oWFjef1BKC<"FHG_8E=_5#>PZT4/9}i켠#ZO?-㔙23RħJ0Jo:&s䆧!S}„j&f:jzfy̏kKMfvY=Zp*O` fq`=?7trDNڌԦ)oĞg]0 Ӥ8V}_-X`7Ϛ?}*Fj@t-iCIcKQSySu_ HImJ@\M*U"iW6T#f0Sty7GgcS=pHU(R~ǣ=0ˮutjed{|)搸|]Ft=<G=XF8u$RþIC,t|e%fHUu~nV'#CUf"Z>9Z)-.K^-#e@[F H{ ,G_}*vl5>zWKan W!0-Y|$ߺ R`Eu[ ϺJENǻvvN`"NdTs\ˈuq ODm:34×P꽆VI4W$ul_%9TY2_ SO#Wۛ4OU׏6j2nFE@:Fnqle]?Ȧ)lt޿xi kٯ׿{}͟I:-Sy_ %X2)mZA[c[v|ψ59n6DKZ/9# )u]ac8յ>#r ψoNFz`\&l҉dZyO{i;׭4)=o5nx]<'j7օ!ꆃr+}XַW23Az+#s\]"2{|r߶SDMc ]4^pp]2XG lr|n^y#x؆*n&v/w'p!3o xw8(y(ӡQ:qEqGKGNKU!=yjCn?·{gom>Tg& ReG~z"@o}Lrv{Gsnn0dr^o2]"lx[_ON0f麗pUfzmu^8#9R|{~݅ٿmije!i&FP?Q /[!"ܼmMS˞v0%W9_Xaާb1׬g;bUUѫ9-튪/#w!I%vhog Z)F3ȫsjukbDsmr!vQ5S9tD7[%_KL9d䳰7T{:vN48=VY[vwFa~f¢CfϽ',@;v5!o 8WH:>-KOƌUٛ qHӫ8$sOAS2x;3>E'7%Ή!SWt s:cRs'Rh^ ;ފ+2;k !mS8o|trTfr/nOLy5} srV7jG3]'e{8"Þ6yHe:N?ncN°z5{MSEڕAFNY>ȌIzt7cS{/ ^I'~n&;֓郷?>}/[=pNɞwXdLu*HÓI|vUaGȌ'a}]T NB 񽲛4VV|kBjΌy,tC2)xuoe!˅FIFmsL^xu('G?T Y[?n)'lqR{gvgϚm'=, <]cR:F$c2yg3.ޚ$Πk2_c7]1T|TdRiWJDmU"_"u!Oi O'L.LffcGZ)I/y鴷}S8Ck5Xb4Ŝr9 {V)am*3yn"Esx"X5պĖxe9yICxT{bьV. ENMO`H 7&!kFt/{£Yh1=^o~Dėc&#ȵM3Â@޼ars'W'Gpؘ0$M+-K<#D%ގBk֐O}s]ԧ{BNƞJd&#5&?#kC6I^^Aǩ^$i7F"VԺi&!ƛ+NN[@*^.Iox?j$=,QhUI3Y1W">5eV.dVY(a]V fQHԳ4IqigW3[/Y9qY[Zt' &v˼@j3nsҗ"sf*9^U!& 瑲'B {#3w+ XXD2(d%zLEs }Mj?yW\|R)Q+U( %od4mM)?1¶G.I8sM?Nt9`{ FhUօNȂS;@ 87L}&R4FY278ӧ/˸N]ą쿪ȓxa%fܹcn8ivwlO;W6w]=>9-G#IlȻ=Ґ<{2DXdmt7ҥN@2"Gexm;[c#:?iDkd@ M?>>ǩv\!Bמ},&V8ݘZ;{wO; 'B "H&d鈸jO}ħ!?8 Ymv2]2YVNBٯ|,-Lh oRdrd)yX5N0QTi$% 8Z#SҴ/rU<\|Ȼb٪ >#4Vg_6YbfȲB2de[=FV@ )YbDR]~tE6ל;;+#+^XN*[+O,&V\Y%poޗqr7nȪ׎U ˓-^ZQ8/2ǚ\(Rn$zk):)+ޑش?"ў6")YFD~2Y!/(D6U=]1hjq _iݴ{=O^i՞L\O˽0TH} 9c'RP2RqJ=d%΃JbꙒl7"+{uùY=)z/#Rs T{[P0T+/|L*|`J/}qɾ S+Ů"k>ȍ:x|59.oYBjYg09Ț+"ͮylL4)Zs=VRsRpR]y+'H5Iε eRquS2>PRnv"ŎɤéslUSG~3g`J CT) o+CV(p=t V&W=D:~O"ҽ++ N$-B;O\Vm8],Xx l@~ ƍg Kׁӈ[M{[2͑bR6V"9릳-H|u!Hތ$s*, oK<}_ygY?6p0,ch BPs%>{⥘4Cz+ՓVg^z>;g!}?9t_){> Hځog!}u_]y]!=JV=@i}޷s'_n˟Z}yF}ykY>w*ktz+ =&:~!&UNԜGzˢ$?eoYCr^qW>|ƞz1.sGO|IZb9P3UvɚE/S~'1x5D|rHg7'!< #7ɖ(uxz:ؘ|%cGOJ<>rk\>xq Uux\y0Ho9+o"^B}/Ά}1H,G$~2>ZHzQ| KYfCV0h])5 Y6!}i,dT2+IBc#+tK~!e_嗯'e,'Gމ:] uꍡǦZAWO +,oyΡU\ge_T#'}<(O*8Y^@=ew41 2&5e/쾔iOYGLºL&%me{7Y1bшd| dș9RȊ'WV"6ܷ YaHyȊr7}Ot^r*eͯ]KJ7y։ԽJaz!)N{ݯ+GxcBH V3'r(2z2jǁ2Nԩّ)IY8H$B!WaONV)(dlǸARnWD}9Y[і''㐎n󛾹0L񔸜eץ7ov;5>c7ݡV&2QĽȳHB%CnfJ.B +ZtHI/Bz]Yro( Xv?pm{9MF3ISe)O!V";/riNBW| Gz=3+9w@l~D Veѝ~8 oM8X.a;33aú$͝Bc&#*]lG,ԤMQ{}OkL9 @q2*g[{i]N_hS6+D}> (f47$1&7l~ei_8G-Jpc=ky3JަH̠n .:Rǚn=.Ce@kx`DJ5+zlC,w=x*6ݚ@<3u=v-YX'o:{+u%=f٩5H%ou'>$vVn^iι='*]򭙶Qow-T9V/"? g~5Ǘ`);:>]w qԜ7q|dP?3QM뤷,2CّKHW"DLc{!]Osb &!.n&nK$HuXM;9gf"AgM_4!Sz؟x3'8dƼ״yŠ!s%6}#3o|,6o]2gu$#|IzҾVZ$vH}5/b¹u!jGdg !^pBs1Hbg##eff][S3ی'W ]شϘ#{V%VYF:v#O=D~8ɇ*'E!A'ض.沊M\<=YH\9K}qW,; IHk|l,|i17t+ħzV4ʎKoF>=2IS[+Nm$'ݵa[C+~اn;{eͭHCuD=YkRWbgbCS2ڠC]%FP5s{9^-3(η]i7kT7׽4Hq-Aވ=杉 ۧ1,ez 6vgŊ.nO\]| ls@OŸcOL=J7җOf3TNpKҵI ӑ&w!F?i2rԧ["M0uqRiysq\7X&>昛5e/ 52ˠ/Q lؾ#sj}K 3rwn,})2g٨[^$#wKIzT$z/3HdP<}2GzR~1/{o'dD|7L@wsn>#Vu(s]C*()Qk_$"g/ޅ6_`Kw>wYqj-fa+MIZ s2afmxw/܈x>HSM:o qjͅhkYsZy}wwI>v+U)N.̖nB:?ݾmk]յnÑ!]er1DǓJ\g_x=g;,m `sia0Fa+N[t}ѠZ#UHe޸t+6y!)w*!ѯqevRR"];}p[T3΅a5sꇄqj~FvGueݸ"{ҍoΰӦ[̩^뉻FYcF-$&L5$kңҾ&+Zɗ~ulH %/EW;0e#K}磖#?2T5"=¦٭@|׈ &z3|$yO44p+oˬgfBz|)xm}?qХ+H1 ut,lB"ݤ܈zp{_~ݞ,_#Ъ72RI! 
kK吶3iVU&m/93eh׏3}C6-w[,torܺϚwN_5}/ >_|uxo> Vm>*Ukx-?'=_!_,sĶ&u_WǍ޿jz[K|>^;b}.Tǧh P?k+ui;K.Gխ%Kv[?gw:Fm~j6?]sbvǭg\yûqoEctڿsݘ6p(7m;V_'*sȝ tصg<_;wĦۮoyG3x;/pOѣWMm?jNmNGoȵyw?3iwdCD;ݮN[]_j'=.:m i;߿[o>c0fXN*6td({a+N`eᯢjQf/{+ÞT]f-K+B#Lw/S:,gݚ,HKdIFMsHɐ'D>?'Io?s "yx"//Z=>7MTF%}L\_êq7/ǯZ*!g$rN$oʞt٤rgFR1}ɫq"hgU / NHkn}:k]*$Ern7Fm&E΃3xXy.>fdVAϻFJ#:\vVW6=fg<-=݊]1'k=N/ՙޞ mZNr`X%~v(>:\[.F}cg3^oUbUđsFbW[Yr .{s+Ȼwf.!6΍,70K^+T{_\Hh}ŗ 煅:wqL~8ʷpr.\|={ Y|*i>7orz<P_P'$gKޒUH<.@>'D9t yrxygGZi@ڍLGk=RsqY3]-`"AbvyNo(.q?}F_?}ޗ[*vOy^5#w zb !ewQh'<-aa Q'ީ}F]mZ.<'}.{OḗҼW;;`C=:%I/~vplLۿcv=W+Zl>PO _le_l{8cp+ %U_/w$%W-yA1G[X~: ?>p5+l]co.R}K7;dp_ ׁN6?p|Wwđ@7[iy}߂)-=]F;!?8ƃwݩX_,}~ο@PF;i}:RFyo;W y _a؉:b ^zni${t$qkZnDi}$4׶-@;-cH"Dn# $$HlyuI$IljIԾĖ" ~si\FJRo ;м/%ch $I^K$-D%I*mjZZ^@R[_RKiP?$㊐$~ ţ;[-5.jy'^~RIJWԹH"pn!"iD4~^F©v7K|u'M_YW_`kS ~BIHdaćƽ;og=|zӥ9'_5_Vxz|qD/n`Ø$ΚcgqH,{ImW -iF iq$wXoy$3h%mq $_{u#~ͥD;mӫv(_4|}v2LbZ*‡zygLm#1Ebs|*mqtosO}D"hs57B M8O4jg }X:nmqIG,؟FTz~ }N/]tK'Tcsq^ ?#[XD{b]cꖭ^iR}`56v7 ~F?AZN\ב8Q?GG#Fնiq~$x+s?S}BsGo;Zu#+PZ`G5;R{_ DzN'~:erSG&<σTRhxJnڰVAoyų妔:qۜ@3f Ec>2oI!w؎H/ۛFhJu7{ _ Xw|G?~ONB hqI$I`Q/y;)4^ea#l{5gχx=xW< h>X[$W@ЭaIxwPqUѥ7JQ#h~qv}H?~u[>4o+,~^Db/, 728'TdR]ͦq[Lux،ϢH>N#,clo|3r.m_3ZTshKͥys.'(ˣ]@7r-K:'_:Տusu g^3~7wA2?Q}+~U@'yb@z]>\OQϥS4/Cy` mB|l<_mB;^G&5h^[wV#-J[ҽT'2iÙԟiO0^if@=QȤy8cyt>kGxΡɥ媷ڧ3~AgBqK[k_`(Y:id(@x3,duu_i^MCgP=z}(1ANrm|oӸ.zqQ@?G(vv* <΋@sA?UDu?| 6۟IƼu_69ȵ&E>< 1Q.C3~@|RZ若_.RWM6XB'޸S!xsOU}vRx>Nmggot i38_J8g\{|! /_Jhzqq z;@AGѴ[NkVտgxsE/0[ӥn{Lt/_sZЪ0mm/xnt tdvCqSk]h><-r*)3h0:> !E4~99t-Y6w8 ]H=rv) > 'R]s@_/ C}nc?E'K鸪γCQH깈jr?+̳`ި^& vȣ_'8΋b90w-[e ~OmJ֪g߲r{K$%tOyt`ჾsu"nk|9sC<5i{m,乎w|bȃKUw*JJJazDiXD dO[o *TOnm3G~_r:V~@؏N>bmaT8b1gfv~׿y6Gӑ(_%̋y_jyڗ՜RV]q bx@?a| a' ǩ?[0fClz*s/m_z'ہGGS+>fЍ?Iۚ'7c$䕐o#CwϠ\ h Qg?mo(o<;㻿I>Qx/ ^(E6{t>| ߷r`x ck:Lz\2n6kz}6x_E}=7t==dձ~/ {+9=O=p|ώ3xd_ہ?º >^=jGWp? x%HXW֓nLkExG;az?ޣe? ~wdg/6sOe~?޳Š3XÁ~ º]ߟY|7h?Ԩ#}_/ޯ8:%wOЮ3=^=q ZW·r^AG`89Bq;@e.^Qi䡐úYu0z5k}%Q엚W⟽3 Ov qA~(x:%KO z:Jc'`B?`7G0^|1{v귱 Mx.\׫@PϠ7v=rCGP7z'z :HX7l~~>Qvֳy [Gi:JP﨣4/<c3{]$WY![o:nw>A?$x"p5':n롳dB 9J̟X.{]avWQe3 {\f~A0^Xc<:JU/8KGBgygXǛ㺭yc+Q\gkKQz_m׳C{@C |/q=kyv>Eb(x[3:66ud_ۚvQ0?aCy;C ]`(Kk{yt`'̣=5;gӶ6=QV?t<'y;]wBigP 㐷wٿCb167mO~ڥKivKjt s)ac^UEeJ]wjվ}W]w&Qi;Lt- m`kCtKp?=lTn.0ۦknaҧnm :mIjʪ`l_E,Zrgtfce[X_ǤeeCm~6ud<;1ߴPPDwwƦF;׳rm֫WQfR|(޳F{&Ycxς]p;pٯj?o1 74 5 o MM0sG7K4!555_hP?-% cLX=CE;$f'?#D?eBԫA ͶmTcXǬu2ws6Dd7{`_f}l/*,hd\XϝB Tx#^f{\ă(d>L ZAټk:f8lgBC󦁎>\狆Un9}ప +nÇO{eeh9S70xSީmN3ص 22p`}ݵx@u.K@WKuxM-0Ӳ03Bg!w;e_K٠:l/*05ac%kM؆g2%6D 6p΋1C{K"f6|glbO˨~$~|o)Z әo2%(`onacuO:f͕1R:ʆZ*ck.-5aܹt# 122%NӦMղm[Zv=?ZߺhĔ!}[|Řm 77;=+OWձm;~fMZmWoc?3y֟4D76he'Ni91 wIF۸ϡc\ZʪjjGkjjQjٲFT_](څOB!2OAe"d:587fZ .o٥ѺC%Jexs(NF5O֯]XRR ewޔ))QTF9RRRRRrngvGu+;OpЂ3v,vX"Yu3+-'%# J1m;ɭ#YɽFڙ;rݡ,M*VJoPvd{ϐ&Hoeڴ~9E?* 'Y+tWY-F os֓y2. RgEWcIİDRhRyZ3 s^aPnl $M|H=–.c$.Ĉ=/(HuzK~?mWfν:$!w*qZ1ec4;}ۑK18}5ļ|L;zȾk"¥̒E䜘: ]G$WA<7| 3;[,m-^]?n|c#w#{z+vaLo%H_ڐ~)Õi= ?# N۩H5]_l ՇrCAo:j(#SK&53vגF*"̐aF=1',ߓx RF~ORUe32.ίZW9x!#$d9L{#+;[?*#-Y1#gϷK(O$Pfx>b>K,#>\Zt3iDsiףH_xuᛞ|aDYu H39|7 YJg/* desmACcMCS#wA;psD7h?3hڰڌ¥[~gJg֟ͪSB f<{'}0O!af|`g#)hlA$GdwlwkdvGQ W?lAfL=Z62:~w<1$R4J)`%8DR$Mto{26e[dhӺ݃5'#e+ߍU2}z$sn|zdyN0_\*{l$[siª$w;)nIao cqDIn\E'tCؾIyw] .o&Rf׈ 5''ngBvr|VFaL̚4.;d\ZXA>~ʌK]=3U֝˙vj߾~cw}N?+~9a2M VnݕiPm--u,xhbQљM{*/˭q{nf426u>X}qqQ)LSLC)ƳL:iշ]iɊ=iû׿ÃIu"uqn{05]te?5@v#~oFb.#ξczWݱ/R2O[vi0"[Ȉ_ Maz\:ef#^@;? 
4䧈==sDl}̌ !r˄Bf൚,rA;쯢+ԊFi7˳cD T.rt3 lggȲt)w>3M[2ڹm{=fHΪ6cQ$3HKZ{їBN,bv35)ALBa6ӣ|_t_H1jrN1¯Mgas]5eymDMӈKg>Ly`;Yuv+1Ce z}^{9'2A{`jQ =uy9Klݮz Q5H >2g3_s~5s{H3FNz/~s-3xK u(fR:&)hlz=jby2~l󠑗W*or[۾i<NjkճY'ulˎߑ9Ⱦ &6+}8-kz:R6aJ4ˏ*]#o 9^$&?lRnP;2˺&+m, )1ys\nЌ3ks1]~o5x^sbɆLyNF qč܈w#D;{e?} "O1 _51|IՆ?Q=m#숱iq΅7oṮXzfm)q6LtOt' `:t*6D䮇=ɰy% +E̪C skVVFcJCy♑x=VvytگId#mfdmI[M#"; !fW:#oH\R\;Wqoj} cwҙY۶K#>tܝXg[9lڒqoKEcKrCkn#Vґ{xo b=6qէRO4֘ț&ܼ\DE70q0mHd6 ;pnU=K\ra əJN+->JdKdgY]=qqب`'n:g#pf`221j;ցwHΚ~$6:D*}a܏T4sƾ'̰Q2UJ$V_HY_^Z}S1qn˙.ӥBϯC;k>),gWgR홍;^_):D651f.ZrT`9LfFGI2iR5w>3gY 7Z3e(:ʬ>q4 fvΡ,CK-0iדEkmȼ. Gl=W|ڀ]Ȣ#=;]LϟTOj@jU}/#|zIfiPoztb;+vj.fxc0&Q3WW(`5=RPYE/sEkX )cj#x -c$陥OReTȗᏽuc3:of4m8kG9nD-[y>3X/aN0bO#q@RUƕ|O2١mt{W )7=o/)3;m1?wn N[B") GRXǤ}uv$iwLxty$71VCD[s%ojb+[5z6miE u3j^yE])?R[ss&"t-|CLde{4`c_CL:iݚojs"/Ж vA'܃ KmZ097[b_]IdUsIL#!_= LJ@E^fqJƸ`s?Re1`R?޾:qF1 k>Oa6d :vFONX2o ^r# %WF|T<8{! 8m;3v2<^1#Dn5f!/Z}p#9פa<30Lmf儀Ռ)Hz,UmRGCמ#ڌV*#HzNJ@]_zU=9ɥ@RDуd i  ʀZH_'S3£Tg:~r?qcih)+Y"y.=T!8bP{;R݆wa:!0#Gmuنcmt㛳wz!Looivo/:=^d 4ҪX )קuN=q >K?H˻9{E ۲w$v(K#<}kǚNs]\e<ߊJ[ѻ-|rV0mggߵ1ۡˏ{os\Ō on}orsx{bgL&m;/,Xw1?x.geD2 sQR΁k0Ozu'wfIm] ّ* O}4=ڮǤ." /zs/I5A=K{\_-}cȅsv]61Sy[]%KH N>!f:2ߎ7KNO`Ls\fKAFlwY5Ҁ[ŕ4~jR "n,u|#dI@K8Hإji$#ϙ$?Yr0PR.QzD;uYlxbԙȊ:FEh}GVYx#{^)R0{s[IdCQ9瓯o Ҟ ecfTi"8"Ygg;=n^#\nVVV&eL,R Uô6t9f\6h@Dd]|n d%1YgH33rC2b HY9hhǤaӗOOPB6<_ճvoD&H4xUK@Fҭ˓̐ClL}gYBeo+! +ܛ"%IK#G@VdYw<띳tU%|{IU7Xf_CV+/oYcp뷯"ߗxنT%ORuu"R>i9Rn28y*nmR;i”uȚW:|kDV3?zǣCk H닲3dBU\^dux\f! 24n\?RR(TE_۰ YD{)뎄jdU3G߰J Y2H}qe0үW/zx S$@Kx1Qmo@v}Q ec@B0.^?9IrιI ݿ>K{l8kfkS?t$^3O"3x: 8f?񘟻A=RރEH!Ae*ĉ7|' gpA%1d2{"Z6w$޾Ǹfm&s ]UqDO#CqK!Kv"7#3N!KS\e>I0ª(%di8埢%ٟ'EݶsF'RfWC7vaGDnmQd:*Sun!eEzs&!]vj!d;R0Wq zՉUj'6Hv }OrSsIȐUeT+&g/;4Ho7Kd؍l(rȹ~Qdz,Vn4׼q, izz>ܾK)0YM ͅ&ř.&cwiW|!F{6v&e5;"7v35-dY ̐Uat5 qX@'j89Z%YzG2|yg '3}53**V2TݩS[B*}!3T'N哛v*fH i|o5r_fJEVt 5`u 5pW>wLGmUېa\d}3)Wrp'ە|-<x=*kHRi QRkB:R|fG>Ѓb{#sG!˟dȽ!+L=Yhɓ!"~j /Fc\bsaRҥK1RjՀ/q>ĸ_*wk臬*n*P(;8RB5.jflBUkpY0G`dUÀ+y-6%dHw$߭Cu48띔GV jh~&(7Ϻ{xz?HR;G^?`ㅯK/ނt^| ~OIicea#N.L ye{]<+J)LM |69['Ȫ_??sUpa҉ s gu{Br=N}+tS+[_!'9C/:Mg21'~ 'igEB_b4 (Bz'5p^Mq;R:ژM #'d;!c3?L5 (B l(8.h&d|Q͗9$a!$ba~w<% gE?WWAςېCD ܔO!=wZ.>`iīGƊs@H~MHsoAz2sx,2' !O;hpK^WXyx[8$̑qwsk|)gy2 AL#H߻U @ϮZJ΋/T__N|}mFEX-mwI3~NCG~%~[_kYJ'#sE0a9!Ȉ}rrS$,Ssmbu;{mZߕ|xI8 [M IhAwы'{EBd+~dй$h}2e{v@F<ͺ_mKjEjkA I8,AboO}kE IRo#HTS+ m+ˎJ\ntîa@4TD@3 c:xUl'cJn;@0$Gd'_ {q^I:΋nIHjI~K^9o iW3ca  4>ZH^ 8[%v -MʧBPsNsed-vѫ"v; {D42Ή:h+' ܦ`y'^m}Q# y6~|Hwsoe9ne݃W~y|5|LNdХ1f瑻^}0Ӊ{+:rUmID0L6'Hxǘ$$_x9oQr- 0d})̦@9M6# V3eo$:xbx \۾Z]/ۆא5'h$$r^=얤ybr+cCGվG:/|D Qm j:9,  |"|?2|:Td1ޙY =qz-WdY&dzSE$w.7ݺ$kӬyEy\ q@WD^sɵ_j_!_pF>.م\H2"ͶHkz}̑". vI]` +;_Tl> v9j;& V&yƎ$2'EFhjc =^,2)Ʋ{E,H1ĬHFT9HD1'YL3 3H*$ÒQT[Vg^sgLTWWU3GYWts3ُk#fXdܼ;1޷}YC_QO;csIT~(I,tXsqˬr8㈺$1nW.Xy$I.$) ѹ{D q5*Fo ݌)[YCb{vl$nuN~C&v=%i߳|j\/N2Gܑn$/wm++;ިɧdVs"b\Udσ$.=aԑԁ]SV^+ѣvL@96 mıx#aeqKֽnMɾ"yS4p2zE z)䟘\W~\DwOMܷp˂Ϳ#fP"PoPѤraJ5Mn\xy~(po;aO< sĐ!"84uYON1b4ȦEﺉ JVan z }11{}HcT=3`UlYp?P|#A e8x$sPb0#H`ҾՎսێo~'w1>iaQҶ2[5L}HХ+e`1IpA@j)bx_^C1e xl6Ĩ Zw=-:aHMxkDyH|G"sq ◬F+A ;.Ƭ=פ$$/~}vq"cr>)]n}M؎vyV+Nq NVO0A>TmkJ|؊58]#_YK }^} ?du`Ġ!{D"3s|'ب#>{In@^{KjhWwD_?;f^ L&^=O;iW@W?=I.| he;@p% 6I: =wFoD x%S;{˪,)賠¨Snފ >! 
V_s| .9cg$HcEʷFtNʪLoN|q/;Ip$A V xԵħG}N(o݀6-d=}}.3\N~ w4mM\$`LiU돑@q+g?86O6?Fl("-$P_jC_1Le|}yj+3MW;mp;"ܔ8Hn":W+lb\2&4o{{ˎ\e0J]) IUo$Wv}^;r3+ .*?E}˦/KV[W䷹=԰5+.vsKSm9+ :=ia,Xyy`}V9r;ߖt Ge.pEˇlWHBEE|~/9|BѠ>/LXj ʞIXڱJ|*T:Yz TI )X8Z/Ip;vUS/g1~[Πv2R&]ˎ{9\mC}O n^rr${|' ,w*Pp;xF{׈D|cow}Ϗ*z @c X5JCS3ΆG7(_UsO-WKjZ֒upNuTZ[r6}So);6 a1{"~_·KlcKw !fTym%$YwޫԚI(i>",4ʇ6wx{eoa>,sc6)S9DRwIu$\>䔉 мE}otBwp,ϒ„qeHN"(gڄy|X:X'fhRlAv|XDߒxRYVӷɕyd !R7kRo市7RO/R:u^~YF*D?>UHj+W9·uʧҧ: OC˲S^yծɦa|X8|iۦY¤6q@ '3od(sh%]zVYzzA|,5xV.3]7w>Դ_}[3/F63$r)fңU˘j/{vdV]!m)ϴ#Ka&ssٸv!t $CRa^ڔαƓ)ΒGCﳬn|zդH{6qn a9ct.)+i8 Τ2?+uɿ>М?,jdM\G 8SΏ} ~T7WǏݖ}~.);.L>b){!fʚATs۰}*bB>T8.X!>di'^ms$Le!Xthp OqsB'ĹuM" [`+>$-+j=bm#&c vv CwtWIIgR'3ni< URuCrQ1o/ΙEBe3j$TA.n1o$ U N-ggM"λs5!\WCw&5֒W͗M#NyŠN4+SD{]rKs7H\Pk'AWsE'SEnZDWPcϜM3ǐWNmO j][EG !ϸ>̐9ihB Z+}_v㾰hwVēfD|؛*Vhn,!빖0 }Rq97@֯ ~Ax)W UHO (d$+ /^_NoS y M~!.$qMb|Ls lp$trkW1~Y^劬t!ʇzronle{X73' |l~}whGzqO7wN5g@i)W_%LX.6~%ڧlπG/vz]5&[wAV@OBBC?]C=A^?A~*'=xN#xvtBt9aVQ qM={MK8cv ߃nCv~.qm.1j~BY I@Sܗ>Ǎg!xk 3;q jh ˑW^{ӧ[;I4~<=]i8~H2'/;:q164ϵC}Ko-_UXLbIM~$Q]֢5$Y$ W.2:IjtH mG2TT@y*fjM^CR$dr$R{K4UJeH\!Ih$S{JmVF74|mU!fX0ھ𤦎OiNMP@dPO4=}sƓXI(|ĹI~sr'IʜeU.$%F$ebKLHWrò5$Mw^*lV߬r+<AB}t:ܽ ēڡ 7\!`H(GиHY07Xt/Ems7jVE>Mj{g2 ]cHEI+ЉDMIBAɍrHbnF㑦ф 9ԟ@?3_gB =x7_.ԋv-wH# }σ$t9k|4$E7{M59k״=ި72Dg\$BG˨~ g}oRR5uѩ;6$E (S*IUJAI'3luH'RF˨}5RJʛٔorh;r9 9 '%F>JJ{02{~z$o;fjGJ9z.-&?&ugxzuURB{OwrS>8 *oL_2i'Q}BM\ӨfP=eQR?,d<5gY_s(_{7 ;RD2h{vWrhSyO>O^r4KH|6ʅvL)Q?̄8M氵m&J2?׹=#:o[_O2x=k+汅40yԀdj$׽N\nldѸIti|zOڗOJhYN,6SCBʟEh?˨S>/~/XHvr"tx{3 e$踢yl(<;/q.?chG-Y676{L$EGrbRwn|޹ ix*f ǥDIpR]l2ѷ"e4_h>ZF S)BŴh~ȣ\?%/ir('MM!-'lڿ94(T[apּ)nNu<5+}Xwm&s-q$߸ǜeY<\|d^=z AyӠWhM>RkLY&bq$}[?Ԏ h8e`);j94qF_ .r qH'ʩ?qR-yO1b EԎrCĜqo\GbZ_/#I|j'h\|Oy&5iT%"h_phF8 u~+hpW~<+l˹D*|M>W~L*hPYA94zB~ǡv%+};:*̷i -eh bε5<]IIJpҫWNuӌ7S6OpLN5ϳf jeԎGP9W!C xM {./i˥\9oԞ(CAy?:?QJ}8c#84II*N(vy)?~27_ =i9ͻ+% "!|[@8jXRJ0nYq/l!)vTGz)i[RFiWN)Nh 4-nIQ <iB 7"R{: z?-R \$3Mz7Fڅ7ByG y#r:ReeMǫ|WDh?o~`OvR?ApFw_ڹ]rB|MWyq}F*^1~!Κ;/T 櫊h- )!ʻ0QBa~ wt~ ?)_ /A?{Ayıo~+Wv'<ǔ`0D gVQq-a8D{q˾#3|Qŵ/lF.q'䃐זP~@}^~'+ @KmnC#/W\sp4:/I0cՇ{#ǣ0.<y4;]RB?* '"䕂zp?|PNK:~ >ίq\:#r oC:nNJ Sq Ϳ`^J 7Dʯ%4_x":5ίcx/ rs{NS%/<ש _PDq`܁4,5+Ir -NQ&YO8W?>v4 M{6E|Gy$ּt)r: |`~ Cd(7{~8 ,Ex'waIyEq|A7|!BЎ ;a=Az߯H`;;y{~/~}A~/_eo|ןR?>X<_Ox7M:fŔ {A/bo)|a9N+i?r> y0Ga\q=_y_SSȣ.}ID!O4j~o<̊Ӭ rţp>0CdΛ O +XBFT؎כnϡVM\ auO~-Xg-P?y|Lt χuqַ>T.\M utou>;a} NѕKu;1`adhk:PZ\[qt<zX >u^JN_\M} yWXz`=8wuBnG#z:@\ N]W,g ǰ?AN\L{f47āVcM%H3zAz}(?7U#ϛZ :L[:8. v !q? ~=B^ 0Q?$Ag)$Hic޸̓þM=yuЏe'^BN, eނ*_?A?xi N yh@9# Åup+֍:kX_C?tM`] { \Lׇ `/h= 'Lz@0%EwM_)x~=Ñ77zpXg}O?a] xқzh:OO^h?_ցS4?_;-hZ1a<~ O~C4~b#g5i_?:Ou=l 1QN;// 9}l֛|a_X |xևӰ>6ߣ4ޤb\gp_"Ϧ(\5< ϠbH ]'w$ p0#7tjvloyk#V+.S`Q:_ a Е|g8+[>hǔ ΃ܯ<(~vP(#v ϳ17 Rg}ܞ5t wy3Q:No7=uR}@B>:v yYQ[د'z'w9 Op'~O|*q=m4>xx}B!uЯ?+_(7OH~ q;u8׃+l㻬a #̛)xE.( إ³K:.y M̛> ~y=2'S=x )߄qwz%d/p'v~#KqQOp;?B=7S=b8"|7M+G+e`Yy(ίY0yJ,>})0Ѽg_}e0gD BAn>®?@8?Jy}~_qG?Q ~@qΓ橠r6ܠvDy;<>WK?@?͋'e^3O'}?< < +/8Ndogwh=ԮpŸIȣ)̚?ʚyqطMO uѴ#6{h`q{~/d/`{`妙d QQsjo0? 
z{q xw#+'QwXH?gw=MAda[6B{ANQ>=B^!-{qaވ-; v{ÜcaLjsx((YNȣiBeA>zyQȣyxm`-{^OllˡxTHT ۚjo] /NIT˩S-ZNj9rT˩S-SB7㐱۷lz&pd-T $O ]wmգG蘙uXtSS^Dho t@9?n:2T,i˺+~XOp6ktXO{KDkL6x_(v3xUm&\ocEgOr=FzNJ Vz˱N)jj,UcRJW6p'Vӟ'- ¿l ] {$},F\o0N£a`hj^GU^n2=Cp[٧MuTg] Y~>ӯ`,鲳3ޖ:[iԉo%N9MM9?52;pU ϕX oBJxwJ*H?O6iߥǞ;~ݿǞ;~'Z?mHPK35Ubb Mb6a j+!6* 8$<3鼎4Ӄڪ4y 7M22CNf.U%9EqTTT+ʿ[_ݵE Q_" yG) l\R¿x>;ɫ6ͧy':7ذ&PЁW0避X?a!;QFE))P80#((GQEEJRT-r"gkr Q.缰{8"6 ][CtRK Ǎ8rTl-)oT~ V,|j+X=!ƁT_uI iv.\0TZ|VIRlZwܕKPjۊ6+]ۿzq|lt<#Ty:{/bE[ %3zzJݻ߀Yb+1U>kR~HuҏRѽڿ@*8MF.3^T-~STKb(gGBO.w'mHAjKn Bl9%5TYs>-zGOQĪYNJV<$U/b^Ct>XgRYwsxRpXcR[u,aFzԐuӭOxuf6 N}(ͻUo21"ޢcwz0"y{_[δڼx$HI;OXgm"yz>4;N:ۥojR>B2.Vѓ5zģ|j_/ !ս$Y&+ӕV8DӗjW|%Ձv>sNtĚ]>o@j1GN'%GxNBz .qoDj6\Hk761a m8+# v6<Rtq[Bg2B"7g=懎?z|5sZ0#$!{rM؝}m?3,irHu k [єgbHGߑb'y`#%TtE~;:\eԿ~tn%UCW&UOJ\Jyh]Ru᫳d]W<<+&YɍS{m@]f9]}ƮT]k&9ɒ*ڹx[9&tM, 5fcpڻ!¡ոn\V9rb1ts}zRZ=gR9?RR.R>{ޝ<9Ws4K"֞:+N*qYRck{[Uj,Z\6 T>g7}M ܸ{`f]!V>rs/7BCTiXp&5]jnu*FX 2jrďf9_#հL9"֮3ڗu=x"}+Ry~bbq{R?^Me3R׮"qrp3ESv$Y_ ֻʩwF<%Ş2Egx e#9vU8\qj r;!Ib]?o849MҽvUJF_gy WHi+KRu96"͗nwYh5~CB;rΫ&uƄ^49lrWҠHNkenCjtl3|&gNűkG}c׷椪;TN$vIL;&ͷ/(ʈK/O#'lG>?G_y-n!ڊ+g>k8zuOΟ_EðxR; קQ7sKQw_[7+^n?yȇk=+%\/Aj4is/_Ѣv ]U"Ⱥ˯r֭5lÙԭ`isW'5 Q]Bm;ۧhH}R%6g[Ώo˗}E>z__?Ijk]oUK"mbk7F쎗Z;A1 R%ǭWƣ?|Btg=Fݓ8lM qvtm &yHoC"_}Aċw\(H.4`c)mw 9m;MlvtyPIpʓzC/0%e|3JPg^Ft,=K Rm:S׉_}}>\^HoNx_H܌Y줾P[ĜGbFʰҫIٱn5'+g '#kI>^TrFD-b`Î.}#lSHļգ=E ;wWqHhgY#.p^y}E& δXvsHĪNsRNءIG>bˎ,Ħ$N;!퟈4F+X:Q,D4.=)_ 1Ri֕+ϼIć& /N@ ИUE*$qN>ƾo!޿< L}Q:HwMzXk$e[ ý '#2׿RNQrKo#F@QyiR+[=XOĚ;5&xuCMR|1Ru!:m>}$UoC|gJ$?⤺ڡ2nnzKjetp4?xo83>< M)&:u^ |ѸOY{*bVpE$hi$NAqB8۱x$|W$ژ_HuQI$ҭ:$gm7I^)or>ejDdUj2I4}Tt"} yC %3.81^tHԹ5CҬ@z;uH vY64DȺNhyDϟqP=0)j;Gng3yil1&wFwn%ZǤ ퟉"$0uOz?JEGUm4v_?wcR|K5ݾr]kVl^<^,wT+6a&~IlS~8.7|s8vfkO PLkCg#^_ʻigGuIWICY2F>'uWC 5b|۠x,4WC#y19KB:o3n;.| nc1n+bRJR7$s]˚ˡq[-"~7Ru4Gďa63?SC2JÂK?ٷ2uiv0Mnhcne~|uw6T,ubwxzFh Ryr]۹%r*n=%É'UNK%U]nLqJu"\׎DیZobei2[߄u@)+X}ŞC'#V000=;OA+;k=]Tp~I]o=fW 0Ms蔢jQnQ¯DmEt,jO]{ѭ~cDG/G!cČ&!:v~kK+DYI!n=lV_ nB[^jE#>M2jY>'yTv +s@ }Wf]3xVGe"ѻwC#VC,z$Ԛۚϼ)Un Ӟ(ý7)D |U rpŅ#*yغa-MΨAcoC@}ZyW$@ aduٓЄ}^ZN :AZV4U6btGfD뙟W[#+mj=ݰRda%Di2E =F +آOnO| 5<;vcO J=L"y~^cD7?!j/#Z~ Vr:sx*MM/( -Gkm [oz>*K<\iSXFbڌÈ;woxX&s Q +Y>{+87n{'Cbe.BF0g+.8{ϋ SX~K-N=_9V VbNB#Vmz;XSh3&i!U깗< IlAHU=kGɒrmaěXϫ5XH꣛/Fv@9$%bX>Bд5{o?+ӘFRm0llWRݫow.2ZC${GS\/Ի"6|6'5ϜwF!66ӮE6dZ:Pj) Ő4|0?vd=K_u3;zDZ!6zZ=I*d]<4+ہ=m"Yyhx' l.;%3YDʺ\r(wg uX%63y8i=Rxx{V<\"z4X3dĪF_z~$zcn>p R5E*Rv ippH{#񰕃֡Ջ*l'XlZ]}VX1 שO7 V__d:"h_kdíH@WѤzV6a5R-C/}܇ ~ounnzԽGtn}<ݾH搊Vy}/ z$ p1▾!ծTI{uxNE'[u?Zh7Nۛ9mj-sCk7QUJ W"3=^sL@Ϻ{Coo}}g8bǘr$bOeӢوA=-ޭ|0Āzwf!i9z1 ڈ> N?h2JĀw',uC 9$wl+(K.nEb·.en{~Ę]K!&F$b'{$!K{G1V|ֈz=[~$fdߒM̊\0xM7bY@|Ko+݊9wPROO9/=_/A;gDe+>Dm4X~5{NSm!>6_9믉礈|16k7S{aşN桟AjnpGr4b g8D Ȏ?'cOlw3b[ҭZ%I5I+lHeyٯy;f^D#^i#D>B93-Z]L[n텉=̿ک{^ֆ#EN0yPx=`rpϠ! 
hjH0zqF~v|r~i/S渘]Gq?C#O~q}t Wc5&-S{S-~݀;.tqBî߼*Y͸ֆ7;ItM***s&P'UhnL }nZe @5/zKWTUQwPiP'}VQgAhk@kPޙ]3+ ׮ϫW6j4%h9y= ڦW] hK{xMp&fe*ФzB%)k偶qs4%ҧ0jl_ACM;{@e^DϧP Tԏ>^j UeNH@10d@5]T&Bۍ95@:,1e?Pn^I5T!hWqP22,eJI;4Yu|Ta |͠Oo1v`uBz6uH{ٻtP){rMjPyZr*#MJFqt7im,#qP蹪Q{F*hZ+vt5kj[6PZ9~۲`Qm߱A&QezVTl㖭P6'>ΖQP&wTq?J4ɂ2ԢEUW]6-P̠M.&[@}?`+J֬"׸=x:;+ YPgp̢%@I9 On8^M5P`?W *dPd S꫞Z [w*n>;2 KtnҋK?~v2ׇvmnIS&kےx#/=q]V&7=9I4^~_c}BwӿϬ/| |v5WtT]e@oO^jH]&F-r׸rZAuby }Ш=CՍk -b&5[׻(Aխ 7 I{zI&[=tUh$ *Vգ*h7BAw@U_]UQuՁAsȸIS/9"E/ϰŠvLN]K*< !榭 T 3f /4r+ZBڂ}cF3^g~.څ֠u4N0FYk cڞ T&7N䀯ULeXT3\j~zKI"]>fڃh}W-ሗcXP嘕;)T>;*N~ŶDC_FoWj|?T>OGv>:(5Z?xg0#8=p@Gt}r LG:p@Gf L<>k|ua~0Epa])L7v~K[,<_Č#yYr΅ss|<㔨@'9oő=p|.g;W#KzYے|(:w@s3#Y<>z$;|_C#֐sp?$=QG8ɹu|0&=wr;9Aȹl<\(.'?|_sׁ\gv qs2y]_|3=:G΅亻c+]~E"p$POcsQ;z.jD3?ʙ(Nտ?:'Ǘv`:Ae7i /x}qv8oq ͦWq:Tş 2P{ rgKsvz ?OX'XLK*d 'ˁb=@09@Tb!~vP;b?OG1YuG#tsIRX7fV9/.5'Ox}I>eQ?Y'ɺ8V{ 7$Dx|Xg݄u#xt w;(߱}J^E:nb]fp=Cy2~za]QoY;|5RX8Q #d^Q8(zhq(/"#K,GY;;UȺ%K>q}dqx=4Tl8XQ< Xbb xzg}XuDa=,8}엖kX8s]CON\#wZ`(j_5=.'"+X?iV &,:](q`v*YEIj g8_Ŀ/^Aze߈?t^Y׵Yp:ˣ_xY;Qo:[^z$~sL=K&g"daQELƦd^ϲ΅ :N(]UG9N՟/QcٯC`DG(z%QlnQP:ʲ:G(g :2aQV{[VE<$ﴰ2ѝ~G}Qz:-XCd%:QI=(J]GgQ}~&__k(xŞfQ4bQ%:Ӱ؟xI:w>n>&/m:1&G4d$$蟁61A_tͫekT͏AWܫ?5rIc-t5x f ٪]JJLgx/׃WfhS_j`oC-MS=CurpC-#}?kw t~a?g{#![G[ZzzZG4N2sehluLZ 踖K{[2ߒaؿ[8h(R$`ɯR?_H|!r?_,%:1>bhI:*.݅LeA]?yq75F_F7n^B4ŴM W=opnh}xg ́44 jԪ @3]ǥ 3@bnjOV0[?:ǿ83 @iEi)m`U.EjK kjDDOC18~B^3pq 9_濜pmYG_ !,7PEF2yg ́44o5/NsU%]П~c6??v7/xB?g,#8@]t AɁt Xm)ª'[1 *c#4}ۄ_ߩJ+_'@ݗc>6C@^=^S`UK_{uKQPViP'$ENIKZ| aՍ*5jP2ikΜpj)GTڡRVZABBv'.vnW{r +NJJV}N SʪՃ.r'[_p>)ClރkGAG;R46͘HȡyyG)ڭ>R8ۣ>ZiflNò=ݟ -3j-'R8QgPO.2|f@Ɋ=.|^0Z\G;Hx#W,Ňr"e뱇f]T!T,js= ]Ωg'ܺ:bnjϨp鄲| ,*U}&8]%9oNJY *(k\M}V:V&mZ5o˸ [pѺWLh\rIRZ渥Apt|# z_?%F톞.P"l;w ]BkX2гT4c 5(\cbo(1P5҂:Ŭb Ň(ufXQc$)\;VS:`BNa%S89a+ odB Myf#'Hu9 V;Rg92S2\|v`@BiOCZ}ɽ uS:ʭH٩űkЮ|*Cоl坰 ( aB ed2ΖZ'} ]N[[w>U.!}$#G>Hr&Zyaй23G.w2ٙa/d ccB,!WCˈ3&Rx4 <-„֎L]B;Ɂ$[]ݜ)*|zl;$lK=C6j@XY̝{.e%VLuyj4,KXs{h|T,v6t >@ץNОCNʻPwlfh(+y?3CKՋbRܾm +vACF.n*/Ɯ腪1M)M +/>%< xߧw_~fj; +DÞJq;\tr:x|4ԯZuSgy.D8QsUcRk ?>DRe kܙ2)KϾńΈ:Y3mݔ  p:A{hN/) |ÿgRΊ<_E7j>/6"r,;u}ϕOvLudo\w%l[̘cK:73z0%Anña{ =V^8̱ _[UCHEYƒ [J.Y\MSS'RG-K|{]3>O좤Hy8|5ۋSɒS3ٳL3){42Ւ˜Щ$$ӓ:w5ϩI&))d O߬XӫGQj@sW rxx ͒"$6ޟ¯T~PƧ]e</^~Y?>;{)ԍlT'լ4^ |Res PF?| [1{Alzb-t5XO֦\缷/c2h Wړ6}8~Jl a[O:M'ӈnOUy٬pԫiU Y߲u k_䅗*WjFăRvsv6Ca21S¹av^6d^[J g\P>\hѺ<@isv*;Mc I: |0/mU=2<ۣ-xNx~ƹq6ᬹAnYkgڻ&u^ "V-BGWEt^56 ^.T~Of[%>Ek6aкGVBփJq+%Cs w!LҺw*HһkȑSm^aJ H01e9|Al="PٻJKVIypNFf%Y@y\'7܎~NJLr8IAPXUO\lvsZ^M?Z aKɱΔ+Nʳ#&LM=`VK2DC/eA Fj ByhǷw YW}#Yb Yn'|˿oW.8]V^>^ =çB뗕6H>klZ7HsGAܵfPʩ_}kHCid،u+BA܄Rg+e )W7uVIZfqUy(PY,#gՌY6 P7CNw&T~H8s靳+];0AK !$w|{5r<60.abu$NR$Y^FlH5ErW$@jBz!_TlV*8Au>e8(`k@&̏F/|V.xgfkJ@+|Bv^dH$X!|J;L[dJnů͟h`kSz;'>+Y,8l6u܊'0=rz+ҝ[HhaOd>XɶXd,k9 ߹Q%X::΍8 ˷,~jkS)e> 7_DM C ӂ2w#RRm ^Z..0}֝t(Ո5[/(|+=s}6iknǎ%Ofw5;k,<tƦSȜfrS0ӹ'\(tzc|#tO~5}h(=m,K6VD55:7[s—w$i# zL]@i/L"9`1hܠ)DD.|luy0aI`z f.t>0v;)coŕ(' i8JC-KqJa w?]tōrH7U>QkykŇU'6yUQiyw~ Yz L(M}F;u#R%Cۑֱ>Q6_lj>ل9]KiBЩ#bC)O=K /G/tqRYVJފ,, gBO%I]ZОov<m tUt)z뻣 CWh(kд!$n4a ՚wm-յq:s]NiZTzaT KOy'5 hVLmB9*&t|NrZj+bPwdFtA!-19㌯\4َq_O Ca9"NẺA֜iՇeg[M oA}C''T-r9oF?K MO{F[ˉslH(̍>9.0uyαMzj3!Ą0;Y>7k uA]εQӡb+/DyuXVe%UP/bܻl/(+lwoƏ9u_/GpnM__H-<)[ژ۞VWS*Ǐ_<2'C/[ a] ʁct>ۆOtDh9lfj0k]J*wdG9=N;!|R-8A@ atSe=%dAlώ%t /}Qa-~1o~ʊ7;ty%AB-gnVLHo`7TC"J>p9->Ź*x"KHQa6?zg{3dO@U-U!Dj$k&_2aҳ%S/"!-k̃/u|>DJAK\ilY}~jL[+MrUHs/t.!kqeN?>Kx,ޏ>l2}#'<)`Xt2=p~| [{sd WɎk}۔uP{)BjjP{$O|z2Jkq.Ue~h΅u#:D߸BX1}U:΅ytn{hx--FF9zPe-l9h}uk*=~T5VAH[?UFp]i\'vIo6ȧ96D 4 t.:!>il9GX{*2ITn;êW: 6X9vIoHppF2!_@i))Nx^8Y2Ut=..i}&ly(Vr\ iv_NM;}4Aʢ\awFС"x=3|Λtyp,t~Yda ՏLZ?~rZL 4131HL 䎶k(\_KvS8؜co^= OƞͶ>O#lҬړXüc06DoJ6/͟7d\+aqk8f.oݩBNjYn3&\hz=vJhhn. 
2r?!tu\#ͣougrT_GWuQ9[ \Z@P=Y\ M47VN#J.S;x*oAy^Zk3}s}.<*th~ը^ʡsܡ+@ӝ$a]h%>:Vh6=2>7s$whbM_1٠uBN)w a'MSU;Bݼg)m O> j|:ih' u.: :㿛Γ4͔~7_^F¦\'և?_;+e/ m&l^9]x uFU%hxB׭#u }1j1c7@#AX9h K5^]wy_-O '@ĺH癟՜0h\$p%MWRq' t}T: }zOzk&}^>ď,`\YQIՂ9_Jk/K#5Q=^dqN1l?aܤB/A+N@\'] \DdZ w>ݠTv ̱_vCBXH~#R6-i^'w^,r05z9x? 8ޑ`r3o3v>_X9>MRICGO#9*eBb!wdǓtUϫH-|F 25@9oOIR 4Iԥ~gVfm3㿧@.bLIL;N[Lf]Axjo >.&W, /s]a Rϴ̻!m X&@ \ƥ !Ƚ;ǴYkk.6ҌnB=2b#&n<*rݣ <Ӛ_ڃ$[&zGZj >'3Pdx&B?#.ݽ9Yyi=< c>'×N)d0@HF+S״/Uo% }`BdFvܞdϺNq*x7 \O+3Bݏ0|ߪGRzѤ)r ZM\T>L 9Es@怀Jb3*fP@rΠ$(sF@=sݽ}ه}Nꪧ{{ǂ;/'Pi:o5{[c̅jGCq[Yv)V n974~4JK]T\LRh1(=A|Ruy1׊bE*aqH"Pox (:e;}cůT*<>GCLg1iˆ4kO|a]UO@g']qӚb|cQ(?\D2rIe63Z(Vm!~n=v5N#FO&Um)wWI ݊ UL0cÕltzr8%4>ZXeU[>|] *?u.ܙY&xn%YOѱ0mp̝m w]dH`gzUzsl}h v® Q]o&]:UC4H~dA'㶂/n+xcz3B%PqVpYPkx͏!Ŏ&IѩOSt7s/U nz -\0*^rB! 9 Ζ{VaF%t.kc0'f%g*Ӊu7!lYe4#+ߋ/w[RNJ)šg(z%Y!fip 2S%(39 2朗ԗWVKx$n~/~!T2bgrwߙSCroXm3hd.~sb>eMxDzJmx9 ģA<7 E泮L Jyŷ/6Q]q:8dseXnM1yx}٘g\~"6znU;:b5S̼ٸVH=D/w?Oro?tۑ'>StOueq:Dt )RVޤq*4z"Dߐ1Z>D_4pw&{'W6M1Gȣ^CN9H _/ZLZ~llȸ@IPc6|QLl{+Pm>AҐbn/W}*!UKF[6Q{?~8Xw2_+B[S1C=EN*EwLcI*|Jz3C|֙US"ppAa6pa%!r:_M.!šB]`؄-}s=ߖ⵵rBgM.yrܱM52$tޛ=uWD}*<$O&$J-:YԹ#')dĐk1C-nlM~e?7{I}.ni oڕvu/,{PP> ~WY)6م[djӸ4c%ǚ!|e=]":Cb5>Zka1ʟ&쥘S<3gn .J]}`1b-M&P9wD5qJ}z9rFl>- N6d˳Qkg#H7HFu{ [۷Gm|ol !:'n쬠,4vipw{n`wj6"Oby l<i;/bZU=52ןXrXW<nʗ@]ݽ<*yqvLLӤq4W VyA i_^_=aN۾ow  5|-@" YwC Ў[dCO~w~j˷L|{ldr K"O6g\Bkیm@C؇7JtBoFcJ@9Bت<`u8zF-!},e~>a}F]ͅ9)z;^sO}=^ PL:ӆbf#SL=㹐j*9b?>a/5ŏ˝㧡kW B|Ewjm|n^ Tp>::i4 8f<ȟލۍы?!o\Gp[QAc:*6 Ffp4c,zd8})d 8[c[˞wxzN' \^ I婶_):W)۫M7ͻE5"9'LY.feW$x]Zpvp :KktOe~qi=eYKM>b 4p7')Y|x,xd<ҪY''3;Q"kep 类nur[KGStɑ96W\>/k'0D1xcb]sIA6bP[5|!7G|*M\>v`#<$18_8lz{WeM(:m髪y6ٲ CE{GN܄,7Ε+/4R5|am5⓾o 5}&SuV`,Z%=ߨ2w\7897G\pQww3ŵ%\ުE6˞_{$_g,)i(:m/KakA/PhQ٤-\㎁SQ[(7ʖh}\t:pΞWz8_Xcp5~ }wVnFp閹E>7X.œ-%saU9} R=V-\5YM)ߕjfˡtȸ{k;GiX{'ǞW/]3k$ !E52]Vl9X3j)䫖OxMG瘦:ŀ*o89ѥ+ʅY>CF/Wel:7nA(NXs|" 5zʹ'|U=ѪfTE/?Yg<5b"‹74?/禅Or!;^?a jr)B3ԟk3\˦_o>eRUJr]f4NC~T {+>Tzr {}Iq!ToZdSRulX4w9rZUPzG OnWL;Qݾ\97?-3Qzs݈uM~/fmH~;J2N)SR|PMqHa(]pńYzJ<֜2 Ԙ~PXUxI ]^^X4ݫ=@aMPx]jh9< j(M,|a΁Eo_)0|n,ʉ>|# JNjoklP|G$l }6hUlcZQ j͠^B)Xb9[쫛^s`%'r6,:E-&.d4}ֵ<ŁE{擆GoJ!6B}AdpDH>O苫 Afo@ٸs οw2n4׏WX^\0:*Sz \3ܧi)zβeEcа')\UnpgwpQ#9*EF'wls6 ?TZv #zoJ}t8ZvsX_ԅ<<{Ȉ˧Mk"xjzGonMJƃ#YUGL^D^9[(9y/[n/;Ru׷JC%iW!`qʬS{A:tM%pSm ^gF=u%af @h|ռ]'9~'V~\tbV}%LgC=lVo)i~1rrMrsx|rk.p?~<+p z&GkC65#ob?^~&v>Yg}sȳןgQe;UrD(#iMxOY?{N>Gkbs¤1mćb& Q};/*RwB%'=' vq;,0抡vz_OyenӷtƃTͫwSEy]?]\ik;r?^;<*z<{y'cI68^a'ooBy՞گӢ׃ͬ3:y LE8fG2z?xdA"x?&ot[w %f~a_Q9~p2>1Heua.:;(G&Am^N% By='r?V,z ؉;"~!OEp?Tڿ8_^*gLJOY| w}6ִ!ܝw7,)#!<:p']OvS%%~QzGyy}R-H@ѣq<5P1# @1œ;bl18p/f-EcO ^zs.s5mR6ֈ+(0S. z^az-HNS H^(7Nte8.[ه\܆ ڠ}ϒ7NaE (P@챠C>ڻVRH!~G?uWr}Ǜ0*7;xdȾaPD%e$n^mXvgGoxńO3ؚ|屚)PcZ\y(`U T2YH)(iwqP} yƔ4!-nPJgM7gџ)evn 8] Cb'-˔cA(`)PUJ(,'w*8oξ5X6n,bkO&[f_Qaۏo?=pt@Q;1JCoqHyxWS)R^Eeڡƈ5P֬17N^@>E^z2enc , U% :Byą_|7׵kCZJ!;_=U֪;ԠIQΦE:I^W]lwJ&%hԯZtngBׇ@Z\2 ;^֣[YRPfs\>#!AD;O~{eelK: C;(.=kJAoN \e >[gM#JO-UUx }3FoZ8 d|2$3Sx\2=wd|>t>/qw8k)bg_ZOR#Cɏ{u!PJggP^3'2NYGǵR(H7BaR@TrXۛW=ɗ =Hgm>,ɗ$>gW fZWZktک* "mmXHCx#ą_ :yq͢ȵjw8mbj?4+&|>׫Ž4 gl>s C;EQǼv4BY^wCjG4Uf7 ẗ́jp@<*|⇲vu[=p|T̋VH'* bgkO##A |;A(!岹 H>][R rͧī:5ټ. 
/~SV4|gϠ1*,S|S٬Ǧ>(HG2ٌ_زNO]m̏Fk;#^/T b?ո_T|X d鵲%; vRqĖ;Ur|ᵭ\_a7P _??7\v(olƩ~߇bwɦ;l;Ծ҉ {rv:`97·<ДqU2yRU1z>x|*4{8{6-/'v4o.X eGvLJՠ9?n\Fx!!ĥEzqcy-N$9Wluamؼy$bw'q µ# qG ?z=[ҏ(Y%q?i9ʇN<%Y7Z#c:`f,C|{}-O@_8'y&yJ_f('\GI\/^ai9aޕ(y 5~]VE~  p~+k^*ߕB3>ur;kZ 'C@ǶmZ$[Ƴ u $8Yޑ&P}>'n_ؚƘLvm={H 9߃v5?xn'sm'_@[\nuf}R o9x^_KcO;ܿg8vWW=ܔu'LBQgy'sI|_{E>Hܗ Ƒ [zAaST^<G>}d?K`D2wd_߀`<@f(GG}!_!(@zOW x=㸻,ir=7"yd/~@ K'WGrk^M7z;~p:_葞2sxb;˭y$t=C'NcU v4h7<~r0ޙx^COϞs6:60clcnI"Þqъ'?f(_:x<>k}֜=)_yEv56t ۝瞐_zEmߥx=^zD{Iyz^b>ōG1CDRs59S2xM&<7(Ǹ<݃3w>(G] "e>)'O&h/4q0(Yw8Oyk\z(s̟G(Oʣ 9><w< }?'bG R%|:ǝZsL#O'LeKQf>jM k>3|F1u9^؞q."|\z#( H09ȗ8nɹx."c?i>X~y Rgؒ8y"|jV㋼|DIy$Ǽ y퓞GI׻EDR红_y?*>(Ky7Hy3?鼞\3S<ɣ\2uRg[<9e@ɣhy39''74CQ_QCehHVPx)hONCd QW3g}G\Oވ"(b/6iQ\o@#佪y=g>JxϝFd< |@3>'Sy+z)wx(2Pʣ(9Q~wI΃qpx8QGXPy2(\(/g2Qem| z>E^P\<(#O|~<ʜ ɣq&}QKpQ yc};ʋG1'xQ;ʫ&<9ef~Ky|$_])jק_8ۅ]؅]؅9O;tgcO#u}dew̔************{x؟6inM5Lt ɵMZgM `){>PP\ huV B@U]9_^FVZʤXƚG4uo=; ~m^39_~q_z~Cڽ[GCGK_ìSr-=}Zv0QrCZF;v09y4λ8Rk&]X@j~!_/z1 _/fFy Z_P:B-Ptvwi_;O[[ƟBF]6ev?/n#u=-Ï!O>fWSV͇V-@o>=#]2j?/ԍ: d%&\Dh ;{)PZjͿߜBX2>c|*Xת,$ b*$-3OjͿAX2Mьu Mta^Kxmۏ.ԣr>4ׇ&Ԑi&HUK2kjͿ=gnu}&o24 ȨP7؟EcLPj濜`e,>H&P4Rsw)heangþl'Rl6S0buc$kI7{p}"677Z|ѿ֯ٺ?"'Ӌ?%eqӿͮ66s8K26WRqcaxHu#mc%ZZJBOtY,^$/K :VRQ!:;Au#}F}柕d?-\?e\aTQ 9c]vOZoMG0.ϙ?i_7@$^)ƿnDqbj:7"8eTTd+Xf_R6vHgtHotHgjF:{SEޔqua6]?Uٿ\;>\a;>BGt?F7bɣ :f~i6aj;16kVΥ|*rlgL{_&zzJkQN7.츱;Ӥ}~-_4UFNA^~ Y9Sl[~iꮋ(Q$'نtTWljjy|{G΂.&T]+D:+O8_XnGp GMP8R'DP,A9FpO쒳K.9oh[#b~R,Qyt ՚::Te+~XScO](ڤwF(?{,$N";m=Q,,+$_žo7 ]<{;(6| ZSr't۸FbQnjIϵP4Si#0-zba e@d߁ 7B1!]f ?!% }׹B 4(D]zi((M`W-9Oܓʝ%cVC9Yº.78Dl_7q#ֱvf?>?~|4{7`Y=DR.}f%S^A(S<&!E{L֙? ewق, 5jTi3#X2gJxUɳfm3ukܓFy( x4vyW J>='D*.’SoO-kP`L:žY%?cU"k߲Ɩ.2b}sv#Kw4-TnZ/f7qS;0_6孈6UCKzD݇#_$,e"|WYtG.|.lhR4'{3(;>81<+uierʞ"ߜHGx?\0h\y/Dh- ZN4m~^KdzMkC]h7`(}6nlK{n6LQs)-foY.2`4DsP9V}Z?@5!jK(6lq:JV V)ˬCDȊoPweOiK>sh". [3Z TZyfy%3{:B*@tOuhçs%X*c}o8 '(O&],^S&݄&X?FT?FxjX=ӳY+x5IH3gDmgQ,꺉Yʲ$nz(k<zLxͫ:Ǡ+ʞ@]5C)պG w4:~T.dƳy3ΕCC9gbT7AڪLPy-d!k}wP)gr=X*wly4S*>MI1/? ϟ|x*oͽj[CRP̿߾fT^úHh&x"JoxƮ۵sQ隵VP|i5T7-hؐ- R췔W%Y] C)N^WT͑}LF$G2Y/EOZ;ZAۊ? `Bz"2R9%GJQ4VzXG.nuKgg /^,>_v#BӎXqa$-o/8d'eD)qrḀ̇̄T!B%1lTo@9YV;oUl9 x>[OQlm3q{ǩU`C5y5μQa}RzKDV]' .CȇS6^pM-)%,lOy٫nieN?}%f=u{˱h-b񙘖Gf;?fbf#Oșx$T%4gZ>-:UJ^}_~ky\v?/Ꟛ=jkQTKSjc;A$aQ׍]JQH3@/%8-%bٹS<}cbJ=;G&+"Bs U<~,M#nDѓ%6Q5ք5y52yP8eQR!TB|7~mWrcyc̷r *78lP_%79J8r"TV<#KmCmBh͂c;&?/jfzeWdrV> G)G^/{O߳8?h{WޯY߲佫ktB^ C{J0g^lGw>Rt;i%ζ3-BUfCJ_C䤴^WoZrxs?GƑg^3Sx~Ė/Q,-$3iW(1¿7G@|UP8ץ#(fdM:s"2 _:M{Yg]eoP˝*,,15t~{,X?5j>gue۝(JWNjjeˮ%Ոb[\1: -.,vCbxd#usOQKQ6)ő/}lXNj8%u"B"SOԡ>,K y"B?+B=TG ֠el+# /%ؿ$pg~TA kҁ!WE9'xJWCG Ύ3ǥ_~VwΩ蚘l?d*md+ O%[l1Kbjk7GW<)2ܞPm)r%mY/LD&Oe[y,k/m/bEeP4:.wV!SF1*o{5^{þIAES/"_Go΃ hW#uٓY[_~)[~QQ.YL=Á~:n| YUxc4)ΓX:bDD0PXaX&2*sn"}DvZTA=Q외\zVo߷F-NW[3%EgvUm/ z%# +\:cTd/R^O\ m ϰ{& 79j%/-͒dcVж; ό5AT)}qn'?jKXx{CIcu^|Mߛ,2.vJ,iǣXsz]y!(ɣL?9^s5V2sF.G0iI.["v r!O[P_%Z#W݋%R>ni_ԇ=o;4}pnuq9s EN[ja DFnҾgh-kj^PQ7B!7%7/ G}^T9P#YNeaE]6~{CѬk_8Q_:T<;?ݫW_S<ŸU,9UZt\I ctZpSh#k^mYp7țbO{,/שa ygsNxG%q$Ճǖ8[4XpuVPr$""Ri(%`<Ŋ-(.nB#Ŋf'^Q,,ui@l|) $sޝbEjlhA݋+MRl8m|''E Js)6|h&,dQl7~˭ SIɓ{Im* .Kl^P?XJFW;xyGP* ΅bj0tPws&|j DORܘT}fR~0 -,+F /r fǺA:P.nh>XBzsN|<ơş(~z+obL=(՞%Hz. ->:ڝ< AUyc&_3wh~8aUʣ/su~3[YubnRt6?Őуf<1J#*Ő]mQ ! ŗ\P^m[5u Dqovʇ#[IB}c6KgȧƟo(kwᮭ`W3 5ubjТN v{f'j?I#bkL"W( QOLQf7Is .TwԞAVQDlG1.7i &^͢Zuy'ӶFhUwʲxoNo'h.xgKO,kY#3s}[;U)+>~finB=)ݍ!-`E1T顒)Y=ʳb^7QR6KAŊCT{ށ>5A= j{<_N:j;NsyMi/x5bbu۔)F\!=y ŨG<.?@.oJ!Jo~ ĄR{p b*=: b7otbENZ~V'ztp.qko6Z֟qZ)@S;~DA|큝{!Rj\HI@q4D(bt8OLj 7b}s;qg􎶾#;܉cCV^D1:mc߈:;"tnk0M7|ϑIz! 
$RCEq]׳hdN(3DO!:d0,cBG*?8ZNMUz Z:Y2VVC+ڰ,R;j[(I\!a|b #)<)XsvY1&X*"ԉ.Cѣu\y]{kcĚy_Zslņ;(~YO+S&Pl,p2/OJشU~.ņ6DhPlJ>i5RAcP_1l9e=:~mp5Xi׸Z_(qX#a{J1u]1(v7wM Xӽ:d\w[HVCrӎm/̇ f|G%l+!6`NTy*Z&DMX{I3X{ܼꝥ* )6L ?)bxq QP?˗4Z]:eEg>,z.o-]J15ѦRNԊ貱bHq yÍ+g~E1DiȹßƺU?(F]JzgIz _ӷf ᖫ3eO*Ad"c|nQF(O1zn(F]&׉^$JiwbhubP8rya=е/+b)D}|Y[j;fjC!>(&l7Cjcqv{OϺqbQ|>bѩw5T))=\v \uˑC) ~j q' K[eB܇5g?S~ޔC1cQT?s k>)=Ó*(f前aӫ}/򶧘k6ⷓoN   "J;qΞϐv׈=Fλ\nYB1z1$v„bEKSnkňl}UbdSj-ײ"wl? 1 ڻ>=XOAa]Yh4hhޝ/QUJ ~+/x6b܏6? N/8WkAPL1aHx:w H02HPiHi"|_C1zm^YQ˟nR?GwM,>#/_щw|ZҦ]X{ya16} 2V42q*Df] ۾6BԱ 2ngPLi/إF1zJb]:wwvWs(F߮#2ɖb/)YN{WHY./aϟ?? y4䬲oB"hW:aPt;{RS(]-- Y|7S U%nOvEn󕊄S=$8 {(k"oG>7M 5"^,x(b!/#BbâcB(ic,q1#sx_tE#6Vuj(\j8k3SHx|DW^v~2}xb͗ 5(Իx ~+$f+g"͒bfm &, "(;ïz!83_B Ϸ\ \=5?dKR=.%zUnΝIӀy;7S|Vd<+[]vo_^ﷹ+ƽG B ;UYO}`9dS5e*UC\#z4Aj#[s#\hl" :4^vguO9;bԶgXj]_z^ ·(J y}cJyA s[N]S CX}bi)R)PjO1E\r&qI[O3&.A :Mb(ՠ0kgHįK&~hy:tOCLYKQn~)/YVs+);ooC  Oϭ{4iǦCC6H0:7t0r}1`kgcP=+{sDk4yX= `6HSQ<~a:%Gҩg>UyP[Ҏ=~(ڧS v[n].Eő)GLM%_v$DoˋVvR,׻,wVY++): U~cpt\)+!t&_Wv/)5&{P4ŌCEyP̺j0љb=uQ͘vvW߃bsS,z4ZVYl}FeY. {?h%q Bnf{C /(F >7g wG7,\惣Ek){7잕*Ŕ8GBV fYoZ!oU~܊_ K{;6C̉תQt˄9^RMW8l ŏbk >O,v8RaK _wMX]Lznw˂ҷnUFT/sCL^~hݢVWCl7yoQuF>w7m2\! XrKAo Zs2ŰATjTS]J1im){I%:Jn,ܱzZ]ȝy7&׸l߻="=ڞ4Q TRA2[M!AW_j)Ŕw3~dS gjqi'ŏ7=c?bŖaW+՜GU<@ؼ9}yqy=oYie'no}4оQs W%\-RUecW%E1ӓlR%v"D& Q;bfA>KB]g (l?Ũ'"'1sNQ#'d ^ÎNxb#PQ72Jvw+GCTIy9\P5kg+7SqW$S-D]>)wmech)#-^=%8f8uZzj-VPqbZB4Y'?Cyo.DJ<4 ?9b %75_5/䉏;Q|/E`)'(I gM}u$~]p^N\QTDܲ^pO'JݥkI'nNbٙ ')7MV~ O#FVDQ(F"^~f Dn3 1vP}?|Y>BKbDlyt#g)Sz;Ƚ'Q xC#[P|uj4NHAd| lS9kkeuunkC1je SJS8T/QC L4OGe$(1;YI1\c;y'QR.{Q?υ3uCs¥!AI?k>_bܰu׷ B1f‰'(F[n>?,CIQ%86/ά)٨e\F1`$BQ2<ͯ O_暉Cեs̀ĕݤ\C]\~#z8t]GQ i2zA_!l_uV09Dz\]dC儕&:@D9[(\f0Ű/ߊ̡qhIbQ!w]C炛CD口E!i_]>QVnDN9؉J^mD֙K+RqevQ2Č)?7ňl) 0G%(UWJow[EW%3CS'șJ13}h8ň|1B_xK?~2HwjCNG1e^fA1R]0Suqw]1׋ֻ,{;O\띶DQ{?|qRI:\=$yԥޏ;/롐~|7GK_"i9_[˵Mh zwaYF8fQZg/-_j~7dwq†p7^>+?sg^sWQW7F-W5=r7rsI\))s g[(ͽkNNµM0uC)9wxO5%w9lwr1Isw|ݒ>_Z|i9kE/Zoҭѳl3̶k]vn嵜]gyn< <T}_>{TsG/gwG{Fmv7k/ˉ_3KD|7AmғC=8ndكlhllUw%jjvF9Yˊ(rM/g-[)=G]t#f`^x؃ AwͶe@F]0$NTL2^r5%3DXO-oz)wf? :H'Zo6_ &s,E0N,}9ϕ,ď@l.h 'o}dxgُO}u [$O뵜gCss+pե꿺/u<GF>YN9k~/E֯Cπs7qEf>I-d5Ac%Cx/Op D*nK%ܾ͋-w$W9#(<^5sIEN llQhϝSB_ )Nf׀rǞZ/90ñ>G!h/q28D&Ӯ G7`(p㔣-Кi]ƪO/3xڇg9ͦ8|% Wt(]{{b7~# ]}9KՅ_*MkOYK\בr}'r>PhgLD{~HCP>3iC0or_inި>ZvɴCcp9MG q!zvI@yQ~o"Qly47RܞPc/³8<ʃ`9/rQ~B^%vE=b?џPo_᲎K&a[7"I;KPN/f PfԾ.3UX;} +y=gɃ"c_^e^9ڞ(|?O2L0yt}bv0.!oR= oѼyz״8(/!Oprzԣ=R>'vsf=? e;[ǻGEE $v~;8 OhD#^{3o"DR5# 'Ñ~ğ^_΂S^zꐃxvcg$J ~ M{32 t)ķkk:Bv$F$`y$Ϥv󒪇wngyK&#w^ I>;؀7etO_AOGyJ8?wQ*P;K"b8Q S=֭(qD[J,Mq)T`ICH}]x٬17_xulUAFxE!ȅסdyD__ Ŀ~|_@Op;2I܇Y$$b_}}! sv;n_ w|3 ڰAf8y(o$>bX@4;vq`:\8w_7xHtH| vU=IN%LɄ>B<>~|"㋘FEߧ M) ?˟ɼ#O/Į~H'vE3oi/{3HHJ)E^Bx=?ɺG_~ %~1$|C;~\2|ү\d ^"M7%!|O&AlD/ecy:/Dl/8K?=#Ie?ʣ9$Cg$relw?lw$"f|O6;I6ɿ:c?'Ko |ϹD/xy/O  Hy<y&{&?O$΢#{Bx:P.wrd#gbȫid|z'f8AxHx3S'>_IހFD·SH^B‹4.Q..\2yDnʣ ]qE''|IaIoIܥKg"A#$ aL'|:C=%z@I#<-t'?eʣEDb"w /,#VF]FYB{(#(!yFG922e$/%~[x?v)-%^N+'GXHx3 ?8#'XHELJ b%8/!DYs哼#?R}vQ$gJodCH=M3Dh$NR!.%F9 /I9_m)s#dPN햐~?=;+!O)$PHF#*%|ZJWH ǥ<乔GOI|)&q- 賄ėR2ʈ_`{ed~[J-%q yH/#vAy1ʣĞˉzbo9qI}q,"~B3_ a;.BOKH~Lf!׸D/=t^Kʿ>ՔW,{%z=8:=٘g?yGm|DvA%\ HQڑRE?"E#~P@t]ĉk?Fׅp]U@]?"༸A}<,-9pHob*8J|HNQ»?=Q@$Q> .%&Oȓ$#O?KDd^q*)'p0. 
v~F%G?%AG+0O$z YAS9 b_Nˉb΂y3!굔/x"t>HZB=%dމ㼪K1>HWĞ.PПo#z "wo9'l_J<=!O78PF/tH*N@Ďi$Oż䭝|?(Y\x{t0$ץ$3gO8seݥ+a]M\?"\'_bCXfSkd@q;|u'y$cYg?h<%J;S$u'O9YOqO)LE$} d=o< ן|wk$zQ\"z![1|x\o@ 'Ǩf>7Џq>FABc[=W=!O#8dh?\:>AƝ]bĞ9|%4?kGȗ\IV 1cW(b?0k&qgK$S{aŘ1~Pg;7=ʿ֬ ~Ӓ<-{RX (3npض&CK:K1Η0`8BdQ=83/l<ĜcJj'\ /{*#߃2/]gh00++W&ZyF/<$w= Y̛wg>How2xʃ~GO8qj,Kqc8e'G8G0vq}(:/=zcщ-?Cqv 7!(yE(OY>{q>O277Y,&y$ߥ=ȣ̼ Nʹ7&x͈8:Dt~Hx- GN~2,_}_bp&i/23kvdIdp<ژqF}ksZeshω>F0}7K4& 99q#?]ܿY[`/y>_; bG? ociKҙIa '2v$Gvq-?~lâxvv~cK2؎%A_KܧAbK03]s#wbOz}7 d!G~p)AG; &pq_0#ǡ OR9(S|Ϙ=<M֫>wCq|ў^1^0 Z[1 0.'o]2z?J?zy =h`!c_;+qg'd//#y9>=/l։|Tl0aBϩ`G7}D^ySz c\v }}W=7q*ܷ8= cQzG#>gsjq,;gvɴO&` {OfG bȣ< <*[;ż{hX`C{n|s1a vL^{8'y//FG}1y*F>ec(=G'#IF~vBdoCBbkL>E;C3C y.=7  8 |'%r`>Q#^Dxk7Yf9s=1ʣ~#ƒ~nQ}21nG\.WQQszYWsdĕNb><<8qhOh4o"y3?e`9ֽp= #s<2QK4c~ ^&2ϥC>?ˣ4%!0Ϗy(CxŬg(ד~ ~~DʰSr1O0y>MO< '(9h|R̛ʃ \#?'(0?>y y3>qK}oy2=7Oԫ{(t>=H\@{ d(20g$s\u=nguGsɣy#|y3``iԾ)?B%\uas漞"c}XnϴS9sw7ތ8\/f39z3΃0E^̟9u|r>UcPro h|DFyw8v8뢸3֕|RCQo_3q\qQ!* pGGq0Q8_Bkw:c̸r2P} &>q7y2!ȣ32f/Q.Gq?;@IAD^w#h|#c ?[FE#{.*?=0ϝ}Gɼ =GPvfGϿg(ͻ"p~ȣ7#?3{"~X3gGIhNIQ\|I+1ף7=-{{&1y9]iO3/-"ysvava[><#ӽW5wL!'UUUUUUUUUUUGiO#wt[lacOTLh:S}}mrſ6SJUc|jV52061-/22d_66 4Os ,۵pIgo=5ty/iihk߮{iwgs"*^%. ޥ{YZz`^ˣv퐕as5iw)pޥyLλ;%{<-04B׋^Lb ^@b&uZ.Ҿw?˅Ѝ$lFN0/~4/^MuuFzZ;;8m7凟|ȧͮ66ۭZX5>=$}zFdL~^uȰKLM8nu v9S0 fWS9 d}WT0UYH`|kU?0IZfz#fW_0d(th~Su#:l/۷]Gu=C-|hM!~=MFA=X-'#WefWC9?zBLdhQ?nD?\v91#6w%N?Y|Mܣ$7i0R21DTIKP}N<$KP]mv7/0|+a=aH;5!o(D>]mv7oo5/޵N-B12_juc:fEOOo78-JZ?O?W]m}mqelD@gIF2;J4dɅ!XHI^u|Buwb&F&Jp?+ZZ(^-ҹ0*¨-;r+ƈ0󟴎O̭'9)8a]3"N~97-o߱BIREOo#.Η0NWBur[oxde҃Z:{LB,gkh)-b-N믽{/:9~r.35jͿfJ}8DqʨȰẀm,,_aHgt4x)vmwLJw|LJw|?~nĒGk)ulXS S>wblܭ4KT.Θr 㽿T-M.ԕײ8o/o\qc{70w`I [KCi r 3{ٶֿ]Qt%Z)%;IN ί>+@};\Lݩd'"VtVLkp,!؏@C $8$qN 8 AYr N%8%g]rv&'ѶF"wrAsc}ziyv~ |2)n}(;|)%S(=kcK5;.u.QyUp)^qr}{_8 y r]6o8dR(9pJ5xg%VS9 Ny74p Mo٣J%lb4m+y!pHߡ3bN̞/@ۏL"_(UPZO1`׹G@{B@nBKS2Z](Cd(/8{R5hZ^u]%KTgܻO{9؛ս4ljVǎOZ),rX"=&JQ{,IUg֘7 lnc]ͳK\ەbQ9O3cwkldNh=ZcKYg5w1_[M*o>=DQ䱐+sYr|8˚jv@1ʏ:o`IOңXŒQZT?"B#c 7;5 ]~4bY%\m94`KT)?/lNqe1 XBoWKQ˴Im,Dg9~tt Y7{~Y=:8E!qWa Os\"4TDQd5pgS9*[Ч(8HVuKdfɬ>^+OS<4}|"K:D}KݵȝeriEbJ4쒲Lcu=[rV< %k&`%[>?#ձ,a 4ߋ8ѩf\X@q 欁WETT˶9b@0"(" *V11" H0'̘0 I$(&$9$k5g}w{ Z֬Y3Uuuv0i6H{_Nf^|dS{ƠK/߯&\6cY:xxs}@q4䜝4Q," S߽ dLU |z'V.1rdɄ/>zReIKH.)_owYL"e><p|c)}6RS53}w`.Qp()?eKRUu1ԎXo4LY)K*> Yʼn iG9LF^ﭢ{:L*r]ԛ__iYvIYcHE:7L&&ʍ;H*!KMP@Jnj?&e:Hu짺ďIwKdI,pOY<9h:dV0mptg(>ԯ`ā `/ݔ&\#n٢rn!|K󮸡IڳN-؉;=qUST)(edØujg<G>xz2F|sdyΉ[B +/#9^ו1n蟉h26ZWʽ Q~,<4{;Tvg 㔝n@8r{N{Gr^..b&2a$:]>QDۮnX?+v(5Frʘ+P{q;rXkwTuWo[̨L:CJۃ%.935öOҌa*sOdHM8sKv]ƶ rbOPΈ ;|_wyuk{;;Le{颵*)5eN/a +?iG >rnrcbn{J!EIaJvcJl)Cq:!!Όij9֯fݳ[[O~}ruѵ{"qU`L7,Zqk7ƈ~ QV'0)%vZU! :1i.[?뷊^[QW!$%bV1Ħt_xݎ>Szlkch ȣCoSi'c:4E)r̩vG:}Y?l-!NHWx![Ҵ=1&t6HyG\)"$fŒQlgb{2VqQny,IpO{]T746ms0\g}[[Y805MyO5oxf8g<[sT?z9[44gON ZwK}},Gӈ฿i[9N=b|:[YV}}̤1 (yڅw/8۔o˹E:M]-cc3cX wXyG`i`x!5:](Y?T\~o9O]yg!VK ƘN <SxxcZAϼM^wscЄ5Ar2hzacivV^?o稺1Xyc 8iC-$.6Tz S&w8zXȞ) VqD9 ~bo|X$nt;n>D}Uߒz|9I'beqMu=. $xiG$uJv$f{ .NuG=;gLEkbJ;??THBmmJ-o%ռOқGG~ұ_dlIB\Vot|xyﵦ^) Z$WAW<:MBV㛫o%ɘ*?uR=rݰC?=d|wl@G2e;6Ld\]k4HN1Wu'CXNI YKi  T\9)c#>xLP䊩(QG.yU4qUS%Z . 
B&**OR~Uv#秋|{.!?d+ ʼns~쌜a^~9ch(c:1 7_+U>"5I,;QN;[݃k#o|3˙1r-?[rC>/H{4~XPUed >&2uиƤ9y0+r5H3ϓ,2h&<C0FL-pF9fpRdA *'[1c>>"HA%#;1tB@BxΎF4زH{h;LI:FV1m~ƮT#.̝zqK2rhRf0)?gj첀CTQ)!F~~]4Yfd)0N/!]5Byt1{k:}TU6>@o} K+\;%).Uf*$- ;ZI7+ 0Wv6NDLd|kƜo?GCnGiHH0tuX>9Y;9C|!r[ij/eƒe͌a½ 0f|xsaO <Zå+'L.͎"1cc"H7/a+]E6yUxs-]Iˎ؏p@JQg?#~=*qho*k0\F d0x}C'͌{J1 :Dп0x%b-jfcLG˄ _fp=[9a"] U)a(hښů" :mZ_)c̻M7r1Q/W1F0 ưҏN֑c萖a |#7kf}Hi4 `S[2$ &_͘=_&ь+aƊf( 1M[Q0grpc޹\z.Cvec<UBrVe_Ilcdt@9ƫHk'acO[͐}aOƲUfk3S(7DQI$Rv1OkDiyy 엣 {Mql~HN#gNڍI81u;ywO!￙ݹH*[F;;BV֏V Y7C&Ye1W>d}S[Ȫ<'"뙚Z984[c%ڬp?D)OՃT=;=!4ep)p8.aQTu>\:2ɚ|<5 YU_.~zZ,d$g/rxb]h /)R4xb>g 'r8\%q@.?^ɺfhPD9owL^~vExC&Wvۖ W[I![ZMB^+N@L\~uH#[&}x#xg!z#Eš=Ș4/?thU$ٱgt"[?m79Ơa~gj*d9CőT=#G/js2lxW ӗG{NZ$;%-i&sǒ~#-G/_&u=, k,.BiLߏilLuiȺN1,7[|mD[ 5:$ڵ$$t+:ya΀`H_9} wK "֎w"9ƵZkUg]PO=ZC_eO,C=?ղٺٮ=9XidшU"zMgxmP -Rq4px͢Pban3+2RixY&2tC7˃!|s&^G^'R#2]NE:24H+2h.GqH#I_=ǩЬ/^nI+&fW1y^ .G2KX&qV%E,FKu8Lϝ^C$!̜G)NgHבãn/|O"מeI$1D'M{ToE&mhŠȄr!u2,GƸE\0i[wd|<[bBV_ '2IR|$VGC֘?# |/pF ~Z%uda$~#,_\dQ˹/Y<<`zdmaLdDգÃjacF"cG"zGDSM"ck!)~dI F;I1׼ۑq7C%n U$B rUM.6!y%pajM g6f-2%5a2^9OY$NϨį"@' Lѭˑ1]fLG2-!&ɜ"8[528Ӑ+| ႌzcȨ| 4~B|w\}N|G_KO@?np}427~j$KEvMds8q²h/,]AYx$;:=cG|JE>4vvEIUOBFlڛVO\ۓ!e$l`N Xl|T;uy啾MDF{9Hۭ2amN=;t nr㖧=:>!GΏD2_ݟfcN"g y5O]z7DP8,pO8_J#<2a dUhdmurdL6x"_z1|n\uK|\bːS=RxرQ*җ>Qk§N%lG{NQEh֜"%+Rj:zan ?,/gK=])ψkfD?F"nxd=2mjdtڋE 3  / TMvܻߺ +dwɦARSrD= ILgDgێ n^hDb V }2Ö!.v=ز%>4"c2+xX/WݡwHl!ȸCoL]$sIo/@OM_}{3IPS8S)" ?TMc ogC%#Û*AFܼi{<*oqdwxpĢ.AQt}12Zb@zlB"mtQjI$[]7<;xqdE/Ks ɂϷZF)o%s[pxFOrN\[I~ _%r-'m=pdTY3X!+'Z볤_mp2a&"XcDscd_!Kqr"Rn?ycmeX"osd򅏹RU`hZPo`CskJ藒X̸@w&kC!O*_t+fHKU*V#U$!%M퀁6F*\Wl#aϕ⌝ 'vgs*3t>ziϞ6xe ^(Dsx$v92zm laL˔矲Sr˒dk M~,VJ!Ss2Y(CϴV{R|QyN>d\GJ4dq8\%*r/Y4a30RX}\Id:pz5jM8Rڒ$wmޙP[=G r.'£A%l0_Lt8*Ko~]@#2ɰG;]Bvdm7Q5dbD!CHa$`OռKM R' wY$^y|a:2$hFf\0@m9&2W5ISNa7yg+k@lVAR]Mru$YcUy:w@@cN^/.9Vt}, zz+.^$A9u~>?ݼ@”jռDNj|ڠ;y~3 8tKc`ruq|d4u䣇onzz SJ]_oYe`B̗9"S}_Y: V]]ǩ琉7N3"A W.&>ZvWWn%R2:50^ښ}c`~QwߙP (|F~Ũѷ7җF Ɉ#5Hfַ5rȫ1Rh  ֌5;:NY@f՞{D?=?_覼D%\,jܣU<.GYvS$&)arY=N`N>JlrCIiƇGxba;[ۄI$)ٻ//JΝ? /5kfǓEg?!f^ o]ksdMއKgC&9eN >SlU% Y8w)MR"ǭE9=8: ~026٬Q$%ku+e$n$9Hvо!"3]ې=l].:gǨ0+}hk~H wZ0ܒȏ@,MNjWrSNשV)NsUNjmxٻ|VGS,Qtnv`2N}О/vwe<&c+2m_O!ENtC9MdbޝfOs]v B88m κ~o5OÊZD& #9MjVtj4a=2(L֣aoA*vO;~XC=?*&ⷀD![)01/76f!gX~M9(d*p\jd װH٣#C3Sr8yÚhvR$>0DqkyC+_(D_tdB&j}{,|ZzX7ų/@Fn9;9tH oCF_Tɬu9\%-*{qKY=[Wa0SaX=S@ꭒF!SΆzGsAj[c'VEFfz9xh_s9D'IEOגۢ3>ݧEH@ƪ>9>r`dd%;moWTK@7?rr42RGswdC\Nd$NtGC$$ڛI> km:ԳIܧc󓨍7ۄhtFF9>oCyT),QF2.Q@F)":i2Zu?2Z@2[gjsy@DU:yI6]HCqw!o@&&>Nd:ÐI)_E,)n$ITupA$ibY#ds6 ]7K$Z%gڐirzqJuL{ G*S* NJFc9f=dxkeAn|-XDG$ǽ6Cؠ?4k 07^'o!fʐQck;̸n3O wV <  $!"E 3 SV&VdȔw-EFT;!LTy*i5cp`6qh♱w~h䦛X5ώ=AF|Y{.ˮK:$6M3 qTQG/5ߕr4 f,ﳽaJہ}J~k'gԮ97rsNWߔwR;nqt>Q e(9e97}l^Hp۫^K޷ʪ嬯YΚMq^}V}>ȜPwM׽ʿȐyjп>Jǿշ>+#F;^;1ea=RϳAFjA}?}[%>|H?yɷ7ѧyb)˙7mEٱ̴qu}ײַr+_&]Sc-L?HtU`|~R?,ysrSfy.Fr/[@f힯N_-gi⮎fszu~rsBȚ+q}^d|Jynnuu9]͗gCם[ي#Rl.u";+ؗ))NZ ݞg{N|4讬>({ɯ$r@e_y%?}۾PƤ}ۧ]_êRMIef)6<3P w2anԦ8ҨY˛G7xV" ?f% /3 cVCMw.%أ<,^z³?Fe3Y4{j񞯭b@Aʧ\tQӃ4>0ӻa/Bj4<9dUyyit(`6,0#4ml^6㻘=4%>U_w*m$͓^dS08j0%aSc;i|׵,옷Z9^-Og.xagi=ߣ]w~<Ҙ(`|i7I{9{Iת7WH/w Mw>݄%!T]q$%rV>gK$->sMi"3dGĿ\ΊzSzosbm ݲxGz)vxGz6k۔Gcө]Hc/ɛ҃,6NoA~;z $,7gPzFo3DTNUvAWGt< uϿP^'VTqOģwGkZ&Jxh|TP4lH!}uЏ7S'3,#2w<[Q(=7!6ëf=j *o[c $G{ψWW[/sAܹw~ky_P~OO:Ot_ڇGLP.nyEx+{Hbj h`7G a0:Ol^y|f- H[jWpGz kO' uT ~5kY-q]%Yr|qӖP=Od p?>D`q1#UwPp? C{c QW%?Q\L7K7= >?&Cw[n{vq =8'Lzu{Nqo~_wIfa\D9P}0; C\Nׁg>Ԏ@ᆭ\L0QQ?9!ރ|氋y2|;j :':'=Q\8 #rRmƿAC=B<8}T_"\ywQ|*%Gi~~CY"?h;{Oz=ȁq?üy7*;3=gX'A<  jxy%5+(7m4i;!u쇒hx^uȑMM"{::R<3W2C8Ƌp^ˉ7tch= n9:Q0{MzӚ=}wHccUhE]!=xU&踙RDNrjA)-.x¨]\g_`#z'YTOZaEbzT.. $$#"!4N[ 9/H?߁_:._Z_Ay%r?Ոz_8(c\Rp?H~ q>]D$zoa@"zMi;{G±+*WXnaB}Zwq5ܙfF>Ch> 0B{.gΡ~E? 
{v'K^C,IDю#iqy?¤Ny,+L*gop?&C"i~yB|!8>*a7G``4`q %Q:~TrPϣ^i|= 7U`4Ohބ C0j.$'۟NϠ7ky̟;`_=n‘Dz8o`H_{9j!^Bu w. 7;=*·x r@|bA矻>񂾠 ;2S?H}.}?@/^8 y֛h4>`IBi~;< q%?b^# zu& 'S >A>O j __xAߒ0% u6c58 zjhTJ"N^\zEt~ P WHw1#<{͝N)TD罐ƱbW{ )rT5aV8)S_>Q))>кhSDA}uti|=P{A.,4Q?qPfPh#qlx3E d:I1ItF$OC4.$"c;:4$(- f$P9b CIozSdq(s?M]xTLqbZ7}iyɢW4.gR=BE[INMyYʟJF~ c>g%2 _pBZ@̠ jW9=ő<w1.R{AS?)~RH q' ~G!C|Z7| vM`> {̵K"/hPLSPh~$a)f]]0}z6pv )ug >kit K&j?0iT/ߦSi?SJ,Z/Sɠuso!>l"''G ֹP㺈ƥ× !77[+l9l4@y!G EtFL:Y|S_4|oh;?-E\94~ h5P*})-cE$~OC Nz!7sh*h]RDRʨ>hWNU5jZwB\NWAt>z+!-t*g{T#3MP+OXSy^s|CLP:4_i|Rʩ}끰(vSJJjG54oXBD;.rT:_jo:ߕ4Bus#U4oCWKHg9%ď gi;A9]xTC R_N穂m_5o ͻ .0Oiu/SH/Jg謉($ 9#rgVϊr9rjTxal4RF jUT*w9?q/2쟕S8JQAR9]oU;IePk?C^VA8z ԉ4VQ}@8Z#{0_C^;5Tu4!ިG}Z*\_Ov89VK-ޟ_k1+;x|'v5#럝njzg3Ԯ NҼvFN|qQ:uta4z휮/1^R8 c|-~?SJ(A ~R}R2W6"á)^J1z5i*W6 BPC}:'_,q)~/NѾ ^R?OCDc\u>::/멝B\8 e]o@*pCb]~_`gsiO/|N 9)a>ޭƸ #y^jb>}!'WPuI%4Q9a]ݬYW^JN9rU'Hg Iُ>@P/:m'u;/%?onv@5_C=W\Iwzz 7sjPeWgh+$:`=}AzʏvM xwO ~ u Խp?X¾!~BfEɱ ȏBJi=]W\zi]JN;, u%e}a_ 曻mPq]qH+ 8W<}R]&bvO3a%s C0a}mԎa}:y&a\1{Xc= tb݆Bs=Re0{ R7H%{+9ZW:khO=4.;Xq3{?kv߃)?`)p o|Ou1+9@t#_d@\~x>ן:E-POqY&-ov)`'vhax]Q|/+ ɒ+JWyC՟+xn~.4|Σ?<'D;Toh8~'rVƲ]zĭҫg 9k@x2yk` N\=|>H R;}s!_h1'7 |=s[5~xW;W.Gy9=Ia ?Q"o>>H _?}9 #: xy> x.}/}NOr;)ob;C|~.T;~S@opf!?|}9k[z ߇\os8g/S3^A;]]cx? ϳ:ߟ@q);%'|=\Ͻs_ [s5+ g {9R^P)nz"S > n σq)S|G׎ ȍIO<=0[|@m?!ޣy}sPP;u1üs_ߧ@=.3D:x ωsU4Gx? C=|{pHT;HyKQ7S >?o/q$-/) ~q(8e_P_A.}?R8 x; sPc\U@&8 <qC3ߨB ~qwBs@nVq/}`}O;osu3+Sp?|`yY1r' x;w^}cT4@ 'C;v(#K|ޛ=9킿~=$T?mkx=oO| OȐwAPsכ1'q (·}3w|(8J$ϋ08q?s>:|G!/~輁Gi\ O aúQ\'{53^(8 8UBy|X|ɢ/ys4/q]O/S,w vQ*?֡ǰõqM({n%s|~(+QQ\̝[R"g:-rfK@OuO=ʵ~w ORqQoTGpG!o~xޛH 3|okߑ=Z q\8OӨ<=mG}yT>A]U7{&S|z:M[/)|!E{M`^!.{[7AxkC=𽎔'_v ~?.ϕ~9}kAG='?C8 A|y~qʍyԟ` ~#]OA=q f\i?`0KtmuQ\ ASQ?A|G:֝Wb>a?\x ra RA;سz$rGqL}rg^G>%7~s({Y@3W^sqg=J|oQsgqIzz;gΣ;@ ~Aߏ{`ŕgxG?3mT;>)q>0=fa߁;{/E/3`^a>O'B=rB1'&_ܟu W>~/p=^%O9q;rջ ;Sr8exO*=Q i?OwyqfX/s ພkݎ+`? uGz_ s_ X`}gG83 ?a?R?Hc{|14>0?g9ϙs3?g9ϙߜG}^!PT!=ܨcz6m333ԥG+F5=JY3F:HH1^{_eli>:4ueʁ˱Pϑ)D_EL>0+m=]M?d>,LThCsꐞ 3Mc\m?.$sxDH>$͙%s@VQ}YNvr?+z O_M?]oZ_ 6abMS "dy/7a>? y =mU"zل" (npz(}8oo$oo?}qѰJ1154QA89ӻ1Bƍlk8ˎ:=O|;as_;s' s_;s':N8cx1={g?ǽ;aO / / %lSKclbxܘ4{~Մ zܬ}XE}~${kSZ@W@S~-+{¿pe=Àځ]&gCVOӸTeWRRT'P~ޫWZ_(  fΛ#7 S<58ggx|S {]lfS͛0Ӱ]zcQ SE)F)N9r*tʙ)PRΣO)OR#?r#?rMN>G#מCuNXZ6ԕV OtG DWOx_6iXDgkd}\nH}ReAL"<&Y!ެ~% %R/)s[sN^(-00# -Ovɡ7 d; .'-sFuM=\dm%FZ~%863b?vkyy.PfqY&6pD6献dwk@! 6arS9ŭej`EfӬ\/ERiAҼodq6'HK+Ȇ&l}4o,- ٠\:4}g:ºs ?i>6oi}.m {Eڤ/u UApء`ψ!c*1E(Q)5①!4k!*'I~Amֹn)nO䛑-;5(8MZO|9Zґ$Prt/A}~c3Vt:ϱϰ~&K#/m ;2юZHc[HցA~ aW߿"*Gr$]9Y^G3Džʈn' g9y[i|$?p_spᧂ}-!ɼ%>L;$ i;O:j̇d,8ܽ񒰶oϼ¡Ye!".dAKu/[{=}w4VMq';Msnz ɪҐnЛGXϛ.&w~MJl6n'%"s-lS4NR<aY)l7`#iMxvzi4;ՀIulƒaڿQV9Ñ-*Oe.n{ H@`e>_zKɢ!IȆqo$ ?ob:Ok#fU4Rw_Pš*i33$neW1oV>GO;!ꂬ0}RkM#ix?5n5;}5MU, <Ƕ7̂gI)'bO#Ր37Ic]UߺǶ :@e3ZFB6Nua>@$Oޓ/@6:Ń4 6=e9N4sY3kvQx{YݭhDɾWy͝.! ".LXN]M} 8M.VTC9x-Vl=o+v\ i{"47:ObD,Ώgi癯LIQΎҤqmIsNE~ȆА~x:V4 9xۤeV~'f R>/z/8*U:irУ-eq6& ُ_sl~S̈́tF_iٖ:RƯV/ ӾOAy!vlYq jF7JuIҩUnQ[+g}vs:HcMө8q."RϘCt uۭ9ST񦉥 kX;nZUViEM_KF>EX. j Fs*mQ1>8-w3m)w/) h78yEX>Fi!׌ 09Ѹa.(tg&i<- %!0#vAo8v|R,>O^m%,f;1W[N|vqYd놝ѧHpcoqiŊUMSkOOܟ͢mgVs" bJ$e. M7Ll8F9!iuM{"x[ۦ'Ikvˎ4;GYA9Puy@W)C:sq:iKxd%}ɴ|,`vHMac6VҪ֒ [T_ږ_':+\䎣$ G*eǽV#GS7U2L:t03O];"u[R%GaI zq.>mqC"I}Ţ%6i&Ol9e06γ<4.",G+yYl{ryXQaB iA3 A奻 mg|s.g#n{+]uyjFʦQm®Wgh;۶oB6Yi'7*4W02gobhB6aܮ7B]a=\k-g+I%UŢiT`s]Xm^~}vgH}B6~Z] j/h.rL~ [1K"]uG&4o0ͪa|3Ҷ)rui{뚖ezaHa'uZϲZ<|4^='ŖXȯ>lL˟I%u>xDZ%~{e@m Z2^tQfBɛ%'6폜N g:Cz4X|ێx,q1 1!"('`/|ٿH$.|7ʙʷS +C,#!'G.7~N^x6LB٭Z|J")$yk9mmg:e^-3)V$ >$D&Ƴk? 
'vIyn,, 652u|s{ԥ HʯV :dx@zA+M_$vE:}1jpǚ$ǟ|H9n}9oXv}8g!$i߼UlHҒɫ .,raPg 6:ǎ&]Ƥgʻo⁦׻8Cȼ^ !ye +A[cJ'MV 9IύF7ߒ&azY.d')a[ $< k2JJݍWHpBqp^JdԸX$j5-V$q9ن_$:J.$~&˧&zzA%97ǣMH% ,%I)U!qE./e]Hq%cHxOc3*ȤUJV Y+(Hξyxt02Vqگ$əIb_t9O;Ϟdž_I֥[&wF9#C\yx>=IRZ~bЯȈ ?}uN&CO^IH_cuU"sGl|OKxG[?r0'+T D#S?8ُN "[$+qPΌs˜)7;=")Ku.yLxCFW\DQs8dŋ? ʬb6nYE&Ip?MjU~$!Ёo`VcK_ϸ6T ^\۴y,W$rXٚ#g^XJ#a7SrFz69\xx z|e4.oLB~e6w4G!S_D{ץ? >jFG>qw8%҅զw>]/W'& [q6v<-r:2Ӎ]_ސPSw  iJ#F呌omT !sMtpM1$&鷚y3;x؄p蓆ϑȰ㋧>Dd YHʀ3R)(HZ[IAE<@U9\8)% dH~'9`xW|7o"- s#kv.=uSmRH[fY;Kn)7c񰘄,3u )1 o0x%-3GH?+mH~7$>3>ߔn鏌|bF$Z&JxRo1衱W>HN&Q>+eƑDc5O>VEj#VNDKk;W;"bk$^QSI|/NVXa$I9>ڧ[\F_{zuI8Ȩik7RmHEkcOժ'ç9t;'4!}QC/]l0B>o6)$~7fP7`IrfbmUdR̽3HGY={B~L2ru$rqGe۞Y[ [%XM/ϩ !WIY1{ q2!cX٠:L}'I6hGV먏=A)C ۡ,ۭC$bfN?/HM$ G:rC޻xomt=-ì俏XE-ELO{kHNi/ؗWR$ޛ3cޭpJ`Qw^?9*Ry<5{_HYei NE+w[]'sy_}X2ޱ/ȆN !_6l5zZqD>9r|d3J59|?=aU+'fH}~cH|DjovP݉/P~Hț"jI6d6-E{!9h$i*it}6" 5*H[U&_32^Ows'7MN7^CN߀l8KɺrlW>dWoA6[bӼ62̊}7?~j"ǐ,Qvkp..ٶ)" ,ڀSTi 7Cv}]y>qFOi sr n1~^|'O=;}ӼIN!- |A6(Ób\C a9c$V5Ҿ9&c@Fٚ77_qraJW?~U~ FF:<<V!N9"Bο^gWG9^t@0~پ&ӥn:"fB9}x1S9t5E^+5XR9i_X^ibn{# _laX/l\vOU<6Yz54>!Y+|l4O2A|}ȡϳfl[6:4i&% .7"6D NZ.G {HͅտN#KMd=vkbٺֲ@)dӆqBoUT0;^4{.aNٽh_Q٧.v$&EZ~#%l ;}5?v CȠ;#K/H")Mi?c ٥?wmÁ3IP*HPI#ߐ`FP$Rau`w3 -6n|o 3 }^U2yd(ǐp!dXeDfdJo#/=H:Gn C>ϸ$B X.jԉa[%IW#c 򺈌*Q) [92ʎI3## F9䁌*r/ڑC}fHԘp{$jDy__r\LHwY#rlTői?!s Bzj2of fȴtl%Bz!I:K$m $mϓeH3]C"̉K/}DKY }Gx<4r薝Yp%{đAk\>A?!2ȪŇYK-/*MI;S#ᇓ_T ڝO#QdtI;R nuJ $ScdЈ;W E{_:4+C+qw/If+5~Dm9[]GyJI[A$$O_nol> Yg3m3ٷC'񉴴C\0Ǒ(}d~I#II[6yvSHHǭH/Iؘųv 6[K< -ՀUS8Vr.dZRdL]B]%g.Wj y-Ў ye,fv';{=9A߷D=^ĩGڍ>-'[0姭GX/dÈk#7"cZG6R~V8 Ywd$cusi0i"MtV\vm S݅H̥nC_!wilzv(uص= xPb|\$+09ad1>6cIٲ/izckkzBxfީ8ܛ$Oq9c#Yvk%N!ٴ2AEڤ-BP'ф%v󈁣8C wn,.LEEl<[Zo]_ˤmpz3|y +N 12:UDěZH{GKԌ$^*Ɔx{Rlor^-i1k}iO~ pfX9q(c^᫭+lpcLЪZdkzs{]rQtHCWcM4>=Qޖt=C2i0m ##]!۷[|cal_?n-nơc8*DPvM'ˬ1g'vw.zc*iyn"ʶ߬+W rª*U9|1ep;sUm\ր.HVtܸ+ 7*^-{J45:?Cx]f&mjNXG>Ȇ s&!Y_զ!,ξzTɲ||jK9cֈH֣W8^lbia^|+LXZogք_"fb{|Yf/ u ]GX"6sOيo&A\ltcn<DmP,vT*2Dg6M$oA HPiGWO<[d2dƮܻlZ͡]Cԃ9wi߇ H|pmul&*xs(8~b> (vP4٫HՈ/7"ლ,D >S؟7Y6 IQcס6"kϭ 17Tg4ne-!1;p0a]JT5AF;}nw^!9jI$Խ+;8[!W4rY]\iaC / s}Yv.kwJ*g|uM{,SANdXqqmPU͟OHڜDzd]#2ƹm ӑ#Պ3g_ )JGK+Z ?W,m5dCFw9 {h* ]5eZ8d湑y w rL4䀣; ҟDO3+߭noϮNIe$oГ"d,_`E;2M1zdpaԹ:dF-iCn: [|-OLzRT8 HBrd] #e*_`׻-m!#:0t2v6t#R߽Cv4i OmA7#/KB?czAk v٨>K: ub灶%6lތh.T0s OmOܼ_͔B>=7XJ&!3f ![}yMDx@gМ q7Oyuj>:?md?8`ILgMtL[d?4+N" ۾mEX9o;g@r<-$ɫ,E5wLx8=kKdaC"qC&[LŐdrGp{] w!ov76G#"KI蹗{瑜C=\[2wdvѯΟG/ s| 8!Q 2we-) <]aK^vP#9q7l?DrlL9艞u{:IAdq3>\bG!=* u"?O\K/a}K9JjHymoI,EӐ<wxLzԘ;x:4)-$n\gM"$O{W]D~8s_dPջgGBF߹C|!kLܰڨkȴAFLhmI  a@k`%I#}L?فj/o}S:Ts[x5XxSesf"k/h; \Sݒ=2`eE ;Mg'"CcZoC<&cqc ɎOe?%H91V$ے  C-TC8m9z5yodpsϣ|qf!Cv+~4F&ort!p:$jDTXYx1ǺÐy&f#?\yCŹ|FÐB/A濈f.,1)dZ=wDX;ngSde"q{REi:G i)D=ߎC&Gx]kE&\id12k@,d[͚:~dς .{~'#gH|7I~7sci)c`2sɼᛓrڻBfg/r2;z9널96s?ک sOUD{ci,>w{Y{-aɝ"9&^Fv/FjZI܏d￈tv5!%~Z1$"f"^Hp-)_Ҩr f} z+1e4Tg!A Q/Ie5HK >ft7&2FyvL5Vq%g=Lf6Ԣd󕪧E`A%H0:k[. aW}g]o}弐. 3dqaTtM/\xsw 6`m%)TcE'2xɮ_L?Y<}}dȚCj}4I! <ВKI[aZcV_y&OX/\sc|vu%d蚁*z`~}#CvK{6T>pdն+IX}'A'H[хH9}A2+Ɔuc"lW-ުdI1~ ؙ ]ײcL92L[];Psū 忑S*0S}]hC箽zZF/$*Ob,݆L*T90h<2Iʉ#*Df˰]1K|G& +2P|ӿH6FrKI3۷Gfo'.$I&/ߎ-gcd`@bBHWӲB>?h;^ H_KuN@}|gA$p"U$hM#HD֯ZMKH3{u0b9l/c]FL:lG̤q@ar~~akdg~ϳh}6a]ZJ'BF(Į:^4G. 
CG6aPԹ?8 Q|>֡V ~2ץ@[!7uc?\6oc" 8)Ks8|pc*u[a$s{td.uɌ%y3gY~Feh*"'e==`OZScA$DDc2 1Ju‰NKHN#Yߐ/k3KGŦ&mD7'ҟ nY2?fA]F}?!2G2Єyߎа|I# mI U.ε}ЦWBSЭw{f*DnP}(ZK5V̌tavEkٴ>;4M-OȈ'$ViJ- ׯ8zZ2ի岚AIU d/氾amPsKYa__` 2Ep%BK?}3˝UWɍߌ!UGD {r#Iy!cg60ciod[n ȇlZvPw 2\_2:$(߮v8t|$7Fy^5H{"'m\f 35oyV|D{\:h mFxQ+& ]o|O->xl l*yE$h]JxȀȠFt@~} Z'}RCƭ϶F0槩 |15p<2Z߶ݿO{?Px`vcF#̃UR6m!Ey=͡wnȐ^S z79dpS!QA"1[AkK~>Ei_3FM=xyx/De1v]54.2d$̒Y ^l8qȐm6CȠ;'/G~ j'j +V9t!;UODB,/8qR])Iv:n /T=2Y,)sao?_/:wS2u}qsx)z.t(8G,~mKnj5aQ:]n_v$#n_Sk/.r>PWU?R7/?Ob~u,p%?~my4ȴN{?wI߄%pfc/F.xwdzUn)_~_ޣ&Cn׾}A~=M&jw wq%l8bXl;=Tޞ9~YD8=՜kY}<4}OH]yKD/n&l8ڀcy;gq`^9ܥ|ex~lY(혵;|TqLSpOnLμgK>K/|F(:=G~m UvwN7~IF+esS:jDž.^ x2]/l1}cW7}[|R;>=ՖZ[|5/wt} {>wd}=y D9kZEeqhlJ?igdKOK5&0J4̌UadIgա[^`iꈥ쯴uot) 'cM痛׌y6kIzh5uLFYX2 %lx[sd dW6=$`_<albxƶn>ZZ ue5>2yskʅ={?yXķrQ RkWwxy8?)p^&yHEIaιZƤNetvRn}('k}uor:G7}>>53:Geӆ4=N* y3yr6հ?a-tw8aY/D9[67ߙsN ڏY{?7sCjO*owoV0/d&i2{VVRҲ#yh *p@^ ?2Myq_tv\6 *N1@D<ek_`he6Փ/ÚX_C=5>c791IXnz$b6C|9(@\ž{{$ʋQ~ la*cГ]O[jp|}J.PڀrG10NQ@_wch;OM쑀msSq~W} ?5U B;c+#Ky z.sg U]Gy0P}@ߞxWtɶ蠫q>SD9}@N vS=(/?ErzǠo c ycn=|r_R=y'wj/@c}|¶Yxsʇq>ǖ??z~Qw}/}\>\x򁝡?a8?\mߠ' ׁ~Kcsv\籎u:pÏUAO Syy7 W}q4w/Ğ/Ir$ʑA{HKH4Bi=ө$g{RIwHROs+<Oϳgl1dZ!{APDnuXB"j )tywH OIjϧ&!|Jɑ{Ӝ U0FqD0T]ID(?V'X3"{3Pyb=X[0d='^sQG{ AԞh+ Ls֟^Qoc$M(o r; cII"g7Dz=uJ7Pjyz0j' ^%.1$l)ID{'(D"t^èyd\N|NAjWpޣCt~=q7 s=Nh܊5Qh/tATL$$Ѹ7$At߃iIvK{cL-;9O .Ѹ{]`z q0'7M1qT 4O@&=z HB~yK4"Lq>vKād:=Oj=Qt'~ҸB'Om $T:4/p C!@QN?~s*{c"|꩒JpJJJ:/4n`4xz_H[N~kྠ2Z'i<ҼYLb_1C4˨:94:KhˤE5C`;~>;t dE*)Y9͋ԯi>(L:Oy4~G3ֽy4o!iϦẓ=!迌']HS"~,/YDXL1XGdA8.UDU`Ŵ)yƻ*gUtA02`)+ʨRj`e4oQBTʞ0L2UL/WA=4A\xI(%IC\_I室JW0Rt94_zW5rjp\F8M/uCo1;./ɴ}o2hDR}[gP`@SD+TBP{.q9ĭgTHgTh7=E4O:1gߨ^boSzh=[O uW=z:_4|@hy s]:j4OxWG uu4Q!'S;CvXO]O3u4UC~uCK5 >kxӵ4*fM:~yc X~U?"O Lop\Q"ϡl~)v+D柰[|WOCO0?[t=_?|4V/C~`;!ID`y?dc #;|f̎1gz|y2S>7+0}g!s E,dBʣc~H?q^_DVo(@GE¸z] <|Tm!O<^c<#'Џ!"!ݱ DwEL߉qyTFeb;#蘏}/ljP>ﲸCH~gy!8>P>c d_|=b-tG;b~&;ʍg4.h'xy|Gy?cSǏ/ymgiGdw<%mj<>ÎwN1>oj^3:3>c7=c[>|.QȮ8&gsL~\˞0(`|y~6'o_13gXb#O?d|Nօxu1X|CBV?,bAqY;=<=!3 ;(''X}x|M:CօM-DcM^OFY;??߃zn{NUꟅ'iOc^f[ ՏS]g'YIu/cǃ│"v> @&f&qz^?5w4ag6?&^_!Bq3~a̓1׋Y=[X'P𾚺?#|ˏo4e œ|OxA~[];7IhQxM=>:/\_g~uyMub^'~qpLh=˓q=;\7?cMywq]+7q%3j.bli=><оxqH>OY~__zY<7b1_lEi"{Nxcz{Lp}+mj]:QcGh](}(돿c篓'F`QPNW4|l^~u}Oxl ߧǝ7xz?7&7 >&S`<#oQ&?x ̞<#>^=='w_8?si>"|}>썿O{d<}#C}6Ͼgxx#W i ~hO}Oߏϻ G1?yO<ϏC|2ȿ~@x8lQ) xL@9/bL.OU8|~>P/~쒗_1^$e]! '/"/?< yܯHSIq/>]W^?NChBƋ5a~ɋkg~'ģ8nؼ~9}$b;xGq$S!|y}rA;&I)k 턿oO)aI_Q 3&7ߎqQ|wh*Vڧ-O]Ӿi'~ ƣǏGQNQ\ūW>MO7yG}FvujB!E>Fy 0_q<"?6{ zJ|1Jϙ2$D7O{DX&8Y섷o(?᩾<ʫG&l9SS#o*~8GYꌼ}d<ʋp^Q~]ϓ('sF^zp@zŋϙG(yy/N|AiG{ȣM_7Gy((kB|hy6.:2OSyQsx?oB<{GQv.ģ8''uΟ/<Q^݉8~$7g!ж<*(:o 8TGEH PTXWSINS'~}'~}'~Pi&&ԉ7+ ZNjcnhb̎%4כƆH|O_ⷦʚo߬ǎZ75137ԩi G&xlv ;YAVq#f3_[ ?}HOH~9nmep>~=ѦM&?~w]3tC713(Mzc֛M,L55dyO_(|UW) _5N*<%WzBN4lc~>P@∢nfcͺdSSa47>-:[GImP/qT/Q]-s-Y}S-#=8\Dyӌ}~zCnƴZl&#Sm٭hji6jfބHk ?_ 7}~_9dMfW4б/Ҙl&O-7T> 42uO}{h4YB43CUFZ0ܬ?mhe)e6mAjU^h~_WGXQVYwwY^VNoσG>LںMBBVB˔?NY?@>}}N{ AӖԈ:#7FZ[U6 6mk!>/Sj1~ Z5@Mi^o=J>mHmPKtؖ44So,P}RiȊւO?nԿܞ.-t|: >2O}Y*E㙆ȾZf gtU؁6YCsU%9<0jxuw-Cs-SsMX>dԿ,ʡ(}X,ӏbVS?7fѐyY6- OjǞ_nwtbT>[?Q/? 
ȿ3A1U#-8U~ Ɵfeɪ(zBqZTKGOuڜEsz~,~=w2Q.?G?G?/P#NY Y=` R?;4Ƃ/4?w"pӿvx-O%'Gߥï~eć_a'>O?6Z2Mgjjlb4hLWCU+7*5q~L籟u6Ԥn Tm~—* 6\X;¤M&t~/1 c*++*վt(~OD!1wHam+_cZo ɆSJ'LlhQɍ7ElhؠX~r" [2b(ð+Þ 2p 2p$CYr *0P[rakkN<#!{e^[ q*T܅wJ ˢ]7&>Jգ R%s^w{~4.0c`k`H'pKF Ѳ7O#(Oz\sN>l;=?0яPJXu ;]VsI.SfiʍrsOP{ʍO9/qeS-v';õUmN;3h)q2SAʾCĞW-_O) =}|*e\a78հY%E”cOt׀"ju9+'4~ݶϝ0}vO|ӋɺX+F^~lWsv[6mHhVWrh8gܺKă֍#v~{Jm":W1SdÃՖ Q9'PmWjRQմZEH7B5zgզhn\#myo{.R*r84%g5n#,Wx@l]7+j"[ٜ;_xޢ Q9lMȹpbÍKgRG.ZM[u:;Yn^┯ͲP0*yVEm\W&ӬzNk?L쩱ڴno.K?PcIѧň\Z7%%ةzZr>qG hl/~r2Xiå/GvW/㑰;|UTL=~֋0}q\M.Z"–ݍ"ǂi\gr[ D \׫ǫo f[t:amz]5\!͙89z'7sC, U^4+FY<[Ap3Ԛ/|Ggg{ꠒjb7 >A䲻 h^ޗ4?x 9~s!pk\d}!Vgr:tuS.Vj/ܲ(m=w3VfiAܜQrVWJg8QX5ddrCAA_" afwbۈ[q/҅5 U][pJܻ +-n$6'WDŽ~;ōn7ȩߖSk#3I#mNEB*~aj {[;{[wQBg{^%ʝ2uXߖsqBUއܒ5,j_*-\%([tQ7B3赻G^/jZyr\oyGn}>7 o61Ef|v4I9Mm-j]^C]' ]34gJsdr7_d@EIȠ5Br6zܨCSj,d9nLU+1rj?y,Z%Yt↬ Qfc, v~ƆXn 7}H[@u6<"G6u3GYܮ| 89$U5DS`^2}O1'fUׯ\7\;c<%P~Od| ǖmǧNUmXx/*.pw}gl9+b͌n~];ޫB'«J40G*z>e|npwlI]pP*| +}$\9ק}mcqc:hC8߆=,vs#8r`6Y,aowI[Jؾhp>)*?^*R(sOƳc;npKrN16~g -| ܥP9iv8\Շ}^{闆qz:js[OǏ^%+)!ܫ@nDvX.PhmK3Mop:v"9Ui}pr4Ez8rX1ȯ)E\6zP&fNzЏqRo=_^xjUI=+,ҙ`ōnuH|nBov$s>{Hcu޻Gq:K?ғpXgFnff%f DS܀YO*>#m ƜZ_ aDʊbqڵlVE4T;wyPcزsqt,og^s0XcE\:ON|mZpbCs}GG$l~1\W4$¸{w<u;\nNu3Hť utsi\\n.FnpC׈[MnJ_Н\Ocov$}"N$:ܤzgp J7൮ȇ*3!KvF7U=puc{9E7uwf]W >&7\ri%1OͽW51-ošԝWqcQԉwka62$&43»O/G8^sn^6,Z{ƣ x|)"1 l]J{wŖOY0r!g[e-; LXn+[=?ڃÙ PNN+'\V)^qm܇IWޘ9Sucw'5>_'$qcBNqQ]͜XOgn"mgPrnB赎 "QQ?G'tp{7&r(t3jj3Y9W]Nݕh]#<&[u6|8!cܩp{hϖ{ iE}9 -d ޤۜF J[8k276O.pĩ[vշ-D3gpێMb坹EΗZ)&W;zSËOzKߖspȴS ?Wy\+Ծ٣TsfhK}އ% rj~e6'Rbߖs_޹ÿ:QxxPn|}iH=ڷt3ݲbG1T )ya:[Nu6gN ןO8JX?یٹtu/NoN5]G/U/!>jQ0Vn _U>v;pߟ0PaDrU,e|j׸oe8|gpLmg{O%q uF@`θa7$ys̯K>a!k!W{nq߾t"a#v%,і23M> պw^ %^ o<>J<ߪ} a G KvsQ0obHf3DWM%,2y[WkĝO:y ݝ{ՎVO@eϒK# ͯ]ϋzPBXbWy4庋_$,u?+#J *fշi ߜ/Iw'2Jh\2'm1ƤCiU.7ATǚ̓+V,xѓPY=1+Ix{V >8ѥVt3'GPy{ mk'[C(1l\9Y8ѽf?8V+$@մᚗ%CŨ =w","t$a:3;'=Pc3 ʓAϒYP:,ao'M[JŶPH' UCPzȤUThL|waɏUȩ'Xu`ձL1y-K@eWgXֶTtTG6J׫F|շfO9<۟閷 *{ԎOA~QPSUQd]c7Fh*QKW<0Ho#g?_mW7_=is$!#L0\V' ~s$LZ~ aU2Wz+춼% Xiכ0Ϫɢ_4WfBN ggf@p;q;D'O,{^Fy|W}OwDuj.aw?(s7 0[wѡ5o 里9BIJAr Wx^{A̡dJl0gy,n W_} kT<9ݴu28Bg2Mdc쳕6W4 k;zlDXްFlW0Ϋ:}k.ѹd> ~w _Fxmu0j~ k~85a {Gޢ}ߺCi#j2E[j]1f0jyO#gG@ͱpG'!L~Zu[[t+1 -֛ ?W90vˆ{>yqm.tq"l, jUK:F~$hU""?uS҈0dF ]q= o  Gt<ީBL4k3.Yvľ#X^r0J&`soؒ00av~AWFd\xO0fmZ ~CP%!n&{ C:UAx{W" q6omC Ik@:EV/Ok0Maa#J;*hE^>x8lUu}x@~)^?02|bS{:m!{ 9YcNxmqIנ_oS$7  z yӵAXkAfr}tN%,Y7 Qɷ[sxYRB[DG<*",R[~GZmt#vvɑ?KX4Z°Z;FLXtݧS\rZ7[ވ} ʧ,[3<;mJ/l0j93T|ݵ(aY^R)ң]UudOX*69ae>PYm٬G-uJINXuiEP?e耣P&󭡼ԃ܈I"ոv'A(rX:Uvs{YON(q '|JBqi}+ K_ aq_M"rZ,'Jy^NBKɜx+{Uڏ\[ ŝ[1#,{S6vC~NE^6>NNmW\(#}TI.?F^s+ 6'&6|>b(Ϯng蝅9*ӲkU3,oLXqPsY%: WT.JVNX+/9{P0Qw@HElk*/j {5TN}&TRÏ$8J ",Yt^¢+Q#K?toJX1m9a=\ e_%:Ve;׌9(?k g#J{u3jWޛZ _t',?%e/{DXqDo67iC7NʽUAy-"ѕ(KXaKTˮnݠsڈ"P1$q݉G=D=)r_Vۆ[{4w2av_vB˴G<+k!ע7Bq }/)Ǐ;j}g vуo D_.. iƧ;d^?a2RV #(9.rIX=9;kn0N/j+;m0'W, t՛C!_@T&JFWT'|:a* >*Fzˋ #DN~Q\U|ނ<ˆEγZFGxZTC0oEAIwJw-gP6&a8\ [̟ Qo"?ކ6_SZ G $,Pozf:a`uaK|ZhU=2UhEGv;r^p78 &na~a[NP402D5Ͽ"2%:p!x#7)&?i@&rGK?8y0C!"N|p[';t\m΃[mkѶٿe6L%fW/hMf YY¸*+{mK=P/nĖĝE\>a'GjDφ!۷3ӒY7|۱CzB㧫kC^m~3;ŭ>Vx4HPU n{ & 77>i)FԮYB^w9axMq̽>6N?~;X+A֤Q er<>aDnPa~} C>Z0x2A3G(x qB\tF<;fv' (y:^0ҩxGI>׎S!df{!EK yռ(ΗcEF% \УلQWhkK\xwkD[ $|DŽᓕ˟$N*a #!o+^ݔ/rG^U-WEf$tl|&-6zd:Vs< \={qD}ZDO+Ynw6烳f29)W#7]~F6$: x=Lp8']pKoӯ#t1IMsk̓u\zI_z7"QpfD݀"p$ 7 ' Ѯﻛ pZ^ݏU}H?p({#>,؟o@qо#ց$!-0f77|m.e O.]D](~KZ+NOX2bR8H,v>;ua3;K&xm$J)f 󪺭>> 5_GϜ/Dt}d0Bg> \ۄb6 r9L=]ms<ޕs/>PΫTmF jǂ;Cwu:ɭ2k.pWL\X/AufwU?\lsMf atlv'YV|8ljAAF]f~:t%8j}x?,b^0~l~n13Vq\нk Ǭޝ/m9Μ0,! 
Vhm-/*]7z쏩`2 Vok]_45qtǵSFtp .1{}]ʞ_CO6'to=s唲eQW_y WɃ^/[ 52 ʎg>]Bx>27}cVUZ'$ {an:=Ѻh<6H~I1҈-g?|"u,n o*mഅ ߲ 4[;a}LK!|%kN* cd--ja~O {¼{  ;/cIXV(r <5d#M 6e6gӒ|ߘniAh?dRǞjbչsV'`^\x:Bo>x=xPpB@Qv֟:~nDҚmZA;Y)GI•uFG/WAkAs=;lk(ӱiQ?&^z*i&. L3w״͐t`6BHf Sn/h&a2RH)jSb'c>ϕ<4bKV>'y:Enp1!B殸]|_%tg3ǍoYf"a#?sanR0t( 8#j~tg #jm8eJu|;Gƍmo,naڜ TO) bP!K}K!8ka'5bJ䷍ r0!1lCv 9)!]!c({AUڡ! @ #TWc=@=% ħGxBӄCt8"L0r!r5nV-VUj?(o>RQE!dn !oƙCNH^O*!*Bȗ7+!ls-xyGX;FLV% ~u,ˆw7LR^IN" ~&qɇY~kǞVUP5!q{v{!x GbX6p^ja5p'ݦ!Oa~iS5_l'cu€TS!%ل~ '~Rzz]Lp՗Ы`]GڹɋnT}!2=ytmCfE7 õ{=vA/v-a.:!tۃ!D~^SJm$ PrC2r9Bfԍ ӹS^B_9[m-LT'}~BHm'ߤ?/s:Ck3#’ =ޝ05q ;rAv,o-pJ&Gz);U-a!ҙ!h59'!pQO^yto){P^SXWݜ\l/Ƥro nʉ}+nQ{ݴC-?|d{Iյ |n}̤eJ_t}il?NN&ۋWxg]tۼcħ G_3]׶6ٞ?:`&vkt{-jG yThJ˿>K ܧbΞkM*ˣn!m"˪O_Ɲڡ.S: Mۄ\̰ sa[=u9{;xqb|sSr Kϛ_0ޅYVYUxx@&ۋv?ҹe/w|?/g<4?sE9acaj*M;oWXNefII~>5riMi_1#F -o9/1j'M{lMy[4 ީ lZoOj/y\97ۖsIdkj+t^^ap"i~HH̛4,xZz@yS7ijǻ˸u=˚s ;?sxUX.wJS< ""y$qdu,~Fz"'>zWk a˔ӡHBXzg(|s5¡9=m%]̚6]Ilk_v.тIon(^(sNBX:|[P*TG\⺮\|_$z= {$]\;GrM20>P$2 j\'2+aZ!r}{K!3@| |KܔqS'-LjҎMu4O~S;CUY;ZL+Dsxu]g!̘RjP4}QLP>³  ƙ[.Ґ5ew ]3w[n Y;GB~dqӔo1߳񱮯2QZ_r1¯$f=J*^-=7-qqu YAҾ}Xka2ldq a15BQaBm-Ap]+ k[˷\2%>c[(|'޷xLw#CWgBc tÐUv g×qbZ =qfNP2‷5(oi% lt#oaOm"n:9~ !FZiJlYnśs窪xM:eJ.`l}z>uf<.i6tzh)?8\*M6=ۂý/(G:'Umen=7¶:oo@z@T!@ |,HJfGj_b`sH|#rpninv7}'AԎ&Z)mHpZqv0A2z؃key1ӫAL(jG|z(g=OC}NޏK Nvlʞ~t;ޱP7}?XL4A\ry^;xjOPQoNSݭT&|̓AsGdM`\-Τ 8/l|}d` Iw#W"Z,G g>f)4 7JQ7 e4;DZqqcE-فOɲUǫA0Af4̾p_5\ޭ>j_A§Ep<<<Q΄xԓнY~g(;|f'|kԎro`bۇ Hvv nsLn3T/нF!A/,Za>b݋#q:#?[ &؃sz[ʋ%g?%p#Gf@[ z7}!ݿðO'^gͅOVJ!м3CegӀT} 8+;G_88t^^u_ߟͻx%G|}|7{iJ=|VALnv >Cg`Cɭ "Mr>=Bpk֖=ݢ?F{(Y}+ՠCHߐ]:8޶#8?Sxpa\~QП}`]G?:miއh^O/^7h<߭{Wt4<]҂N1GIwL7J=ma?o}Q"}-׮ a!`'`cp0A$av 501p}2zhgO,plx)xMbW&Q4 1zsᕛt0f68>īG_I~w; y󊏌_QOq0n@ɮ>U[nH<6;Ⰻ LQ_Q1/B6]S pѾ0~1{$\M= x6~0w'CÀ5͵)>b纫ayhO;'jr {:Tr;794K6^ȃo~0GS+[ ՛鉋v'̨^'SʛMq;ާނ<S.G w'1OC~${evO7wu7]&8nxP|LgˏLOܦsl CtYE^I=:N)NY9hO4ެ? 1 '?g<#6v= 7/K<0Ls!qp;, [] gG1~g~9џF8i R'z̗_aE-L]҅dwc{$X|7 ?q$3^BIfB o&1~L?>| D W᪟:&Mj3&iA˧Ø1`[!ċ۹n6SR>9~L߱?EkX)Gn,Ÿ _i'00%̟1fXz롺 5޻~N@V>gΐ b8Tg(ICXirMܨA;`|' ֵYVgb!GW>&6\N|YnT4O0?Bѳ~΂wz>>Ï'?~Ju?g'iP)Wp ֺэd/ ԫ6QD0?Kd5TWA%z wϓMr&6gt*Yuv"!diR.q4c>,\8 ꫥ!?b^b?58N&2hz7Ͷ`3#{Vz2x=~81LO'#Y>IԃhƇģ}@xT7Je2?|oG(uxQIQIhR}ɁGP{q̲Wu9qLX0D;î6a QlI~k-Hy91c ka!6Tf&#X'>euj>j[| ?{n|Gb=y$v[{ g o1~b~2#M}uD%Qtb8vp^1j\Uuxũ~WݪE߀``;9E糑,/AA{b|?@lޅ3cX>K:\)_s% 歟U}D21wOțX|Y+Sl#4<셯NSЏ _2>@{ź=-m~kKwG=%1~L8=Me%Uux2}Ig]VOc;oy̯Ĥ͞ rXk_!l64;l*Sl3gq`!=G9s~eex?Ź9O,BNĤͺ&x8}3SY~'-)VQGS!Jd1c}b~4i糘e~ +~9P??Ubyks?3#sh~NV?}:K:'Qo1â=y![<qLx~bl _wnvDzN14KaRZovۢ!GY永$aZAOwqSOCDI2WM5ʓ?y¸8= <]VY<Þ3{Ib k_cX<x4@e}* L&G)+UË ?UO. \{'?Hc '݊7geC.ϒƒLO‘gY^{/"^HטG2?!Q0i 7 C }02f18WSCF>͂;>KJ<ƥi,LcjHWa0mӺzOd$ŋ9^&O0~eq^vw^avbz6زCKҙ}d"t^x7^30= g/gs|TZz f(1Qӧ.a (6QLX&#[C{3>3bןu*2}'훣 #^գzB™1<3C~q ٮ !~Րzv_5jm7Hr'd0 &wm"T=OɎ=ny} 2^;xaU x h b9=VŲ&3x&8NJUJ>Oztfi4Ǡ2<1t[xfyB6eV/]!Ճ3ΐ 7LdhALM7 o1b|L092/տYih -)y$S-HvyrxEm߇ZϢsGx0=c8t wiNGCn}@9}BD!1>.buNb'iB!Mv_ YXXݒGGr0Ec|W"E=Z{eQI~fE~VZdDYv,by|~|ͷBk_G8^>a_?=L`~8懰upʇ PGR}}c9VOQ!Z)vj¨f!FT͌|&6|h=aq+%Ɠ8ٌ_s*N+)R<"F?~W I>3+웯ji;;c=akG|]}Wl9|ʀ4tMq`& w?f7LsVc[yueqA><$M>|l~ ~33Er+*c{]2~+duV!{6[ 5pų/$&Qf,3|UZȳ %V%Br4fϞ0{yƼ7 Wjw3D.IJ<6s?'b~.CuzyEﲺM?Oˏps"v\Ef(6?Vi ^Y3{cvJ37٬?|3?|ubW#|kflSǂlvyv,Meyj&Cܘ?ʶ=_so?,zXt:ODC~3Nv t-lLa}<ɇ?#6D0> gƸ1_ $6KbywNq D9QaJ1=x+L NZ9OϜ(R0_Ozn Yz1JOW )dy&Yia}&tfϘO'?~r3 % :W e)(@uX1a`y+X\Kx@}a4~; ȟdW,*b9Hgq']LXOk=$&G\|}4 mQBvqB>'>Y܇7=y G{{` ס`Bv^P }9=:5'S|n7dGY^uV):zƞxʻ1ļKf7/6*8GY;Ƒ0( "Lz8z? 
zx{~r3;zVZ]m7~{O9>pzz?Bq@,/'=<7dionL/i}{z1>w/t9I#=e { :\GCx0`=]'˺k ?PNZGV^׿>K,ޤrq8M>ܯ[~C\Gl;u~<οm}C@'#{] tch{"YY}4\c;)FwoIO[_^l4gѺ>7h'n-ۿ-gi5ҸoOȣ_\Oq')/ox@a0Fe<xޫ%|H:KOlaL ^ \Փ,藐'Q~\?r:'wO a}@G\o<-9i{bCpoboL1O+9~T^S Cu3W{('(%ƧNnCH`'*v ;i7;v>}JD\ﻺ kI?yrҺ6׸1++icq?<q /W!NOq{uPh=*Nr`+?S?G'U 5=j\w@ƙZد/'q'Sz&f:7S¾zzIb ]PGW#|!8$ɸ>l=:K%\gN뺑G6 lz;~O1{8k1Dpo3\ۢZ3Gq_"|>S \D=ֽ0縞㢃J3~ףbUOWnwn#|=?&jؼ2vEpڇϣqxx4gKmȟkվt !c:i_L?ȏc^^ڋ"?ԣ}3deu/?#o ^~.@S֬j}+GN^G=%DWx?kaαǝeGQ^ :6]zi^>JܑG3}02N8|QѾywy}X گQ}W!z,rl Y0yB$e<Ɵ b{xx3s w|h?Q/1xǗ\q<紻{a \T=Gr,Q}>8+z.GQ7$q==ev~'/y=grް 1p."=Bu'Q`^_O|>BqPs^?Grq<`Զq}+w?GcPyt!:>G; fq:?ː@8ȣ/gw^{ {xV1}jqV%`݁W#TguG ڧ':5<%?͞o}1楨_sa:%#vpHlYrGqY7ɉ.λ2-33)AFy=[Mkri_YύɅţ,x;O',ú%~*S\~"?1hd'_vm9L=g(Xú/{~D0x_H'RQ"/i[L^uI(_8/2b:ţ ɏ8#/oxutKGߦ|Ǻ9#yڇ,2ml|}φP< )c1}k]qUQ߄x ף=Ӿx=C}ds&z?qK_oȌzƼ3zƣXQ $LO2QQ;LG' 4+җţϠ|XǛQ6>=>D}4H?q-p2fWXw L>SbvO'ȣX'}}V|s:ʒyGC|#s6{a^e?/Qbo;ȣO׫&nmZy"(v?a47Ho&")VZƺJr J>3>3>7gD >H >슖ӷc =&xx;bb q!~kz|S3sSNzfj\pdk`jdQ ?2ol6. \Wt XFzZ '~mdb|gx׵iWG}>Yb͘Vm2Bddj-MC#-3[̛0 }-sAq+@a>Q4g, :VESٟiE'fZfNBi}s}b-&PffjHk Ͷl2lܦ-hBkm/uK#+*.?6)-Cy0c>>14qzZ[7ZXjYhR)5Si/PwUa;hxڒ^GU~d_]0U575(\kfCa;fMU-d>>e #6W =Ư[K^&I ދcG >> y.?0۲&bj㭟Z > YZi#Ӎ۳ޥOGGF}s}b0K3372S%h(fbe׿JqOcba:U9Y%vR`<_?N+OjNWNܯگgX9Ghw%S*c)!+XYtZ?_s'W4?wXӿvxN_uxNϷ%I8dhOUï~u+G?i?FKLMUMLf{Mb*U%f]}U&Ώi<3Ɵԍف*-ORyԆ ocADp?ݖO:FV^YAaXee9yqc^ڗO}~o($)? s%k \`CZ pJt 7* /ߥT'°%C)2 2ɰ/ 2p( G2e(P1 *-o9MNƣu5(^6nuz(P:K8}" h[kR_(ʙ+/ EǗ{BnvC{֚ >tSu~<JUGɎRYc5Hβ=NAXأ@(*]J_:$= ,̛<3V(f杉P:w儅sVCy- ӔnɄ wr8 vkͼҡ$Rb.Bfw} e=j,Br֥צݗ#zYi-|}i7|'5GSaEH֍K"tGI:*wprŻCyؖP驖ieuPzX(P7v('שٗ \Pyg)F݌;oūKZk;;Jx]"%mfz/im,nTj"YS=q-;=PւkSd24KwN~*iˋAEш)kWHA: Ptʩc^̳q/~!N̈́c~r]ܘҡ͠BoxPR-N#9¯ZZX _)=?L;twšy7I7= 5ډgVW||@Sw,IMuVY*tޭOm,0jk,TI%9?WY;V7;ʸŅJ~U+eMqʣ:~#n@T ozgBe'&,3IIZS4ʶ(ocʭTȌg6u9jΥǟr}ځ#&fa*P[5^ >m T\}F`2 F(r> jmW²[g:_ȄM/2@m *˄xhZ|m8!-{0ESu.ⓟ:AK^u E_qe*.? +nLFX{8'(h2v;Fa4aWt1v+3/CZ0uw( -(J|bd(ڱY$g+W? ,m0IBcWNXT~蜊&-y:6HٙH}嘮x?w{pnpΖP4j_@ٍ{߇yLo.?I3|/P4qbն̂=BWVm6sN޽j<OXݹًPQ}[P3͂ÄW@쨗Cՠ1Q߂/;˝~g95hrɩyFsu7Ss95_w=OV\{3r~2&}{qNY~,;Iڽq0A'Nj˔qVՙ\'–q;F혬׳}4Pj/bfC>~NVwrz TUܻ q74@Xqً-ȷWa{J'\vҌ4uu B[AbۅkbDjH'<0OsVD(:|y u,73O!"OO͑oxi&9>W|߱|^]?P{9kAIWNNQr9cNjw)_;Gߩ˻6#T{P{i_C; +w ԌJ>m9u?R=qMBɵt{u; ͦ K&XT?;͵a{eHkuǵHaaO=Ont;:Qlc\FaTvRPsCZt1YwTٞW#_]*`=}Npn[I 7;Z3|{/jrqtz#aPo!gGJK+"rR: J YJn>qۓs xPxU[xŌΉ8\xxCr#|pŃBd_-ʵm4|Go>Am17^*g|]o.+_,U:|Z"ǽlsC/:u,+p|'qzÐ% olfݻ*17ݎ΂Z.n2|^n׍&cߑ5mfIIڄ 9q{/#I b;Bk$gqUs'r@] 2#n"R9p⯏lsh}VgjI/o,ISC!SZV;bAw3-[=* xߘagC9Oo{1$z؇gK?Stcv1 1OL\^0Cv֏_'?) 'Woof~ yn?ܧ>6WK+@FJE3cgfj[\25Y҃6Hڢ"-JQ 7׈GF VܵiM#z P|#$}6aoT)$b1K=Q8r-ᢝe:*G%}?YXyÍuS8rm|0x3pwY,ge:R0(R;vNUk?+!DykL㦧d=?Aeaҝ9]kt^fMs೷?oIq U jOv|dH{ aNۯk@ fC^DB i_!qvBِ8{g]P 5Tw=Qyˍj@ 3||m͝0s邮>~1|]}j'Zu- }$s3Z-ON"CUO՝^[u*}Q&\kǐ+ސjVMmc\j+ #^! ])iq;a͘!q~ķpZiÝ,"1_#[nպg='[9KS`೐~6`A¿j< k ~O-I /V2 +DpY%]@DA{&m9K'&~ : tNF kF퍀 K2v }T&I,,<1{*73yD𽧒뢅"ϡhoA%6;JXlAd7MXڙ 1#K[29>J8tRIAiזּnRoAgZAS@Q٠PF%'>&aiK-YcFX.NOa;',.zkxK=?]x/2[!g+&Mp $rL0 w>g?=&uPJX~ü|My#ZGŠ D&.",_ u2a%QHXko2!z0ŠOJϤ[Byj }yBOӱ7nj~+/GX3>á>?/[ZrT%*2Dq2KmU[~7ĿPDN [εjfMuP^%j۾1}UӫLr}7;Zln ڇ\{-ae#RtZ'nMXP5,=da^}A御זoʬ7']S"7CЇ㗫gͻ?߈7l_ԟfv\ȳF{T6rk8}ڈ6◳=-G@MOkZCNĥ~rV=ȓ)uxnnqXUe67'=^# / ,$]o WS7͉a};>,|o&m =+5캿mx_).R' zhoCtٛ1.G :vO'Qwr'?ֶ|€4oDnMuͨ=6f9'9z=^Da&ń'E&n:i;a ~Zn*1N$nTL"L^3]"f!aL愔X;lЯv.c yWp5x$;jO >dUuC g*>$tWrYDm^}r ~*o9;}-jc{Xw t!~)_~Vjjو}5buބ9nG~8zC3Agz{EKmzw&CfCpG{85 Nm sg;z=5M% =->HHzj>^+]gWwL{WISbot p=pO*-EϺԥ8倸B;MzOt~/׳ݶAަfN',j'c^*8@X&mZ7¶a~ֶub6ay%0hɥӂG.4)LˈCf/ Cٳi_w8?JUoDوˇm$RC\'}.au˷gΩEXVQaE|fx[e|;e~\=QSiDXKDȵ_>H^jw*g"?5l$lQ¯[N:ȉrG/D __[{Ⱦ j0?TOR*ݾѵ/4 . 
"&u+ʈOev)VpU׈~W`8PʒNUO:gl{u#tM|VB63 t{eUk~/~9 L2>R)a˰eRJW(NY|S~ƊiDvO&mwћ3zmhӇbϫgjKƻs3JO 1Ү&Q=N068;$Ya򹕧B*zhv}aD:kcMrm*d3no0>͵ sv- K3c*f8xk}"xwy5BB!^Mt蘳&[UL~8^$G!{?űww־3$Ls| Mʲȥ׺ۂ6] |*% nqQXX [46Pc1=v*{51$"`i KQ *v+]wV 4W3Lhb/9^Gh 2voR/&ke~Tv1㍰A"1wu c6r&{\0Ơ?f E 琉>?>z]~w|_i"7w/yƾ}S?)`s+f[>tIBoѵ_֯zŸY=mU<Ε OO5qS ',`chw@[j\hzM.Ls../m3q]xᴵB3G'WVr`<3cUIzpÂ1zO1LFxkkzWxdqsYGOMqz:'ՎsnXq8ִUa"p2uzV2(Ao8yxc/vYsׅm7m qQOc0a; ʊ0?MvMuz[lkr. {}Tt1֜(jcS)d]AEc~Ō˽1s֤LּczTAFVD&WmV?6P$ix:ѭ"|QȚM}^bTZT1fUG2F ;"1&jK> cN7g݁/""?VDO1 \f%"&n8c-?צyCYUR5]3>IwcbK1&ؓwDvvz=c^̘۵j17jݏ~&F;XFW52i;H C7a<;ϛW3rK޽EM]np`\`tU4 Su0 CN\pU}߫8>"ٴm٢w-etR~;Φwݓ攁Q1pÝgeyqg_Neܫ%TO-N/*mZ\O,Ŕϫ>yt}&<.e>㖺 ]-^2WWŜF :x`!}ezctgJ~VHDտw9k"jV˴m"хc+{-x;|8CGİ.CĘlwTk:}ɘ8ag./y_zcj)kX͛] ,.*=f:\TfΌ,50~7G-]Ng X>+F'lOe Z(>at)Y[O 4!lj6sɍ$qW̎7ͩđ˙V2q{*1&FւQmi?}c ;]c ˿c-WUyBݴlJ7g7JͅbNCb FoZZhϨlqՏ :]aT7_=jSDqM q zV-55BfB3 &3Ku$+ncpʵzUզ[Ԋ1 Z2{ _T?Kqm|ZAFv'y-u}&AnW}?=-n"X{8chګ3o9hhccKqਢ8FͻB3wZ<AV[ga N$B#|*L.^VšS+vB=ݎ"Ga8~ cboC?a~s8cL<ܕ1]]1Z9]_gjĈE!įf"\"ճz=ԥF;mO۵IzU1cT =_2B挪[.ek\U u m~Yn~m:rnsf-xȽwބ 1/kQ;l3r|BFߞZw0l]^#e;wuİ[FXܙQyeQzkCwGof!4łsBJq^&{5Wv10n.e \٢[֎Se cq׺˒Og tFw5̨ڛYFu}WnZ̪XϿP|N4ɶ>BW{EZ@uveT >Gwc3gTYtvovT7ֶ*b =u,w#K+~u>&xhJg׬;¨jxn+? f|1c`̜kS` `jv)wk!B}t_Bf=/۶/YN[%4-:Z1i+w5aTi}[Uowmͨ>5D5CCfճw nxd FM#ڗz{ZFh>S/'9nuߕJ( 6 ܾiv{xo_wżw4Ry-6_7|1.xMB*ޫݫ-&ģ<1~rY3ӍO:ohV*{"Όs>rԜ3}hТY[zq9gq^XAQ_ɷm?lTq}"͜}PsYsKgLvisǑnׯ=gǹt@qߣ׻Ggj>l8O (Kb5oht!e^9 gU;'NW|0>2Z~cq[>u}/9*ݮw8|pr׃2nS]mA19"2k_=w`ƿ#NtꚃFCuJ\[T"-.wqU7u~ZUZf~fxaQZ^ff{Qb;ӕ4V6HS72rhGGn=zI|ע(z~b=mMExG1Q6C &c6z`w֧iՊ;*/n,?$W]}coM>HCG~YxXG(vó[E Viеԥ9;̶x-%WlWxĉvsEƉS)iG71[S9%i-xx]W}sxRc PnXǹv(^\>.H_:cmS=B0bS~$0iIxBpCŕ\/|8qS՝S]<wM{->)O>>'[=Ug9(\[=WZ~=ӣDCW8x|tO8 vG7$'E0B]QxCNCA^;|W^3a="9ޠ_%wy#r׎z0hF\zH??ꡍ^%,/ṭ!zv&>_W)Sk¿=Mk8ߢy ҃?wȉ$9GIO?쎸= ) '>yF뫔$ᵥ/rh7RSv!;|ooј{TD ?eH@"}A("hцό"K}贈(1-J1|PDѸE|,ؒD'T+^/aT]"#E ?d6DLSĐqKDxD^O87P6]lV{5ڣ;%%ɮh~4[^5K7,qFBM]#^''=7%ӐQߨN*m?~ɮb5گDG%|_Cr|hzG>f0_Qj"/&w$T"$Z!E r4*&`g9?KN_M=F__^D(e?x;RTD "jV/q;C.)G| )q'7;qw:츈5Wf5W3yoa"RṂķ "OAoц0@T?P!ATπTQ`OL oBg%GInc%C~{ȥw% [E6Mٔ^C#̢M̦qgSA\[.\,3\RҸf>rΞo<:;7\F?DV/DG" ωd$C|.{S99,w}C\{Gu{ +#?Ss]7ÄE!'C]r]iq=;3ȗ~:{&$^ˤzg )$_ t^ɟEMk }'x/a&ə"pG{=;}/ާyڡ$T{գi x/x3MMp|PKqAA%;Q=OqGCupEyTOfeSܧQ^ˤ5"EJA$g&]}?IiįiO!\ZT+Ox+Qodw/" |E^YT'3SoTȺsL"qz 28 )gṢ+琏o9N;=wnσhܙ;#J2P`"oްxhFBTX!b~P? 9hIw<#C]r8zDzG8?t #3(^IT%Qwx!Q;#kR Q7O;&Nx҆۹_B%r(>ZZё)kGudGnvy[4<1xK6O4O#(8;ˈKIdϧxu=<+G:Cu\O{[4^I/w)daˠg,Z?Ȣ ?d>~%yɯiGR<"{Rށ}귔G>~^KO:ė9ϊZ\?8;*(<*4?j)^t'2jp9n=[Y4/dDt]@s!^OZʏ_؍?H_#=KN /~y:L:4ʣ7niOTny(+}T!os%^*Kr>!XO|O8<:+2y^?:87^72/#c~y{xd:_~) ^i诀ƣz6ċ's3|xF ?`} =ˈx3GA7Yo# !87߀h~+_緲oTCnz;eQ}vY){>/3C FInw/0W'Ov O\/o ;!&>C~Rzy,C^E.uZD=x~o(Gk2OBo$O!Ϳ`O^w54Oz ~"cG*|q:QOP_S}OR]/; w?zבW\GoI>C h]/ygdG~N`иrRryxģ|~z?և/_?OQ_I6i?|naOyѺUC|~B~/)G8SR,+CNǐ ;>@x}s@\Cȋ?}9Sއǰ|b?~]x5N3g<'f<'u"!>Qb<̣8E:H|!?.ȍ׈K?yBǼ9[4>^E. 
robustbase/inst/doc/fastMcd-kmini.R0000644000176200001440000000134014124272466016764 0ustar liggesusers### R code from vignette source 'fastMcd-kmini.Rnw'

###################################################
### code chunk number 1: init
###################################################
# set margins for plots
options(SweaveHooks=list(fig=function() par(mar=c(3,3,1.4,0.7), mgp=c(1.5, 0.5, 0))),
        width = 75)


###################################################
### code chunk number 2: h.alpha.ex
###################################################
require(robustbase)
n <- c(5, 10, 20, 30, 50, 100, 200, 500)
hmat <- function(alpha, p)
    cbind(n, h.alpha = h.alpha.n (alpha, n,p),
          h. = floor(alpha * (n + p + 1)),
          alpha.n = round(alpha * n))
hmat(alpha = 1/2, p = 3)
hmat(alpha = 3/4, p = 4)


robustbase/inst/doc/lmrob_simulation.Rnw0000644000176200001440000016447513774561564020246 0ustar liggesusers\documentclass[11pt, a4paper]{article}
\usepackage[a4paper, text={16cm,25cm}]{geometry}
%\VignetteIndexEntry{Simulations for Robust Regression Inference in Small Samples}
%\VignettePackage{robustbase}
%\VignetteDepends{xtable,ggplot2,GGally,RColorBrewer,grid,reshape2}
\usepackage{amsmath}
\usepackage{natbib}
\usepackage[utf8]{inputenc}

\newcommand{\makeright}[2]{\ifx#1\left\right#2\else#1#2\fi}
\newcommand{\Norm}[2][\left]{\mathcal N #1( #2 \makeright{#1}{)}}
\newcommand{\norm}[1] {\| #1 \|}
\newcommand{\bld}[1]{\boldsymbol{#1}} % shortcut for bold symbol
\newcommand{\T}[1] {\texttt{#1}}
\DeclareMathOperator{\wgt}{w}
\DeclareMathOperator{\var}{var}
\DeclareMathOperator{\diag}{diag}
\DeclareMathOperator{\median}{median}
\DeclareMathOperator{\mad}{mad}
\DeclareMathOperator{\Erw}{\mathbf{E}}

\SweaveOpts{prefix.string=plot, eps = FALSE, pdf = TRUE, strip.white=true}
\SweaveOpts{width=6, height=4}

\usepackage[noae]{Sweave}
\begin{document}
\setkeys{Gin}{width=\textwidth}
\setlength{\abovecaptionskip}{-5pt}

<>=
## set options
options(width=60, warn=1) # see warnings where they happen (should eliminate)
## number of workers to start
if(FALSE) {## good for pkg developers
    options(cores= max(1, parallel::detectCores() - 2))
} else { ## CRAN allows maximum of 2:
    options(cores= min(2, parallel::detectCores()))
}
## Number of Repetitions:
N <- 1000
## get path (= ../inst/doc/ in source pkg)
robustDoc <- system.file('doc', package='robustbase')
robustDta <- robustDoc
## initialize (packages, data, ...):
source(file.path(robustDoc, 'simulation.init.R')) # 'xtable'
## set the amount of trimming used in calculation of average results
trim <- 0.1
@

<>=
## load required packages for graphics
stopifnot(require(ggplot2),
          require(GGally),# for ggpairs() which replaces ggplot2::plotmatrix()
          require(grid),
          require(reshape2))
source(file.path(robustDoc, 'graphics.functions.R'))
`%||%` <- function (x, orElse) if (!is.null(x)) x else orElse
## set ggplot theme
theme <- theme_bw(base_size = 10)
theme$legend.key.size <- unit(1, "lines")# was 0.9 in pre-v.3 ggplot2
theme$plot.margin <- unit(c(1/2, 1/8, 1/8, 1/8), "lines")# was (1/2, 0,0,0)
theme_set(theme) ## old and new ggplot2: stopifnot(is.list(theme_G <- theme$panel.grid.major %||% theme$panel.grid)) ## set default sizes for lines and points update_geom_defaults("point", list(size = 4/3)) update_geom_defaults("line", list(size = 1/4)) update_geom_defaults("hline", list(size = 1/4)) update_geom_defaults("smooth", list(size = 1/4)) ## alpha value for plots with many points alpha.error <- 0.3 alpha.n <- 0.4 ## set truncation limits used by f.truncate() & g.truncate.*: trunc <- c(0.02, 0.14) trunc.plot <- c(0.0185, 0.155) f.truncate <- function(x, up = trunc.plot[2], low = trunc.plot[1]) { x[x > up] <- up x[x < low] <- low x } g.truncate.lines <- geom_hline(yintercept = trunc, color = theme$panel.border$colour) g.truncate.line <- geom_hline(yintercept = trunc[2], color = theme$panel.border$colour) g.truncate.areas <- annotate("rect", xmin=rep(-Inf,2), xmax=rep(Inf,2), ymin=c(0,Inf), ymax=trunc, fill = theme_G$colour) g.truncate.area <- annotate("rect", xmin=-Inf, xmax=Inf, ymin=trunc[2], ymax=Inf, fill = theme_G$colour) legend.mod <- list(`SMD.Wtau` = quote('SMD.W'~tau), `SMDM.Wtau` = quote('SMDM.W'~tau), `MM.Avar1` = quote('MM.'~Avar[1]), `MMqT` = quote('MM'~~q[T]), `MMqT.Wssc` = quote('MM'~~q[T]*'.Wssc'), `MMqE` = quote('MM'~~q[E]), `MMqE.Wssc` = quote('MM'~~q[E]*'.Wssc'), `sigma_S` = quote(hat(sigma)[S]), `sigma_D` = quote(hat(sigma)[D]), `sigma_S*qE` = quote(q[E]*hat(sigma)[S]), `sigma_S*qT` = quote(q[T]*hat(sigma)[S]), `sigma_robust` = quote(hat(sigma)[robust]), `sigma_OLS` = quote(hat(sigma)[OLS]), `t1` = quote(t[1]), `t3` = quote(t[3]), `t5` = quote(t[5]), `cskt(Inf,2)` = quote(cskt(infinity,2)) ) @% end{graphics-setup} \title{Simulations for Sharpening Wald-type Inference in Robust Regression for Small Samples} \author{Manuel Koller} \maketitle \tableofcontents \section{Introduction} In this vignette, we recreate the simulation study of \citet{KS2011}. This vignette is supposed to complement the results presented in the above cited reference and render its results reproducible. Another goal is to provide simulation functions, that, with small changes, could also be used for other simulation studies. Additionally, in Section~\ref{sec:maximum-asymptotic-bias}, we calculate the maximum asymptotic bias curves of the $\psi$-functions used in the simulation. \section{Setting} The simulation setting used here is similar to the one in \citet{maronna2009correcting}. We simulate $N = \Sexpr{N}$ repetitions. To repeat the simulation, we recommend using a small value of $N$ here, since for large $n$ and $p$, computing all the replicates will take days. \subsection{Methods} We compare the methods \begin{itemize} \item MM, SMD, SMDM as described in \citet{KS2011}. These methods are available in the package \T{robustbase} (\T{lmrob}). \item MM as implemented in the package \T{robust} (\T{lmRob}). This method will be denoted as \emph{MMrobust} later on. \item MM using S-scale correction by $q_{\rm T}$ and $q_{\rm E}$ as proposed by \citet{maronna2009correcting}. $q_{\rm T}$ and $q_{\rm E}$ are defined as follows. 
\begin{equation*} q_{\rm E} = \frac{1}{1 - (1.29 - 6.02/n)p/n}, \end{equation*} \begin{equation*} \hat q_{\rm T} = 1 + \frac{p}{2n}\frac{\hat a}{\hat b\hat c}, \end{equation*} where \begin{equation*} \hat a = \frac{1}{n}\sum_{i=1}^n \psi\left(\frac{r_i}{\hat\sigma_{\rm S}}\right)^2, \hat b = \frac{1}{n} \sum_{i=1}^n\psi'\left(\frac{r_i}{\hat\sigma_{\rm S}}\right),%' \hat c = \frac{1}{n}\sum_{i=1}^n \psi\left(\frac{r_i}{\hat\sigma_{\rm S}}\right) \frac{r_i}{\hat\sigma_{\rm S}}, \end{equation*} with $\psi = \rho'$,%' $n$ the number of observations, $p$ the number of predictor variables, $\hat\sigma_{\rm S}$ is the S-scale estimate and $r_i$ is the residual of the $i$-th observation. When using $q_{\rm E}$ it is necessary to adjust the tuning constants of $\chi$ to account for the dependence of $\kappa$ on $p$. For $q_{\rm T}$ no change is required. This method is implemented as \T{lmrob.mar()} in the source file \T{estimating.functions.R}. \end{itemize} \subsection{$\psi$-functions} We compare \emph{bisquare}, \emph{optimal}, \emph{lqq} and \emph{Hampel} $\psi$-functions. They are illustrated in Fig.~\ref{fig:psi.functions}. The tuning constants used in the simulation are compiled in Table~\ref{tab:psi-functions}. Note that the \emph{Hampel} $\psi$-function is tuned to have a downward slope of $-1/3$ instead of the originally proposed $-1/2$. This was set to allow for a comparison to an even slower descending $\psi$-function. %% generate table of tuning constants used for \psi functions \begin{table}[ht] \begin{center} <>= ## get list of psi functions lst <- lapply(estlist$procedures, function(x) { if (is.null(x$args)) return(list(NULL, NULL, NULL)) if (!is.null(x$args$weight)) return(list(x$args$weight[2], round(f.psi2c.chi(x$args$weight[1]),3), round(f.eff2c.psi(x$args$efficiency, x$args$weight[2]),3))) return(list(x$args$psi, round(if (is.null(x$args$tuning.chi)) lmrob.control(psi=x$args$psi)$tuning.chi else x$args$tuning.chi,3), round(if (is.null(x$args$tuning.psi)) lmrob.control(psi=x$args$psi)$tuning.psi else x$args$tuning.psi,3))) }) lst <- unique(lst) ## because of rounding, down from 21 to 5 ! lst <- lst[sapply(lst, function(x) !is.null(x[[1]]))] # 5 --> 4 ## convert to table tbl <- do.call(rbind, lst) tbl[,2:3] <- apply(tbl[,2:3], 1:2, function(x) { gsub('\\$NA\\$', '\\\\texttt{NA}', paste('$', unlist(x), collapse=', ', '$', sep='')) }) tbl[,1] <- paste('\\texttt{', tbl[,1], '}', sep='') colnames(tbl) <- paste('\\texttt{', c('psi', 'tuning.chi', 'tuning.psi'), '}', sep='') require("xtable") # need also print() method: print(xtable(tbl), sanitize.text.function=identity, include.rownames = FALSE, floating=FALSE) @ %def \vspace{15pt} \caption{Tuning constants of $\psi$-functions used in the simulation.} \label{tab:psi-functions} \end{center} \end{table} \begin{figure} \begin{center} <>= d.x_psi <- function(x, psi) { cc <- lmrob.control(psi = psi)$tuning.psi data.frame(x=x, value=Mpsi(x, cc, psi), psi = psi) } x <- seq(0, 10, length.out = 1000) tmp <- rbind(d.x_psi(x, 'optimal'), d.x_psi(x, 'bisquare'), d.x_psi(x, 'lqq'), d.x_psi(x, 'hampel')) print( ggplot(tmp, aes(x, value, color = psi)) + geom_line(lwd=1.25) + ylab(quote(psi(x))) + scale_color_discrete(name = quote(psi ~ '-function'))) @ \end{center} \caption{$\psi$-functions used in the simulation.} \label{fig:psi.functions} \end{figure} \subsection{Designs} Two types of designs are used in the simulation: fixed and random designs. One design with $n=20$ observations, $p=1+3$ predictors and strong leverage points. 
This design also includes an intercept column. It is shown in Fig.~\ref{fig:design-predict}. The other designs are random, i.e., regenerated for every repetition, and the models are fitted without an intercept. We use the same distribution to generate the designs as for the errors. The number of observations simulated are $n = 25, 50, 100, 400$ and the ratio to the number of parameters are $p/n = 1/20, 1/10, 1/5, 1/3, 1/2$. We round $p$ to the nearest smaller integer if necessary. The random datasets are generated using the following code. <>= f.gen <- function(n, p, rep, err) { ## get function name and parameters lerrfun <- f.errname(err$err) lerrpar <- err$args ## generate random predictors ret <- replicate(rep, matrix(do.call(lerrfun, c(n = n*p, lerrpar)), n, p), simplify=FALSE) attr(ret[[1]], 'gen') <- f.gen ret } ratios <- c(1/20, 1/10, 1/5, 1/3, 1/2)## p/n lsit <- expand.grid(n = c(25, 50, 100, 400), p = ratios) lsit <- within(lsit, p <- as.integer(n*p)) .errs.normal.1 <- list(err = 'normal', args = list(mean = 0, sd = 1)) for (i in 1:NROW(lsit)) assign(paste('rand',lsit[i,1],lsit[i,2],sep='_'), f.gen(lsit[i,1], lsit[i,2], rep = 1, err = .errs.normal.1)[[1]]) @ An example design is shown in Fig.~\ref{fig:example.design}. \begin{figure} \begin{center} <>= require(GGally) colnames(rand_25_5) <- paste0("X", 1:5) # workaround new (2014-12) change in GGally ## and the 2016-11-* change needs data frames: df.r_25_5 <- as.data.frame(rand_25_5) try( ## fails with old GGally and new packageVersion("ggplot2") >= "2.2.1.9000" print(ggpairs(df.r_25_5, axisLabels="show", title = "rand_25_5: n=25, p=5")) ) @ \end{center} \caption{Example random design.} \label{fig:example.design} \end{figure} \subsection{Error Distributions} We simulate the following error distributions \begin{itemize} \item standard normal distribution, \item $t_5$, $t_3$, $t_1$, \item centered skewed t with $df = \infty, 5$ and $\gamma = 2$ (denoted by \emph{cskt$(\infty,2)$} and \emph{cskt}$(5,2)$, respectively); as introduced by \citet{fernandez1998bayesian} using the \T{R} package \T{skewt}, \item contaminated normal, $\Norm{0,1}$ contaminated with $10\%$ $\Norm{0, 10}$ (symmetric, \emph{cnorm}$(0.1,0,3.16)$) or $\Norm{4, 1}$ (asymmetric, \emph{cnorm}$(0.1,4,1)$). \end{itemize} \subsection{Covariance Matrix Estimators} For the standard MM estimator, we compare ${\rm Avar}_1$ of \citet{croux03} and the empirical weighted covariance matrix estimate corrected by Huber's small sample correction as described in \citet{HubPR09} (denoted by \emph{Wssc}). The latter is also used for the variation of the MM estimate proposed by \citet{maronna2009correcting}. For the SMD and SMDM variants we use the covariance matrix estimate as described in \citet{KS2011} (\emph{W$\tau$}). The covariance matrix estimate consists of three parts: \begin{equation*} {\rm cov}(\hat\beta) = \sigma^2\gamma\bld V_{\bld X}^{-1}. \end{equation*} The SMD and SMDM methods of \T{lmrob} use the following defaults. \begin{equation} \label{eq:gammatau} \hat\gamma = \frac{\frac{1}{n}\sum_{i=1}^n\tau_i^2 \psi\left(\frac{r_i}{\tau_i\hat\sigma}\right)^2} {\frac{1}{n}\sum_{i=1}^n\psi'\left(\frac{r_i}{\tau_i\hat\sigma}\right)} \end{equation} where $\tau_i$ is the rescaling factor used for the D-scale estimate (see \citet{KS2011}). \noindent\textbf{Remark: } Equation \eqref{eq:gammatau} is a corrected version of $\gamma$. It was changed in \texttt{robustbase} version \texttt{0.91} (April 2014) to ensure that the equation reduces to $1$ in the classical case ($\psi(x) = x$). 
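A small numerical check of this reduction (not part of the original simulation code; it plugs $\psi(x) = x$, $\tau_i = 1$ and the classical scale estimate $\hat\sigma^2 = \frac{1}{n}\sum_{i=1}^n r_i^2$ into \eqref{eq:gammatau}):
<<eval=FALSE>>=
## illustrative only: the estimate of gamma above reduces to 1 in the classical case
set.seed(1)
r     <- rnorm(50)                     # residuals
sigma <- sqrt(mean(r^2))               # classical scale estimate
tau   <- rep(1, length(r))             # no rescaling
psi   <- function(x) x                 # classical psi
dpsi  <- function(x) rep(1, length(x)) # its derivative
gam   <- mean(tau^2 * psi(r/(tau*sigma))^2) / mean(dpsi(r/(tau*sigma)))
stopifnot(all.equal(gam, 1))
@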
If the former (incorrect) version is needed for compatibility reasons, it can be obtained by adding the argument \texttt{cov.corrfact = "tauold"}. \begin{equation*} \bld{\widehat V}_{\bld X} = \frac{1}{\frac{1}{n}\sum_{i=1}^n\wgt_{ii}}\bld X^T\bld W\bld X \end{equation*} where $\bld W = \diag\left(\wgt\left(\frac{r_1}{\hat\sigma}\right), \dots, \wgt\left(\frac{r_n}{\hat\sigma}\right)\right)$. The function $\wgt(r) = \psi(r)/r$ produces the robustness weights. \section{Simulation} The main loop of the simulation is fairly simple. (This code is only run if there are no aggregate results available.) %% set eval to TRUE for chunks simulation-run and simulation-aggr %% if you really want to run the simulations again. %% (better fail with an error than run for weeks) <>= aggrResultsFile <- file.path(robustDta, "aggr_results.Rdata") <>= if (!file.exists(aggrResultsFile)) { ## load packages required only for simulation stopifnot(require(robust), require(skewt), require(foreach)) if (!is.null(getOption("cores"))) { if (getOption("cores") == 1) registerDoSEQ() ## no not use parallel processing else { stopifnot(require(doParallel)) if (.Platform$OS.type == "windows") { cl <- makeCluster(getOption("cores")) clusterExport(cl, c("N", "robustDoc")) clusterEvalQ(cl, slave <- TRUE) clusterEvalQ(cl, source(file.path(robustDoc, 'simulation.init.R'))) registerDoParallel(cl) } else registerDoParallel() } } else registerDoSEQ() ## no not use parallel processing for (design in c("dd", ls(pattern = 'rand_\\d+_\\d+'))) { print(design) ## set design estlist$design <- get(design) estlist$use.intercept <- !grepl('^rand', design) ## add design.predict: pc estlist$design.predict <- if (is.null(attr(estlist$design, 'gen'))) f.prediction.points(estlist$design) else f.prediction.points(estlist$design, max.pc = 2) filename <- file.path(robustDta, sprintf('r.test.final.%s.Rdata',design)) if (!file.exists(filename)) { ## run print(system.time(r.test <- f.sim(estlist, silent = TRUE))) ## save save(r.test, file=filename) ## delete output rm(r.test) ## run garbage collection gc() } } } @ The variable \T{estlist} is a list containing all the necessary settings required to run the simulation as outlined above. Most of its elements are self-explanatory. <>= str(estlist, 1) @ \T{errs} is a list containing all the error distributions to be simulated. The entry for the standard normal looks as follows. <>= estlist$errs[[1]] @ \T{err} is translated internally to the corresponding random generation or quantile function, e.g., in this case \T{rnorm} or \T{qnorm}. \T{args} is a list containing all the required arguments to call the function. The errors are then generated internally with the following call. <>= set.seed(estlist$seed) errs <- c(sapply(1:nrep, function(x) do.call(fun, c(n = nobs, args)))) @ All required random numbers are generated at once instead of during the simulation. Like this, it is certain, that all the compared methods run on exactly the same data. The entry \T{procedures} follows a similar convention. \T{design.predict} contains the design used for the prediction of observations and calculation of confidence or prediction intervals. The objects returned by the procedures are processed by the functions contained in the \T{estlist\$output} list. <<>>= str(estlist$output[1:3], 2) @ The results are stored in a 4-dimensional array. The dimensions are: repetition number, type of value, procedure id, error id. Using \T{apply} it is very easy and fast to generate summary statistics. 
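As an illustration (the array below is random toy data with the same layout, not actual simulation output; the procedure and error labels are made up):
<<eval=FALSE>>=
## toy 4-d array: repetition x type of value x procedure x error distribution
toy <- array(rnorm(200 * 2 * 3 * 4), dim = c(200, 2, 3, 4),
             dimnames = list(NULL, c("sigma", "beta_1"),
                             paste0("proc", 1:3), paste0("err", 1:4)))
## median scale estimate for every procedure x error combination:
apply(toy[, "sigma", , ], c(2, 3), median, na.rm = TRUE)
@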
The raw results are stored on the hard disk, because typically it takes much longer to execute all the procedures than to calculate the summary statistics. The variables saved take up a lot of space quite quickly, so only the necessary data is stored. These are $\sigma$, $\bld\beta$ as well as the corresponding standard errors. To speed up the simulation routine \T{f.sim}, the simulations are carried out in parallel, as long as this is possible. This is accomplished with the help of the \T{R}-package \T{foreach}. This is most easily done on a machine with multiple processors or cores. The \T{multicore} package provides the methods to do so easily. The worker processes are just forked from the main \T{R} process. After all the methods have been simulated, the simulation output is processed. The code is quite lengthy and thus not displayed here (check the Sweave source file \T{lmrob\_simulation.Rnw}). The residuals, robustness weights, leverages and $\tau$ values have to be recalculated. Using vectorized operations and some specialized \T{C} code, this is quite cheap. The summary statistics generated are discussed in the next section. <>= if (!file.exists(aggrResultsFile)) { files <- list.files(robustDta, pattern = 'r.test.final\\.') res <- foreach(file = files) %dopar% { ## get design, load r.test, initialize other stuff design <- substr(basename(file), 14, nchar(basename(file)) - 6) cat(design, ' ') load(file.path(robustDta, file)) estlist <- attr(r.test, 'estlist') use.intercept <- if (!is.null(estlist$use.intercept)) estlist$use.intercept else TRUE sel <- dimnames(r.test)[[3]] ## [dimnames(r.test)[[3]] != "estname=lm"] n.betas <- paste('beta',1:(NCOL(estlist$design)+use.intercept),sep='_') ## get design lX <- if (use.intercept) as.matrix(cbind(1, get(design))) else as.matrix(get(design)) n <- NROW(lX) p <- NCOL(lX) ## prepare arrays for variable designs and leverages if (is.function(attr(estlist$design, 'gen'))) { lXs <- array(NA, c(n, NCOL(lX), dim(r.test)[c(1, 4)]), list(Obs = NULL, Pred = colnames(lX), Data = NULL, Errstr = dimnames(r.test)[[4]])) } ## generate errors lerrs <- array(NA, c(n, dim(r.test)[c(1,4)]) , list(Obs = NULL, Data = NULL, Errstr = dimnames(r.test)[[4]])) for (i in 1:dim(lerrs)[3]) { lerrstr <- f.list2str(estlist$errs[[i]]) lerr <- f.errs(estlist, estlist$errs[[i]], gen = attr(estlist$design, 'gen'), nobs = n, npar = NCOL(lX)) lerrs[,,lerrstr] <- lerr if (!is.null(attr(lerr, 'designs'))) { ## retrieve generated designs: this returns a list of designs lXs[,,,i] <- unlist(attr(lerr, 'designs')) if (use.intercept) stop('intercept not implemented for random desings') } rm(lerr) } if (is.function(attr(estlist$design, 'gen'))) { ## calculate leverages lXlevs <- apply(lXs, 3:4, .lmrob.hat) } ## calculate fitted values from betas if (!is.function(attr(estlist$design, 'gen'))) { ## fixed design case lfitted <- apply(r.test[,n.betas,sel,,drop=FALSE],c(3:4), function(bhat) { lX %*% t(bhat) } ) } else { ## variable design case lfitted <- array(NA, n*prod(dim(r.test)[c(1,4)])*length(sel)) lfitted <- .C('R_calc_fitted', as.double(lXs), ## designs as.double(r.test[,n.betas,sel,,drop=FALSE]), ## betas as.double(lfitted), ## result as.integer(n), ## n as.integer(p), ## p as.integer(dim(r.test)[1]), ## nrep as.integer(length(sel)), ## n procstr as.integer(dim(r.test)[4]), ## n errstr DUP=FALSE, NAOK=TRUE, PACKAGE="robustbase")[[3]] } tdim <- dim(lfitted) <- c(n, dim(r.test)[1], length(sel),dim(r.test)[4]) lfitted <- aperm(lfitted, c(1,2,4,3)) ## calculate residuals = y - 
fitted.values lfitted <- as.vector(lerrs) - as.vector(lfitted) dim(lfitted) <- tdim[c(1,2,4,3)] lfitted <- aperm(lfitted, c(1,2,4,3)) dimnames(lfitted) <- c(list(Obs = NULL), dimnames(r.test[,,sel,,drop=FALSE])[c(1,3,4)]) lresids <- lfitted rm(lfitted) ## calculate lm MSE and trim trimmed MSE of betas tf.MSE <- function(lbetas) { lnrm <- rowSums(lbetas^2) c(MSE=mean(lnrm,na.rm=TRUE),MSE.1=mean(lnrm,trim=trim,na.rm=TRUE)) } MSEs <- apply(r.test[,n.betas,,,drop=FALSE],3:4,tf.MSE) li <- 1 ## so we can reconstruct where we are lres <- apply(lresids,3:4,f.aggregate.results <- { function(lresid) { ## the counter li tells us, where we are ## we walk dimensions from left to right lcdn <- f.get.current.dimnames(li, dimnames(lresids), 3:4) lr <- r.test[,,lcdn[1],lcdn[2]] ## update counter li <<- li + 1 ## transpose and normalize residuals with sigma lresid <- t(lresid) / lr[,'sigma'] if (lcdn[1] != 'estname=lm') { ## convert procstr to proclst and get control list largs <- f.str2list(lcdn[1])[[1]]$args if (grepl('lm.robust', lcdn[1])) { lctrl <- list() lctrl$psi <- toupper(largs$weight2) lctrl$tuning.psi <- f.eff2c.psi(largs$efficiency, lctrl$psi) lctrl$method <- 'MM' } else { lctrl <- do.call('lmrob.control',largs) } ## calculate correction factors ## A lsp2 <- rowSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi)^2) ## B lspp <- rowSums(lpp <- Mpsi(lresid,lctrl$tuning.psi, lctrl$psi,1)) ## calculate Huber\'s small sample correction factor lK <- 1 + rowSums((lpp - lspp/n)^2)*NCOL(lX)/lspp^2 ## 1/n cancels } else { lK <- lspp <- lsp2 <- NA } ## only calculate tau variants if possible if (grepl('args.method=\\w*(D|T)\\w*\\b', lcdn[1])) { ## SMD or SMDM ## calculate robustness weights lwgts <- Mwgt(lresid, lctrl$tuning.psi, lctrl$psi) ## function to calculate robustified leverages tfun <- if (is.function(attr(estlist$design, 'gen'))) function(i) { if (all(is.na(wi <- lwgts[i,]))) wi else .lmrob.hat(lXs[,,i,lcdn[2]],wi) } else function(i) { if (all(is.na(wi <- lwgts[i,]))) wi else .lmrob.hat(lX, wi) } llev <- sapply(1:dim(r.test)[1], tfun) ## calculate unique leverages lt <- robustbase:::lmrob.tau(list(),h=llev,control=lctrl) ## normalize residuals with tau (transpose lresid) lresid <- t(lresid) / lt ## A lsp2t <- colSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi)^2) ## B lsppt <- colSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi,1)) } else { lsp2t <- lsppt <- NA } ## calculate raw scales based on the errors lproc <- f.str2list(lcdn[1])[[1]] q <- NA M <- NA if (lproc$estname == 'lmrob.mar' && lproc$args$type == 'qE') { ## for lmrob_mar, qE variant lctrl <- lmrob.control(psi = 'bisquare', tuning.chi=uniroot(function(c) robustbase:::lmrob.bp('bisquare', c) - (1-p/n)/2, c(1, 3))$root) se <- apply(lerrs[,,lcdn[2]],2,lmrob.mscale,control=lctrl,p=p) ltmp <- se/lr[,'sigma'] q <- median(ltmp, na.rm = TRUE) M <- mad(ltmp, na.rm = TRUE) } else if (!is.null(lproc$args$method) && lproc$args$method == 'SMD') { ## for D-scales se <- apply(lerrs[,,lcdn[2]],2,lmrob.dscale,control=lctrl, kappa=robustbase:::lmrob.kappa(control=lctrl)) ltmp <- se/lr[,'sigma'] q <- median(ltmp, na.rm = TRUE) M <- mad(ltmp, na.rm = TRUE) } ## calculate empirical correct test value (to yield 5% level) t.val_2 <- t.val_1 <- quantile(abs(lr[,'beta_1']/lr[,'se_1']), 0.95, na.rm = TRUE) if (p > 1) t.val_2 <- quantile(abs(lr[,'beta_2']/lr[,'se_2']), 0.95, na.rm = TRUE) ## return output: summary statistics: c(## gamma AdB2.1 = mean(lsp2/lspp^2,trim=trim,na.rm=TRUE)*n, K2AdB2.1 = mean(lK^2*lsp2/lspp^2,trim=trim,na.rm=TRUE)*n, AdB2t.1 = 
mean(lsp2t/lsppt^2,trim=trim,na.rm=TRUE)*n, sdAdB2.1 = sd.trim(lsp2/lspp^2*n,trim=trim,na.rm=TRUE), sdK2AdB2.1 = sd.trim(lK^2*lsp2/lspp^2*n,trim=trim,na.rm=TRUE), sdAdB2t.1 = sd.trim(lsp2t/lsppt^2*n,trim=trim,na.rm=TRUE), ## sigma medsigma = median(lr[,'sigma'],na.rm=TRUE), madsigma = mad(lr[,'sigma'],na.rm=TRUE), meansigma.1 = mean(lr[,'sigma'],trim=trim,na.rm=TRUE), sdsigma.1 = sd.trim(lr[,'sigma'],trim=trim,na.rm=TRUE), meanlogsigma = mean(log(lr[,'sigma']),na.rm=TRUE), meanlogsigma.1 = mean(log(lr[,'sigma']),trim=trim,na.rm=TRUE), sdlogsigma = sd(log(lr[,'sigma']),na.rm=TRUE), sdlogsigma.1 = sd.trim(log(lr[,'sigma']),trim=trim,na.rm=TRUE), q = q, M = M, ## beta efficiency.1 = MSEs['MSE.1','estname=lm',lcdn[2]] / MSEs['MSE.1',lcdn[1],lcdn[2]], ## t-value: level emplev_1 = mean(abs(lr[,'beta_1']/lr[,'se_1']) > qt(0.975, n - p), na.rm = TRUE), emplev_2 = if (p>1) { mean(abs(lr[,'beta_2']/lr[,'se_2']) > qt(0.975, n - p), na.rm = TRUE) } else NA, ## t-value: power power_1_0.2 = mean(abs(lr[,'beta_1']-0.2)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.2 = if (p>1) { mean(abs(lr[,'beta_2']-0.2)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.4 = mean(abs(lr[,'beta_1']-0.4)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.4 = if (p>1) { mean(abs(lr[,'beta_2']-0.4)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.6 = mean(abs(lr[,'beta_1']-0.6)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.6 = if (p>1) { mean(abs(lr[,'beta_2']-0.6)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.8 = mean(abs(lr[,'beta_1']-0.8)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.8 = if (p>1) { mean(abs(lr[,'beta_2']-0.8)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_1 = mean(abs(lr[,'beta_1']-1)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_1 = if (p>1) { mean(abs(lr[,'beta_2']-1)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, ## coverage probability: calculate empirically ## the evaluation points are constant, but the designs change ## therefore this makes only sense for fixed designs cpr_1 = mean(lr[,'upr_1'] < 0 | lr[,'lwr_1'] > 0, na.rm = TRUE), cpr_2 = mean(lr[,'upr_2'] < 0 | lr[,'lwr_2'] > 0, na.rm = TRUE), cpr_3 = mean(lr[,'upr_3'] < 0 | lr[,'lwr_3'] > 0, na.rm = TRUE), cpr_4 = mean(lr[,'upr_4'] < 0 | lr[,'lwr_4'] > 0, na.rm = TRUE), cpr_5 = if (any(colnames(lr) == 'upr_5')) { mean(lr[,'upr_5'] < 0 | lr[,'lwr_5'] > 0, na.rm = TRUE) } else NA, cpr_6 = if (any(colnames(lr) == 'upr_6')) { mean(lr[,'upr_6'] < 0 | lr[,'lwr_6'] > 0, na.rm = TRUE) } else NA, cpr_7 = if (any(colnames(lr) == 'upr_7')) { mean(lr[,'upr_7'] < 0 | lr[,'lwr_7'] > 0, na.rm = TRUE) } else NA ) }}) ## convert to data.frame lres <- f.a2df.2(lres, split = '___NO___') ## add additional info lres$n <- NROW(lX) lres$p <- NCOL(lX) lres$nmpdn <- with(lres, (n-p)/n) lres$Design <- design ## clean up rm(r.test, lXs, lXlevs, lresids, lerrs) gc() ## return lres lres } save(res, trim, file = aggrResultsFile) ## stop cluster if (exists("cl")) stopCluster(cl) } <>= load(aggrResultsFile) ## this will fail if the file is not found (for a reason) ## set eval to TRUE for chunks simulation-run and simulation-aggr ## if you really want to run the simulations again. 
## (better fail with an error than run for weeks) ## combine list elements to data.frame test.1 <- do.call('rbind', res) test.1 <- within(test.1, { Method[Method == "SM"] <- "MM" Method <- Method[, drop = TRUE] Estimator <- interaction(Method, D.type, drop = TRUE) Estimator <- f.rename.level(Estimator, 'MM.S', 'MM') Estimator <- f.rename.level(Estimator, 'SMD.D', 'SMD') Estimator <- f.rename.level(Estimator, 'SMDM.D', 'SMDM') Estimator <- f.rename.level(Estimator, 'MM.qT', 'MMqT') Estimator <- f.rename.level(Estimator, 'MM.qE', 'MMqE') Estimator <- f.rename.level(Estimator, 'MM.rob', 'MMrobust') Estimator <- f.rename.level(Estimator, 'lsq.lm', 'OLS') Est.Scale <- f.rename.level(Estimator, 'MM', 'sigma_S') Est.Scale <- f.rename.level(Est.Scale, 'MMrobust', 'sigma_robust') Est.Scale <- f.rename.level(Est.Scale, 'MMqE', 'sigma_S*qE') Est.Scale <- f.rename.level(Est.Scale, 'MMqT', 'sigma_S*qT') Est.Scale <- f.rename.level(Est.Scale, 'SMDM', 'sigma_D') Est.Scale <- f.rename.level(Est.Scale, 'SMD', 'sigma_D') Est.Scale <- f.rename.level(Est.Scale, 'OLS', 'sigma_OLS') Psi <- f.rename.level(Psi, 'hampel', 'Hampel') }) ## add interaction of Method and Cov test.1 <- within(test.1, { method.cov <- interaction(Estimator, Cov, drop=TRUE) levels(method.cov) <- sub('\\.+vcov\\.(a?)[wacrv1]*', '\\1', levels(method.cov)) method.cov <- f.rename.level(method.cov, "MMa", "MM.Avar1") method.cov <- f.rename.level(method.cov, "MMrobust.Default", "MMrobust.Wssc") method.cov <- f.rename.level(method.cov, "MM", "MM.Wssc") method.cov <- f.rename.level(method.cov, "SMD", "SMD.Wtau") method.cov <- f.rename.level(method.cov, "SMDM", "SMDM.Wtau") method.cov <- f.rename.level(method.cov, "MMqT", "MMqT.Wssc") method.cov <- f.rename.level(method.cov, "MMqE", "MMqE.Wssc") method.cov <- f.rename.level(method.cov, "OLS.Default", "OLS") ## ratio: the closest 'desired ratios' instead of exact p/n; ## needed in plots only for stat_*(): median over "close" p/n's: ratio <- ratios[apply(abs(as.matrix(1/ratios) %*% t(as.matrix(p / n)) - 1), 2, which.min)] }) ## calculate expected values of psi^2 and psi' test.1$Ep2 <- test.1$Epp <- NA for(Procstr in levels(test.1$Procstr)) { args <- f.str2list(Procstr)[[1]]$args if (is.null(args)) next lctrl <- do.call('lmrob.control',args) test.1$Ep2[test.1$Procstr == Procstr] <- robustbase:::lmrob.E(psi(r)^2, lctrl, use.integrate = TRUE) test.1$Epp[test.1$Procstr == Procstr] <- robustbase:::lmrob.E(psi(r,1), lctrl, use.integrate = TRUE) } ## drop some observations, separate fixed and random designs test.fixed <- droplevels(subset(test.1, n == 20)) ## n = 20 -- fixed design test.1 <- droplevels(subset(test.1, n != 20)) ## n !=20 -- random designs test.lm <- droplevels(subset(test.1, Function == 'lm')) # lm = OLS test.1 <- droplevels(subset(test.1, Function != 'lm')) # Rob := all "robust" test.lm$Psi <- NULL test.lm.2 <- droplevels(subset(test.lm, Error == 'N(0,1)')) # OLS for N(*) test.2 <- droplevels(subset(test.1, Error == 'N(0,1)' & Function != 'lm'))# Rob for N(*) ## subsets test.3 <- droplevels(subset(test.2, Method != 'SMDM'))# Rob, not SMDM for N(*) test.4 <- droplevels(subset(test.1, Method != 'SMDM'))# Rob, not SMDM for all @ \section{Simulation Results} \subsection{Criteria} The simulated methods are compared using the following criteria. \textbf{Scale estimates.} The criteria for scale estimates are all calculated on the log-scale. The bias of the estimators is measured by the $\Sexpr{trim*100}\%$ trimmed mean. To recover a meaningful scale, the results are exponentiated before plotting. 
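A one-line numerical illustration of this back-transformation (without trimming, for simplicity; not part of the simulation code):
<<eval=FALSE>>=
sig <- c(0.8, 1.1, 1.4, 0.9, 1.2)      # some hypothetical scale estimates
all.equal(exp(mean(log(sig))),         # exponentiated mean of logs ...
          prod(sig)^(1/length(sig)))   # ... equals the geometric mean
@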
It is easy to see that this is equivalent to calculating geometric means.
Since the methods are all tuned at the central model, ${\mathcal N}(0,1)$, a meaningful comparison of biases can only be made for ${\mathcal N}(0,1)$ distributed errors.
The variability of the estimators, on the other hand, can be compared over all simulated error distributions.
It is measured by the $\Sexpr{trim*100}\%$ trimmed standard deviation, rescaled by the square root of the number of observations.
For completeness, the statistics used to compare scale estimates in \citet{maronna2009correcting} are also calculated. They are defined as
\begin{equation} \label{eq:def.q.and.M}
  q = \median\left(\frac{S(\bld e)}{\hat\sigma_S}\right), \quad
  M = \mad\left(\frac{S(\bld e)}{\hat\sigma_S}\right),
\end{equation}
where $S(\bld e)$ stands for the S-scale estimate evaluated for the actual errors $\bld e$.
For the D-scale estimate, the definition is analogous.
Since there is no design to correct for, we set $\tau_i = 1\ \forall i$.
\textbf{Coefficients.} The efficiency of estimated regression coefficients $\bld{\hat\beta}$ is characterized by their mean squared error (\emph{MSE}).
Since we simulate under $H_0: \bld\beta = 0$, this is determined by the covariance matrix of $\bld{\hat\beta}$.
We use $\Erw\left[\norm{\bld{\hat\beta}}_2^2\right] = \sum_{j=1}^p \var(\hat\beta_j)$ as a summary.
When comparing to the MSE of the ordinary least squares estimate (\emph{OLS}), this gives the efficiency, which, by the choice of tuning constants of $\psi$, should yield
\begin{equation*}
  \frac{{\rm MSE}(\bld{\hat\beta}_{\rm OLS})}{{\rm MSE}(\bld{\hat\beta})} \approx 0.95
\end{equation*}
for standard normally distributed errors.
The simulation mean of $\sum_{j=1}^p \var(\hat\beta_j)$ is calculated with $\Sexpr{trim*100}\%$ trimming.
For other error distributions, this ratio should be larger than $1$, since by using robust procedures we expect to gain efficiency relative to the least squares estimate at such distributions.
$\bld\gamma$\textbf{.} We compare the behavior of the various estimators of $\gamma$ by calculating the trimmed mean and the trimmed standard deviation for standard normally distributed errors.
\textbf{Covariance matrix estimate.} The covariance matrix estimates are compared indirectly via the performance of the resulting test statistics.
We compare the empirical level of the hypothesis tests $H_0: \beta_j = 0$ for some $j \in \{1,\dots, p\}$.
The power of the tests is compared by testing for $H_0: \beta_j = b$ for several values of $b>0$.
The formal power of a more liberal test is generally higher.
Therefore, in order for this comparison to be meaningful, the critical value for each test statistic was corrected such that all tests have the same simulated level of $5\%$.
The simple hypothesis tests give only limited insights.
To investigate the effects of other error distributions, e.g., asymmetric error distributions, we compare the confidence intervals for the prediction of some fixed points.
Since it was not clear how to assess the quality of prediction intervals, either at the central or at the simulated model, we do not calculate them here.
A small number of prediction points is already enough if they are chosen properly.
We chose to use seven points lying on the first two principal components, spaced evenly from the center of the design to its extended range.
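A simplified sketch of this construction, for the first principal component only (the points actually used are produced by the helper \T{f.prediction.points()}; the code below is merely illustrative and uses the fixed design \T{dd}):
<<eval=FALSE>>=
X  <- as.matrix(dd)                        # fixed design, 20 x 3 (no intercept column)
mc <- covMcd(X)                            # robust center and scatter
e1 <- eigen(mc$cov)$vectors[, 1]           # first robust principal direction
sc <- scale(X, center = mc$center, scale = FALSE) %*% e1    # scores on PC 1
s  <- seq(0, max(extendrange(sc, f = 0.5)), length.out = 4) # center to extended range
pts <- sweep(outer(s, e1), 2, mc$center, "+")  # prediction points in design space
@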
The principal components were calculated robustly (using \T{covMcd} of the \T{robustbase} package) and the range was extended by a fraction of $0.5$. An example is shown in Figure~\ref{fig:design-predict}. \subsection{Results} The results are given here as plots (Fig.~\ref{fig:meanscale-1} to Fig.~\ref{fig:cpr}). For a complete discussion of the results, we refer to \citet{KS2011}. The different $\psi$-functions are each plotted in a different facet, except for Fig.~\ref{fig:qscale-all}, Fig.~\ref{fig:Mscale-all} and Fig.~\ref{fig:lqq-level}, where the facets show the results for various error distributions. The plots are augmented with auxiliary lines to ease the comparison of the methods. The lines connect the median values over the values of $n$ for each simulated ratio $p/n$. In many plots the y-axis has been truncated. Points in the grey shaded area represent truncated values using a different scale. \begin{figure} \begin{center} <>= ## ## exp(mean(log(sigma))): this looks almost identical to mean(sigma) print(ggplot(test.3, aes(p/n, exp(meanlogsigma.1), color = Est.Scale)) + stat_summary(aes(x=ratio), # <- "rounded p/n": --> median over "neighborhood" fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Psi) + ylab(quote('geometric ' ~ mean(hat(sigma)))) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(test.3$Est.Scale))) @ \end{center} \caption{Mean of scale estimates for normal errors. The mean is calculated with $\Sexpr{trim*100}\%$ trimming. The lines connect the median values for each simulated ratio $p/n$. Results for random designs only. } \label{fig:meanscale-1} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(test.3, aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + ylab(quote(sd(log(hat(sigma)))*sqrt(n))) + facet_wrap(~ Psi) + geom_point (data=test.lm.2, alpha=alpha.n, aes(color = Est.Scale)) + stat_summary(data=test.lm.2, aes(x=ratio, color = Est.Scale), fun.y=median, geom='line') + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels= lab(test.3 $Est.Scale, test.lm.2$Est.Scale))) @ \end{center} \caption{Variability of the scale estimates for normal errors. The standard deviation is calculated with $\Sexpr{trim*100}\%$ trimming. } \label{fig:sdscale-1} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(test.4, aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) + ylim(with(test.4, range(sdlogsigma.1*sqrt(n)))) + ylab(quote(sd(log(hat(sigma)))*sqrt(n))) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = Error), alpha = alpha.error) + facet_wrap(~ Psi) + ## "FIXME" (?): the next 'test.lm' one give warnings geom_point (data=test.lm, aes(color = Est.Scale), alpha=alpha.n) + ##-> Warning: Removed 108 rows containing missing values (geom_point). stat_summary(data=test.lm, aes(x = ratio, color = Est.Scale), fun.y=median, geom='line') + ##-> Warning: Removed 108 rows containing non-finite values (stat_summary). 
g.scale_shape(labels=lab(test.4$Error)) + scale_colour_discrete("Scale Est.", labels=lab(test.4 $Est.Scale, test.lm$Est.Scale))) @ \end{center} \caption{Variability of the scale estimates for all simulated error distributions.} \label{fig:sdscale-all} \end{figure} \begin{figure} \begin{center} <>= t3est2 <- droplevels(subset(test.3, Estimator %in% c("SMD", "MMqE"))) print(ggplot(t3est2, aes(p/n, q, color = Est.Scale)) + ylab(quote(q)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Psi) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale))) @ \end{center} \caption{$q$ statistic for normal errors. $q$ is defined in \eqref{eq:def.q.and.M}.} \label{fig:qscale-1} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t3est2, aes(p/n, M/q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + g.scale_y_log10_0.05() + facet_wrap(~ Psi) + ylab(quote(M/q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale))) @ \end{center} \caption{$M/q$ statistic for normal errors. $M$ and $q$ are defined in \eqref{eq:def.q.and.M}.} \label{fig:Mscale-1} \end{figure} \begin{figure} \begin{center} <>= t1.bi <- droplevels(subset(test.1, Estimator %in% c("SMD", "MMqE") & Psi == 'bisquare')) print(ggplot(t1.bi, aes(p/n, q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Error) + ## labeller missing! ylab(quote(q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(tmp$Est.Scale)), legend.mod = legend.mod) @ \end{center} \caption{$q$ statistic for \emph{bisquare} $\psi$. } \label{fig:qscale-all} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t1.bi, aes(p/n, M/q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + g.scale_y_log10_0.05() + facet_wrap(~ Error) + ylab(quote(M/q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(tmp$Est.Scale)), legend.mod = legend.mod) @ \end{center} \caption{$M/q$ statistic for \emph{bisquare} $\psi$. } \label{fig:Mscale-all} \end{figure} \clearpage% not nice, but needed against LaTeX Error: Too many unprocessed floats. \begin{figure} \begin{center} <>= print(ggplot(test.2, aes(p/n, efficiency.1, color = Estimator)) + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 0.95) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + facet_wrap(~ Psi) + ylab(quote('efficiency of' ~~ hat(beta))) + g.scale_shape(quote(n)) + scale_colour_discrete(name = "Estimator", labels = lab(test.2$Estimator))) @ \end{center} \caption{Efficiency for normal errors. The efficiency is calculated by comparing to an OLS estimate and averaging with $\Sexpr{trim*100}\%$ trimming. 
} \label{fig:efficiency} \end{figure} \begin{figure} \begin{center} <>= t.1xt1 <- droplevels(subset(test.1, Error != 't1')) print(ggplot(t.1xt1, aes(p/n, efficiency.1, color = Estimator)) + ylab(quote('efficiency of '~hat(beta))) + geom_point(aes(shape = Error), alpha = alpha.error) + geom_hline(yintercept = 0.95) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + g.scale_shape(values=c(16,17,15,3,7,8,9,1,2,4)[-4], labels=lab(t.1xt1$Error)) + facet_wrap(~ Psi) + scale_colour_discrete(name = "Estimator", labels = lab(t.1xt1$Estimator))) @ \end{center} \caption{Efficiency for all simulated error distributions except $t_1$. } \label{fig:efficiency-all} \end{figure} \begin{figure} \begin{center} <>= t.2o. <- droplevels(subset(test.2, !is.na(AdB2t.1))) print(ggplot(t.2o., aes(p/n, AdB2.1/(1-p/n), color = Estimator)) + geom_point(aes(shape=factor(n)), alpha = alpha.n) + geom_point(aes(y=K2AdB2.1/(1-p/n)), alpha = alpha.n) + geom_point(aes(y=AdB2t.1), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + stat_summary(aes(x=ratio, y=K2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype=2) + stat_summary(aes(x=ratio, y=AdB2t.1), fun.y=median, geom='line', linetype=3) + geom_hline(yintercept = 1/0.95) + g.scale_y_log10_1() + scale_shape_discrete(quote(n)) + scale_colour_discrete(name = "Estimator", labels = lab(t.2o.$Estimator)) + ylab(quote(mean(hat(gamma)))) + facet_wrap(~ Psi)) @ \end{center} \caption{Comparing the estimates of $\gamma$. The solid line connects the uncorrected estimate, dotted the $\tau$ corrected estimate and dashed Huber's small sample correction. } \label{fig:AdB2-1} \end{figure} \begin{figure} \begin{center} <>= t.2ok <- droplevels(subset(test.2, !is.na(sdAdB2t.1))) print(ggplot(t.2ok, aes(p/n, sdAdB2.1/(1-p/n), color = Estimator)) + geom_point(aes(shape=factor(n)), alpha = alpha.n) + geom_point(aes(y=sdK2AdB2.1/(1-p/n)), alpha = alpha.n) + geom_point(aes(y=sdAdB2t.1), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + stat_summary(aes(x=ratio, y=sdK2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype= 2) + stat_summary(aes(x=ratio, y=sdAdB2t.1), fun.y=median, geom='line', linetype= 3) + g.scale_y_log10_0.05() + scale_shape_discrete(quote(n)) + scale_colour_discrete(name = "Estimator", labels=lab(t.2ok$Estimator)) + ylab(quote(sd(hat(gamma)))) + facet_wrap(~ Psi)) @ \end{center} \caption{Comparing the estimates of $\gamma$. The solid line connects the uncorrected estimate, dotted the $\tau$ corrected estimate and dashed Huber's small sample correction. } \label{fig:sdAdB2-1} \end{figure} \begin{figure} \begin{center} <>= t.2en0 <- droplevels(subset(test.2, emplev_1 != 0)) print(ggplot(t.2en0, aes(p/n, f.truncate(emplev_1), color = method.cov)) + g.truncate.lines + g.truncate.areas + geom_point(aes(shape = factor(n)), alpha = alpha.n) + scale_shape_discrete(quote(n)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + scale_colour_discrete(name = "Estimator", labels=lab(t.2en0$method.cov)) + ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) + facet_wrap(~ Psi)) @ \end{center} \caption{Empirical levels of test $H_0: \beta_1 = 0$ for normal errors. The y-values are truncated at $\Sexpr{trunc[1]}$ and $\Sexpr{trunc[2]}$. 
} \label{fig:emp-level} \end{figure} \begin{figure} \begin{center} <>= tmp <- droplevels(subset(test.1, Psi == 'lqq' & emplev_1 != 0)) print(ggplot(tmp, aes(p/n, f.truncate(emplev_1), color = method.cov)) + ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) + g.truncate.line + g.truncate.area + geom_point(aes(shape = factor(n)), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + g.scale_shape(quote(n)) + scale_colour_discrete(name = "Estimator", labels=lab(tmp$method.cov)) + facet_wrap(~ Error) , legend.mod = legend.mod ) @ \end{center} \caption{Empirical levels of test $H_0: \beta_1 = 0$ for \emph{lqq} $\psi$-function and different error distributions. } \label{fig:lqq-level} \end{figure} \begin{figure} \begin{center} <>= t2.25 <- droplevels(subset(test.2, n == 25))# <-- fixed n ==> no need for 'ratio' tL2.25 <- droplevels(subset(test.lm.2, n == 25)) scale_col_D2.25 <- scale_colour_discrete(name = "Estimator (Cov. Est.)", labels=lab(t2.25 $method.cov, tL2.25$method.cov)) print(ggplot(t2.25, aes(p/n, power_1_0.2, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.2) )) + geom_point(# aes(shape = Error), alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.2$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-0_2} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_0.4, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.4) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.4$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-0_4} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_0.6, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.6) )) + geom_point(# aes(shape = Error), alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.6$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-0_6} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_0.8, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.8) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 0.8$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. 
} \label{fig:power-1-0_8} \end{figure} \begin{figure} \begin{center} <>= print(ggplot(t2.25, aes(p/n, power_1_1, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 1) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical power of test $H_0: \beta_1 = 1$ for different $\psi$-functions. Results for $n = 25$ and normal errors only. } \label{fig:power-1-1} \end{figure} %\clearpage \begin{figure} \begin{center} %% now (2016-11 GGally) works --- but fails with new 2018-05 ggplot2: <>= pp <- f.prediction.points(dd)[1:7,] ## Worked in older ggplot2 -- now plotmatrix() is gone, to be replaced by GGally::ggpairs): ## tmp <- plotmatrix(pp)$data ## tmp$label <- as.character(1:7) ## print(plotmatrix(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5)) if(FALSE) { tmp <- ggpairs(pp)$data tmp$label <- as.character(1:7) # and now? } ## ggpairs() + geom_text() does *NOT* work {ggpairs has own class} ## print(ggpairs(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5)) try( ## fails with old GGally and new packageVersion("ggplot2") >= "2.2.1.9000" print( ggpairs(dd) )## now (2016-11) fine ) @ \end{center} \caption{Prediction points for fixed design. The black points are the points of the original design. The red digits indicate the numbers and locations of the points where predictions are taken.} \label{fig:design-predict} \end{figure} \begin{figure} \begin{center} <>= n.cprs <- names(test.fixed)[grep('cpr', names(test.fixed))] # test.fixed: n=20 => no 'x=ratio' test.5 <- melt(test.fixed[,c('method.cov', 'Error', 'Psi', n.cprs)]) test.5 <- within(test.5, { Point <- as.numeric(do.call('rbind', strsplit(levels(variable), '_'))[,2])[variable] }) print(ggplot(test.5, aes(Point, f.truncate(value), color = method.cov)) + geom_point(aes(shape = Error), alpha = alpha.error) + g.truncate.line + g.truncate.area + stat_summary(fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + g.scale_shape(labels=lab(test.5$Error)) + scale_colour_discrete(name = "Estimator (Cov. Est.)", labels=lab(test.5$method.cov)) + ylab("empirical level of confidence intervals") + facet_wrap(~ Psi) ) @ \end{center} \caption{Empirical coverage probabilities. Results for fixed design. The y-values are truncated at $\Sexpr{trunc[2]}$. } \label{fig:cpr} \end{figure} \clearpage \section{Maximum Asymptotic Bias} \label{sec:maximum-asymptotic-bias} The slower redescending $\psi$-functions come with higher asymptotic bias as illustrated in Fig.~\ref{fig:max-asymptotic-bias}. We calculate the asymptotic bias as in \citet{berrendero2007maximum}. <>= ## Henning (1994) eq 33: g <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) rho <- function(x) Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0) integrate(function(x) rho(((1 + theta^2)/s^2*x)^2)*dchisq(x, 1, mu^2/(1 + theta^2)), -Inf, Inf)$value }) ## Martin et al 1989 Section 3.2: for mu = 0 g.2 <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) lctrl$tuning.psi <- lctrl$tuning.chi robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)}) g.2.MM <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) 
robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)}) ## Henning (1994) eq 30, one parameter case g.3 <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) rho <- function(x) Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0) int.x <- Vectorize(function(y) { integrate(function(x) rho((y - x*theta - mu)/s)*dnorm(x)*dnorm(y),-Inf, Inf)$value }) integrate(int.x,-Inf, Inf)$value }) inv.g1 <- function(value, theta, mu, ...) { g <- if (mu == 0) g.2 else g.3 uniroot(function(s) g(s, theta, mu, ...) - value, c(0.1, 100))$root } inv.g1.MM <- function(value, theta, mu, ...) { g <- if (mu == 0) g.2.MM else g.3.MM ret <- tryCatch(uniroot(function(s) g(s, theta, mu, ...) - value, c(0.01, 100)), error = function(e)e) if (inherits(ret, 'error')) { warning('inv.g1.MM: ', value, ' ', theta, ' ', mu,' -> Error: ', ret$message) NA } else { ret$root } } s.min <- function(epsilon, ...) inv.g1(0.5/(1 - epsilon), 0, 0, ...) s.max <- function(epsilon, ...) inv.g1((0.5-epsilon)/(1-epsilon), 0, 0, ...) BS <- Vectorize(function(epsilon, ...) { sqrt(s.max(epsilon, ...)/s.min(epsilon, ...)^2 - 1) }) l <- Vectorize(function(epsilon, ...) { sigma_be <- s.max(epsilon, ...) sqrt((sigma_be/inv.g1.MM(g.2.MM(sigma_be,0,0,...) + epsilon/(1-epsilon),0,0,...))^2 - 1) }) u <- Vectorize(function(epsilon, ...) { gamma_be <- s.min(epsilon, ...) max(l(epsilon, ...), sqrt((gamma_be/inv.g1.MM(g.2.MM(gamma_be,0,0,...) + epsilon/(1-epsilon),0,0,...))^2 - 1)) }) @ \begin{figure}[h!] \begin{center} <>= asymptMBFile <- file.path(robustDta, 'asymptotic.max.bias.Rdata') if (!file.exists(asymptMBFile)) { x <- seq(0, 0.35, length.out = 100) rmb <- rbind(data.frame(l=l(x, psi = 'hampel'), u=u(x, psi = 'hampel'), psi = 'Hampel'), data.frame(l=l(x, psi = 'lqq'), u=u(x, psi = 'lqq'), psi = 'lqq'), data.frame(l=l(x, psi = 'bisquare'), u=u(x, psi = 'bisquare'), psi = 'bisquare'), data.frame(l=l(x, psi = 'optimal'), u=u(x, psi = 'optimal'), psi = 'optimal')) rmb$x <- x save(rmb, file=asymptMBFile) } else load(asymptMBFile) <>= print(ggplot(rmb, aes(x, l, color=psi)) + geom_line() + geom_line(aes(x, u, color=psi), linetype = 2) + xlab(quote("amount of contamination" ~~ epsilon)) + ylab("maximum asymptotic bias bounds") + coord_cartesian(ylim = c(0,10)) + scale_y_continuous(breaks = 1:10) + scale_colour_hue(quote(psi ~ '-function'))) @ \end{center} \caption{Maximum asymptotic bias bound for the $\psi$-functions used in the simulation. Solid line: lower bound. Dashed line: upper bound.} \label{fig:max-asymptotic-bias} \end{figure} \bibliographystyle{chicago} \bibliography{robustbase} \end{document} robustbase/inst/doc/psi_functions.R0000644000176200001440000001026714124272532017163 0ustar liggesusers### R code from vignette source 'psi_functions.Rnw' ################################################### ### code chunk number 1: init ################################################### # set margins for plots options(SweaveHooks=list(fig=function() par(mar=c(3,3,1.4,0.7), mgp=c(1.5, 0.5, 0)))) ## x axis for plots: x. 
<- seq(-5, 10, length=1501) require(robustbase) ################################################### ### code chunk number 2: source-p-psiFun ################################################### source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) ################################################### ### code chunk number 3: Huber ################################################### getOption("SweaveHooks")[["fig"]]() plot(huberPsi, x., ylim=c(-1.4, 5), leg.loc="topright", main=FALSE) ################################################### ### code chunk number 4: lmrob-psi ################################################### names(.Mpsi.tuning.defaults) ################################################### ### code chunk number 5: tuning-defaults ################################################### print(c(k.M = .Mpsi.tuning.default("bisquare"), k.S = .Mchi.tuning.default("bisquare")), digits = 10) ################################################### ### code chunk number 6: note-p-psiFun (eval = FALSE) ################################################### getOption("SweaveHooks")[["fig"]]() ## source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) ################################################### ### code chunk number 7: bisquare ################################################### getOption("SweaveHooks")[["fig"]]() p.psiFun(x., "biweight", par = 4.685) ################################################### ### code chunk number 8: Hampel ################################################### getOption("SweaveHooks")[["fig"]]() ## see also hampelPsi p.psiFun(x., "Hampel", par = ## Default, but rounded: round(c(1.5, 3.5, 8) * 0.9016085, 1)) ################################################### ### code chunk number 9: GGW-const ################################################### cT <- rbind(cc1 = .psi.ggw.findc(ms = -0.5, b = 1.5, eff = 0.95 ), cc2 = .psi.ggw.findc(ms = -0.5, b = 1.5, bp = 0.50)); cT ################################################### ### code chunk number 10: rhoInf-ggw ################################################### ipsi.ggw <- .psi2ipsi("GGW") # = 5 ccc <- c(0, cT[1, 2:4], 1) integrate(.Mpsi, 0, Inf, ccc=ccc, ipsi=ipsi.ggw)$value # = rho(Inf) ################################################### ### code chunk number 11: GGW ################################################### getOption("SweaveHooks")[["fig"]]() p.psiFun(x., "GGW", par = c(-.5, 1, .95, NA)) ################################################### ### code chunk number 12: lqq-const ################################################### cT <- rbind(cc1 = .psi.lqq.findc(ms= -0.5, b.c = 1.5, eff=0.95, bp=NA ), cc2 = .psi.lqq.findc(ms= -0.5, b.c = 1.5, eff=NA , bp=0.50)) colnames(cT) <- c("b", "c", "s"); cT ################################################### ### code chunk number 13: LQQ ################################################### getOption("SweaveHooks")[["fig"]]() p.psiFun(x., "LQQ", par = c(-.5,1.5,.95,NA)) ################################################### ### code chunk number 14: optimal ################################################### getOption("SweaveHooks")[["fig"]]() p.psiFun(x., "optimal", par = 1.06, leg.loc="bottomright") ################################################### ### code chunk number 15: Welsh-GGW ################################################### ccc <- c(0, a = 2.11^2, b = 2, c = 0, 1) (ccc[5] <- integrate(.Mpsi, 0, Inf, ccc=ccc, ipsi = 5)$value) # = rho(Inf) stopifnot(all.equal(Mpsi(x., ccc, "GGW"), ## psi[ GGW ](x; a=k^2, b=2, c=0) == 
Mpsi(x., 2.11, "Welsh")))## psi[Welsh](x; k) ################################################### ### code chunk number 16: Welsh ################################################### getOption("SweaveHooks")[["fig"]]() p.psiFun(x., "Welsh", par = 2.11) robustbase/inst/doc/simulation.init.R0000644000176200001440000000530212321016045017410 0ustar liggesusers## initialize R simulations (also parallel workers) ## need to export the variables N, robustDoc and slave if (!exists("N")) N <- 1000 if (!exists("robustDoc")) robustDoc <- system.file('doc', package='robustbase') ## load required packages stopifnot(require(xtable), require(robustbase)) ## load more packages if this is a worker if (exists("slave")) stopifnot(require(robust), require(skewt), require(foreach)) ## default data set dd <- data.frame(X1 = c(0.0707996949791054, 0.0347546309449992, 1.30548268152542, 0.866041511462982, 0.275764343116733, 0.670798705161399, -0.549345193993536, -1.00640134962924, -1.22061169833477, -0.905619374719898, -0.678473241822565, 0.607011706444643, 0.304237114526011, -2.14562816298790, 2.34057395639167, 0.310752185537814, -0.972658170945796, 0.362012836241727, 0.925888071796771, -0.595380245695561), X2 = c(0.119970864158429, -0.738808741221796, 5.49659158913364, 3.52149647048925, 2.02079730735676, 3.82735326206246, -1.24025420267206, -4.37015614526438, -5.00575484838141, -3.56682651298729, -2.82581432351811, 0.0456819251791285, -0.93949674689997, -8.08282316242221, 9.76283850058346, 0.866426786132133, -2.90670860898916, 2.95555226542630, 4.50904028657548, -3.44910596474065), X3 = c(1.11332914932289, 3.55583356836222, 10.4937363250789, 0.548517298224424, 1.67062103214174, 0.124224367717813, 6.86425894634543, 1.14254475111985, 0.612987848127285, 0.85062803777296, 0.881141283379239, 0.650457856125926, 0.641015255931405, 1.51667982973630, 0.764725309853834, 1.61169179152476, 0.596312457754167, 0.262270854360470, 1.24686336241, 0.386112727548389)) ## load functions source(file.path(robustDoc, 'simulation.functions.R')) source(file.path(robustDoc, 'estimating.functions.R')) source(file.path(robustDoc, 'error.distributions.R')) ## set estlist and parameters estlist <- .estlist.confint ## nr. of repetitions estlist$nrep <- N estlist$seed <- 13082010 ## set errors estlist$errs <- c(estlist$errs, list(.errs.skt.Inf.2, .errs.skt.5.2, .errs.cnorm..1.0.10, .errs.cnorm..1.4.1)) robustbase/inst/doc/lmrob_simulation.pdf0000644000176200001440000220303414124272534020227 0ustar liggesusers
Kؒ/9tEt3ęy3ƙyYÙ YYYYYY l,Lxs|9dγ1\Fhm⨛ G_u0fꞚԇ3Ԅyt3}'~>i*_1A&4e.-'tc'V%XI&zh64Z81c)TQ*]_l,)LE9|+{4B&i N8d8@{JTPҠ"| FX]2kQ a1:JO@òqh .,")hѪeh5"5uf zkFlc9xbKCv89'`h :HV]>D21S+ZZ(K*feytQWQU˄o6]Y> 쬙}A4&Yk9?>рxSZq)|SD̤`JuG. N<'t>^.LPD\Y!aUĕ;AFIl&+ee&"Q(⢽ǬrPrvKx}Y4|=Zx.Uߧ@xi$)ϵm]eFv';|+W%ޗ@OA0D!FXiTFL&hv90G$;d5*h9>y!4/ǃwE'(iXIϨ j C`i? -I8ѻ4="pw,AopNyh5"QX X9Y :GQJDeb{?W T;!FBݘ\aֵ֍0v&ljז5o1 f3QޮuB!ELJ:vاع?lhlb؅F.#,(4f: mІuU5jQb[أh9DcS}'LUUlAЌF4Z~Ήڹ< $b6)XfLX.g~zWP]e8U+Dh* L62 IA˶\2jcH\4zκͲq}h9ФBʬPvLt0(~x54E/ٞHCނ5w4#>e}PW{O.ou5sMv=w(X DiybǕ|GSu4tE6lءi)C(ɤW#X]'[Y.Y~f5.shI?"w ]*|'%]F]44BQ#JcT!݉g.RgceTQIߊ6κRbKuw$xEֺ&vp&mst|9`o(7*l yjؖ ǨOtbjˣ#7y'8GLA~=t~j_~Ȅ?,XߡIH4L+TȘ~QSI-zmLEHje9Kh6\c\6;}N8766_Ys -w6F>!=5Q?_:jS/3à:u! ZXVf/3f O#^.k29GВ V զ&Ng^̙ KWٵIFѬg_C{P*rhи2wyS߻v|>(n_@gynZHGMWM~GZ/v.GTl"J^Q_o8U5'$Q^ C2{&Qa&[>3MB '$J}WI!56vR k3FZx&>n^";G^Wu~HC%/)+/$VCE߆^*"'Ng[$Y)vEeϦ he~~S54,$]6Y.ޟJh\s\2T̵IlO,X>Օ+<56l$uzL9[!Z CXqV5&NoVki @>E>V*)5m1+VZݹߴvUdqc*ƫJG-b=yw/df!(ACK(q8hA!c89jk&2րJr`ڲF穁}'02T'bxq4Uu5䶇mN$0y݂~u&aUgCpc]R£ӪUIez^`Nd ӛh?9G;S"m!vzr-Y#Eil3<noR}1*sm@!YiC NJ H)sER{֥2=D#vh̹CG$;u` \+jAӚNP%#?dΌ u[uk幗^qQPqsJdLXv]DB vԎں5u9endstream endobj 135 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1495 >> stream x=S PTU~oan3AJBiX b8b.,Ȳ䲳m~::C< '9֠9ZGt:sX9Ҩ#H^JƑOHޣ: -7*2p\IuxN?ʶ)+ %4Oz,GRH*&:cWa ƙ'ZB:W"a1N3/ g R/k:j*kjƲ g7&p'D2vVw G ,׭*̆\BJ\ ) (U 776uvt;t,pS͑iu韧}ȳVb -ۉQ|ܽ,܃]*#yCB +lUJo%*Z .xnG~BᵴE@ FwZq_b;(9+k/.Tt Nu\73*{$oa{\c%QFUdUS0J#yD  V1}cLBz~*[}OĂ$d> stream x] <o0dE/4F}p^Gۤna1L!L-Ɂ! ;_{3I4Ig"Y~07jN> stream xLSWߣ>m:іLI s ǘkikZDZʏBOEhKi DFECLtˌ-YbʝfO,ۿɹ7ߓs>ߛC1I¢ kdŰy9_-'6._Xƛ d ;r'p%x^:=rBXpCIAW-e* v P;[HzEHw%t١̠14ڝvM`jZ՞=vpTjPϚ_>R&+h0JJdpgh(H(=CB?f0֋d )_vο)!N= E5룾Cc!^F.Ғ<@5>Rhvp@ 4vyqV+q. Tgګq|r.sm=dSjxݷNſ} E8^U{dst$eA/nDCǧNu lVwm!( lŗSC%(9nD2h<ޔw*1@uaxN`u?u0'ffnόsAD"5l0;QrᝧGGVw=>y.i P֎v|c.ǖV,"#u82ǘ0?^B!nC4 A ;l\a03K}sj aB_bendstream endobj 138 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1510 >> stream xklSeYzcq88@A#87:붲=6֭ueݭ]GWAx c^Bbb{c|&I<'88.,,.~uݭnqjrcvGbl/,~I_6d +=HXz݋4Q,n{B٦ŏWW].~jTU_-%tT&Sz)&~:V>bEKKKDWj[.nۤjJ#L*4[)iJ\䴤Y]_+`6GTooرh!mƶb۱jlevұK6mAZ`t_/8kl!Qq{F*:O0 H&ϝ=1QM /l-::mg~>!1l&THj@~8Kgo>FTN.=\ + FtofȻ'̭fntX*t1 ,xGQ"_O'PrU6?OdlAkb>ZYzЕF$*D^G%h/>\J&4W^]})X7t>2u!V,ji{Lyj`ɾ=Ų-T@_e΀9!& d? NDNvf|C'qG {{tͽXmE(5dLk#^2ݼ}}99-@gE&Z|}`xd0AvYkܲw~7t:wlF) F G+]&ŝwz L,D߬0; 2*{!MrvYGӳ"7vM&M'Mz]k=@? ֻMTn8s` #X&F~!gt3znd֢5)kYȷ. Ȧ\E3qe%A9[=2v!&~oޯ1ER糱A렋6qm[? 
zFJڧS˚Mp|7iK6@;h]6h1Ҥ =zM :νևZ"+̒O v4tXS/HrǹR7p\{anlz{W*>'1d mAyP60iJ 96݄^9Vr ji[(>FԪr jebW8L[@xo%PJMT#cqA\OQEG8EHx0 0!q B HVL)ح^zs3ׄks.5zL׏hw&5?,ػB>Mo݇y(Ba}k_E&?99њ`ї<>DB Q!_pչdfCYs⮬, 7]1endstream endobj 139 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1292 >> stream xUS}LSW*['Gds!(eMPQm#PlT zhR180M3q4QCt[=s}siJ@4_jsvxHCKlhe]dU;bTjR.(4#Lp4gAi&T'5u:ISw1*NOuN&pKӹlqI0+ȭRsWhl|Y n ue6A[vg+] ml;k2Q%p̕Z" }arSڐ"q 2z٠\lTTIll"1mC7HhB ttIGc+t8HG!sFu1KNNOU94SC(_endstream endobj 140 0 obj << /Filter /FlateDecode /Length 170 >> stream x]; D{N7&i\$\/Ea@} (ŠAoy?2[ yh kB&T`揫':2_t|" I+0ZFJOZ!ڎ7Oj/'YTkm,RQ~O@\S"+x+> stream xMKAg\3>=sT0.eD_B7 qhwm[D``&As?Щ0!>K iӆݖqYcMCq?h!ĕxQ%-"-TouV-ڳݑy5[q7ћ5y~) ~endstream endobj 142 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 473 >> stream xcd`ab`dddw441H3a!;'O/VY~'Y_ݙ|=<<<,+}/=K19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMUUB9)槤g2000*00v1012Sw‚ ^;;)#맑hwO)M]m5r-jYL}\{a]f0{.ǎ1iU&M/ 9n.]]ҭ};iKsNKvsNK~w;G^-mnŚ+w6u'8Nj,.-..7iYAwxwz]wn7_قSM^wT\gXp.bendstream endobj 143 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 6190 >> stream xYtW!4@f @B1-K`c۸ȖdK,[[,E H)`A HB*,ȦY9H잵sl}~W{</lkl:Tn$Ջ{$W !.^̰*|пwȦ!ܼ@xp2%% (91);#Ι3kb)SD.JK%ŦGNJHFn%'dK"1w似شh(q#󒳓"7%d%r# ӳ#Ŧ%D>r5/Ki9 ȵQ:A d,\&Z+޿F.a 6%oޒ55mۄLo򔂩d_1zLܿ<;o  _ij&b,Gl!ۈbbD$ˈrb J$jbI%|)b1xH,$b(Fp'z# A aD_1Xa—$<o5LŽ]sH[y}^Ro W~+uxjS7NhxЉ+kțC+a/ vlgtNk;1yÏEDG".=mxQQGuY򌙙sZ=Ur_ѺF#PJ2dƒ,R  A{t6C-Y@Ug(A.))M/A{sHe\>^DRHP$yR(qї\o/wBJz#6'|D&d;k{`vZm_FP;ž%!ZjӭM>ԉRqzPb/*/m i{Ih0!Q_`? 9C-K'-3r^pSP(F6\C$6=8Pӕbb3B~*fnqN^ȉ2rCNhfH䅓i{kEma {/:eӞLq&L8Y'& FT:ڒ&_lƿw]@?m "Nd؃>A 2AF{I8kvB fu$jCӄht4 e'>|q#h[):پFN>gr0Hk/x1WK( X  f`8 tRQ,]s6K tIKGC\ P՞Ƴ|52kt<Ӈ`lHRt@E7[b,s/EfH@ p'yA!|ܖpzJn&P bg>f쉒@ /{v_V0ΕҀXPPyU^üxptSGjؔZec}OB!Uj`je k0Qe%U*i 8w\[0Gp^4t~R%i),+2)8yKDFpPk@ Jё)"4wgL x= yj¸9‚D'['Cg7o$Ϣ[֤jP"j@ pQ wgaI~,ᓧqMU(*I hRW@p~#x8 uQ}hGlFCq XP^dBIIP^b(: \8:?{y`@YQ yhv, SkyTkeZd/h<ۛh|hW zo2XrM_ЪdmAb^f$@mK脃Mc"pNGm+4jv Z랐.6zM@3{hA* ރ/GwM$@{ ҈j&G .8md|TnOqnG36?'e‡opȱjUi:YfcKG p^^ ~hy4]K~8mU[!)0t3\wy Ed**~{49XZ  8 {P/qi/9/Nqk/ˎ7.zn<%9LJi^ FJ<FVJީW*fW`yx**ciLO=PϾ0mr;ըaٲX@+{bC-e4{D{t؇ޝynkYq:6H_uxvˀu ,[l\yz~ ~ttE-cQ=vKO:~t}^O?CPΔf̶ȭrE4HO(:v}@!*_ƦR*ސ "0\['xj::Oy~Ti7̎JsbeψRn4|xEuyU$`Ъ_h3_햨"}1X z#[U"t5+<)KMK5[@5T jU])$蠗-^!ֱ=v릕?_za]>7@׋kBQ'Ai4$$g5+}@1! A>&8C$$*Z=Ʉn35lsF3䵉 Z~<2ZNJ)3MZ,1ۏT3?T1 ${?hĖTʘjs @nGֿ>p]FYU~ jr Kv~7,8+ܞk5,z#8HMx~AxN ]CET[zw9` q5;' |jcˁ`{oo &$~p}kg4-Ϙ}ѡC2(Rr{Ce=f!kiÝ_XN@9JϽX<WWmǁ)S`&.sk|n0-[zzU\P 7%ZF.^TZq59J@CE5))99-!6/=}LrW޿?6PFUyQ `KZ(kKeV8suϵphH yb0 mB(?gHc }KO0+gLEڱ~ 0Iis5FC`XQ͹\4FSzWnlU%) /qO4 *&m'ct-= @{~SF其׍·n g؝JWn~Ba=\HGj[f &p) qЛ}45KW3^'ɯm5 Sw̑ 裔P|LUB] dU"+AˬAFYحcC?gso%^z-VYoeZw 6dQ+3$Ӌb49:l._+IҒy(Pb/W۬B&R|DL-YiaX ^/Û(w[M{/Z0.-k69jsۋ&;=o ^V@#unE&l]=šcpXF|znLB:{ #M۠1Zf0|:d,&s]!JL^{@e `R-"hb3Ur#YDY ۼpwlIm.!E@W%Š@5.sLv;syK+zڶn0i2VêJV l+p8 vg}]8F0ޢMN Nt_ Jڭͦs=iY5yMM>Ӻ^(x KLF;6UYXUǜ0<yw(u70rr<0"~_Q, wyHڛv> stream x].Ǖyn` \C23ۖ@4EOwC<Ԩ5ϞX{,RTm4 UffD|8_ٗo~)Xt=Sy÷?Wo·/ޜC5?O|~[GΧ|>\Si]D7}M`OqBOho|Zۍ?Qe񔑦?[b{UVA1%?ڕtש`{3xH /:o ZD>OmuJgC@idE::W79.=)]Oъ&꥗"d.J%>]J}:&<K,pG/J1ozIfdMn(*ΕF9 Z{Vyniʩ\nܪjzKOa/ '^.tҟB( pImJR$)U1+N&kZBS"=z;=GD%Pl \4ZdTr 2ڇ:lKMqKx6'~>l>BI^njf_k - j.7/rL( x4C= JJ+I#uLl.KS5/ Ҝ,b=tF_y jIc3#.s#.ҳ# |ci .K87 3ow=lT߁S,WiЀg*TOiӏ:娴7M7]2uI%z^|5Ĭ+V.' f 8ǮT;LT`jjNLTh P`&Tz+vXuJT2:+;ǚ{TU)p; [ڽcn %Zh p ,_4Sn3V7[:*Rhk2_`Af3LY@M/dֱ]ګu\2eTth[䴐hKXٱ& !K+Xϧ[wΕRy7v7Y49Eag˙V% o3+hL7ПVuJV7ٺq.K<͐S֝uXv΁r(*b 5k3Pk,?uqEY]Vw?|Rρ]\j\J0/85J6/9A:]J \kJ6+a^Xe c.*LޕvJbx:@! 
*w/etHr61]ę)J "[z%.L ESļpym7ھhZӕ1P]սcåUE>>^պ踠;6K.&Kլ[6pvL1a]O\w5?}زw:ց"^43SZVX*h}:M2{܄)'>s`7S/mQӦ 'h7f]3M8>43aR99o%_lju0ޅEnt*XPeaV5]iX ?RlꔱCCb #?|M#>HX`̆i`Pܕj> uC{W`> |M^+ۻQ`Лռ}/U8l4}(I<i0GWYWbknyc'xSlL ˠ?Dꦁ}0 #- RF]ͱa}5 -aZp5X(*@lw |꺜3h+:ͫ.,=$[d;BM/,W[ j\6fUmĚs})$}I#^>˜<\XЖᰗ?K(ǥU[ 6ARU:d!i݄n ,bqe+9HEL%KsLo9BoVbɅe#O^(,_0gBhV7!Djxhk}(ME'/ Ի [/lS`)hqB mmk˲|As! J~#x! VC`*%%K 3՟²=JP'¡6tJ}pdMb}|F[WY1[[!6 ^J!xN@*TB1Sɰt*],x%1 ^%L`g]JU<:{Kb)ɼURKXiTЮ!M^'vK=:RoFX[(ͳֿh҈l8fnasu}.d)wxl{]-jTy]HvȑM,oqUU>ԝt&9'v\bn kآ:8a-hJF Muci.m\;6Bw_ޢyHI-]Xk3 L$u5y/l[[viL"98'эBWK`=4ƾLlzQvmy~(΋.)%S MVاfyY_T6Ax/x r1)ڟ<a$Is> į`֊8Q3[kTrqqaIVg'ii(uw+*Ѭ4+Fo+J0+J&vf{eU24)^IIҤ9TJ9֢DziR t+pΚi[ފCE -dF,J{ZYd{dYyt5Mwҝ4eR#/ƫky9aUڝ]= izӷzmRzME[tJܼT=+ؼT@)j8خJ6m^BL> [~k-EIl li^wؼT{޶4e"*V/58gKcm6/\V%m^j}`4X[pzQ%̡&K;gb&X4eӱ77#V#n)d[]+kKڌJ݌u-Mٌڱu۬JH}J~`Ľ(a5RWj9mv(i5R81+׹+i5R@RwjDL-F GߪQnijtaaMӎNN}%êN[dkVSnv~(6U[sJ<6;adSy4'%],/J/Eȶ$(iuSusSyVln MXZڱ \qKcm67MXSk[[TVnn5Mؽ 4!ln ؽsi(ysSǬtnkNM iy)f'nR6?EK,JOE bwVmϓ Z +ܹ֫ kxO12mu{V',~J!l~JfgSBR^)[I槄*u6V?$uS}V!n~V,me,JHOX4y]J{R] ,yn~g͡kMݳ)_qB^W?!HiB ص !B&o~BX꧜OOڄ)<=)uudW7egVRMB맺wꖦmnV&Dm&lYnB&ˮn*;uMsM[!:V%˒Q+ eKь'yi-MTҵ.qN%N%N^sUf'J DUiJ 2wW槂=qmW.s![TsS= i{V?!`SE&4y?bkxm~1o~AO5]2wW槊+ܳ کsS{Vl حv0ϖ&]i*,JT=i6;;^f'ǔ-Ͷڵ^ԺyV;j-EɛZhZ֕l;pBZvຒ6;!h)M YnSd\MogÚM Yڵ-Q67⦈8ey+iuSWꦈxA\斦KD\Y]\qy±+auSW=lCxŹ*iuSWںꦈ kK#o{pE)Ok&poMRֺ*e5S<օ/wevfk5SWjԭtiYrf󌛙sۄc]JX{L8.{iBt GjW3qۄJtL:kI/Jts3 ¶r^:+X?T8aV^ Κ7/9^ED4ꥳKg6n%"xu@[y KFV%o^2Ś-M^EkyKS׽xsq.4>\b"ҪJؔZ(kznps_ Vt^2Ŗn^2E\,G0.V/!la.4e0ks4yRںQ1X.C\K}8'ZH-Mk:D%ZKOkLkQ'Z8ZAkC7%{x&z& IgX?SZMk:}Zʀ8EѲ7< +s[µ[s<[dx`WMli~s[N??-% ʋ[#ŏĖàR@Rgс-yl_G2` >3[ <1[RL:?`^-ج3-8Głu!K![INȖu9-ɏ,dKüR2:-+SȖ8fT;"gdKqDݝȖt [ר#[R:%-z>R$fyM1[֠l\ Fقȃق!)lIٳȖ\Eh(>f*bmbFقYfԙ-hAXlg{b2 f 6->Ef ^/]- Æwx"d (D`O%?,f DIg "1[#,9j~-8!-؂R"[pV!Bd C1[(B -8@p3[/C#9Q+'ʦ-Wق]ə-ͅlAl;ؑ-98ȑ-Nx-XgFق`qfԙ-qjg Dg *"f>,]"f "uwA[̛]NpfˬexuRl2[fŘ- fˬ2+L-Bjˬ2+L-Bjˬ2)¶ -Bl˭8eVmb[&EؖY!eVma[fؖY!eVma[f%ؖI2+us[f%>mBNܖYeRfmYip[&mVܖIIV2)i ep[&%nVܖ[qnˤJr+mq[&%oVܖ[qnˤJmL-R779e&܂[&%l7-I eRfni[n-7; rPeRDni[&%lvIieRfAn'LJ4-"t$NnGLJ\4-"v$M2)ms[f%onrvˤ2+dJY2n`L-V7 vˬ2)J^4-"xˬM2) -RV;9ej'LB>V; zˬ2+mӠJ^4-"zˬO2)J\4-Yk2-"|ˬ2+y[&EY!eV'ǷJIeVVL-W;9eR\f! ."ˬNp1\f%ˀL .7;9Vaߵ@\(& w2 n2eq+NqyRMDqSaDqA$¸``]m!epby1.89+D ).dQ\,NqyP\pg̝ÿ|I7;%rJ̢22؎#?$ f{Y ).o Ni qwL"K~:!. 
!K&Jq _NqA!d͸؜[ b/V#~vBQA2!%ca.'y^ѴBdjдBdD.#4.&a#m@ϊJ ⒱|R%ЄlMq3BKNz!.Q"mjd#Hj%#&4 Q\CˍuK*[ץRv9GHl qɉdMüb[ˉVA\2x ,vA\&H .˝ .4d, BKD0B\ d&R !.l-qɶ+EfPW\B\d\A\2-1\rt3\}&VA\2טKB\ X i6$ 3IKƢ9M+Kzbd,"KFҵd{h3%^ < .U`* l*p1КqC|2\^_ paS!iY\2'p15-+K1ob%gp13Ԋb:[2г[`][KNi%c+-|Kƾ*ķdѯķd4ߒqJ 8%ky[2Ajo3X[b9~}OoHohY[McѲ][2N3%c%_Y_u$D ߒ*ķd@%#ւ-"OCӳbd!d#!vK)d%c|B% -!EI=i.`d`&n1> 얌a"*vK'ݒqړw!%#f/-F|޸[ x٪=z:䖌St-Il"Dncž%۩@6S ǎM䖌*-]Q#+pKƬ$Kь ܒq-bLBnKƴ-S+-]Ib:b_@ÊےfBܖP8d 2m'<-Ml'm1 5q[2Xr[24-VVG ےj&Bؖ8{5a[QH ے1g&lKa{UؖքmPے6Bؖ-l#m e%#,TEfD%cIݚ-!-q-AmɈe&jK>N֊ڒxDϊڒ pRjf֩-Xb3Ԗu>ڂ%0fB* z֡-̫ڂ:G̖VqѰl1[Fn͙-]b-[cq43[x1[/N: 43[8iblA@{Bd ud ٯ9+#[z֑-'ӑ-oud ִL8ޕ-Xg؂g؂l&Ra b+H-T1*\ WKs\K[Z`?zUk7pNu\KuXmDk;ˈւzi-"ukoZN[" X }:R+iւ!ZY0~ ib-6/#V M>X-8We`bS̨X-:b`y2``mdi%sIV-Mj)?V zՋVpd"V S " TKj)fQ[bjTujȥ:Bs)TM:B`mjڔvZlm*S!֦,jɅAjAj ~I-6UbkS I-Z1H-6bS|@Z `QZPuZ8UyZ%\ dԂ=[b> j)>@-5&A-XibkS|:ZuZ2s)PKPM:s)P &n8-v֦,iBM Ĵ`I:Ŗ"bZlm֦,i̘uL ֦h[Ǵڔ1- kS)-7;(-|2ҒN扈[5hځh)>-.< тu)6Bز(D-Kلh:Meh눖XuDK9#V"`YJ%.B-KDh)fS-hADnb S -;-2ըЂ)nh=[uh)>-XggD|[eg)>,Xb|IXg)+/YB՗o > ֦عؑ<|[b6g)>,CfC!>-03N|<M|<%"<ˀ2z,|bHĐ[;L@ N.y[@sd@z@sd)>b,IYtKj^scId`,/,bX,8C0bAtKwKf(b!Fbxb ܅Gł4d`X,1'ӈł* XF,fuX0ubH\ 1zX0ƏJC F׬g< ggxҐłi Y,cبł`2֖X0GbXuf,XppU( yQ,' o qł(>Xy3kB1|b(c>P,U{$ 󠸣XlϒzXp&_e,}Q,R4P,XQ-G#sbB ?$ +F8+0*XGNb1\!"n?$DĂ@\箐ă(&"`K$X,| A,NT*5b LY{b#C@,ow BuXbw x.XOu:B%xL r5 ;KXgŢ楐Âc6ax`QZ\X.;|sX-8,X)bnbAĂ |ĂD UpEVxL>p iD8L X+WA,Yb$@,eaXl6w Htq҃ÂmNdaA{`B ڙ X;Âs<qBLjr2氠F *f)`O弼or@ 퇛 z‚[|81XNAP`(Y (WSD_qS +?8QIEE "b .W0BT糝SlJ ΅#''XT.qy݋;_k`n /٘;;E!%!fQ`+v.ŎbW%!_Iͧ`؇*mEK`I-,+ aQIn<@)D$hKl `I:gKW`vKG<"X/ %!CK#0ͣq`I8a,)S'KB`I6  Y, 2XVb;Dv`I`IR , &%a-H9M (2Dؑlw`I3ꉝ L, sjւ,v(&%7`ftC1XRZsKBE^d$ӈBKœZ2XlOPbXR=>h)İ9QX6_jQX6O’J#b;"5RXp%!dg1nӒ’V'QX cX 7İ$8p Ò0[o3%a( CD1Wq ¡8,vP39;͝E]N!a8,]94%EXR N%!*uv[ KB=M*RBK*cr+K*FbSĒ̻H, C$Tb9Ēp%%aAxX8G!$$"ޚ$jja( }9, aI˒bg/Yl'aB'֓CaIXH3%a;/0,vh\bXγ#BKyb- 5sX9TqX80Ţ(XCdWFd{)rX,>/8, &MrX[8,hE [mčÂ#J8,8$I"K-8,iĂ]X# 6YvG*b } <ĂމXybE arX 7c 4VĂS$4h|Nb]YHb8pĒt|Ò|cXlۙaXR%E  '9c8%ڜÒ҇X08kłSp_H,ɿI,xCĂ $DIDb p4$)Nbp>EHq-ĂH,9I,;(DVusK؃bAXh1  ˢł4B B "Fp-DGłbA,JP " $A Y,rD,v"Mvv|X+~ WF""GXpYX03BKڈ,H>Kb 0nƂdh0.bph<(x)feXnXp\LnP,D(W(et"k@ <(i(Yb1bP,X D(kF\(;O$,0l" >t'`MoXFҀX|&M8fK(bH[P,5]P,A{b$ĂuXZ bIr n8K>BXH,X(* xQ,X^ 1R*G'S | |p +d FRXF.P,B¬łU39bKµod.8ea`NX bO%p;Ʊ!p,A=l! ±X,ı`iReA ]cz&X4CH!kʷh,$G4D e#K Y,XA/":%\ڛ,y7%\0SXzcwbjx-b`^8Xs|,,"łHeRbZ4c=ςƂ5ma,xt߀B0C17p'9Hz刎Q6 c9Zc9D0a,W Ƃ-h,3WE4l#R43 Ƃ툼XD ,\uA`(t ^%8uX(аbzrQ,O((Q, :P,E@@b)ٹ4b)IG`#rKxwXx:X,,RxX6oK94,({+7%;hXbToK.\ѾQ,9JŒTtXFd-(ڹQ,9uŒ$ dY7%[>H,YݛĒ7%;pnX! K&4YnwM7Ģ RjK "0&K; 7C7%7%$uXJwX@,%饹A,_ox7EЉ">aQpao9o`{â* ٚ9,R&KrsXÒ)rXf2WˢrXaWWˢrX&"2 )IyHaS|尼rXrX^9,Wft$}8gE hR0˥\sXgrpX9,Y.9,E.?R>8,5?OvHarɍaXtj`XR%7/_ǰT6N/_aXA#9(,^g;%SXln K蠰dʁa:pcXکRv Q`Q\qX&)rXt͌:̺8,X`EÕKaXA\ .1," \, F'8A,F A"p TU4 T+ BCi$Wp:WÂX, 9,X*bÂHAWaiM'Rp֝E,KA"Rp kR~rXb);]Mb'I! A,n̦@,GxuR\<m$KAmDKCH$P-ڈ$#H,ob\?m$Kl4Z%`/XKKsXgA,1*^X F*^X#R# A,%jDʈAK> `$R|Ra)8QHRe)\ 9,CScFa)F0Iv>19,71,% ǰtPhlva))ha)ϢXh]*brXJdvKIJ%K *%aZɧa)8ːRR\g K6ʜng}{sX 6Uw Ua)hX8yֈRpNsJS-:<8,˼1,%0,]9W2XJq`)vet2RpP7AX &\ Rp9SX4aXJj>]D⭉a)80,a)R< ګ La)gy.C Kyo#%0,<$ @‘R(RL#RR" yE$kLb) ]ăH,9m끉a a)sR! 
[g<'#}M{Eyle@X0^e:%v!,~Ni@X&<70r#:‚ad> ,X!Q ‚yRUBXБȎCXؐ@Grjp3,8$ , ;3b$lVd1[0Ŏ,]ifWbYD1D`>2cSuCطF~'%( :ץ$v\Ǿ_C]j$&KA"Vyb}, a<)ԑ,ޔ{`(%1bXk,+7m{ԣ3X,, 3ub {,h g,J[7`²(ipˬ²(iqˬ²(i/esQXfEϝ²(W9eQsǰ 1,#eVaYFc8eRaY+2+(EɛbX%bb;eQf'Xe?wˬŲ(aX,sˢl{bg_J,YXe?DZ,ʶ@IeQf(QYfܾϱ,;eVeYmݱ,B,ˢP²J˲([,6?2 ,ʺ\EقG˲(6L,6;9eRf-xtYn,6;9eRm}Yf%ovr2ˤ\6 Y&AhYɫ2)B::,RϽf,779V2+F`JlYkP`L,679eRf,gIpYg,[Y&ApYI2)J\2eR7:,R֠gy,`g-ntY&FeV%JܼpIeVpIeVj%J[pYY/gy,">ˬKgZf%n^r@˭8eVQZfm^r@ˬKhZf%b2)aY)kJܼ䄖IIk J~ڄyIYHZNhy -"Bˬ5XlZ&EY r+Nhy -HˬKiAZf%m^rHˤ2+qCZ&EY vYQZf(-7#2 ̊QZfa %e5mFrLˤ2+q3cZ&m2+y5JX40-V# LˬH2)´JF8eRibZfFY eRұ:eYH2 eYeVjZfYɫV2+JA-6/9eRDjyI-"Rˬ2+eZf%l^rTˬKjIIeV&ԭjзR6;9eVf'gLX-eV`J䬖IeVڧ Xˤ2+qZ&EYL`-6;9eVZka-BX,ML`-sZˤ2+qZ&EY 2+Mg]q`ˬNl[f%m2)"JĖIeVVNl[f%lvrbˬ4- eDlɉ-"b$GWV2)qs ܊[&%lnĖIieRmLJjo@[n7eRAmiP[nũ-b?-J4-ԖIvnj˭2 iS[n-Ԗ[qjˤOr+mi`[&e~-6? l˭8eRֹ l˭8eRvxp[nŹ-mܖ[qnˤO2)՛2)i2n%2n?-Cinmmr[n%m~-C[v&rPV槛2Ani&HBnq%o~-R^-Cy%+e(Ey% 2+ Ey%L+E+e^-R^-JnF?9 Zlc|ةL""&hgүDU`ዞ<_ٗ7?E,%o?%y]l$.(|汿o?7]$@KC˭ 5aBʋ)򓞛ayy}9Jf)@%±޽݉$̉2\Xa N$aNT,\DNA7+ %+#eީrp-Hu+w`_ݓTrnNyp?[W敏 ^^r_c_> 0K~Dd_7٧K|z6Gڏ!տh}'p@N'(ECL,O#޹K%O%2ooF,S*̨hoPK}<Y|xq6fog߽AK{o=#sgl@Re>K_Kd?FG_$, c].gӥ̗Ɩq/֗/]|icRKc$^=N׸NMg~ކh_g/ƚ~w~JŦ2y~_buoc8h*`[aWo?c8r{|K/~[#~Ʃg ~&ߵws_9e^Q6VO1R_wJ9>3?^s`.x voϟ0_+^$ղ Na|-?8?OӐYcij7WnN+g0o?7X/y~ٻ>?~_o~ϟmo,I]?eWpǯݸR o}O~d} Nv51s7W7H<1ǟ/ފe|w}|_5o_cyc߾lt=_*__;/>vI,mds×:Qbw}z׉"{rkow5YGZ=vwW5Mv}OW/=ݟo?}<}^D՟Jcy]FboLdqdx߿SK]߼\><Ͽ(x-*DO_Ǟ_v>bS>ŭxHr?R!ǘaZ75=w9O,~;otcj<>yJr _bk}._?_-y|no}@b |L?/Up?!?zkG>/6rѢD˓.o-9~7fMRlݾ݋]9hz4[g}؃#goҠ"G :Bs{il>hsX0x# KKZ;pyC.G;_1A;IOu)d{#MLqZSK"#1Yd!^||N R/z='zJ271sx^/8#wҼm^H,μfҴ;aoy] 0ۑ{;ĥ_=&P9bbwX>_ A=CCո=T u?^IV45^REߛϦɴ_-G6復f.r Kx K?@]D{ 'mhz_9l#zJ缚Ru9{7MX?MCWH,LMo~<`~8\n/fgdb)S}wݷCqވjhjLl_KBuW0R U=a?j,ęiMaKx +fS rOt>)nx) % p\e2^둖<e@3s4j`A.~"}0DHiI2/n~g3D[&mulچ=O)8̬2bX uZ? 8ڐZe1d)[ ":wmj{ ?Ш,w|P[7R0UzL7v}G?[ܻݲ_ԛ"|_,ϓw^-XbKQ#ч߽t_~?+ǁ?ut5>%ܜ!t׀ΗG^h뇉V>]8rTNyE<~r 42]{7d^ɲӿ_w׊SJ˴o{5\gzW||gH6>F l[8\_RߛKssk~g_6:hUN=`i}bY ;tc #To^o U |μ c恀:biZNS3;Pw8B{VjK4 MrwNa}%s{WW|, a_h[?S^tsn#/i]Y>ke\_Ae۳`endstream endobj 145 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 3837 >> stream xWyTvEQ[DtA " "ɾ) AD0vQ[ IA@⨼%H^3gLߺU_}_U5Mh6HMIOJ4ً88Bg>Aψ#M(d"A&FGG 6 Ca JBӓfn=v8Sr\]ƻ[-̷+ߔL+[MXLZ"**13(9g2ʬ윀+'$- OMK^0ixW7 &Me8֞0ʃ FS*h*KRlj5|(Gj.K9QJʙ\(j<@RA;5 BPj2eJMRөA`ʌQC(JS45SO7IeDMVQ(oeRId*>0Lsnf˸\2w| _?V+Gg2>f|ٸ~LFD5ijel@!.^(3R3*y͢K-fR/e>xN`Q1?M6 x5 &H*T$F>N+2khnobA_9Yo /.'ZE+#V.sBq=U$GZl` A6a{ u<܁yKNiP!y۹Yc_XCЄAQ-Cog&NV!-QjV ES3b}Rm6Fz;?.{}sbqƸ3Ua!^c kn<8u@77,7n@a.368^ȮG< ;aB"$87+U˻S`< tkd wZ: ! x_MwjZIP8B00 @C,0Oq(fG+ǵpdox[5Cɚn'3w5(WSg8l:L2l T5PCJ` (~.jXd'hS#ޣ?s+m9-c%xYkN*nK!Dwؔ$B:5r2 R)+8Y]?Ħ0@?]+6<.XT\9]|fv<V "Mѓh_Vl#Q"onYmS﹠3$jpnՊ:ku*'\N=*-s$<3 aV ;[7ܡbd\ʭEnގsz/6OCo_I V{T>KCPHG齰E/fz=4<" NFY8@|ž;3.M*6l݌6!>'pWb;tEr<{z|S͈ziib-5l:53?Pk][5/cNB%xl ZqUVNz">(L%],7r8&/|i Gb3dX{p.b^` `]$47Qe+$EQ5>d2[(i&xO+|, OV)lqcHʡZ-35a1NtLn{!w ,(GL.X_, ; 7A'ѩG>q/CN:/!_7}TjE𩙄.؈ի aFG1-i/❣aj™{LQ#HĴ(N  &hD`!I߁fTI16*DIi쉕/0 ?V-WV=*1Q"EL!oIGCO5\# 'FZV?X2'4h0U&Zke/5%,pW*۵m;,\WH*w‚ W>UpY OZ얐~_|*?XvCFNi£)*hi!묚;L !\k28YĄ@K,sx[XEYBe-\q*y7^Xq@ d8;DTX6gF`H? 
^I0'ظH~,OZ cfϙ5WI_&i?Y_TFXIfA+$Zq*|٤W@w;֬ʧpu[ˮvLHK8]G,BHqYbgq_%EL|4Vb#6q")pfe.|^߳z?fJ\@.UDaEsfCx8`^MV/ ?x/0aX0]~G^i$PJOK2dj<"e/jR<$Aߟ}Ҏ~} DwKg @QRJavGŗ;z_'r7+bq\p9M#v3i2]~]G^{ ]]+~\A=)\P>J&+ |V9B^waДFlmyb-r [xuG.]=vg$Rd_9t<8?>р/Ϭ 71N|&,Ǿ6Ae$~l~(Tq`5_Y _֒Kgi1?fm~{rzG%Q-T\u]JM7*0ր)䅔)A}6GXzʬdw0 ,ƿ2eE!WGWR{ Ua\xLY9v&.Z% kI$uc ϝ?c |Hs7b{x˄5w:y'4ѥ]PFRAlT zJ/^H4)䚐3K{[l-acp@- A^,haBPsr q{O5uަ}>F}} x~yJ1cw%AK`'<!<a8-6on`3"pGV^ &KP$ bំDӄ`G A9jM[vܰE6wrhTi^8P*?N7+;:W3* 0b9GoWsc&&{LL)[endstream endobj 146 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 250 >> stream xcd`ab`dd v 144qH3aUAVWw>_c7s7B``bdT6r-(-I-ROI-S,H-/$敤````Tg`igOIJL~X}Gr!?dmnl,i*(iooojln樜0[m~_9[IN3]x9r:q).k/4{N͘\xendstream endobj 147 0 obj << /Filter /FlateDecode /Length 41288 >> stream x͎.$ϧS #f'Ji0͝[NAFp9d i_?\k6G*|֫ԯ|ܟ^fmd*bWmJ6/_{1wO*Y=Uz{q*/1~][VW[dQ_iir]ݫ<{ &[AwYv^^q/ru玫]W9[sx^{;rp~W}nٽj[7޽d9ܵv\9^c=q/e9굮I+z6|վuēR׬+[muAYh<,u2UyܢeX7YKZeV-Sֵh^Oy-f)_uxai_AeU_g w{j܏I\?0[%m_¥)ϺƏo0_eZz*\jYփ?u=\Xdc<{ymu<׍ ȣ\uMk⒮[FUZxu. nZ^~uų;YuO_/`~g;\泊ϺnuWoU+ݨϺJկb_치ɻQuyQXw||Q8V~YCPgyeuz nTg=!5]Wn 5s~W޸:뙱Fj ^L}7ݚFC+ Y5gȮ4Xò/{;,e[IeMXOK0(wf=Uv~YSU'->G2. ,ϗAlzwz)+^.Ketc0F=m=/{s7Yz-z>˲e1:<ސn=#^݌3T>˰;˴*6]1+ތ'rY=A׃8]rq/z1ok,ޙ$*W~^e!.<sexxO/Vt˲g kxn\OY\eY2vf#^}oY7j=Ty%=Tlv snj%ם7K<| r_[w[ww=ؠjIٲ4NePWo7-ӣ=ueCUYϜ sqSy=s/\]NnY=k^)jixm+_yq^N-z¼_Xj'o&:;XmVI44ud` [WtYno=G,ތ\ova0 Zk°\ `Q۫\mZ-KFL8Ec<ƼFYчZ':zu_g\OXu]-Wj4*μ뉝EfYw֥@ReY,Ƽ^3Yǟnw 1z.k3i0bpb˲ -sQT/-^N w `~!,-ݾv˰pL+q^,G}V=b K+/ L_ N;?euv~m>;b&$1!@=C<_gx`h@RW6Z 50i(>uuu\Hny01{C03q!z̊*U?l\eDfd~ ںDzl@xC_=u:9Ɔ;L^z+eiVݍMq|c^!z輡êa&z>bcekfi6K< 0#r]_isqfynΒ߮G`uuBw uuu6Kƚf*~1WBա\gr<ㆮ#^]]E6idAΤ.nu؅6K"y8K^x=},:-̦ɫ5~ӱVR8E<?JYN p+{l7\Upl<(mx6Q^,)FCkN-z1srv٘L#)`&c-w'E_SlܴPǾ$rc?+52^][=v[ 6qcasVAaW=Wc} [5p~(EeeR,3u~l b#~,=x˿uW-L ff𻎕4ߋk߲1F GV oZؑa,cM}<{ ͯ-6˴!Xqٽ pFwPŗd_;^<4,4Z8nnZ)_8x -K_MyC=2 ҭCaMM 'v ڗ7mڷ=^6:3M*zwx=wE{?cuua>o&-ܷxx\o/nfv\ꫧ~AӊN_]- EWWgcCgre!lekէ<~T!C}@Ptyѭ{pl_y}lЗW_^ Wc_ܧ]1K>wtw6U9 wy96=0}9/__F"kпƠx5O :ᖽW_(Qu~|>p@++[W a _sr}X N=h"jRG78=o>>wVccDWYB:1WGvdBL(f`7W8+CUr]׹akCZM떾4-j=R\:}zGWgqs-{yk\ ɱ!Kvp]֎V 5Zco9ϔ:;, ^'?$c 3;xldFۍl}zr#,v1uv+Ys{r5y7/^/Fgr˾)N MqYC;7ťWXv/\,[6Yv/yɲ{ݦ1>.Ϋ=W_e^Ӱk=܄'y{D˾ /HX+-wu帓rI^nSr)^neK\t]܇fY6|YlfkM@ll'ا7 TchbPE|3#`afL~d:*يol5xWY=E5XO ih )5K0،4HKq=#tRdL#j!RvqMGίxe./Ѱ}q3BYGZ~y"JCg+if>mZkV q &=dYGZp+>J˛ex#j"!` 1Dmo5V(L{Fܾy`l"(2?ư(6hZ*s ްI,Xͪ/V z"M o8 2,#-V3B)~YlX:* 6 MM2moVI0_}}Ŭ)ċSpo{|6F` CT ~ˈym|YAYE黧qA_S2BYGZz=SGB"՛ Yd;ۆS6l1S d$=1>J[axtsFg&(QJF2bo.?H۶'d6,b5p,3M, CxQHfa\F9 2<j?ڡm~wL-!OVyWܾ9y"(2,[3`V%? j&xߏoތ_eg[v7N1|=H_&=seYGZ2ZCTAY斳k4Unۑm!Hsu+!,#-Sps e0jg;Ը&ii \\-!,e.eLgB"_ ɛn78x2kc 4K@Y\0h#*^E?L)0 ]Ykx>36->lKlFPE|{?36m%XG}1Xqckٶ>o.;7w'͑G"j(cDYƆt༷p9T"8p7p2XO 1",bC:5o"km=* QVHHVYLL(Ӊtɪ[ z;M3! 
*لLlQI6 鐇p E4ؐJƊ}CS `z8CUgN5p%-B!< G8ƈrkH8{o(7#VFzAB"s|c';]otv_e_9_>oyWLkνyeG7ԠD7;X pbw26Yv/26/YvC7z~]/ls>ʶ{ĕ弲@Xn9;XnّlmXخ,]"%^{l^w~ܲ{9yɲy)b,7{PVBt-b(HH:" 83@BVl*AgUY&0HFY胴Ԃ(N$=rdYF a UV%UC}6@@|n ˴ǡ-~= O&PH2|}rts[L'"M8RVViqH+O&PH2|FN#<Gܣ?, E{tI(GzJ=TӉ{jg;ܣ?bYh^M""=w"[IOȣc,ADtB *ل ?(jH qG(GzJ:`;# dzjrcݓ;N#=lztlḾZ+r$ 4"BYF6`Z!Ң2N#=TKE8q}ͯ:F:Ϗ:,tA`##(mE$qqk̈JCTrJ?Ҿ55^^렄 |ai/#>XG6+GюX aylHCǑ |aОtXx2N#=TKE8qkl6 (pFqخAX##(2۸όP;AltZ*َ/Xε6#؀5"^F>) |(Gzފp>:A!|X|K(Gx8"28LYFD<e0Gzpc>'O2˧; P: f tz)t UT ;> Ӏx!{aKwtB@҃FFPJHd} Vh[L'^G؛p ,,C+%'N$=oDHâ5A GюY 9,my!`=P:pp#DE$.F]B^­MUQyG+ d9',8[|&!BAPI(HC|g:QPl7qzgq bd%qBA҃ GFPe$ LKRe0HHzpBCl3BclVB!VS(XHz2 YFqPtb!z+َ?B\N{={,1&r#Y/n9ps^l%߅j,Pe{Wr\ hW׬~͊QmD0,G vr\YG"+KHDzr\9G"+KHDzɲy96m؄lyJH4ܰpp!("6l~Z7#.TG8[q1)I; $K"*On؄~wAd ]dy@c?TLoDxp6'Xb{"J E40 P%x 7"Fբ܄F@DYƆQ@}0 (_=F Rvk@ aP 7"<J @"2T7"<l T@:ͻsYDБ&ހp""(]*,8n%XrC)j&>;Da::y,㣶 7"<J[Bܖ2:P*G+N^̱8t(wؒJ ",c(.lǜ P\$2"j!Rv;@L1l* )Om~vAZ KdR~3a)ـ(Ԙ۰ !͞؄} 8{? FtK*7d"<wP Az|D>H/2!Gюhwd %0k <;xH݂-{%p h ) P =6h'q ;{$p%C$ H\gT MzLoDx p6%.{U~;"ÞKxz%Q4p pYGe0Z*َ(6\%s&.Iݲapp!"(26\7YdRLoDxpq!B..`]&?}t,`rkBr_Poqk=ljY+O/V]XNqcIпc{DVLc +wXl ]^{{8pkK46]Yvw7%UnWXv/,C22>PH|HH^p\G[r\!cI^۷S $/.-[L/hr #yr\r\]"vMd9帼nٽ/w5^sl%ˡe^x 1@43]yʗʤڌC^ ؁=i.6ph˴gv/sY;4.k]C /6%FEfp5,VPB ҏ*k4lZfQd2a~H6l訓FX^`6L.UZLK,\Zz[jhYdr.ػS G` oĩrJ͛4PLy?FC m$la TK|OWb `N3 +g11aP=g\8-QLx˜eBUtBFSso1YT/ֶCFrZol%lLKx ,_ܜuYH6i lJfyym@_azl4ÞYPQ0צ7`$, 3=63=xYK~Eimz %>z;9p"̟8h0xMf1^P{tI<[}& C7}-Hll=,  ލDT2];{q{;kԕƆnxTytAU98@x%bubjP{c }Ml+|oJ]98⣡;zg"ex/w!v"XK,7;nb8¹ ݲH-ZG&V+\\5ƻ;y̗͋_ -LU ~eU観dwY>5 pp!I" U.:qk8mIy /KQ 'Vs+8IGm WJzM6ux+^kβT[\h5Y=ɉp,ٞzqSl"^ԄϔMBj¦0}?޿-KuZp'XFRɭM bj$65`)+V <||cQ4 JVP,1~KĄ滍Q]aYn]UR-*jsiT( nl)m@ܶIN;-%f15aS)4Ulޅei%'3ҭK{.0 Ln/^key98&̴^$#/E$W6dyU`Ir7X3l_:o'FLME{ᢐ/`'-/v coᲸC0\1+dlM拺,ddwzV 3$u-;{dbu;60eQJ/sIJu/S]c;&/O۔o3gf;&OY젯vŤM_ׄх淉1yMIx`.jz.]wP)(<]_z %"(f-\?d[p6.K5ZM1=~ix\P]Q7^]wfF 0KV[T44 眜QǾP꽨 _@ejYK seu 0e{e&q.4DŽ{梞eIpYlzd 0(|MHg""ul wm\Ա`Ke΁Dܤ^{ӷ Zu̩9UiE{&~Y&kNls`%]Ф녒KFj zscR9O]IҼšg]or=EL9BNa-WouqX^/f'2TTooV}ܤ` 0,!eeq:,/ov|ew@3ezu'ٶ,0>.Ը%,}If5ދѭ*[к,lkE`2u~/ Y|,vt 6U2]2LdزKĦwZ8+ þQnZ,#?\Rn?_K#7n,w-.:E3tŋBWg|A[)1lJxYCX8zރvRmXO.AՋiahGV37 Mh0 fț{2q֭lFaX8bk__F1=\ Nv =M‘ bTOZ ?x`9ɝu_]!q_}/_AC([ 5&Qb>]6,4j0%`Ds`hqƪ6jvsfںB\7rD=*'KiBmޣMW*dy٨TXMts>< Xe}V?gzWa]S'rC"o6zr^7zUw1 eD}Ԁע_(;|~u]9]z%φkp9cYK_vW>:T e|PwRJUwg跩|RoSCt/ yY/)yh\6K))i%ebΗrqڦj.('(:=ɣN\Z?v'WP˿8k48MԾةuʥdK Ԋ`l71ןnxUqs2>WPTŇ }>z+.w"_.@0\Wy'5ϬˏKG5&>ĵt2iԂPx2:U!m+3 {:EM})*)a7**_P =\:5[^myꖥ  \' J:\; z=kڹ Qmݱ2) ˯vr` 6mw \i6K~x``'Z{T-pRW^j_B ҨBo/:A6a)>0z`ti[oDe1Iݻj뮾0װAϗjSu 4!5A#=?0閛 BĐ -6m&B}sS9& rWs1)K߫f+DACyRtFt^aDȟdže<};6.O°fO%6ӡB~[=5jT_ wgg Ka,A;Ϳ`?QV$B,ӸKx=Hdp?:Ӗe dF oqANzIpgđaV ~yD~%6kJBS2OmiJWhJצ)^)^dl~KAYvAm~K/Y6^aٽ|[:atu%JW2nJud^ԕ&]ɼn+^ԕ Jƶbշm-жշm-,,ӕ/ЕL e! ΂5d# Yԙf1 #(-ǴP2#P72ːdB"MöfZTL u! 
bM v M~sA~7}>@i>Qt%rPl7 3u%BUH[7u.sh抸ȣJK1-TT2+jg;H+B~y,u%uuoY Eȴ4BUȈઑQFJF-Cd,p+B/H.6j}a(t%c S*߷JK1-TT2+jg;8h|Y i󐖪z 4 Nx;DZi(dFhd!UɬTGƵA(JPwv^D|9L2|L %!3%#iJf:* oCsӔA댷vyӔjǮ8(ӡ)WT)FԔT%CVi؂b4%BEU?tr M5pQcy4Ai=Y4%P-lo;aqIH|_tv$ڦ:id`*eCT2, \32UɨEf`IfP<[$$$B5H8HzK~0tyK5Aii=jjY$P-l7 ' Puk BOҐ%3]Ҳ+!ǴT'Ug )$@Z\snj+RMJMw[@i"(킏i =]Y4%P%h߶iPhJYKo6 m(s:"DU@:"$,pG8[naɴPϾ(Y0f%m@yѼWW&%dVW =:sdZ( i[mfF~L~s}#gX-|L U!37F:AuTHY w-đZe%KᔲJWJ&+)8cet8ce^aٽglNn}؜dCiɼkҒym%-7%JZ2nJKՕd^ݔ MZRvue9er}lWNo[v/>6/Yv/>6/Y~'-)!%ʐ!)"%e~LԄrM9| E:wUZFHHKҐB!$0EjK H@i?!3iGF.-U?|6ARZ2-#<Ғ/w> 2?#IKf *لHiɴ82ҒAȆΤdBaO,@ҒYy Ғi0@ G HiIA`##(-ǴP2#P82ːdB"8LC upHG%BYR~vPC#)ǴP2S72KX2˗~4@=RX2- H$RWRE:JK1-TT"$+ug+c B:IlZQtJE!#FF!+jg;#e%BQHA(-҃FFPZi(dFhd!YɬBR2  H!)))"=md]1 "$)*G+v#%ep%(R.B.҃FDP: >ZCg##$BNt`"$\ ZQ$rPlŁ}dZ()!u"=ld%JAfJEfZCTbaq)H.bB.Ñt=R"Zg;#$B)H!th"$] tB%Ȍ@(BRY9 p?RK2-Ԃ H&)#=k+{L 3" If-Z*َIE0 o8!Ӯe Ff!)pIɨkJ80L %!>H2RDJ H"Ai >%#Zjg; $E%BMHҌB0҃GFPZi!\Fhd!Yɬ6I?TK.uh.WWyuS]2%󺥺dyEd9usZZY`vܲ{ H/Yv/%%ce?%B߀\<2 p!<|JK1-TT2/jg;N"%b%P?R%]ώ=(~L !3?##%|9z p6D(B]2-Ԇ #BuABmȌ@,CY yp)B]2-Ԇ #Guɘ%i?Q% k?p.jC@ڑ!d DDPZʏi6dF K!uɬY%P-lljQdX()p#r+ҒaqeʑQ@hKF *لmɴP20 !%3um >zS ,U,Z?P kB0d CDt(>5#PZg;Nx"T%BMȀ\32P x!<~JK1-ԄԌ2*jg;B"d@AhJ?(B]1 TT")pM8PL pHa!(B88cZ(\.REd!TG8[q'A0jd CDPZZidDp(#$Z*َ=ɴMAI ii=J3" If-Z*َ=ɴP 2Wvr.@i=!3E&U?|6D&BN2- L"5AqBAȌ@,CY ypD&RT2, )T5#[J,#!|L E!#kFF*ZHU2j鲒?q+$XU#8]ɀá|L U!#FF+[vDJt!p BW2pAcZ EHY2+!QVD(Kʐ pd,Bx8cZ((eH^2kQK'4i8d.AK׀fo Au&+kKa;^ZH-rCc@j"?ژ$HFc Sb{ xa!?;@c_7@C$,€ma!:d9 !EtTUᐅP}#zP4l BR7ئJj#1jk߇>-hA]iCj  FD7 yZQ7dU0HX0CKW`8 ͂C&R|A>R"qL4k#U61nm֌5JthEA!t5ؽ2%~3z#ƽ#fRTqrK@#Ckӄh=,~kmca^ еmE4[TM eŦ\f>q zs,M!TX@0;|DlpI8{MwAӺyؙB+4 bk>M&M@*D[4gI+TG= 4ĪK@gRlR"˖>MK!ks߼Ku!SaοSnjpM56O6|*6b/\x.FK^*WcQ\AJ0ل؟UHhOkV[$`ÛEMg~^O^5F'g ƊD]bKu K#8\Ov۞,٠h]?9i0l6=ˁu q{p :|P9έU8ãô1UZ=M9ʞ!G]y 3\]8RlIJ\ҷY z3fY|r]/"`7a9cEkM?*+,9Am<\u4_5Btk>ʝEj E LØ Wo|_o= rʊ]6Anc^!k,l:w؍.S!j[ Tm[cj,Kud>W(ZOPYL|4zT?`: (ZW;Ua_aʩMQjGbeZM1} h2mgm3Kg}u)?2Nq쫩ʼn:䬶 \lYm!-[09` mM譜iV-ϧX麎roCUH[X0+j/_Vr+N:lT7o&:W\zg'./~v"~زHg`y LU[6 ܺw} 5jrK◑QlU7|ttu/Wy֠sӖ߇^>9ن\VgrhG$lzw('11K0 5\@ Ku52f wp5[$էbRƪ%ՠbd::u ZFb-%f#*@ ZVFo ju.ds2Ȕ!S]g$p(A ҩ+]H5p=iY#(Xȣ<)ܳY3Y)ͭ_&ŧ]\>zp~)-NJE|t ?jU4NoEj.O\ nx}}@*܎aj lh?>̚ejɔ4v`Xv 1,3pOgw|1&nkn~7Q2Z`7 Ո0z#`t-WCm.]2l_>puOb%sYHю9>݆ \MoRNX,WK) Ӟ  Xk3%)`ұHz&aU] lf_`t5K02=.G3,sf ="杼SP 4 AȖT-]XӚ oSV;4ھ{gl0 &6*fZ04D.V,[&}ƚ5 mot 6g&e@M;xdžZ?'gv@${ngK"imlډKDDDIWIF$Mi ,;YoJۼdټ)-²{K?$&nIM$y$:H%It^9DK H-mr;6K̷mL˷m^wY2; MN<%=Z#˯"x:)̊*"(UJ+bqksXXH-I͚+mQi.h3I:GwՐ? HBZf$[$3#AdNs@"Mv]i$sZH AڌBZHQߛ}Dn%9,NIٜ9k UP&]膺QI9,NɌhԷ:c\Ǵˌt:)#S6GAjg;qfoHb̶r֜u~Z>չbz^VRi!)s'is Z笁{D=&ج+hL旟k3p7,g9,N9b稅I;62g;ay sS1Gj29j!#hu6Ndaq*RБ$ psx*/21+5G AuZV[֫9,N (h圩#I ur~pMDQ9ԕ Q'Z8+ urN mI*g[} n.GPZ4i!sF Qs!*笅GN, Z9>YAOG'G2>9j!Rv@'ɏ/rd'}#A,'+鑣'GP&@'ɏ/rd-'}×#A'?Ne}rB"8ON ُc=ّc>cM?<|?"(-r8qDpz(#P-G;NT Z?VAYkkA`P I#)Q(GwsM'{DZȱVɱ/+B8 >F"DPٳ'aqcËY+I|yh_"aqQF&G-Zz8рMNBȒc?<|?"(-"㴐8"8rdQP-l $YrXX Nu`Jeٗ={bnA&9w䨟$h MrZrK΂2"DZLiw,rDY]5dM81$Jk]4ZOdG(9,Ns9䨅o{؋=IrcsQtx pV9,$7BZ_W%9,m8v䨄tr,c ,/ێ,=,/L&$֛vk} _<ާdz;rb0wy/sWx"7 szyd#3qa:2/?[(K64i:w&D#7/(/ @VL^ \v#w\v&Z"Mq#%z:㭁hܔN$a,B,ͩ]Lч*e9N,Q*瑬X<O0 BG鹬'T(~M y-fe TlU^N7+14Xf^ja}:̀M:dDdgl\a$nݮ@}W! $D1x۴WސG*k eB>\q6!DV!FYdȀ&$ (bB0Cѝ5M5f7([xMȅ@EMN|㎜YdZ$B(–䎜ʤxQ7 H*Afw^'@(7䨙.`Kԓğ @‘i&(OxW 7(pBF]P43x^9'Dt.7=W𾤥Qy7/$D7}Cj9%Ip'z6猄@3T4&nAu aU oD9^w K-F XX) B(!AL=.1*Fq.%J%6AGj `biYHGP(tiSSHi<^ .uj6!oGMi6H9Tδ71Oq6^ M[RnTb4m(!X-N,-ÍTɴ{MM*ei&J !l:MfKpdṃ AlM xbgL=S봙zsTU II)խm]#/ׅ }݅l L' a0KMYj@0=HV!al2VjYjRn49 `͛y[kF `Ȃ?ckFGkcQR0Je®)Q1o0͛R^JEBP¢ ]A|CI'j׷!, 1V @ "aom\[eHX$ބ@m ")Az  {  "aJXs2."aQp  BBBb[ooC/X00+`} Qph=l BxX^& A}Z Q}Z !L !f@-@о>ӅAz0tRo  "/ 6FY6E! 
HXZ88f8 aRޠ@ww|n`"Z؈V0O|`ĂG>GbZ aBpQe b(Nx؛RXV6EUauAz_Cฒ('@Mh?`zOĉ7֭ @U H CH}C~DŽ0IXBB 7BDNVTZ R=+߆_(D8X5a6a Z`Q+nC5'LEuB$δ"!BDG~`}OՀ$X pAǥ (9j}@rHbX%yj߆e(D;X5n0 +aq([dʧ /(vkem B<^ y8Dv&ECC6^b~[ 8t$`;>sB!@Y-d *cW'%!VPXmB̃Ub"6&ju h3 CL x(j1C5Dc``y* &jPV'*؇;yLX"Yr_bcR酙a-a贫, f 0ۘ j8ǡ̋LE<%` 3Tn%zR\8~ŌIQ`%V9]9\Nچ( zЙz"n)6ZD |Z {&L&6aa ZbRkmKDN=0WEDN@l[^.V BYf 0o=ԯ!LM&9aQ{ '.Br⓺k TZg@ "& 7a Xsd|7aQ.TD MD{D'U V&X7ab^Z 8TEz,&vXmo%O@ X ֋Ym5㰕C0 a!owOR3h<Q~* :j"N}AC y'4taNNvNm{b'L" V\”< B <@ ZfY6dE! COXK5:ME!<FG&zܹ%" ómBgUVz%f/qDOxm?- iAR ѽlz/'8_N8i *x,C'8_N*tB~} = QEd// .&YuE5Og ) ᥤ1X)RRA'"!ErJARJRbf(J|{v{XcQnל\sF9g*sF9g.26RRLݾdV͂淪(N}Q9|:){w$sv!C*W: 3j͏^ ]*+#+:=pyZl-Z4Ӄ3!4w:aDfȒϜmQc@ lPP<\@h!Duhvf!NWX1sChѷ$cfNqAu#8WbTƮ [Y6O|˩Y]bRy8+κjПй0&zc= LϘ$edzc=Rǖ*.=X(֚fzCd;c ]^zcuI\ dp^d >nYD/#-J̢Rt+cjaLcU <7]%z5&(%6*6tpiJct0|gj1PNw NhejW M1)bƀٛNwW7Q U踥{l cpQ)t5^)T7 =2NC ;!%Oؓz&Om{?( ؊~EwW4%5y:cpA;֔Bw}16 VR%2,FR b]}O^CDR}Hw 8j'8"jb Q\l_f X'oc)D `N@w̉{ ^tJIOs㈏F$QxyPȀ%%DMY\`u<Fc_kt}N~I} jdTG A;ն *S+zr)}n;(M){Ee>R}YpJzOb$uW+ ¶$pn7?I%3Cdr@wPEwl~lIcTA s;?'F AaűFL6Ij2ġ!(ed7}g/ ʁ8T5_ApcduJ @O )%Aƒ @EU-B3X}DŜu*4dt/ԙOy ܨA!mHqۺztٶ,")*^KIjU1,C~ SU#"@`HЍQMoYX]\)+ӏ\ f(*v!qީ{I5t{ǝzǵ7oԒ$\Ѱ$O#>ڔI '2El 4$A f&] Miv)"GMcՓ>R\&y͗|Z&'a\U?dD^)uI@UuSSL(_fX˅%pOl $ uR"GU+X'WF$qB$TmW`#@#AU#u2' kJԊ+AVH~Ql *5p#*UdBNYbqXs,oVP׊3N-㑗 !%뀸|)뮅 |L#G@u 7-՝oT_PC`-4yٳJ,eZЎt SPF|P#O vChl75ZI!)3c {v-I 2ڨND育qL=QuV5^Lv91KtAipUǀz00j.ڡc2e!qE*(. ܰOnܛ$>r@U9IK I4BB\ 塿2"/[c24Ee"eC4n0yQZbE6*%^ @D#ޣ&0=hC [DCZ,-O^`LWRK N_J ?JWFo@?Jț~GܹPJEy4Mu:h|k/jYXHTBNZ*Y89fgSg0w+$o=ک GB2YtѼTV$&f~%٦ B]9\8k4"DPQR)(9PU Ch-uR]ܳnH?Fuaf(#u(k.5KTqJjxt? E9*qy}N)T>6I떜r?&J1w-?AK}CeDaNf"pr wY`Rk$AڇX= O*$~B>g'o;.^}ٜRf*bՈWXըrϾ[(>sA: -+〓$ &jۜ$.XK\zhc͂ZnR`J84)$q RpƛuFdFdk`^}a*U%򘜡1bj;.nL~ p"tZ1D *2/Yg5hn$&#WK@ܹh"PiJ>25s6sreSĿ%HiTov&l200^#Z%){֋`@%lgANچ.H!rfKѐ"%(*%1kWTɣ+P$B'm@A8f|@I.x*M/P;  AnNvԹ EPtyxK׉*E@E2)Vs8h5WjĻqx.9?ūɎFh>f>@@+*@%nZUCU'\\ 2q$tU<)r?tUkhɤT!@IlnYBqZɀlSu6JHZqOdP%#*k-B'|,wR2pekBQ| #[Zy1&z۵ _k7z5pgE>B@.m^{'eTAJ}򻿼} xdvxDFAc#z518kc Lzqcxx͌;>t b: ^~BpD>aAV˵˃x{r3w$h7NoG; jakǍ=WƳ|À[t8U|Àt!.am]ŶRc;VclC[. vDOg) *fNZQ7i?WYH=nb+uI1&"-~IsV2C /ت-,.j[ 5&g[uyi*ЗR0Y o rQJ)?9P!rL@mZЀ.{3ivׅFHgdAz]S>Tm8 )(U轕ja]T5K5jRt [)3RJU 5Dm Rvԍ1GJ9'RQ qAX:R<&;#i2_o?qs7'FGiR3V nK)G)XQRRP\.|JB~k!=K9F6@g68YjR,5@g Խ [ >aU|D8|ƻǯ?9럆4/i?^li?%ōmaAqCvwq΀\j_yx!O<$Jq*yx2_o[yoۗO/7 tIǐ*N2&t̗9<ƥn.a8h浂q#uD:n>A8*|xW{8X-o)Hj38+ ݸ}}v11ՔVH_1^CrB淬fH=ӵ^@]6ǶD"*>oɏakr4*HoG#ѵߋsi}q}mY~xɟYG~>ggoIcu~yeo]/ȵS\ c=g7tDz+]RnECRv} rG<37= *dce_qA8o1(|uAOo>y,~۱u|6/6esmk=/l:FX Ab`.KklӥSt5]Zj6kx:˵L>> z~uvVY|&yp^"Lɥq?MhK/y <:@ pcs= R.2.jWesJl{p?{^`f?nEso5>{ٛr.\3j ,g#jca8`;t=Dt}EضNmnRXc_K2׆h9s z9xƧEvBpBDFu^ܔQ>rt0w-s{a cE.!:愇ß3DO2Ǡ/]nx{'<>|rqk ?mOWƎ 9S@q 8{E&82nm{"}\:v Vm[]zb|& դEeGW#Q͍7)p҇qKomoIP^B{%E }VSz-2s4,<9Yk6>D?i۟{_97NԾi!'NpÒm~թ"gWD#|C;J:yJ΍Y,c9g@f7R3_&vﶛtD7B/h UЅ A~-q]S?9.gH5wx^|т?XT*6v:ֿr? zƵa+{^.rED澗5V:3pB1n(\1_n"1 nA[o#S.~o8 nidJ{FYJ? 6'V$ @oN r4vbA~Jg*"xnrInc !MTz0Ka]%aY~ӧz\yCD}_]\K<\ƣxM\ CSэQO;gKTw|c]J $"pdvϝKHcq.7x7O}  'D (. ,\Љb],\1OঠJq{3>U=%{|bH~}F-X|sr:^TT ;nࣿx6f  z:0vԏ>M n)!]:W@.@!˜w9s-[6uFuqw? yǸ8tpʘVbjEcbo(.V_ݽXd\Wyq5.y. 
Qfaf&߿f\uW煫GG5[J~d*Q^3oP|P g H=.MQ7hI7?(cJfL|:Y}ڭ9u;@?0Nd,Un{W2ǸzM 2eW2x>iJ Y<ڣF|={&BAopC2,*1SjUcR5t[\uYu㶗s^?m?nStum^o/.T?گxחTF,(ʴJj?,SwF !y G*.߃xIҺ)nW'QM:V=V54X^ұ b K̓ju)eUh7Ol:_1<տF۷-Ð ,0 Fj8Y;C񇲽_[ ,3in0NsDZ5]/>S}>)p>C"ՄLߒcaD(y /oGKWlŸ́䫐?|ݧ~qK=(Ti)w7|t=_w xT#@!P8{e6N;>:8䦥՝$C|gglt@ mvieLP!9kU"Ď7"փxGO·g/[C" SHLeS2\jǞSqǾ~d>={r] J,i]zߡN:tH!2<u\vlΰsyký ϋ/]8^`i)CnU|?x]>@e{oEpWvòv9srCܜ w79BʈgJ(DB[%V Q_X2rH D`F4lYhF`P[ʻ[8BLT$#'7Ԉq JX 6޲t}]H a/6=K.˷aCMރ ;χГLL ^Za, {}O{s缡}st/C>zxͭ͊Dla7~t=xӖaߊopJ תcnJc kEI=!H$겉{A g 4QޛI:MƸŀ%|W38Pmhendstream endobj 148 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 1254 >> stream xERklSe>g"PE2`@teYƶ֞)=zY2(̭,rA)"K^B?&L,oyK$I|7HOҲiy.`2銼Ģ-/_R /+yyEGQ_f8FjfV[Fa: Z[3iuLިczsUqjVZ0bQ3ڊVkhLBo[j6Ѽ35=ef[5jt@ :QGDG BKl#JM$KQLȈiҚ3/1zɣx~=OJ?J>>&qo[{ `2+\0;l>,ށ?(,F|1Pp2h-@-e l>&07!`#t˱=Ob?vVl;G >iΓ~4Tt .mQjEy#6Pu6õ&G0B -HyŵVr0.2"^ɐQ>:>Ȥ 7z&#ґ+0L]pL%x1.wFC) )%uqՕ7U0~%84A3'@e&Z=_hŴ99<9 ʾ1v8{ y`jf6+g 4t TJ QH; ^m`em0w>dX|yX]0 '!NP?zx[C)&9YAWc=T^8t xrf{RUU2+u9{vo >/PnǬh_?ˆJա A@'GiD9" },P9K9_g*;bMJ ޝ}A)J36fНL*RJ~Iz=Rm}_GGMyPh/h7cu̩pW~a-^W\A.SښT~!GFu:T )9Zh ~WuUwm-#--̀lc5~F9T*]7](L h@0${S nsJvKi=& K*>#Bj 'n_@ԷhIO(?in>zwo1}}B|@=_4er9'WĿendstream endobj 149 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 500 >> stream x%o`qPDY$&1F!1QÖA:J,te_W0q1̓7'_G=yÛ<,c|8_?l^+(>E w.X }tnCV\%"f R*d*#C~ iNJc?QR2Oq %-1#X#Dt6's N CBȂQu04~>6j AAzmio謶xr~ x} J_m6kz{^O;Rr ,g7d 8*2`݀Zm\~s5U!XS=<Nm_c6sښ՚ K4GfX]tRn,o~ad鬒5lAY^Ė4UtDȷ/ CoƸZendstream endobj 150 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 591 >> stream xcd`ab`ddds4H3a!3,jnn?N }=^19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMURL:)槤1000e203x10p1Gϩf&߉]wڝ]XR]T0}I'ͿxnIՍ--Im-E-E-ݭsΝ3}dމSMĤs'.v݁ ۶`78֪*>.kzemW[E\uXR`D7GE'͙@~֦Ys]k]m-#" g.qcu~.]RWW4f%+(?u)}Sv8*R5Di[̙}-s`nJ[k:[uweJpeD?ЕMϟ8k*y?N`pq\Uendstream endobj 151 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 335 >> stream xcd`ab`dddu 21~H3a!.,*nnw }O=^19(3=DA#YS\GR17(391O7$#57QOL-Tа())///K-/JQ(,PJ-N-*KMQp+QKMUSD8)槤1000201012{ӏg!?EWwyzvws/>}|9ǟ^:C7C{y7%tX#WS'/`8}=n9.<'ܛUtendstream endobj 152 0 obj << /Filter /FlateDecode /Length 2005 >> stream xX[w~ׯ`x=U8=M̓ HmwˀZJKAof`v>, ~-WxWqJzwĘE4R BS|bT3&$tϑ%-'2&'-ZG+\.Gprմ\}CPwakXlQi,XkLDNXlՀ[8$Ŵ CӾ:zqR-moHGer*A-+ƴU<,gTDNtH`UU%!@oaCn;c`A^WÕUZ#uDA1n̤˕%<ĭlELpH x180[SdRX@_o!]0}6)hRY#L$.AHV|.`? H,Z &ZY-[+F19U~uـ~)%d(&RRkHE92 &r"C)Z,'(IC-r1.:TOyBde?xAInCil"ڃлm&נSCL@n7ND9ofsf5}偿N񯿞*irL>k&)RtbW+Cp*6|=۴1ߕUP6| 2!Ӯ}iȪA,Iuk9:36:ځMNFq.2v t [g{tVX~jC3qRV ҞFudoo6VtaЅɅ^_[Kpb𑉛JMШ A#$LQ!g 5\ihs"_&Qv!T[?gٌt=7㑳)c`Vq8WiSՋm`tNT+G6<f! X}-n'4X!٭WW9ճxUzG&{]á: dQZWWCϒn¦İUP^{n. ԮUNa8  9c؍x! )L1+BcbەA +/\0endstream endobj 153 0 obj << /Filter /FlateDecode /Length 2742 >> stream xZoGAJߋ-p/ž>ŋB_b;F]E0p8ur`oWlz~vm]'a&wUV1&rq~XTrƋ9jNO6N3r@+`M2wNe8"~MZȕtn oiwXdLd㩦: ]8a k)U<ζr_-~QV&/ _r=p9dV<;>4PIX2Vd_|xp v$C]p61Or!TuCz:QgHDMhj^ֈK}Ts]Wg*is2~`yb6yj麬<_w>lv̞^q"F\{|z9NkM_sSpzUoS;-Q=wO`~珁 \"q{p5 ViUXn@:*C6Vg% l~_k,)'S68-n~X9`mr9clfzY9099[uV&l_ "_V( ƤcV6e+*PBggBn$~yԄr؊#m$QB ukIz bV֋rcAh՚TH8=}yig YdF HjF;=VfR1`\d%)ߒrIzR䂫$ps !2UhsA_eVA1 ?"zHgpfy8!*\q#"0r?G>R .? p\Xݔi7\nhØ#4fIB'&a(HBD $єanZ3mbsqsE62q 鰟i=025xoxB;.x0*.`g ہCW1_h ͅ$.i-ga5dJ (NGnc! 
+YC}g@h.l*16uV0ѐנ /Z6S$d XP䄅'b#QW% 5'L*8yV%u&X -1L EIث|&Hu&ZJcnB':>Md鴛iJqZ4apF -8.=bApC% &'z9"z4A SL|fVIe.If5ȌG0P0=H:gBft#݀NJNpQDLsJlkV)?栘Ғ#sx1=q48*i-p "g"rQ?1"DdErOE\^ [L"|'ͺU{ԪۤVrtrøJtվ6)_^3{F:=ξ o5Yt{c(_/OILRPHB!Ekʯd(q,0%WΌQӰ$Z`}l[a YÀ M &WOKMܛbLj- L$zUvr ֘t`LqKN#%ŧWj>q EKOtѯH;Pb' cP'e0Q3?fÝvmSe !)lͩع<(J)T'9<3؍kیXk6s}˟Άnب6LI;Q)mꫤ|0 | kfQyaL,@ 2ĻaaB7)h+T,`HA6 .Dȍ`J*U䜵b~U订[롉~DxG n FIE &,nkk(REwl9 j4`oPH҈[u cEP[1k4;&a w؎#iSA _Jɶ;x (zPII+]2{cQ *eb'05iIs-8Aӡ:i'[{Oz:ia㱷iMB,n0tf/V_{ H8c.VI*8 5pF m g:9U¸Lq=H5Imr3^|ߗ`M(5 Hi#ݨ y' AMبd^lk%hߑ} +4I-NϣU+ÛߚC>(@Gd;^ďS-\4Ȋ>QwSHt*,FNytg1(RHLx 8g_~'Std0v({;+"CT>[(sG:{(~bP,&p1/es5zM&Mx4j ]:^b R2LHO2zMKI+>S[æ~7ߐ-VBasK!r~endstream endobj 154 0 obj << /Filter /FlateDecode /Subtype /Type1C /Length 467 >> stream xcd`ab`dd v 144qt~H3aӛUAVWw_7s7ABsg``bdr-(-I-ROI-S,H-/0000302d00mf`gxhe?+9ȩ ǿ3b^㏨e{Qu5;'vu7twwwpNg:zG{铺gq,^PX\QY}2Q}ݵY%͙1mnSc2{G㦳8oj^># 8J/e[3kAF[-qb_P;BMw{ߕGwl^i=S8 rN ?ACB~޲u[7~0@='uOca嬲kiwil'-<͞ endstream endobj 155 0 obj << /Filter /FlateDecode /Length 7714 >> stream x]Iq7akc"A[ydYC17$E ͙&š~#"3"dKazS%/ y\ŷ<9Ӌ!-fonJڴۯ/e8ҚP | a!NjŽV$I8="-ЈIy6 %"x;v 5DF?BJpExYOo޼p|p&B} o*CVyO%˂%(%mCX8q6ZysyU#z%cy1Fm#\$DK| =Ҙ$=032 Pwˉ]j=/K^cccT e(.%Kk@Z<}ܸ^F{wrɧ(8 {菜FNyY wAm6-AOT혷S88LIaEEDAoK#jt å5@dPtu2L:v7kũ[;46^ ۷iHs?$T~KN CDǷlNچYq}] |=}Ha!9P ZǼP!\*p>lH&^q+.D]X c%deɂ!82 c4ܶtOweFWf2h+hmtZǂZz-i49C|* 6`wЌ;ի25m`VyYyUպR;~ 0i6%ffCR;a>3[˺Dag:9+kۮW'+1TNKk6 Ch1ўw!h؂3![s !zMtW $$.6|<`PqmSIY] z~Ӹhg 55!|% iB<#t`H iy8V-(l'݀}2*j1S8I=OR YyoJ紏ztPI1yyܠ&frBkFsRစ:4CRb"%tijjaAڎY*у(LBΉٞr$$)w c* @4n uO/~s&8ڂpp*67EœiU +a 1xۗ/^iCPM21%ewEoÃ{\h^V:?&&HD+Sj=DyC ]ِl׉酮ιe"tJG$}  =s<\4N&nU՗7݄6|#eЗ1"`u6$vc\°SvB1N c9GXE2Bk-B"…향NrO^DqVoL҈AyapGAKS=*I|Z71RquvzTUE?MlSڔ0`^>Ӹ2HkJ͙$xo!h!$(=edeZt]%z9S'fu`0ݳ܃Ԅ5r%any<]W܄0hmOvQ}לF|Xb\ًvMn1E9 q4p$e?VܦYo"g#0$,q:><ӿhvpCon.5F{|{V7% DES@˶$R΃`nېΪl_z+$hue ߱/y FQ9_8Re{!j܇ΊFeUPbl)pByo>_A2Ml1JDQIZ525̩  e Q׍L6P= ~bGfT6 2@./i<~拠4IE"EJ\n sKY .iژaB ͼIL%S%Rڸi]fhPE aM(cY*|>>ײ}).I,\ِSm a~dY9,DuUBVY ?`i_$+XTʄF71ވ"D%(]qXwSk 1)ܪ> Ѩ3@@<;퀚mS f] jy0)`*vH]"K (Bk$aDYtTItt 2G-d$95nH|/-UWoӘpB6vqfT]R類񷗫tyb*!qTPit Qgfc΅OQ3Zc+޻_>ô 7R¨\XœkB$z>jdd@Zh_lG0Z e* cZL( "z1GN~4}J t_X;VlLC0[o,~%֎PKAԎ?knjιQy~1ގѼ! gbHDBAR!W*bqdIKuJ$P3.<Ȯa:8;ЇxeWOE˂={{b_Q(Wnwx6>m1btaeN:XCg߲sG} apJ1/>& (8c(CǶW .y{>._:3ANmV8ӈ\4u2!=نims?r $nxjDz0L >堂*gȿuiQKCb1\3Kasǎi {o,8Ѵ=@&KpM_o/JisNE`/jaWYX{$҈A_'s@_9&69y×ڜ{y}j9I5@fİ蝿i"3bdѭ7-,G-ZJ9hcAJ )yXӹ )`ܞmݛUjyļQ&4&uqC.>9gOW!#Y]/O8 gOn=9{1{'oZ!|:9>M R6K'ȁBAw|ņ>ȅR2j.%N(;(L%SKTi@KC6Eu-1sWf!bl5S?V 9sUX/ tGu&5.3qhgJ4RlCX`dkA{j7eGz^SLjW(BE?އv2G:I:F=7q`xfu؇?W%°I)[Y>ZcC:m(1P2> stream x}ݓ];ļle2>-jػd`'UֆHd[}s\/,nn|5f9'޿{݋mRnR-iI9v5opEy"@_!/߿Dyߠl~.mo̖&jkyË|ZbJJXNm]%!T}g9[I۩՛/xJT3P beO%9j 4g1rT3ص+( ɷ =əU(z"qDEG9(m)DCD1Ӗ>$gvچA˺ʠbɧ{@sm=x*(VRAX"~L}liYp+6`b,mҐAiInU/0jaPU]@`*Kq1,]e[h *1OQ+kK!8 DrO tZ`lu.|8P0czk@u54=+ڴNC`'$)5ZZI9D Em:DX{[^5CDWdR6+t.Kn|ABpmߪq]Ct/Z*{ˬAXu[,Zj:U ,Zk8Мw*-6qbZpu9["l܄ ޹`"bptjrA^i5` Yh0QL@H.>->JpOs`+4W19ԟU-iZV PN81g)>Jp?Ʀ+I۹zʫu(%M2)nJ @N-PIxIgP)>Jp?_[†fWw.IXIֲюi470 f@ u% diM0(sJ T~bBm+?Zv˹F5D]N#`9$.G:Xt8:{N;9B Ff d7m]Tn-b%tQsL\YsIKӸd +ag;& ,`&a +!riZ78ci,Ɍr@,iicgS7iC)=)Sne__7q_f _)=PX QtTXo9J`ģ)wT2,e?;,ҚdD 2\viwpbb2hFa?.k?~>"]881j1aIȠ5컌-=." /F-òO'iuC0`D>CN[GRZu!l9GFmc+DA)Kn@@cݬKbn .N–pD0f,dw˂X %& Q2L?O Sam90 ]BIu)gA [w!a] .KdhNYjʈУ)Gs`雓G:LX] Nt#kUpwبc;)P_ s9tCs:OK.%Z28q4DrOէtZẂְoapgK:mSOkyCtٵ=vM/UE!l&6jXԝ1|+LxD0y*2 SҠ  k 6tu(Xpmr|tv[N]tvt5#^z30N!iIpYc7~#S7sƧ =<0Y`CD rGR<}iN(0طyYe9Skͯ d ª4z Bfyxnk)&|lXړK]si0\ f?0qWpELxxV@1iI^gh,L)ZcV811 XBdYC@9`Ctg; #ѡ,g2DW^+}PwV3"CJ⽡Pw)? 
A,.zFE@Y bʛ +q $GDs]K ii/Ⱦ𐴌n.< IS> IӢH-,i9*y*9*9*u*y*9*׀d'OTvI<5no/9}(H {RׇМ>dupTj1;+!i5yc,`Њb՜ନ\`kmAYeZ‰l_W)-[*tCp/1{t "Fm3\مx@aSZD[(CqGi]OSLZ0Ȉ樫˨DSuR4GOeBp8F&=}%;S鈭莲4֑.?3v4W%x6̟\I94@< pO+I8!gHM N 7.%gDJNx)g)>J$,('8m91-r_!"4m`N@`^R-2K-r?u,;Sh(}fbaA ly,WA4zpXIw0K+rN~ Ŕ(D,-? $d 5J( 9%(fzm_ȁq4A e~n:)xM4Zs5gc"Ox1@-p -Um  ^9Ƚ ^kpz@m'`pm!o9.7 ` (1R39ΰ2$XKiM`myxnYgkt!xm9b ֮Za+Dj*s+qv1٨D3vY꒸hNzE{óMc/a($APesV˄z8<<40[0|Xes>VT/@9H+xtT|@AArmͿ9UYiuT3PLe8*TimptDqʇ)cW{w>ʽT}ԍʅ#z A@s-º U&9/{Ds>"x?w]Xxi&ByFXdĊ1,IMegm8Vj:'_Yvlni5YMhKX]=UaWe*[`r2A2Z` o^W*<^[1WՇ8:L[A^ MG!P1,Qc~C*uJPY}kc%#rs$+O`h/YDȗ,։ޮ9WJ`P6Ȣgj625pQ kuL"%#`vH h(VrV wrɧ2a06piFp{pXS3])c}PVwΗӾZ& ƦOo^_'s8`(M86gAJp\"\FοgD7Gcxfk4Ԙ&JSSQϩ R)֊lpkFj{Xnr)~F8XS*&SNC kRh+Ns|k"0p+ëz:ֲ A6D]XQ.,N-*tSh+NuՈb,|O &Y.H *rt䇮RA6nx%iHIiD$.6Scs|$-i/RTNt n/dm`{/BaKp嘯t_n `"NJ0 n4P$ q`+'_^Q Nx $k@ Fh<:P ~.@%K]NI:9 ZzrrEqR/I'H(BkD E/FQ:kb*ȥ%I.V:쓮Vh k=u%RbcOV dPjyIrnЦH_129جu超=L$Ń6HIRbNM%7m>V!c?sű1H05?5vU/=h<)yp9XB2Ϝ\ ;u1+^( W<…lyXb%g`׌Wˆeqj:fJ9e)=OM=xJŇ':V(-Ѻysce@f8ꗗQYQ530 ]qdo3<4svZ)S 1]93@k]HR$aZY2AdpynbρN𪟒Q`$~QTkaFXc-kĭ PЌ""Gd>3q\#U1]jMq/]ە} lLn9]dN>|]$1cM'֠קu>0yVZsk[mQ:Ɯ!_9 &UNc39ӂg‰k?:x35A8dC6P3آyxvā!c,/>$7QFьCv ?fl֕pҎSA ^1w [xhNڒyxv؋Wc[CCA=Vc9`XB'(ȩ3ҜJX !p=gؙSW.C[er YE!}YђgF&>FhӱYѪE) L= Ks.]m+6%xi'KH{T/Gv*@r J.G:*@vd'/К ZQFޠ5*{֨#֍v]֮^:zkD&G(=ޏt$t{kL.G:JO%˖҅gm3kb-`Gy/e|73{w9!De憓_ЊY,m5Oof_?ޮ]whz +_&]C42ज़i`ʹDӛiD5ʂbj`5DK_?Pg׷.ݨ'nT]$ި* KrA'+M - !b'虲_t]`2a06V<^UW|GtI9(鄓Q-I3_ m:8eufk@2D}%{D}Π5qF!j`i#MQs,Q鎼Jᰓe7z~A jvI;R(@^9Xpe;ЩEJ;93vCi#8BL];EE,,-iCXI V0\[ktv mrN)Hk0!_=_"i'Q(AE:>AHNu} [9r< D-r1pEzLzIw``_isS(ʡ6`?6p;R9rx (.Uitj hS)$M,Ji [&Tn:Xv ;tQ'y+,YAodn˦%* 1[Ye|#5AV@ F<3k 'Ԟ N{0`&`굼$@l+s `y9V Js<*gLt{M9[VcӜ >rH"zoAiG0ay\rF;9:ׄ"f2~n@hkMA >%)=3@{pThn Dr7YhUme*KX-Oi)$+¬5cx)Z^+j/@Py a'd$\z>z| /.ew.mĨTp(@KE@P=Vn #KyN0U am`{3K` 0,eS4ᕚGCX~=X\%Uj\<<4 i )>*s眣b&9p3gF)z"G/11|"ǖDζg69.7ê;0#͹&z9v)S֭僱#UWS-)yP7?o-/n|Dro'"݋۷}/A9ݟ'l/jsN{뷃$1WyƜ R6Nm $?|aLIfs6|/Ɖ7׃,xS-叄ar.c =p^?#XiwWj%VJ+QwWj%V0Kl ?D3>θJx yjŁeH>)[2_ :r@ۻa rH<`D(K0j%@]4jo%'8{^ gV .J$6 fb"إWu pr=ʭDMn%or6">{ LjZTO^ǩpv_Buapv_mv {_Ou;{6o%7ⷉؙ L׏^WILPQ% LxmAK* wƐfD  bti\U L*Vr2xzߣ طn`zٻTJd\0 V~6rm?Opӥ B}7il,= b")]P`zgptkn"*Κt=S@K| 45` 蒛q Oi#&jA{?@ ^XA뵞i cI^,|SO9wǐ#;VM-–!645)X֊΁N;4Q"L5r\M!8 VA{;+;%x iq}Ms N1 |-WC]F MSBg={q1.B5.9{CwV j*,~wsONF!4 YM]޴:85nD-?0xZѫV˨t8.9 >~M3y UPsӉލBj 458RSh!w#&jAAvdRnHs4\M}8Z`i@o@KdWdK IWh.^bCe:[CDߏxէ1i_'촂B=T"\Qh^`P ?FaT#@ lO*AqF2S-pVm)z0i%>Tz#悽g?73=3b2ef3Jhf?s `DO+Gn76/i,mƀZC_c>k7M Z~A"='zˏGpg] _$Jp [ۏq||fb'yf[r7)Ιd9 RsXL9% C7)!5E6//۰?|*ڄw8c,~~CSY 'H_bvhZ?Vu eWu]6D؊ro"?J]o1-kRi]oJ ~,Q[RP_0h"Fkn}-2 [7ҽ)uЅ} T+76Vac qo'H*m(gȩ< V5גz7X,//1T ]ar?NZH'R6P &N5cs@0|~N=7(˰aQ4%A_Sk-~I1hIyx`kXni5mgX̵Ô%a5kXg`zZaa.6$~1r;| IU8x?~z ZVDlU'}yď-z{7gm|Nd*[Θ|;(,? RVlL(#5N8 ^k:487{ٰӛcͯ!qF_kێv|XM$hP\u/<7zt3XynO}iүZ5@oC 0Z?!I"l> Kxl.U>tYke㿦K;64CgɶϘ7ܒ%Y=U(I7Eҧa`q轺t\S/f9B8crx8òrM%|\ 0MT&Iw,xG2W2xUV@pfT4n5ّtGS_SSȨxnw,b}>2n`"T%C.p8Aƣ pd;jٺD3/L["j&m[\ئ_) ^>?3i lpĨv:=Ўj}r /wz8 Twvߍw"v+`6e ;!`jC'pfQ^]`v};Ri0LKIeSj3dkpD,)6upqVJ1;Vb@_5( _BVfk @?6~`?phghkJ L-+t\^``^xQO01+a(ڣVaV}Eb⸆ dz>6X~-hghGܹNΰriI'3H^KʲU'oě(-qdE_Ʌ`Z4ZK6 o}# b-:ȍ M6Nf2j`o}E>suڕ dkwJW%3aMTCbgKn!G)9T%s<@E `b _a_Yt VtubCm":|1N4e}ıgrbS&W:g=} u\Ƴ!CT.܂s6"2Mr~J"(;^0#^ouz*DH3w4a]>wO7Z@9w8ǦdX XYgѝhuǷhH<`f%Sq%3Dxo»ACٌrPtB=o8>ʤݐ:w{o<)q/a/JߞKMhkW;<5Po.&V"t*I~ihS6GFK}yG8vfK@K!Ք)FȜ{ I%reWtPՅfuA}Vvp^P/O30j1mCړ4 ǥ!yF&槜lXX {.;8/OE2'V y]ˆjqꈬ p0h2@Gv~g m蒿ۧogv݀[t[Qd\zmdZ>EIKMЀ_` <719˶@-ns*kݙF.wL6G葫.i: $sF%|Plnuܹ&/]舵'g~4&,:/6u~1>!چim* Wv/.}YG& u6@U|F'3e]M%q x|q{BD5w'{ڹp&P; n"LYصDff04F~; Yv~@>QA;Pv?+ndࡎzDݖ!z,^YvE߶ʏkx$i Y9 q*ܶ(0 AA%d>Q>N$qܨm-A3vaS>tg|sƀ`K9`<-OCpi*pfjK?~|hu3f;zl! 
43 .u@77xa_3LFa >s7Ca[.7)(-3 cƁHhD|/GݐP9v\wgέHDK?++ S1Fr7]wy;*] L Z0^Z!% |̰+B >J@uLu^Dđ*{<;-Jk^ж5.4CF ]Ydz餱F؆qwa9`rb|xP sԦk ^kK1w ސ چhhNW:cvxYqOt X! m'.an.t[7:]k , o<{=@1?Q΢}{V[ן/DŽu.ˌ_ cqoh`vw<7A kޗgI7 `4 Xǰ[Q\i.<"(9q88™o]fȁF;=`B]QBOFG"^q:F2.]F<2#.=_Xڙ#f?{ׁû0^RByx6`;~?Q:gđޘ{вo b$;c uE@![v"wA_)|W-9<v63-8n5,dKaL ?)L ;.v'&:Lmz6qKv+ţ得a&vQ/Ww~GX9148}3= ˠσؒ] #)fv;b?=W8r=Mׂۋݛ\endstream endobj 157 0 obj << /Filter /FlateDecode /Length 113512 >> stream xܽ]%;n(޿_y8˥R"A܃7m\_t{<3vߤVU}zl E(o??۷}o+/[.|/߽K֨y>I۞}[}>ږߧԶ(0}~[9fll#Ɋu],+˻1aCڷpHHyoR<Cz ܋oo|{yߍw}ǩvJ)m\QvRI0\qꉹZ-zӰIUM )OԹ([NO)C &z;p?WM.ejj0Rhן?%-Ju1TzQ,LTkePA "եL xQ3jg0ޤ*xQgOG >]'6aT~T3uU,QuT*6Fu~~P#%T,AuPvөbI;;l[7|+`:@] cmL1F)5j=&#>@gkVG,WB5$PڄCKÉ9XSNL)bX}.R*笛]kYu,p'#_1+hp Lc%uDm(< .8Tv›ܟ})Ñ6!ZzO0oJESyƀ-`G6 Z&f[# 4hT& &Bŷ*(x->*I*RNdE,+$ 4h}'g! (< m#;O#;mq`2;ZGud{#vBzUAYNT|f?Zm)yD&@͏^sy]wָ`s\}aS8^8<78"g]Qܢ#/r6/gZLOrEζȉ_:wq<'155w!(3TV9gkK;9g?*</'aeMNξBcB]=噊sN[{Y2552 Í\&8+( <2E ܮYηxLBmU[yqyE' 0B@Ԅ'B{*_mN\E"Ue9΃ʭgkv=225GF֣\+#XqF6Z 瓓1OmLXT"IۄH܅ywn}kh84;tr!Q|:Haӂ <]P( G᥮p3+Gj鷔gHoN29?B.崛9y}70 ۦ9oG.i!;H|BK 7@x 7c0oOs޻'ϐ@>q 7oi%ܧW0}Ԏ6vPj"a}?ZZhs۶Z?`w=UH3 T{ I= {|(P $NnN0黑:g?]$ҟZ:.fXY+| L)8p=v%}Claxj\<}FWFL lr`m쁘@vߩ*-VJ|JQMSs)\n,ݭ`[9"K슰0해\ h0T(pѶrAAA-9a/2r)]FzM%3{.9;hc' ;^!&<%p3WI .|,t<P*1\:^g3O9gr& ' 'O\=٥A\ >5Bq0; R]#])ECv 5Bxɤ)B5]le,-:R[D,>0խw!5JW fN/L_d=ם;j p-|Y*?O-\hTNL]qY}'i&_Xla;WCe=2x(F.*BAR,jFAehC R(D9hU o32* .U-syv02R26Ўk .3!¤PR98CAO><uS*xw_yv_{'A70~-Nt'&J۪6AEA5:XXp4¹bjB5rm gruv5ԕmw¹O4@-?%X!dZPa;.SM (>ej)BP1 sRwmPCtcC%}kĹlt;=mOϏ/ v_N-'C>/K>PXa EşÒaqnq2Z<Ҿyf[vp}MȾr[U8Ĺ{xs8k 'Pcܺ)ήpygTbmlˏۧzHOG ݬtC >kUSgssPwpn͵nj"l~멖Kx=55wO ihեS-m6h]m!gOG %<2XYE-)8QgDKpٰ~-MZ/D՟?%{ZAN")lH TEm=vn27*)+\v #M8ipҦ,!L KϬBAW7dp-d)O&k֖"Wkߜs s#E3U]8oA6Y<$3϶4O̳RgAjKț?\00FUԴ27i\*HƑQĊ*E3*a 3%Y(rڭ *G&HԼ=nvDS5Tm;U):qAZ& \0QKయ/UQ,R7&]@ vH usxcHݾ1sgHV#}߾g <-ԍ]]#7 ^TO.zÚ KxvopOF^ =߃ &BO<@sb)@vӜ*%A>kSw~%2yt;Z DûOSl=v7or y(ӶcTq\mAc7ow?av@Wb%5JL! E"x {n\֛~-WIͧX#/ /)מ}0!)rUSmUl1^ (J+/ܫw ==F=rZR5&ejk:^5 <2E[;Y3xJjK#25a^3|hk-4~9c<(ְy[_0ư5(33a}m lkc6U[8a٭a'akQ5lЕ[ְ_v ]0?yH/:,:v] ej#~Um;vT_/Sr, rH{4 kƛ1G|wƷ SS _\v~cMe C L}~~톿]3;-.8: KF:1TSE^ʺB^}J/>*: z%EA|AGL$@w{čC a? Ww/%=bL6MKg#r(ЫD44eé+*{(𘂖 ܲӈ(XT1| ^!!b1` R6BTP@ec5ZD9^?GU@}@^Gg <ǰ@*#Oǻ2O3S+E8主9u!<ڛAxWv` vm/ܮ_yhD+⨙hSQVI9E*tޏ~_6(5.7Vn›2\ AF0om$Zcܾ/(XbSoesۯB:5kMxW52^ E)^qsjbFWA`ؽ^o$G5wGӬΚJh1fi[A@IvٔH /!M׫;02OjGG-EB^BN`(tQjc!5/!<ue bD`aө|3Db(w&eŰ<&%~sRfvR2IɵI1XJL`Rb'%gjpRIIfl+LmhUJ&M6lpBXgC?Fौ.2.Q2F[FatCG(,rj['[za!8Q%U mr#y/g'VPɩ*QXDԹ؏ g7% />[wmEXϖ(,rA)?8h5T0 dM˟f tm-OBAbJlfk<̢VB|Bk‚Ĉ) _ [(˩"yPWY '-äDyX\v*-zdr@0HG6m:TC+2BAʅcu(/V1t2(ǩ}V4 Zĸa |Y~0DN_0se0)SPC 7 sںkPC R(D9>{ۓ%34k-jS{ ()>c$QpFb B0K8׾61jᢩaCN(hc~t\hڵE?$Վ"+^?WŽY`At8"l1,.,7kNsR/G;93x''Bh'.ńv*,Վ"jQJRdH]2]Z=Ddb@!sEj-sa?>_aPDPM..Vx|e)0/\t*ɢH = 83[D7BNVE@\Z pc:}7;1)>SF12bؘۘx czP7c֐DNh"'9u5s`L=sQ*'"\kYEl| lmMsBE/ ,RZ_{U/e &ExugL=w.p[vsXGT\pY!5~NuXZb w>ohk"9Ǹ\{DˁLj .F 8< $A|O=pHdD]b>dMQ;_Q|xv??꼝pHߍԅ7>CKb;8Э8xo1qsѫ3_%;uҞ03Yzf =(sॏ]?qEoM䒈 srւZ#88@L̓=ii`/v)K*d39#}ܕ=ApO{#J\3$v =!1[#l$2v!V䢞ΐ lW;>/7+|9b9H&G|a33 ݂|F~(8?ksϑՅ)H2>)zfup'x哪qӴf-8m̃@3lԻ_6_ҏߓn6B!>*׺7/2_=:/wH7.憑NOFB׶7C+E m?b󔻊jj RW1A>s K ((6?{b 9 Wc\$T$VM2'C.KJK 9ae̽w dX'}F hJ8=?e_~$yY ^,TjBxtӱqG(W/Y V\Z< BNv4%D6ryڞhm_q¥ ;R-pQ7׻,Sχ#C>1: W7p[~^HA96@.}uۊ#zb 9vms4 67Lh4owtR1ܸb 9zՎU=WtWș39#P$7b.XhAhh 5Ug6i*3EF3IR>50efR4sAk=Jm%5-XwfEJW,:` brDNTH^U0tsto˹ B9制6$q%SXPH;lLRzTg,ҦC5Sz6%6HɢM J:iӂ!\ (Ǣ%sK]p؃×|v+@ {y:0CWՍJ; PZhӱ6Z6--ĥ|NlZUSh/IAPPJM~IѶ;&\6TZQh#,iP֘%A(Dm*QIJ QBAʒ9(M|%'([! 
p'B{ppfSg6sVݦBId+ݕ"6H/vEYFJ>i9 ]l':6+\Y(2ꋡ,Q9KJ+%V*H.biZiri4rye\aLcXeX(aI0-ŁrX)ː$iViM+=J+V,WZ4bڻYZ{e*4|, e9{Ce.pZ?1%ݫrJ|ݻ=\MF>;7,N\fXLQn05 [A T$^ #ľ6l!,&4ɫnpbWAc+ 5VkpnaҚ2@߄evEE^{h{ N6N}{ml.ٌau!$:v⁖6|R֝!ކj#e D!M3}7#mR`BPm {"eRjciC')V01 8U78tDBf/V +-Sf/(lomlo\p9|D(mDQ"*V[0c,~!-cNܼwkz5Ae'bL!JqHn`V:` ύhzx.-yŵH`*f%~G5@e0 aP(D|7&m/? hK c(+c(;"(G BJ>E0$Ɖq  P"==oCnS@H6鎁>0ȨgLmo1n*Ű6lf?.BS=^گwGTH]MA.݌Rv+Lm89aQ==X1K{;׹"56Uf54Z}.RjԽvmJb,E.zWsŅXᢓTHEj$|ACBM_Kщ "●{rO̵u0VۃG##;gH=ux,l z#,pRBSByh>K>(h6}O?AfnO > $.,m=hݾ`X`Hh vW-9~34L[)N[;R{D$D/ɹ7I2=ȃ2Ȼ]KYIݾIj1-= gۃiIm:G'rE=[ݾA5OS;c(o) *=P,gŽ kVJJ!o90w %y;?0ov[= i<)\vhsUo4[A>s$p>2"~f@?;"_u|z?H+1Nl)8[iP'[rmtn,Y&Ky^ʅ`Ba#8XiwPzD23O1K0&.&vPìWTSPZ'S DZ<$Ǵ[jvW(T,\ĽTJVPkEߡ%"J-^]Mrj4,T R3KSrO+RL l,A(sxHfi%cEc,y Be6$q!¥Pr(=mJnEnECnEo>6݊pZhA֣FXN[ӌa"D`a>kiP`ގhޢ10̉ @e&$1!$SRy=[Y1YY)nބ\V l9X1rigce 29fSLzjð/ǹ1T3U٩wL!{- S;(G$9pB9ղ"I"bb VL+}@m 0(a2PH$6du~&x-EI u)ָ`ZFՋSO 3dEmӺz ^\_V/N`& sJՋNՋ1tbQunœnb]W/:V/Ջ)+"]0[Ytb ]B#xZiČ2nYFΗN/cN)HY2B/&/唶d/cΗ(,r,QNQJVԖd6&N _z#Jm(!駍 .BcNޟRY/!~X a](HYrD-{]X[3yXv{r7=E-V56=#Ş2 Au!q5=^BŞT3(ºY$΅HF?Yl7 ڴYвYL$.4]ڹpiBCYLq6CL-QviS\T` RA{nF.mMHicB0|OJF.̮T`* Rf@٥ ?m\0jXxvq"OjmO㝼9Q8+^-U'zcS']_Su'w{췩; Q|=\ˁm*|}oۻ͔ $x̷KQ o?} p?J?o41'!]:vo_Nϔe2ΐ@lD ˾aumpW&N0%HU+Y K!eU:)cRÒ[G;yDnЩaO?gDYȾPh?_[۴ ۋB񥅠o/K Z^/,߯~߹1?F7nX03a⨤ "r4Li/hH^\B{A\>{E|cYCbv0؂+t=Zbv%E7K,)>Y$bIY_,I7K >T2\B:90JД/$zz3a d8ep2Ef ;ԁ/' av0Η\ׄtm*乏{R@!t>?/?3Dk܏}PR5>876sO+a;cE_n7!^Ae 51}0y-) ؿĝvyOH5 !OO~lzr.^OOO Ud~}'yQ hGGbuz3<*$\a EqAqR<Ɓ~n&7*lNg%P`ȇL] q-gs̑@iY{i/)ۈ2`#n{!ڠ!0R?'[~?aq6,%$3R1t ?Nsٍbp+)S n\ *T ] *۸((a1^nO70je(&J7E`r"x-|:팜rGYbe7>L-)^dx0Ӭ .((aڽ1^vs{ٗp˜†C pü͏vtv0sFz@Ј!)\-bh m~?C \+E#S2YA70ZJ>9䞯юXdxF36_+9QỎ(g}].K/F\H-U i6~49'}%j[-@m/хw#LU-`a?7M? =Bl[ϴA>52G _T,|;۳*@thC9QR6EnSmml F *"7fQy&q#*obp;=톹pdx ̫e Mg j'Saa%a85.CFAx(`b]~Ŋ,|mx;NW#sM om|!mqkq~c96hnhB7g |c۶~|gwݎc3i~GO? v|O?x?1oHjbF19}OH?ӿ m" nsH>WGZS!’{O}~R?9llh{?iN_I1s᷌st*n`?,1Ֆq{N?;=|~Z=6G<8u;ISPNW'ÕnqA9 ?"eQ9z4=N@3/"K # #x=C09YY6JfpLM@ Pn,].BG#ە*B ݉x[S M\Ca /x0q쳆6E'"B J*qKW~t -,:T+2B@ʙrh6Lm( At2(Y+%  O Oy!ŘB>k%E0Z1j!@zEz؃*~T t\vb(MN9ڜQSAk]4^q|%#*URWx^ B2M*Er ջ}-k.{汨hC@SFܼG[za!_vz;FnW1`֝)Ay >^5vfslU %3:24V ;xؐȩ oqwdw?|Yx(Fb3hUQN}N  +n3;!C(s}."}2ۨok1?&@+Dx`ARpuJNRpJ,;IE\[+'F}\\6QCdE DG MP\ 7h69C}X?3O2q*m7fZ^|jJiAiAfh jVxF #\Z 9I;%$"z ɩ*[lĄ7{h,])Ƈ)N O i]}GZ4PKCBu뼫THC$|02 zTyH0t F> ]O;MTp/xayAr2(1ԺRYҒz[O< *xl2u8gpΖNn.Td\`V lB$@dDIH)kpսN} З(T)律ʓ$HdaiVD}^*puŐH;K7i ؃G6ԊL|!E%Cb*C"lRD\iFkzҾ3hidlJ Y3֊L}}a.%v ߭&rbw[.X\3dm(q-FB9rSsk! sCb!yIrKٟ:Iηz&_[=tÝ w$輤'i,4c虫Љ Szn ̟Z=@e(Bvr|t4; \Y'i~pca(H ilBp!¥Pr v~BxD[|,ziwEjK;Y!t&+JಓU1AE6PvRe 0a󻚂!C't9ː60㏵8q!:L!q"fssD1S^wZbj #!qd%ˡZ y @~%^O.?c*˅kus!qWؿS7J.;$j'.H,7P!K 0$4DSU i֒"TD % ЅD?3˯9Hy,y,֯Őb\0rGTf\jF/"7ݪo0LV~XT~?&WUUV)*E6Q^dp6^ ٹ%/[es^n/L 68,׮I_Z6ud2ȞqJ_C"m;ԉlzR 32[~Hnə$ZtOx֊L}}go@oЯ%cҋvY֊L}}ao8ةHw6=Twؓxni|dDǁ;|aF1 ĺָZLH‚X6fU W(H0Xz&aB!qm@6)pAF2/Se>bm_M /SRn2:;:mCA* Y Ki ƍwA0(QQ25 co,mPC` ),r-v m1 "5p%p ,R`CA.J[4kDe'`(Lazc=Sraf}Sާa R6T8".;C n.B& |k(GLҹ9>jsRٗvJɔ1\C{=WgnefJK/:(m ֆ%9 >NdNJ{q.{g#)=]syjS1PX)ϓ-s9 Ƃt֏.ҩv84|_ ^`pu!`H]\  ݌r?g̏+:>-tR#VD 41ܬ[!8M.YWTl RYҲEF \CF?l=iZ&Yi*:kp3`N!r6إs~AOpU#a {xFhZHqt4p8s 4B7x|+FfEW7vX. 
?di>OK#b-Ww1&n\!&So}>ɗ (;A1~`녆eZkMA@3QǍ ]Qڠ2.C Qry] vȕ@yT"P ,"P2N)cy\*(ǍL76̓&0$EZvQ^)u`3{bu7^n`1t`Bdqb ޛa`&N)6ow1Xawn!6.s(&fAS sTFƙ@zzRիAʶ>WӢMJ ~ȼOU& >]qL8C {48Z4@s^$JDOKXL: :IBwfW2Qf!i$2Bįr =~TEaO%i3gAQd3W*&H݇2-3 J[\g~LnW$??42Y?#,`7,wĺјk+AV eB,䤥W0D@ kKQxM`a Q=un}5[d `1="vbkIÐ:l2UY_0ta Qs]$>׊M%wF>-~p:=> !5&~Bӌ["T`AxQyr]3bHAÄmτVX% el X2a[G , Í ./27F%GXYAR&c2Y *:iAxQSG%γ c7eA>S)E\0=&TB /"TpWuH/}<T,S3o!e QZ*8򠫏9` ݟ3=&wl߇ 4O!| f)M]v9X1NyVoFyYΜ-MMW@obdB.o 'Ga P BR!t* 0}5s9 (C炼ݻQ"̏?(WK:~d ph=33 +ixWM5 uL "L7А*s2ij!X-E/s .&k]$!yK &AS:g0 Jr32uxqf^t1mǴ_ 2Z1iʥ*&<>}pDK'krK+x&M.|ަa M'LONDfK&vmNYԳJkit2B腉vp]CX0{W%ɺubWZf/Rת!{=Ɖ8b7$ѫ'=7ԊL|%-yDYwwM.m~""=_IC!L$߹v-*J;ƶ͍C-)$gK}h4.i& *me3=(}D7ӂpkk[VK=.C ΄)JKe/`BAԆ !R(jT6 CwL!qc=58s:+{׎#t<oLj8v D_|k؝CJAxYOTp1Udh&F,=O|f$1n>/oGKitDAkִ ZSt,-Yl%&"S,XrҼS3DdŐ>c ԘE nvR}f|j3'J[JK!/'l|:Q|!ɧq~ 58_\^lL ~soL0Q2vȷ܃IW癕|˙>~WZ竽fN\$,4ABꄌf9c^\V 䩃I)/Aid'37iߜZ"Bsl쁚Ut/I,].]|B7 in|Že7}:b)&7~Xף_ a9$-H}3$̭qx??BCu˦V elPV YM(^4-\̭s]$NMNf~ .; .Oidr粧& A|Bkz^OX|ݯB0jKRiq*hXWگ>CjؔaMN[W# =^){6KhS9bk>_FϏ8(:JTgZ?%l@WoD{ ";B*&w?\v3JK}.R.4Z\:F[}P"+MC&8h,x -+"}7)qd ²Qb8rgYa tǝp`D1Rc]ː 7qPbXenSF1&(@!6 5 BrP]Ώ9(cGm =aN``q 'pK{fӖ6V R(D9nSx¯BRU ܉7X˿rm)y'+ߥlu\ƳkNVB"T<(9>l|zA_N !FPb 8Ib*g<|g9 z|P^539ҬŮY8iAL#:}h͞<: Jw7g9eGl;pm2竈n H: }u_~M lyH/ՈTͅ^hG$GMJ"Ǣzw+K4\02Kc7)ͮ[w.2ҍWǔ5T/|b C@TnnTg$îq!,üHT/0A3f{`ފ;?enal c" TjPY$2 N =,8-\\9&@<jG.3핲x_-q=FuΈ˨済33mk9 ^ͫȾō>*r]CCBUNى1tb zv*|k: oJKvtb ]:@SеoM>'-<e*#` GpS_N9iOVIHs>[܂FjCW0tgfqeuttѥ])禛SmY{?r:`)ʐڎ6S-(Hڀ Idžnd?S.\2U2ݼ].NX!@*wPfV G)Hpy.\kN`Dat=,K 0'W9B n@:p7z~x bLUӨ2N )e ިr\сZVb@B°(g;}4b1o9!o^tIcȋř/|0}~P7w+|}.=W#"}7R[?L [) ",J8:bvgC k8ˊE0E0pV牨~"6lXE%!M_ž Ǣz%]B< xRZ shګ0~H^Is4p汨^5C93 :)pjs̳\8T*Tfsp2̡iѬk .JKCt&C5ԋH*+J0'V'+i1\ݯE`Klv26JT'̽J_Gjz"5HWh'Wch 4ԊD|%}eQVqR; ݎw(;C]1c2c [TWt/M5 na$u/^P`N 8wFO_Mmπ!'< p҂}s;LڼН\v2& 7,n0vBjE?D!#[0g՟_Q\T#Kc("il.݁.|>T9 I*pmPg R(D9(Mە" #B(UD饩< !.sNλ xaZ6scw4|RxxL:n$CFSoH" 7d. 0$4./)< `%&A+z EGQܖ_{Kh>wzY~~Ҿ !P/'~.K4jF7}o▔Ft[͠JM}|B =[ XlXT~ .pic͆,3p@33da]/NpaUYUbo^rfz/Y0+YezP+AB;Dsd!d5jGkzҖdM?&_ $We<}xBAԆOC27| -`gߕC@yޤXgq+ 20V6Dh ()SNֽWxi cwe'`(La&w#q 1Ѫ6EfV G)Hq.TTFaBRvmP ,%SX81)h^ ӱ'FJL!WC(On B9qs29's \vIS5e/l5iߤ8ؽAGjF"Ss؎Թ) g:$GHIOGG>Y:Mknc0,֯hދ$zH<00L0nFխW1W$G5G.ݏmaJF䄏MN.Y!_*$c~)S^iaUɞbS'KiHUJ yW]ܸ$!X4X6@\;;%jo&d\r ,Bw-rZT:+By+~,~W}~Ⲇcl6032 BwzI;g%icݲD:#b-WWItui ϐvUĎ&XOG(JĹ (a *c(LaS\9ߜl/-ji>Nw\v+,BAԆWec .\2EM3s=o3ާa R6ݣ FN`."ǩM6ė apq12hӬq 2 !"6 u&C@=hެd7RQ!yh[S?BED! ej‹p#ƁV .:qAxQkFI[68lnpaKpR ,F@؂ O.' 
"ZQo+)FAݝ ;K`@o^[}T`VT#X~wXz \-MJ[հ8q4 npX%\MG';z Ǯ^%8zs:&<Z+W P]_)*)`+*fecA1 ca4 '#FJхb,'4Fe  q uPvfiNx FV P3Rzwz9D>:S Tu<|@> pxșv88.n 'ХORw_c (4@?h*^ p*4.Uĝ^%Ut- JrG%G+L@޶1mq\q$YyYڌóf@w/@~䦛Nw~14.607VeӨ;MA}׬@ P@q A3F: mN1,_=S `6KpxL+l,!n^q5P9T n&M~tX'?e>3!Gj`iTM9̸3o10O41 sOߐ3Lh#T]\`ߐP8%~*uo`v f8~DWh{+F7ȇ>BSӤE$gኣha~&YL"- 3J|BU‰@?j}X=>\]5}X9\V ۱Ї%:a }xhЇapxCgL`,pQ΀_Hg7S Ub\aAi rlwk8n=!㹲BfbjRHI?J\S\fT2GJ3`?c(A)8ҢM$( S,Ɣ6 L[S1c `-@/'tG*X2!9s)XrI!Hԧ Y-}|@,'U`(uxzB_(s>)RBm% stF^>G[0M#.,ְPpy!AG.+\^b~,(hYڊh H#Җ~[]mڍ heoOBeEp崀B.}T˞TRI֩bYyjgq*y'LPktWdzzHGz3%~ŒB6f~PkM߭"YQ6&T>bp!^F b SZa=n:sU"Py3R!LZ6ƁT ipqChDT$pP٩CTtַJ?AW%#@*8$)-DT16!UmfH>?~CiAh=ltU&]2)44ݍfKYu,TRi? O qN֐nN&Gfͦ!IFGSpHPSHPZLj()OVaOa6 'O m&pmr^+ZŸV%8*GV #lV%8~ZPdɭ%' gdtlelTem8@8Sk+c8ѱ6Kpf*8r~%Hf `U-^ohO+ ̔4+oo5K2oE6:vCC2DTY_> c9ŧ9vM =d0=NUO]k#RXhbzH ) ^qX/!t+R%RB8گ*o+c-)Joe-o~P ]xnw\&6V՜dZVZcV0Opm9Ӄ''-i]4NRbBL~RWhJzFe'j;DvB؎?pš{9aU4dk)b)v~^W5*[P|RWh jmYO պ 倶8rO Y~.y ji+(.倢哺B[e9MC֪z@mso哺B2gPRAGG9B*q|?Z^E'uXNw9.g|RSO_ %f-]QqWIp#$$-cD_!va&O I'іdL0=7q\1#hˈ,R%p+M$^jcQw&B9Tv Fzđ.\1w6C}Bx2Xhw붑ȇg9&Yy7jA*Guhnj{WGN/ "CB=XVr8C rDC!ptKC +B8D >kpCdݍKT?$́ ^x c$+Ctx~S; b덊7:BC"C2FW%NCE5C舡I8qГqΎh n1؄M J]tk͗2$=;lV'q>~sh NoRIWt*Iq~JM̹ &c^XS0̺`mQ¶ǭ;'w䍍&ptf|N XpW%)e߱ -qA_fOO҆!!}7>1$8rO9 ybO9}C)9paoϕOaw,2p^%-cK`04Xa4 'gb0}C҃%387 0on}`D_wCBBa.qb\OGUHH ~]*Ο-!uz:^}ȍu??$q_|Mk?S`4.gtӪBy57Sҍa+ qNO{ "'b`9SZG7&o$ݵ^%Jux;Wqr,M‰e JaCkL)Z "C@9L5 _o,"f}Chԇf}xXz&I: ?;tX*z/V/,Sd! mcx\ 钲86~Zb :qF`@m l"aӷ@:B-p n`[ߒF`Hx7px2яuF? b9|2$3x(ij o -!WqhPΡ:K0Z-$Ђ3@ V`YF$5$YyFq@_Ezz57d3#W)bF@$kf*sd9>DUD=y~wH7Mqx772E 0p9àm Ils _3|p*᫳‟ ;<_8~B"χm)6ȽpёCM԰֞!B-`f$TD J{o VђhԑY~S4hQⰏʼn#o]ai_jt}SSƣ}aXff10MDž5h| 18<J8q<@9V:$$\/l&"~$\‰e#'VPכ-O6q pi.N1VPae9VrNOsɣ~8){!h$-c !çS_qDy"AzS>C|汸LuudF ML4dl0 8Q<㖗οSrs ]]Btq HhrN@_X}X~hN5 !>( Gt¢Քp8Yݬ2UJ<"Rzh1Tgv!x:f8NaV9&B$LYⳟ-O;-``!!KHrO@ g4\=Rͳa{,Q<8$Ɍ0[7ֳ` +$8 Hg?V%awpY6 P P I 1=xP ]ʕ{4l:DU`P#%-cL)PJ6Kpxz`dNt'MtӭAsկ|\k5}]*)$sXx.‡R @N=-tnn;D6"4:DmNOȩpR *\AL p׏JMK½`pU‰esbx'v@"ʧ ێU UVwvUQR>X( &; S*%STNK$; H14PGhK8q|F-:Su 8 I0X(Y /M&>!Ui\pץJ8q<}o>}Fsx''jn+&b>Q@8'jchB)+ SL"0k-C=HyH|_h 1Mi{jnh*~A}Vn)Y{&h%l%cp`(^v T.P'_|<)v`=m];X/NqW\9"Hx)5z蓨>GHL~JwH-N/{ɑA5 JbD8!SF:( sؼM‰(Z=TbU3aЇ]PH(Y҇]0ihl FDa|P'2 xp-P X9\1V O2xr II]$u*1v<|XX)t 9|#mEu)1g=Ϸ;WkfIi Ƙ$XI}KX~me8e8$JӺ`-8Aߗ <1A~SRO`R}sNǢґVRBB RO'\%p,:j,Ǖc.8rjY&H4ZőV!F[~VH蓢?RH(QyXr:BOZBPR*yYGy AowjVo-Yaz<|@wo`1?żгE@]Aiz OK.Ym8wȟb+B??Ձt dR*ݭqŝrdz-4~k[.by&%je``ʗ) 1&o&>i9:)~z,m&(LP& Rh/rN^,keL:WgU+c:Z)ӽYʘnEɛCYozXD}Yj#}_tg_o%đҸ< 羻 }Gjc#]{,!|'@äLL :6 Ӡ{)}Ԧvц|Plm:qyD81Vs&gLP$ΙH1A8_hg % k_Hy/Vo>Z'omG|vb]XgK]o׿P%]X-Xb﷫(1۩{\no/~i/Ho~hm>67߲Vٕ{"vKus`rYSGʀ0/uø޶ETrdmQS:VFdǼ۸3sy$hMSChC'a 6kbPXo!0x~;(X=f 6|So|¥-?zw߼K5.Z?[_- +%2& ?G혼 u0.mӆ{O5gwM>g3A?T h^gwtDaO#wjp?y]W_7#Gw_koVJ<귔aOXumm3֜߃ߛ1[Էu]co/$ s[kֿ{+6mO5j~}Ù^y^:6SVǿ_Jdnm #ϿۖQ:U7Z*~uLk1))@Lp:? 1<;۷uWhvN頶zW6qM_پẾ{7*0ϒV6G/ej[Av목[>R_\,+KGs?N{Wԡwhݜ6lK<ݚeM5Du۾O.[ IĻg7)96ľSFI@ae^HwǞ\!45 8AMDjN GΨ s!(\_7r8 FC[Ѣ.c(M=d SwpPg\MJS0:`FTG(n! ? |xE^>AqmZD ߎ8Ruz\B* p_\22Bũb 8l&jL]B,e0ʇb"JXP(.hR,‰3b5ba4 ' Y$֍)'qi!F˰਒N%(cpm|>%P(hI1Cg6opxS۫r`mO(m)8Ҁ9K0m_ڊĵE$&ih⨬z&S`})2"oӿ¡i?ttt%-cPk[4 yzCiꡳpI8qB6&:RepERQ:Bq F6`C%PU:$8~%{7":Q N.NN:V-) ZX_|]8C·&7)|ʼc,tceR-hv+/, _oM9S'sd뒃b2тG%`MNy| ϽM锝lϯ}L+3\P_Dy -~L*q--~9;/K-ޞt2lV[%o39RU-ڋf)YF]SKPkysAz|%pCYuLJ2U%]2׸m(<. uAή{l}%/D⢲i˻ʰI7g}+ 9YSལ;?ce׆߷W &))oj+e5U)!5L>i(QLI8Z%(xN/z-o|Be]f]8JpZ@N^@ѯR%pKtbEH7Fxt,]w,uR±d6ApBgݮTkn傾>/ѩf2S F_p:mNJ;&iJp1Q0WmYu#Zr=򙀗Di0zCM`FWK Ydz6UḞKo&/L'MaB|0I0Z@DA3ѧ^"FޒyZ'E4xkC}1.褹&IgE (coZjFa -N-VO ]]吇_p/@!>:͎_cuqieք2,T?rJ*,sPe &w$ܑLCG*w |['Hʌ$ݙՊbи? 
[rZ,c|9 h`Pq]Z :×ﭐ@б32f2  [_~h-d%,TOMn}b3up6,@.Kpn&.VOě ]6ߌ7kXXwвƂYYs(axrmx%wʁerC۰ xȆrus9.%,Tw]1UCWMWsh&3|h\@` ~ZhD>Lf(a <'5 "i{(BHm`A}b/iAq"QwAbV9f 6t,Af2wkbe8^v 6w[ ;z'՜n4b [{[i>Lf%,T 5[t}=}Ȑsn2 [OЕ24h 〠-h(2'E8Rg2GX,@{ zậ:0CЈ[ 〒 ^x|CQ>X`aJ쮄eǡ@9 h x^V)$ BkBIeغ^XX5gYa2yFQBq1\-Djp -y:ͿF8n!գ&Z"t 1C-T#:~ηn@$ۅH p ,vB/ un2/C 3PA܍\ŒfIЁV]f讑PBݭ Eo)L5l8@Cu8uh%Àds oZZN]Y 3a(aԤ D0`7BfGGp5€_À&3rhD-TO݀so7:[&)\e0A s !=-O("R4 H޼kl[ l-!w,G|[@/,G{na4E@E_: 5q ![n1aa}هBt /8n{A3vernLfFk-tB.sht<k^pOmWIBțy-˄cX!]}GK]#л# ͵̄*M΄AcYS\ XYXʈld uwgocVva-5¿<6)ɢLA5p².:D & @p5%}Ex{3QMp5ZN0N].|0|r+PVgԳt$<(yą 5rPhVJrB !WXUVZ1"2ÆFr>- >D_ <v{ Py' _(h{&$jq0"p$FBm.Eq10U96oKEn<(CفɌC-TO==9g0V:#hrqt[O]yiX.? Pb(Pq.W~瀟/;a!(Z;{8A2xHr! rrnCrMf. 4r25r.LGh-w>g;lK=ٯsd&3xhXXcZ,MD250JX82e˵r:HhEs'D 'g2L*wc1(a=%!59!e{Ke.,@>LN"BclO1.A1 h`Pqh;\fn!s5`ߌ97'|ax#fE4;5Fls  \N!KEL2@9ѹvo#GIM/F߾5߾=~tD=r%RS+Qsu+B4$ysBJNQoB'2jyaAeF^p mYlYuc)<>?hR,uI C.G2MC6 PpK:RǷe\d}A,{mdúL2ABA-\ˉ1^rb㻜?%']N^Gn:e";E}eyxY 3X bF > mXIO&3xh*$ڊWtHd"G^LhoaZ >Taf"F }p8*¡t -k:NkS)N n;!BSi@hp_bp>0pn?7Ě e]XsA5 n*ہŧAֆ, '?~_z洑{%=<`=vOH.s rd`68k;8(aĭO6 cshsn!n݄y붍;#.3|h`Pq<-ꎘc:^y.352CZ}Ќo]ʐ֍Һ{ř!5"#Yxq?A"o0H2+ p_mV=F&3|h`n*+,*RhH^C7*Ȱ/kTWIV#;[pt&6~Ȼ^OPo SȗB dOǝI;;x:w&IEV:<>B_V;)6\e CkZW#?=ص־Y&Bdԗp|)^@hYU-߸>})WƩKj?P}OUi` ZM!-o)ZRMPyCՙH;-)S Qj KT]?e-Ek[RN?dJRBe-UkoZR䶫=ZKђŔ5TvP.Z8I6n̒w]ujUU䥩H RIS0ixu]*yN> M7siGK9YvNGg)ugjYK}xd%-O*7[KvG[8-7] ԮIRB*L^.*W"%"%V7۰uM:ŐyBz,"Uvhjל,-,R.$)SJsN5?!ͿhX9`Dd2m / [TI*5d˪lS(q|au>FEf1Po"\& jn4 ar*&Fc?-L5/:2o/(hy_v(h7IK ȥ hYBiX&d& G+kRMHԷJ)ɩ %&{`YS$A"1) )S#`"1[9vYJlưЃ'= r@Ri.%z@ynԇI *Qk0 +sKMB_U^'m\ r:"~׋s2B Yv :@FjX"-}d`sH;JR#37,TVo%[MQ7=1b(`lnv9!d *gLD;+.5y/Erw:Sli6}Z]O R5kD^3 -Ĩ+M]h ۓYx٧g hZJ&sዽ&wFfxTQt飂Tz u ׌V!7j9Ǽ[Iڶj|ޥ*нfT6-Qh)N=&S_z UӼj|=rݝ /Hu :RAAZ^4#i-n僂TS9!qʆ哊T<\N|  Ie"tV U|Rj(^zb($-TRmTVʖO*RMcy^K/C-l(4|c;zbV.n-OjRןQح]k?7勓@ΰ[hxP-,l}dح¹8q.vKXZ8Gq+86~!8 F7 ˑuW_z,xAOXţ>+rNUYf<),k0fnrNS:&iZP9`GI;4t8WN4ӂU>Lfnxbصo22r_\ Vc!^\+[z9=[y3Bj`pP3D_ĤcsshXC`ZѲ/nATwXv ](xYF-]KL&yi'LM̍!܆7U/ J 9aF,wNF}څ2ŭ*-MBhNbF "B> M)* $EM8/8Ϻ,JY _I|yIߍWGC4Is>JL9W P!3޿^}bs@1ZX]i[uqa;JpvM&!!PP|rhCJ"(% q#cЈH8jieYpٰKU$?L?hT z',bY(C!bIrXlm2C,;bq f 3r['egPO;'_z^P vtuI("[FDavNJ>8 #Lƒ0o(́<q17% ßӲ1<4 [ -GzIC#.Rǣ-GsFeGkGTK 4XM-()hD(-D [lL25B˕&s%,T79 92'6/'|>4N/5o2ÇFB0Ŝby3ahBթe@v|!draH ˲ kAQK 3 Cj`PPLB$gr x n:mFf2d[;:}ܰM/.k`Pqx d?-i%,%D&40JX8=kR̼s1* ,FS"4.SrBKdKSv_ZXEH[xq7 $Ft)._963cek*ciHn &ʉbhAxHhx#,@>("!` * kq&;5{j?09{Z8zl?~ K dN";@a E!.O"F ]q ;=xGs8Xq 9l ӻCtC#9ڨg:~\C 9i}1|Ѐ .{"I@-*=&1f?B6er z3i!x% Y|lvdt *Y`iy\7]t7Np,]wcq5b-l]x.M Up9SC*E{c6rn ,$sxtw;k / G׽fPTPPjs??[;^~4uz^;80f2ƠBp;*xi}SB.+ Dv6‚&sX.,DS.rC#nxgEj4asQfs>}C\yrb+'n[֋||Ms,]I8V݇8 };8 BEq(곀?m^%Ƽlݺ`Bb]|)gaQ7fɔuN6>kĤn>T7faNƬn̊>1h)Zfy;eȅ3d8ryBVi }L(aKk!F=7e}G{|Cq0aDzp2a 7),丽{V5'yj Ǚ8h ,A:{'n̴s  {aF?%x tfЏG%Do>л0i Eb6>V˽Y4 sxeb¢k(QBq =Pʪ $6:G_*{ARv  EY8"7'Qvݼ7'E5"BSHaJh}\ %Co:k %pk/Qw-@XHQ;%`YH7_MخfxH V-x̸M1nĩ ҚDy8ⶮԙ<ೃ`bP2l!Ǽh#ـcx2  [|Ml13r"/OlꍖRmp҄y-eŅhp Z\ºIkrx9y;FGW 5yc.Sׅi{ |"H;7|g ,TO=籓XX~dqpo&,@{>Z9ȕN>B]!y86D>_;epC#p#?P١mv<40JX8{FkUȴm) [pR^:q\pGh`@z E\pH<@ xh8 *|@ˌ%,Ty}d̅Mekr|yhAN.\j--@> 13Qet+>FGY.5-Tw|^܈yM'f# /+\p}va2C1p~̙{p[y#Lo WYi ZSp\(aI8q0aKBj8G%",`)tu2&v꺅puO#L{Z,""6c[8nsnɑ]}wh@h-@޼P̗|A}@h`PqBছˑ)Ǒ ]db\MJ\fN.ƅ>aӺ,ä?K {F^p<WL]`߈'=,S" nE(peF ]/ĝ-\;XmJ2 hi8,?އ %,T]NHB O핓<X@0s7C`<!Hh`ܰPq#]Nzۍcu!s gu<56z{®e}Ȼw]e3An$'nd'z4%qo$q/ T#~2(! 
]p3g2ph=poN;Ʀ%i 9 *nji6i*$IQ)?h ÚU,a2 *Ŀ+zDvd/2_r dbA7Xó$9N -;kgߥ~>Nk4hq0_ d }<+C5N7.4`P_gyݛץʔ9ϼ[h>0Lr%$Э3Sy#5Qd}Tjv{8+2M"t<SAbJ_ZNjx Kܗ1e%>* 1F\F?GՇB_ZG/5#>qe,"T"T]6 jUQswPur6/x[E CK i^u8hɢx -oZ穬T҆s6/UJR$ZjYf#ֲ%XI.8{K:j c6Tك_F-U\m>ZRI(E,v-ZƲDU Z>MZRCTٕG2[yrFdƷ#ҮrŽhIzn>C{^ YK}:$K}<ޭ /ZRц֢Wk-: yVʅ% EbZRRvRơI[XÒT]O,d.P_;e(]99Ʀ[WəG>.G 89dЛQTx8HۅL}mpZesnewGA ARQNs"ûwKvGnT]M"MDCשF |ƙ,αRckZ]ް2mlE.> axF#u(=v6$T*4[dCx7&ϾL#?cx~t: +;!įe |zl GB>\Du^UڟoI˖9ufʚC\R'?#NWϴ#"/i@*P*!]!y9Ŧ0t7;i(Lf XTe}LPCG*/,>:Z|4snP6PaCuVfIJ@fZѲo)˕,Gsh`pPE %VB9Th ,@>|Za2F 46*/[G˶qfJj C GC9%R' ){eNpy ]G}%,To_]( r;(A`ǮV1 lh8 cUZZP2 *{V-IC4v[t-[V۰NaY5øTRyb];i-P4NsyEˮ_;>lƭxReEyi|Η>*M5&982e7[F^;öpܥvRlK$-"-g>Wiha_u]wi䫮w0>-ݙƭ8—eяq$ >'RM:>}FWWNu*]0_eal1E5nQm6Ԙ/ g чrF *DaeހsCw1LCw w2,T7ѹ ׵+Mq~1-3'.<4| w/-̹ua2Ctŏqv/ZvF>ߤ|ȸwfsB1 ?-e ^z GˮV !K k%3zsWAȰ#E"r caQF-ыȧ˖KEۢ bh.ppa9E2^ H]DwIXܺ:suPZ뺎1-wY+%NVO .{rZsnA؜},G(BㆅG9Iq0\ vj(=;C3Fs 6&3|h`Pqܼ "x Y" /C_VYliyvH 6ZK[y^^7Y@`/}vnו1,:> @\&dG%t $\~?40JX8n[8WKDEcELd5&ט4" [941qN1h`Pq ̌ _v |`"49/)xտ{|xVl@5ߌ}l\2G */x mfhi5P@jD@+"DhKv@LF" #㦙yYv_434d {o>|var@18?/siK+2Lq"qh2 La@ [fz}ah`Pq/.Ƙ7c4LfpnefKч %,TO]Ux8[fotw@|'L&(4 r\‚a@BR>~ 'VY\%qȡHz&F>4V[3>Lf(aFD =V9JMwKeXd@B5 nW)tV~f&s%,T?Dƪ;[WsdF@VtHlnW.Lf a{ eDDo2]˖],䲹dktށ崘.Ug1~0uM2]?;8u@e&>'2K@<40>^횫;mw B!hrqBq@|5,9Slsh&' pc6d *T;c?ě TN.2tUk~fWQ@*Nmuw[7Z0wGCn8[ \i ດW> * ;iC -gEB#!3?@uaAarBRf_a~`NBeQLE r)8t}?<6 4|aBxtY{"mR)re ud#g&n M09N eX8n:]GhO /{F:nd%,TO+ڝ-CF~!Oȫguʖ> R|"' *.a_7 0¿WA7 7 j|Pqᐃ-NNC ]nX7u륫|!4R]`r IpٲҨ840DX O-W^ҕOAsN@h8°s .;V\s%,Tw/dZ /r sLf8XADˢѼ.|"nh`Pq<ƺ-;Rfck6CE~BlYy*,qQ%Dp_j1h@nGvWXjdΦ1#CЈ[HQd+]l9%bXpɰkD$-TOu01ně75N{ .o\<5z,f2O؞<׈Fpxd"ۓ@@jJ耻nN>Ltd2=֟5VX3&Ur2BcaA|84 YP8u}L|i*t~lHh o2ÇFs ?m'7QBx@˩]M?4bb[:[v]I ry *dvh 7/(-C+Τu3g}^&G *fKehΖpy0Mff [b?=$!Q\&g840JX8H׼gǧ%pB=٣eraaC&3|h`Pqy/RDĒ b%[ 66F l Z_-\#RA@nhMkVxrC`!RC4"9D}H!G)2F~p sk6 6EgFS 5AҫrZjZ E d2U/ dص8b&lؽ|btF$p jRn 躓%^4"[[((|jkO1}XG2paAga[-PX",Lg!40JX82<`I 0?!N3A/tn~9 H* )[Hm~h Ot0 xE>9AۢчAͳW_I2ޯx:Lf8X% ۼ+%nD2Twُ/V${H)I'v$ɮIB$InC-6BI5<%I`Ed׈nM:!nR&3|h^~R4%C+|Q5i B#8潷ݘ'ڗj%Ȇ"n=EvߤMe&s `1"S0sUk,@>~ӫM$& \ "F S@9G2Z)>LYoXl}мxlX ,dsšQBq-B+eЯ)yˌ O]  nxࡁABAqPU,o%evrjnظYU^?G7iJ9fL!:d&V{kRg{JU9mxDs(2$$^tYO𼱖:zޯT\KͶh8"Vj> V5:J|oSZKgжZ KbUWX>+Vzjmz-EKrr*o:P%aR -Ukw6jn ֲ%f,7T~XT5bjYKڛmyL-kZh-Ek,)YCՑJ\yKZZ-*[]KTU䅩H ūH SJJ(]JשҒ|7{vHء---[3gIzKպ6ХhGo7D)ZRo/-XkotA.*I GIkRVѵTt-QKJZJ?TJ xkWe>.Q5rcѵUKbeG%摗l bSYyZ}T ukwtMp-8{V.GIlͨ@U?w)sAK=e鑧ivyݘ{f@RAN<50HX((>,PSuQ.K',m 8ɲK ~I뗴 0Yv3GTj~@vG ܵbSVvo2Cp;f!Hd-xWV ?N5{00oI᜵.[2ӝIߍ CVIZ}œBaq>Qp)qz>N]KPB@T Jb0 D]!|ZKApzu⌌A)/‚%ŐlD H1;GW֦7P,, x{4 gh.z<껄(]vG2CF ǧ,V\uwS29#aG|6fE&g#50JX8>M x^ljd/EPGC4s=-!|+u`2 ³TY2WB WjޒE?(Kմ*JRs"DHY(mDzQPKH_aS0WnjXrx'bl^9 -o[VqȑNA0o׺Za'GKҮe-u>*K՗%R\S3]bXjZ[QonG5AORCcG(}SukVmŸ_^qu߭FŤJ^SKIYiQL*[>JeKi-T겭jY-ydZ֒ʖRvu˚IQ.+֞ƪI)f-jRAY*g1$VŎ7|R ^C֒#l僢TtV/ Zݼ0-jR]^\M*[>(K%OoB˓TI1KLB[wC\LLBCBp7 \Wqx G{jZ}c[1b;iAIrr7ow&g8|uOS&;40@Xo(P l\F휗}0#C.J=wЏ&W8Zm6H=ψ9S.%TH LKˢ3Bh( 'PYmY(Cw ß"U>Lft /8梖[`.4~~c;R)Z Jܭh\Z7:6R1w WwQ4a2w;4[JР].O' a%лF Cf^>ȖYgr-grFJ \eKn- cv,XQF Mƙ>`gA/ D/A@&lY Mf-$`LCAX((ROl+.c;&soBCʽR]sÂT)p M6Ww{ݸ5qT|+?_z Y E0d*1G(e Cpf%c~|.TD:{&gr89 g]'̈؁=dGF#G֑?L2,M웕SB$e7I$yXlXmM.Or aa_e[Ѳi$7H]#0Cklzlg]#0.|['oKn[Z!yyҝF/ er{B 'f _OK􌎧ᕇf2Fs NK F>Lf(axhBP׃"3CḦHR' Hqa;6ۯQTeʄ u7u)\ux4Фhٞ; '԰Qi*Qcm*aET2dQ˖tpX";\Gh`Pq܍a _ \}C]_[1d@WN<-TO=EjV.IQeqDdrd 7'F ]7@>~٨d\s>4hʳ M6E29c~t Wqy'ݼ薉L!ț<~b40@X^{-SW AVNf]7ѭ71CC:~9fd3It Fh:MJ `L˃F ]pQ˒Jd *0/DLeO/׀LCJKLkrheZ _ Z"пԉzP9]!]zyTG,aI& yi@~ZB~ZX>Lf'>9BWVC-e#w9 y8/&1B?BHUQw+eR:}.3ph,@.|z5.Hl?(2G9ABAg8:ɳ}eod&\%ON4/i$,wZWhC|:w|<@ss> ~vt -m/Py+vn H8j+9@ӉJa23ޠA,eq鷮xbVQ *o÷T c Ac߾88ZFQBf4@l;҆%5pQnTX蓜 &6Ϯ 
74_"%XC.]#&Rї4z:z\~@|-b֜% -4vC-|ɓvh`6Hc_ f,1O B u9Ɍ-ݒReˡfNp,ɕ%"i( ̿ wFyc5 ˈ7,C[vjbFBq7W,Gw mWU%Q_+|J*lK-gu_,U}Tq2͊C'aAb; O;B~]!vImeu^ - La!Xr!ƌ z!jf 6"_F8nxLsNL6%ygVD,ՒQ$鞕 .4C6DӼDۆgG& 1} @SC90\3G?'NJ&F NdVnpL8LéPI3Q.c5 *'<@#%|ɜRx[nhIBIVm*%r ~y%zy瘐$o2S,@ޏq]q^2L8hSq]ofcg[M].]o/wKH_xznx<v)&B` wSA)β%)d&A$8X8ƃ黮g/vV¿lE]pa!NG\yBtgﲒN𻭵|nx <==b\Z ̴&D5{!y&Ơ5bn ƙ8hc2TMF8 a/柣Ҝ.6r8/zn 0p7Ǔ]#puVl1JMRĻ`>Ln`BdHc>g>LFZ|XHoж }0xh|xE IQ߾]*CR]o__v]o0C\#vv&-Za%ZO/DžRMBㆅ$;seBvZhЙ.3@ií3E'%p~xLCヅ1p?^baqX2w sp w„65.2JT8h-T{CߔyL@MeDL@ 5[ͽ [ -Tϙ N"L"!A]#n!q Njax O֕<@Q2B4M`?ZaI02<4 Yd𲳉zXNGid800гgdA#HasW, >Lf^4*\@M0WI76Vn=]| ,D[ xnx j+J )!2o<78D}&3#6IE7ff 5*kx;,|,[Kecke8Ep1h\r/rŹXo4"yV)9Ip\.9[W,QXY9hI-Të'͈Wߋp|$ NBAs$:HnVFw ӺSHE?%ީ@e>.+bňf_Z<:i2Q_o\sXᦇeԿ%x|1_*eX4~Z&%jVټVt^ >sӃSHY@SJT,[%"WR_5ϾWʼȪn-o)Z<-o)Z-C[ֹ悇k[d23syOiw'N JTZY/f0T/_hQB:~uBV -ZQ/[x׵}[׺P+;$_~-(F6|2mj^Ԧ䉛#Y2~x['k:*eh?wht)83;\>|gc컦)9߲(%wIaydcjLJT?/u׈v c 5q[f_Y3SSxX~. Nh.讔4ߝbCADži^7fr9޷VlZyzۇ|z/jy9m>]x)^ 񻒁o9s8ߕ)&);F,OC?6/-dL@C^%Z zʛMP}ݖᄊE 3ծK3N?|>M>Sk H?O7>G nqзWV]G:pdџyk^".c~>>-lZ:09Th.o83\*5I8OO-o/4@}{%k4V^^ =cM|m 3݆K2]nhb@f.wk+-,ܡ4x{79|0&N<C'G0~xl*(~gӿ߽ۃ,A6˺"aOsZ.˲__7MaYRϻ2%*}Y{Op]{ {+)0M}'i8 VDuۿY/spzy6߃_yd5 _^x>,3{3+Ӭ4?|+wN˅6BO?;r34ڥ#9/ӯI}}_VRBo_IjTk+YX8i뛗}廋/s_Dg+e0mhۼ.|赌8_Ѻ/]y-ӗئ+/qu$==oer1X=rloݜ4n#зnM?z4X+ze'G:ӶC]a^wA"cnVi+>pW![=kGO?tNxlFƼ|&gׯ*͵_j|X̯PzںY|Sƭ7tNyU#wOSgIwMwjw&endstream endobj 162 0 obj << /Filter /FlateDecode /Length 34172 >> stream xܽM%9r%Ȍx\k'I И^t-(J52U*q^wE` EҞЌKfx[B~?/m{oR߽ۿP2km}Ql,cx/子JiXs;?e=f YޏZ1VuS~$?𷦵/S6r-HVG-$:k. K%Ik}ԨӨ%lQK%Ykm$-$VrD-dm}_J k{ldjڵ֨sU[$imKaTZ1#?+Hb>ekHֱ-{C䑶5=Q6HJ%oƐJXkW%O|󿃖JA{RIz ))$i"$QuzV{տ{FfIOlC&vZ.JNOy_Z ׏9Թ)C:uPk̇>Pk ZTIm,Hm&XU Z'2Rmʧtקo=jWJs8ViOl\x5_E'vZo[K>ޏn܏j=HUߏky?vUZǔ*^ҳQe}.$zqppωxFFŇmk+.->WnRr4^lVzuΊ\/emj@2\oֆg Xa[8kgލ߻=;{~\AUh3W&}9dTZި|\W\sx8VPlk>Gh+ކB|%$-xwv3hC rp4Cв]TwV"J@ZΚB|eKrplrtp(KRWQ ̡C5 X "+ _C 89ڮ_gЀ@~\/TenOew5G@Yp;ņ9儧;j!d?')،lOKַ)1#6r^ykZ%2ηֆfin d?')ۘjsHD_2}s~r4C@֭u5#:)m?L^9&5/^9f 4PW0 A_(ֆr hsP=;{ϹvLAk;emLaв;ُ/&r̿ảRڂF"ʾT4.􀀅 V`b2ubz|b}wg$=;{:IA!/]a3\Pwʶ56|V`}?iJ3k9~Si]*G"ZG0'bVRAqlDugAVI^7} D*(QU6_?0ɾ K|r?!H"Ac~}PFF8vvG#@ <|+J !~D\D$b裵7_=6z:= 6sHCƗO?_;D>9]1$<" 7MP6!dǺI"1g_Ȓ5sh7hQ͠32^M2?ؔA#5p;3pdZ?`*x?|0(8$t mjFZG0OoӮK^栣1= ӧ}K"YT'jXu [%{ {mqD .DԊ"IK' 5 *IK=]b=Qm'/>&zlxC7*O0g#JNqϯNd$%Z9vHN!_W>EuFۄo,_fϴ,_ip@ ?(2{k@"o )%`}XZd$%lϟsyt=, m$DȈ@ch09l|u$ 9PhZI@k֨ArOP0Zɖӛ-䔮e 6!\p{7ge׷vJrjw$gK*'+xګ.DB.9e*NوpcG1)q2 8s5Z$\ru]/=tQs,*05 DyoHK.ڪ3Z}~xFN_J t+dB[6_M6/^u5>XO`x6?-56fODgϟhr?&|,Иܒ͟q\6/w8ח~6]$jDת^$j[IR: x%5. bRI֚5u=i$];Y&.D$km p%$:j $Yk{RIwp-<]2ͻv JWPPµ"W!W[EAI/ %\* = jeW+Hң\+$Fi$iWA:tmi$viT$~0:D'APһBA BUNtN JMJWW^{Txuq9:tuhL^:x.pum~~.|P媭E j-Zί\0=}vaebprBu]@eAzxد}=F+/\e^/\_lZFC8J:\a Wή㄁|#k0?tŚvlM*O.4ڒܷX{5>UKuxPüzWsq=f~MmX6)ѽGE!dzGвtfX>_W__@:]پ~f|}wC5 uP6n,(ڐrp4`%c|QSkoo5 uP6?3cNjO(gMV!q{Cq'[6{>ۼץ,4܀C+svvY)Gg瀐5Tڛv|f7^z+(N<Ñ?BЀx0P\0҉!.ОZwGm#-WW`ei#8з.s 0 )gMV*ƒF_l;h膠ΚbV6a+1Gh+ )+ #;$FWǬ1- rppV6֡.OwX(\:1!vz ɜa yQz4Nv !X;҆SۘUvM );5`d/n/Nj񵹫vyۡi#\ Ӗ Aކ+81MϱT-|H9: -k JН7pj=YӀ@~^J# Fw >m#zK49E4Kpv BJ1D(PY<_\B;v '&_;B-ކُKad],`/`i+- X _2 P+ !q{ N&ѹŜչǜɜ6;ywb:X6;аGُ C߄Y:vwvNץ5 <7 ,m' g XBV!q˯otvYj ;˃>pVt]CH@27,F*zRv.$n c!+>٬#m[l:dZ5d:2w^hо);Ҁ a$_hشȴv xi;73)9%7$bhNWT5gx1OSBZEKnh<%Hm V#]%7JTP;/暨 Tz@۾)3@&B[eȄ?PP]rCmc#chMy 5[:E.VGZ v~L*y\xsrQPky#kpLrC 86 '+j ~d?s>%iɀm/PWe-m)rDcskl*K2%`\kjx1 ^ K]y]{JMܒy .)ۛAs%z'QW^wMX! a^VE!nG|-'xOд,ieM+D7-|oZ!k!ky{Ӳ"߻"߻G7-|oZ!k!kyOb$F-_ڂ$Iko9-$NEEg#6,Gݴ,ieLp Lpi[#HC{iޟ;sJRՕCϩ$i3) W=ŽWD^M{_]Bxِ偶_CZ-^+-J5D'(? 
>fuZG3gWPy>Z _Kܙy'N/kIXQ)MofBC{Yg Q .僸(14h6r+;;аGُ7{!o.} a( =oӞj7`wfKeɼhտB]8qmL]&+s@KnQcɍCyh+[GHtHCWa% Ӏ@~vkgVY߻iq=;pgו_hfЀ+@]㽹Rěr4`d/;%}{ ݽ#[-oaN,mGKg+\Pkg.:0~!~@֊C աkzKͰЀ277kxXRv'MA-Ɂ7=Gr2cprU|2ؕX4YB4&0b~v`jsraR,-ā;T}Co!Q ף j.~yw6'znIc}"2qT3$tK2<k ؐDp{rIh xnH26}n2ҭOh 0b ɱlr%J#Ac{E<$>jcPY[$]/f7NssxRLè7=5P*xf7joZ%==RK>^5ǣw"KaǣzSEMV.tp|:H#or|@^et~),OS}l^c*t}~6v"؋fpI;:_nqbO¬.X>$]ᗢ]}]e n. >0p^Lv`"H'QapH}ӛ^YёArdZZ)kh B< .t$մxKo*I P&1}ЍօK4)^l c ۦK_d=C>҈wTz#a B"%u SD힓Ι'MF?{<,DZh{wFIjO%UI=CA&h 3<$@3 ߝ= ;jڹ<> G@_8@@H@!V2 9}-%Lr ~#ͱqk@.bq$r ܾŠB132a1(6#MY{ˠLr#hO@b#bTzqy=@赽8JNwlj<8M> ${V*~g_kq  0EBٔe˝?WZ6l %–<|O| |8WJt؅;dS D=mp6D5F{Ni¯ֱu=eI? m`U9$׏l%m}g]S_f1g{KjX5n/IN2kf2ˈ@2&` sU9/(G' Ĭk:^T$MP7AEq%Hdns2~AwcXۜɈ`,Q6mWQKh^ mQzV%F5ZzŏZ[+';c">|`EF[ /d^K⤟zjͥ˚jE|.Ijtkx)5WqvƗZBoO UK6r A^FJΈӍbr uq#k(oאݎ᷿Nf׿YZ + cFY7_G9j!MWuVpyLބ# 飼$V$9ہQn)4 l\"ɟ ACY>ʬ0 m0٫0%\7V5"ZjFyJ,j[i]"$^- Bv6\j+7fG:zV` gڴ+(Ӏ [:h&YrppV6tl݃+(Gg+ [[›pV;%0}X1+LV!q{;$4ԛ~ZRz[56#l'M"# @KZ H>i&a./.JEuBOP/ ei#Ik-8!iJ d?n|kɫZfo`0|Ojof4`ipv1D)Z-ھc*אW2R%D):$[6ѭB _ǜ˚ _f3SMC]1%@0ُ˲&j /k`j hk.!ڰ5Yaُ˲%Lћdn1Ѐ+@@]"A ct++c&9^Ai;*(=[.`eiPF7Q&]?qT3[~w33[ռcak9x +K N4R87PvMAMم;p0AӰ(&\tl˹ȹιmj6 IP~ W8n/ണrUe%mq ΰ#U >;am9r[~*I $;.^5z$BĵY暉'.tpO\PH\蒐i rܡ0!C&^pT.*7tv_EؖӰC~NFryEg,-Jp=.rHtpÂGbh.ċsIIf6Knn pʣG^G $77<}zHxvcS=5 .{~:0=)?ǒG䋻U{H@s2%Arê Ta@tڞ-tR8*ssɁS8^ӴDn^[de@&!iSQ#ArC:R8^;_?/(]ڣz]rCnXJنI#dR6C#!GArCʞO؀帷d2HdkQ|WPy@Fm%qLrCY.W|@p/-/-lE_G<$Os{JmFrqg[#6Fít-4ؠ@LKHk"kUoXcrZ-Q|R`y(bk׿~HwGM0"5*l0S7HܔOcKZbQo_7ss1_ *oePF; m6_~oBJ"g9!ʷ;V Lw$'淿~N~.?4as;{?>?ӕa2-?o˾ : lV7;\*M"?K"Ak$hӍI\l=rIBW2k9[]$A+e͏?J;`+|]Qa 2 7ISw2oB͛ȻټkJ|L߅#>}Rv}K5y5뿟 弯[}?84gWPS*:?iJ|Bl-˙nT7M疕!+iYU YTMnnV!ZEǐEմ,i,eBM+d #O|,7Oxz<>~rKeL+0-[<&NZ`J~t){6n!4 +zLlmD$= wp˿vJ_o~w?RҾc\~ocښszmDzQ1}cicfTCW`;]dr/+uvO/sWc~[s>m-\ȐkB?Vֲ#2]>84| _6?>0 !/Br Y<3'&mjoNQv IC@]MZ@JhBʡMF!yJsĞxrښ| B\rpZ(K:#Cv)PB0bjMh9iF d/^twnnuݞNapF b& "ihEE\Rl &)q#xatJ}-`y0BC<󥎡l}|{Q谫Jh_&m?F F)˜_hTj;gO/w{&k j4X&?FGݨtKaԷ0ʑSm67'ԶKYcn(7U^P)Ʋ,ߩЎ9 Վ~ݏ?ؿ/~i_t8_5#> ^+7u޾gil,$ߏq42aNM\֥$_\{O][J˟a+Q{N!_[<.űNAG^eO:v/Y)/4 yLݖ< D_\{ӂ",(l|٠Abߵ:'s m1OO|;|P>-"oSyn~p|z4̪nHKKP"sաSbEJqlӴT*g4%wiZ*ZtHZ"IZ(۴TC? D%UZl~q igbe4-HVsE%D(?O ZuY8tk$bI_UzN% 2䑱쒥GH3(E'm<R+- $[JQFԂ$jqL Z!ONآIc`|FȯGm2xTZ΅Ƽ`@f%Cs=-g3QNBXr-blW|ݐl 8ˢ/ gN5R\TueHܛrNtcq,΋Yدj=9e+CmJg䈅6\l{t |8ղk: p2uAמU{EϼR猪/ԡ;}#+o:%wEDĤNu7\s7P6O+-2ՀЀ@.|=d4?敱bʞb}!Zb(KV9#*kN!iJ d?6T^^mYv(Q޳w8uG r TGӀ@=%ãCWUVU%~J9: {\? =8;+,4I(B2gszw]I[%k uB )߶;-S#,mP H:9B 9oCTV@V!qsueӥIgwڶC\ʋf`5~!샶Z -gMC4+ P.9xMh&ѷWm;a;6(!,4`%[SNF0Ѐ+@в/BSP XYӀ@~\n t뒃y:(\>x'BD*uX]YՀd/#PbI6a"g'߱PjE]{T\qZa(K&-AVކ+8ǚeoh4for 7DYh+dEkCYP+ !qJ\՘%rg9i 8ˁӃΪBGv_;(]_$|4b ]c)(TƗ"' )S  ljj&)G"BӾfqʶf2x5a 򡭵Lsu{:{6{Ѐ0w12{.ֆ?0 -="kCjFЫ‚ .W* :DV^AB`n_4cdK Ш<ۢFv.G.eij@57G Zڐ2ՀЀ@~뢯t1ub~CWPO en"J~⮶X R Փ ?F贱!ݤ[W MG=8xG$~qLB6hJ`ft 3p(΂It_tڰiجB& n>'f9h|gi]DZ%]BQ'+B_5$ze0PRpgFh:B\%|ר% r*I쪖 GdВt;Lڦ!\By׋n7p04H28߯ mnkH2z<5d1u.9PW>$"yƺ$Dz]X1b$cKqE2JF$r:v%hft(Oc[ ]nb$pd},I(.z3e2$Sl9}^uwK}[*-1٧})Yfu{tqp$\~;lD6p:2,Z4#d[ H%.)Z6 9$_ X![b暦Jt[HNWXtZu^;'krm}\Gx #IZ%秴z1^жUh@&9?#萟~:,3Y=ӦlnB ߀]oΗ[lX&9;^UKWLʦY6 h_Arn:4A.I.-r/[_v*L+{SuN7s|.,J[ G Jĉ-H!IZ;M? 
I֢3-i$im$kѧrRbA9(~}BRIj5'-ddY*ߔxhMZ%iЂ$j m4( (k4(Au-D'iݖJR4}>ʎ=C׊7zW%i >wBo@)"hA/ Z Z '?$AAz#w rE˼ V ?[rOGkG+%E[xBm|wLj*Vܥ}:o ƽBC*,zJ̬^PG+E&u>x3)e'jw>o.٭JbIJLYKkݔ2;ZwW;3I(IFS\Cvem)xaJye3TQv kn d?n ZrՊlK-"i ,mgi~ `mh98kj!d?n f0ם]8;+,4E@YڈRP589̲;+,4`%LJyqWr[1 FކЀ@~|$GIvy3S\Cvޅ ʱBRv kJ d?n<&&䨑IZG7i+kLJ\TZv[RΚBIZ)878",49E0׸,]YYCRBV!q$eÖ'˟ 2Q#Xx]ݙYCYx!;G@Yp^t(x^hNB]zj/2 -?MC0'ejWmH9jُ[i4>=y[9!+47E0{O VXRBF!yqK" E سA\\ N 3[.D{Yr;\5`7$JT ~+ΌeUӝE\Cy;eGqUֆ l jiS[¼K/-K9z Pf\ ք9Sh=vӯ9<R wn3Э\A( ~|tn ikI9x6hdސI+Idۙ(|JWs#i9yw!YmG,1Y>Z͌-$U9GʣkDl$h%}7,ҩTɱE&n]B{?pFZ)cd l[oPC2'/ܠd <5L% L|($H8$R[zo Ÿ6ǂF$$h~D![% N&p$>y:C"+ ) %$[>kO[xM!-X))mt؍hʕ{# Ѡ`4 & G`_0I+&+KDJ_A t0&+%{`LBe)vȈa}*{|A$M?GpdpbJ_r'f=#VʉΉ[K |ոTz\ ΫRFٖ --]rC$>\@Iy)BJ%q Hnr%Bt H> ΩQtEKn]t_P"hQ.+I:vΕ\+ѵoɎxrN]VO \J:TN8t&Hn|]#=2I]r$r3r-.i= L(!\Vʼ|.]r%$O2 ?]QI@pI?4ˁ KZ)f GqKܰItk@Jn}JnhD+ f IG:_\ޜpM-9'UfGZe!d>r#55l]XJjl4\s56Fm|', bbG h \w6 me_Lfް>3ZC&i!D^^p$-b$%$YHeEd}yWI֚ԜJz*Z}rѵD:h$~X⑅KG#ZB J#: J-F]xGZx&=&'w僖J]8>DآJ3D ~}I"haڈZ ZF-e-pI3)'<LBAKzTB{d(҉^3޼~d%"MBYTgYt<#hYp:^Ra1 -;5`7k2ZY9*!i@QZ.(cVYZlBPZ`*oQ0b7+49E0׸,ezi{pY)GgVBSZvg|h]jc조k_ȾWo#2gQnB!{p&hMZ+ST.7(ؔвs\Cw ewޜ9 6khFس?qZ3}M$И0\wf*t v<3 P!Ёl mHٝ36\Bg頯w5˚rt0n `0 ogFjiû$f!I.iHʂ=3n-5ІsP`Q;!t-,$rWl9f<#,m8!P,1I;oJkCs X -sY`_[ j98k!,mggu ېrp4`%tR.}KOVYhX6s!>צj9: X -47Dw Oe7{ \Teo0&X`V@V!mRF[$(bfj7i \$ |=!sN[iTkIoĞŰ)gdl ijv $2iBk$s^ZE)m@&L4Vy ]i\MQ&o j:`^D̞LrC/-DH@TJn襅nZ(Ƒ,29򡇓0/Xzi'y?趟d?D7 ʪt D$e㭤@4 { =BMp.-!i #@r.-C :Z]"%K·5%L^isޙ.a @>;vvr ~utoZr8dʿ}OU}7gGÇIr W=le6vzljGou0-Dׇ>6iw]oKJDA)IݥH ]H е x`>jfZΗ?mZs%_%dZTɛܱҧR>>WBeG׬-R-73$iMRkji \M M00[CUh XY$!+4`%?WW25:[j _[t3=Rۊ,E~?5Ѐ@.\W.V.ak $K])Z$ϔ( ^_1|xC ;p:OMC4/Tj l/T }&}j6@6 {qJu%l\弴UގuW5mpUU0W5lpW]UU0W {qZ~nd/DȨmlr*뎃1na_].xfQ+KTV޼ƷD)Ou30?xp[[&n"ZnBNFp|m)yߎ6<5 X~ZݵBL2@3Ǣ)GgU\Qwe ކ0߫)O-R }vks7DY&V6s{1Ly[yP )Gg+_,-hrKī'c;mXyC/5hm(KTC%!6L5 P+ !qR;o1kF!RΚb(KY:wݝi[65 X ֔6$h4IDf:gvC5 03ʬ )Gg+8ݚ6]R8Yh17iZ7U5 P6"4 1 -gMC4xI$t08$f`$ݢ>úJ8G8"8`@'^o&J5pdp <8庼S!aC> 50]2j5RUv.Vm&IbۄmX^_z- 캠6|#lSOn zIC7ug+"]8Z4r~$8щ±'Z<&WIm ;U~n ջY ` ~wV8W4,|f ^W ȵbا\bikZ$ͯv5oiƼ)"B^7JGM‡3*I+N) wSxq'jM"]ӛ_T#J2Yڅyfy/a,I'M_Oa\M.ļ{'j iIq&8P/Mxhz,$agiԝf˻ʀ6@rKAձd wf:ZJRCOaoGBY4V,2nCn(;Pyڀ 9?RG_-DG@&>X2̐ O푩xAr~.,h@܀bF3^BtBFt 9O},h o-~eJn4-HNχ\KD&4w̡ḣr HNxJQ =/52F4UsdJv>߳ptY`dp+9`Lr H `&${b )K=fJ1y RCCˆ(_yZc튠j!ƛ&x#-WMaΛںܨ9`64e}O-wcSsK=H\OӼcНN_ fu'Mԏٷ$2L=/Jfndzߍ7$S_ZN^ju}cA)gX^e<[-{#)] ZDۿc3 eK>qX3\Cx=hRX;1ec칂Ʌ;U2uI{=iܤ=݉~ nj45 w4 97ͅwڄn)G7'em!Fch2#7ewfO.|H3E_Drك!\С,=P!X]"Ar{RvkJ d?ni;KJUث0DY0WFpv4ieɗoC6!!SE\Q+KY ېrtُ[i%j+ٜVQfЀ+@A\R9̘#Cm TV@V!!4XM/FًStͪ;WBloVxqjb z>􅂛X3_wwW\5uPnd]zG5!)Fd/n9ߡWSD&)U`ь!1;*.ZP#@Y6ctDYK -WMAm4ŇJq|G͹Ur(YR$͹(;e4gV67"K"h{ -pzwX(\%N6E&g"\tsfЀ@Ԥ@ЗZT?nIG+7Co rp XYڈζatֆЀ-4M^MSz19s wN5Bs!cmdV@vzIIM&iO#Ys7]rc ;B \tj-ߴr=IYnk9\s.(OP/ ei#I#ERUӀ@~M5:1;[nJ9: sEmg2YYugBo8.iKÛSnHln*4 P4SKVɿnmH9: X ǷMkd50+!ʺ$TBC/TRMGr&ɭ$ld^ ^V7oJiA<"Nǵ2MVܹf ґ 9{ߔWeKB[.þb 4߄<6yK:tC&!t[EWI9~8uE5x9"{$vz AdYJ`VB{E&v5#q$* D ߍqG֠:C6͋cUb]c2*CQ I:%ުm Ѵ'pdiB_oc\M=߽{{B/a_-`,5 ?gZ4JǬys:Kn DwwZ$i:ۀ,iʭH2%D H>`v!7LSo(`2?*#P=hZ@0M=ǸIRqCiwI?x#Rq9p1]rC3_|I$74Jaxt nh5/{/iimYyL $7!;#%4e&gIi.ȵ-´5L]rM3 3$̾`Z)\J(?=߈[鑕m##lEK)ONKZ9Pyr78[l%ĶC[ ڮޱ*dqEZѥhʢ5D4}.w/f~u'~MԏdKUBzh>!IZcW I֚裸D-3GUu±$U=5'ET %=Nd-Z%vJMGįƐ#6OW.i4j)4h]K. 
ZKc'viMaoݫ ,i>ʆH1ʆ \[TI~ B!tNtJ6IoO ӨSN$.7>LO3 >ϴК1z3-]>i vŜ8E8Ga]pET'U]p /e\M9he^>[d.gOy4-^_PM Eib_PM[^}jI~r )lkiooo&f1W(2!Qvڞk)P)^h7m #&~rkZWكe£ND9i,-7y] )&~rkZh'KUʖnha}0aMh ,-D73x"r!RôV`5=lfXRA#\4?…#yT jWFO.|L0򁙧Gٿr`A; X]"A2xR<Ӏ@~LzfJX>U!RΪV6Y@9s:^6e~:Xpt -I( \E<5G+eZrb>vgȆHٝ5 sE,mDgigK2ڐrtُY84TI!%7r#9 Bs '[hlkBNs-wՖ]S{puKw;]Dp95 -WMAm4-?L&\D**@Y=:P^ܲJh+.=/b 9^$oppLsT9Q)FG`)@CNo\ DW.ܜ6[oB$q#q* zcA-MHDDu'bWM"Im!{X_;Z)QH+$39S2k*IQpU \2K ]M { d^дFhnlJB 1L ]g#xHCj)j}--*$VD 9p_fu`Lnu`LTKD B$2'1-߯(1'~bhB阱Gf!*UTjnlx:wk -=<iH*Ї&Bw7Po|#PR|XJJ ] bi%@E$7R~e-5H> f#29]rC-+V$ $ PZ$ R 4E 4LOfL~0thQS+) q !]:G$"At#$wSzQ&|b|wY9:P| Ŕ4x 7)ޓK}`DLތyR'׬gFUri$o+YZ ُYF^ϊJ?G`ӃΦI(K ',}(5ֱYC9.9-!M~ -S 4`%6(YPHOkF!6L5 uPFJOHOG҆Ѐ@~L I~]"9z ˯ %$4n4`%t6wm|!g+$f",4)9j|Gw/ G7!,49E~\ou6Z_\0su)EˎSNM8㙔4fɛF] +#K>y{s"Іg  0QgNС\1Ik9<ւp%P'`ei!IK(wֆM(D]8eM:߼У LvHՀ `ei!Id#.Yҟ`V˖88V)DV;4q-LCms7NSi~ n֭ qD"/.)9bۅ>k; !R8ע o -S 4JC~NmmKG4S%"i ,mg9R? P{ZΚZiُשƯD^qg狪ଔЀs@0rt%w9{U{wVYhs8zispas4 d=x~jdUv!qל/M}5m>y rotr b%HnKJMӀnn7aǒX#j)ivA+ M4j0o#|*3ϩ^^T;NgL>GJvD#6ҏnR*vI|-IO'tO S@NC9]\Fr"h'TRiZ`,JA~ӣ *0_ ixɏfa,s"Ҥ`%Mձ @c.f"Aԋb!h=yp:ZJE~CM ؉|8p:j"hf -jy$nP -"vBZf}>4[ar 0&ؚ9\qݓ)Qti9dz֨IZhLjWek(zCo P̀2}>(D963bhkmIvI.i@On Έ@y>/J{OP~dƹ2 Ox_0̕<36?`d7ӖQ,yB,(x@ <1ֱ5o(AQ6Xr h7rx(y.dNm؀*n^d.d@MtH%[]%z3Go[5}Ӕ U:EO)pJǎvɬq8Äa& ]13t3'-3MCf_Y`FQFh=3.bȌb %2s[+#tv_K҂+DWQ% JK-RR/|`J=S;:QSTVuQ:"Y`TZ:)-v˕=M6d)]ؤAK%QKؤAK%AK٤e%lҠ_J_C_\;F: ܹ6F@.JBRIiRITᑆJUIԒOLRIc{)J(Uyu={jQZн^yLxu=op~%wB8kȉ<ѾU:bWxA) } m'wHcITcH'ѳQI/ k9ЂW\m4}J,fwx4ON Դ\vix`2]<8l55 14@$rtMn5IcMCKR Fc3pc\U5K3V58A0ӺOtɿj,!4юCRR 4^8re廵JӴ\ix`i ev#]LA(Ϗ&Jn.5^7R)2Mt044ċiv3{ypT-ŷCj{vs?$Zk84 5B4g3W"$ yp̔|5F⥭˅G|p3A`5=`#hpgzi<@M;G50qH2($OB5fH:kb!h:$:x1v<$5 -"vRM)x+r[٫ۚt0[UC,1MST @s0䅾#hjh!agJn*VT츩arSEL3U?Ln+YVar[ElюL ҖTI$T<\#[])]\iPKN""Y&vs3[b1p` e=%RŪ|fe/CqI]RkR/t/ 7/rL a+1 zd1]p1c'#zd#OgާOrC;7dە, q&KHUOIe`dJ<É))-eg_ $8!61Clt!bw8Ncm14t%d@G}ɀL׋)n ӲGgdr ڔ)-E,= Q,y@1-$EiA^M0LiӻD,1}zÔV{)2L+uZh.9`;K‘BIc7#P@*yôuВ]F7/Ȃ90# Fb۟,_\ZEzЩw13 %<{fEYfE(]zyfyv_;S6ZxΜ-3d΃er#5դHWݢҜ.|EZ9vc>mh NMnzpKKi7enM+I7-snZk u$^+k+RoTnT_ g)dZ1a#ZvUȴ]!ӲB. ߟY\yZf/s }8U#%*z0 a6- ZٴD2ToϱY7u>32ʳ~O?*~u|IH.oaMa ׯݼ7rYNEmn%Xc ~N7y!,M[ @t>25Es d0ⷧ_B;_꧿i| yXTՖI&Zɋ=ZDah7(כZ1RnAX$Nۼ7<Yƍ0`E=3Bj^E->H´{0`үቩr%{ < k~JB-IR r+h *9 NX/w^yX//ю ]aMgY3N_e]&fZA`uPQ(IJHm$4AK 7Bȵ!<,0*G8/N-D*yet  ^E%Ǟ _}W Tߴ-m‚y%x<`o&$ҙ '5V)3ߞ33JZ@EMxd+>|sN]UBcf8fN>;8D8yB0I_骃Wa"P!j^sNOamrc(^ R*h5Ly ~~~s9hSC|cݲ<8ZJE\*b}0;7|sod3sku3|{Qk=&iͧ|;úwmqƐiK8CN t2 /T }a/0X973R;ؿ'Dj&hۀٷ;LO͂D>:sYy;AxҼކߘh+THZN?aSmSnʨ"z:VYW<+4A/fF^wi;Vin bоL(O\ޚ7X > t}j3 :aJzfV ^`2H/+ VL`V`hB /ek*'P㕿Q(&=+TPp7>+X5' 8A} M{yRl\)׿n'>q ]duzh,f&ϴS pӫ"ߓ~Y(LR ~Z, bu}.g|v˲*^o;uG VҼB51{/{*WB,f;6pendstream endobj 163 0 obj << /Filter /FlateDecode /Length 20082 >> stream x}K%Ǎg~Dz0<4cc ž,˂jlcGyYU<ߢ.e:UdeR7ЖtEJU64/dD.2 ~;R@]C |i.`d!Lt#+ǽȥ6@:W>:aF2t0 *\=T8IV+G*]fjxN,ڑ  Q& hS_c*M%*Nr;CN(\1+YTq)lY].W SZvZp2a뇹RC6Ugo(E T,*2jJK5hkNy)7$6) ʢ|0Zs +] ǵss~1d.wa;-X/|}^ b&&BmdOr=ܲ\6-f͸ I@eɼkIøwmfp$!9D*"2!] O۝U;)0q#D/uTՋM˘uؠEZd%%66zSwg#f(IR/{F8dcU6 zJo cƦHҊuٔIXY_qX=\Z k/Pa3&cݎDת] ow6te:-ڋv7!!hAZDv0$AӴ %pCǶN,mrrEJ$6cճd A :M:Onrj&Pؐ[>3e̹i$ƂӼulLAȴe˝0r'ߩN+#7L [')sTQI9GMЕ\˖3m\H9CM ڢ633eݵ{M۪4 Iڦe6>@')9`"$ ?Iʁ%N} r߼|)n>62}yw>c ? -G Z7giJj䈃dҾN8H``Nʊ{YdžyܣuC\Ia.!.xݰ>6+y a(4W#&|L00mo99 [Ht$uTKwU4#t '7|sZne&y`2-8Ш]B]6&cv,lCf30%(ͤU=dgVuM|rdwTUE9[y0.FO6*S)vfA heTLl;Ÿn*wgot0^Ճ0Tn= @ &wJ̇vΔ *$`H&6=Sl/W2h% `q )vN==;CN(ݢ28[}\&HShӑkֶrdnPgatY#t '7[pS7OY\:;Ss@'0p)vf|k&] Ǎ2';t}C׳T3NSAhg.F:fV-r2l~:}V 7G u gP30s`9= *3imYm]充Uݚ.s['9l&%6;}t+d!FNrJHql ؑ}AMЦ_>$H9va4X #=&چw$-obR!Npp! ۲w$j|'N0bЙ"B|})k7@Bh5_=LJ i+!amѝ>lo>}۷Agri϶].ғI|'s3~y{&h:7idsP4! 
Ωw:wGJ&%uՙjb7ð'MCb_.8 i)h0F7K}H(v!uVU>ds8,pr"CnW#w\G4J0@x$ۦ̹hn?ORl?>Z[tq^cl2H A{PS>1pz&;N58(U= ͮbg"rw !)OY慎9Jp1Kz뎒kuVoKQ"ϾFM>-ש G8I r0.a8ZveΜ2֙F:t l>ӎmT["@,Vir8Jp1 iK uԱmiMϕBhAC0M$uMliMpFT2|07p"m:Jn$}6}?&{og*Jp^HxFh V4AZd r vB\eXQg} qd( $y0Wa.\!(28CpE$P uS<G@5˜v`GF/NL2p24)brP Qn!"CpQ0(FZ,)S;2gk=qyn@ sC'RQ3C/c)A*kD-hVnkr? !XGF-C-Ca =څ$y( ͣ)tS>Qa58 .KIGSf7T40KH/.8]K,a98K:Б+X #kVeZy1 t]TZYFs;GVItpS u߬{ͭhߐ0 nE,.H$"enX."]|sdM3E椽H!A hypJHqx. mGw>&g],&R Wxp.!G6Lc@Xq]ˆZ_i:[F͋YK/,n׭|p̲YdN#eO皘NlF`pD uC)UmHn6ku_0rOQ1Ctis,>GbR` P\ [kKٔ Q*̊l @el NXȆ ΐpBqJ;Kj})څf\w #)ߢk +?Rڤg3_&L 0F׮<[g:Pb.%6 Nҹ $-uKN'p+Cˆ݇ ]m^!klBЍf`.`2[gWVfn4CǵK(1wkC/2|/_LZ4>8PӐt| ք_ʡhF.겙 / g5|%DGp #kBe#Z>8ǵh4)v];m4 xpiCh\BwF~<.C,`&MZAv%h1ImOp$6=0%~K,uGV8GXN8!ۺn6؄0h@~3};MVk?~r?CgT(s*r ;g 9JpqϳmSdž'eA'0p)Hm-T6Q;MVk?~`]Gϡf ]Յ %6}j_Ma4׾sdC7 '~>aq?hFNzH"IZO"(;bpYdxĒXlDZAǔzǚVYmpvz=_zU1#-ڲY?i5yGGГ“L9)v¸]+Z]tNκ =\;dsG`8!!h ,lX@`F},),J'$=ĒMs Ir@H!?o"es_!y 9Kt0|} n>8~\ˆFGU/9 $t$_f^4 ?8\HzGu;S9ءtOX #WжH"ڒ^轻, :8 6 ]fVW1+A7P!a񞾴)S,fˈ)B`Œ)CB)B)$xAŠ0ѩVL _ W$-%*r Bॆk!F3xpX0ѩiBdRݖ, 0tewZۤ=>Ĵ?LLBp/,3rMʵm4׶dm BP ]؅4 '҃ oOa O+M[MEcQzKI So)\CN(nt)NTzm"0 gA<'Kf)g1Ӝ Y7&Fү+l@xNMgU2۽3gX& hSAwX晫p6B {zYeHL _ ALﶁA#BXH!BCu&BF?O~k&ݚ޽ڭX4@Ov%C^}sz#7~gXl)sl3!7ɨ.fBHm:*-1S:uMȃ#%8n:`C "`+$h`S L(FFB€==K0 'u:IoY&miЕOcJ} bR&8G%F1S,bh aWQ ˃Ñq)1%Xԁ0is.F)$Z0 Rr8$S, ]BAR#Lv '7|~CX1-}̓r?d|QyMe1K4yvX}ț ɿ aprEY<=6۝#=c?T8d˽( #XE9G=+S,XJhC (VVBˆc8g ^]hD(ϋ)u36@?|ϿrMe׋>kw f wo<5>+Zf8,85Z6 ߧQȜ@EoϽ /U-$k++t'N N>N.3="_L|N2{U>.3 KŽωYOJ`"_^9[eU6^$.t&Xa _:^ 9*tĉv'շgLٗNfzv7+yB1k:?Ag?*/?OC7gw/UtBǿݲ%YM&QR4=^4KV䘧6 #Rb5 aӋkpZu&"Kdza0Z H͔>Ӄrhk=De"sj}Sؿ&) Uߗ=~׵c>w2KRKt׹Rî.Zչɿ/4C]*kM~Oql3p< DddrY7j2vɻ1SniꝞ(GK4_viۻ^/zN֭>W~//?hTR.{b.0|dŶ۱HXIk9eySE֖1;˦;ֻ"e?Twm4Mq5_h*m^^//_Ҁ ݗ/_&bӻqz-|2+ڔɄIh8}mo_=OˬVE-*oؕ6s[:yنbͨae4n A#ԙE F:cQ;|Jh};|u*w\nuIɇC>22,86r3eӇzSҘfY;0y 3S};=r7gA~&%hT 9'Wd@?} 7kPh<{KC`/۸<|iO Ke0_M[vsYPgR]H{<]W0{?}`Kb7hHE|O2)oBU?b c=&RplS@́u(!@F$ ͂ɤ ;,#k#q~gB\]F3rpƴ`T]%zf\I FlKEǕTPf2ho>'I _u |N]ˆ㚐2+}lvnE̦޺aX'`'从w;Mȃ #R^12 ZltܠԹA3h:uġ ?M)SwI S=a0vn4*^ >dь k7T|E-;#!a@q.r9FTWz |p8U$o:%z=8`7$8PeN]:t:8@R)SPrrѶˀþw4M/1j+^UHr4ļBu*0P ­]ɈMS_FJ¶o=Tt-&M$Z6S4Bw]F[pZrp+!.$I a7;G`u Aۄ=CS-ZT&]#v<[JFk :ɨ@e4 {FحHi N<]3(RiBOΑk~=,u8*%X`ԁSi8Ck:d|}g,\MǨiB` _eR RH>8`KQ:K0OsRӠ7YiY-hPS;[ thEu=J]IњVVBˆ.~-bi %6 _&u 8l:@'䀕0vc1>/=jd¯qo4lχSl8=Y=YYF׌}|B f8wz [ۈG~^'֮^ɵQǍi:Ǥ>d/NWy # Ifh;ꭀiF~4U($m:h34Wt,& *~p u_U{M ppW7$64jEr24M?í #'{y7a%ڠU7p^P4¹yז߲Lkۯpy7s2Ewgwv@ XHy7{l<<#9 Oa&2% L]S;CLCLpI&x$Sx;gHKF:עSP{B9M !k/Y M'$|}|#F3|p\0SD!w|OastwSNh~*;G@w # ް{k5y:a̔/DlBloԄ؄VǍy>P74@d* iWw0#ͤܪINnt}[bg98(=Axp$m:~<nRI;L'7H%&)RLثjӰ͇8KtpQkܶ%pG@kHEi@Ek@em@wem4F6sJpgm|6r3Nw9Mgj'f!35}\Lm+4;0ԂVBˆƳ:iHqr?Hq:iڱ1ƃ `=BBۻ'`0VBˆƳZ-~)qn)`Lȕ`4J4BˆㆳZ[TF1B7P! E>;5|E/VBˆև|#-qJGrkwNZLqlJ#ƚY&_RaAS}/H dkwnQ)e=V]261\qz+NuUo4,9AW3<{ЦwfSmk,U]ܭ>~B0ueh2y{'iZC讂ZOڢkTa4-̃FB€b8B?EG9k!y,) !!KxvcohH*0ga8bFù !e]&Y-"'N 7ABЦgVG&gVQGW4gVL%p 2e1n RHݏ=ilB[)u!A@/ir>+!a/ -uNr.F9Ϝ>qRkg2nu"RԢق )W2 \')Kx @L"`B L\1!2!2dBD̄` uӹD2~&D_} f`.!.g3X/yU7L`=B k 4 <֦zC}oޏPM7<ƛ#*wsibͪt*9~\ˆÛ >|ؽY_azZiϔǛ5 Όaq~ )b%$m:žu5t8_6+VBˆW=[筙2w (vO7 tG ABҦ#ϓ RԛP꘼0(5O }oQ<%kRnMB҈n)=I QROIwXG׼zYdu@-2[!jk& ӦQ/~}4~u7<瀕0mEֺ6_fFkm##hj-%$m:͗R6rtMm>8`%$8eW]ס]@_;Vn[JgtW~G!Fsŕ;j秪ZhFs˷߳4DO8U}]iQyL1[>8ǭzjЂ{o(u֮>lJȵI}~G vؑ"T󫻓N١i µ;d刅ЏI{Yb7ЦD7Bna4X #kWw5^H˜&Bv ~$xI/ܸf*r kw,+Vx.wtw~yV7L}7ރP;GԽKnYswʟ7+9f5^sYG¼S)^3J&.h%hpbsb 2gtTi1CњVÍĵ;ϵt&/D(5^, zp8Tt]ʐPT0 kvk5GRigfi %.֧ ]_fy>tt0fq}c@Blkh Цa/DZd9 ]!{yTzpU<zݵLLfdJ7#nk9jPmmBRtj w\@m~+[ĕ>#\F3tpd]uK94lmMydsup`ȆЦu݉MxhTN7~זεUu)k)2/{?"(sRӋ ͞% A ́o4nC!llt'~ِmzYgkM˜$`4T0/eOF2x0 8ۖfߵN~ =7o$Ozҷ{  8Ynez?|Ί|  Tp g p' x0FpBq ٖDEJYaeNx0v~Q/:J&x' |0jpBqBLqL/FosA;'r7IB 0 [>l>;{M:Łs. 
<[MyףtV _,ǭ?KU~"'*Ȧ@Vd9wtϏNRU6y}I{8w #WЈ͕)+%Xtԁ_i8`%$89+S,zWyt(ЈIM.!iޕ,W@Fm)=|WYtC\H /-nCDv=PMB0zK+=%)8|u@#_0uu;\zv*n# 6~#}-H[[uGzCMg9-So`b IzMHo(F;9`%$8n}-]j >Sc9{Js Iț)2&$f#0ǍE+궼`Q 6t`I~E4 ޯEg>WN.o*q[ dm8́;9P͐0xOU ԝ }Y(n4k䰥gJm:rqZ{=A/:sqF7:Cّ%7V%v@pp!4 ʔ?Mx0YćX #3`CX҄oM>&|p$DqpB)›@M+p]ˆ_IO!M¨曆oռsT@W#55]ּsDͻ9Eբ}Sةa}烙}X87a3)tא74?oP|) 6:ON Rt9r M!H9F:&FB6? z2'ES:&`T0?5n4C =>ezHXZ mao% *$6[LR논@p#!`"uuF͓:UIۖ! hSPQyr\_r sc3pBqH-kYv# m,`! hSkK j$cMF&d.FO|X4.}7 mH m* ޲ #<L "buш \ }4S ~L;hϨS>τZֆjw\9CԌ 8A5WĖ^2l}cmCIMLIR7iK's*T$UWTY"Up|T6Sr"I٠/X~p D+l `؈{kGEʽ*8`[X #8eEEE\[J.MB+,W (_qpHHP ,S,W4>_40˕8ЈE˰笠EůsKqo=QrN̙ 4"ttɤnQr2 x]b8`bb_,F~0A#d +k+V IX4{5!/6.!6*Nxs/I-۝^9?GBYl2uP޼w/?|&8f? óR:> ~o|=RDV*Cf2|kTS0~o>h}D ϋ)5&__>&#$vNԦN?D-+WAO^\:|5>tWKWuz&4g24@'w-Kټ|%n2Q;Ʉ;!5 /+Oή@0@0F_p,A sbb{UԯV_̒.-?OwqlHK˲11C+#ׁ):G# F& FLFE,<7)OOo}Usz @fJ_S9?9!r^ht+oO{|ȗ,,__k_}Ǽ /=D,8큲ZwlP], lC>h*E$Id-{WV9yJ\ V>%;/oZcx F>hNo)@賓^b럣~ߨ^zx~G*t:pRqZ ~˝r,t,k-3; _"o7'k-zC_(*dLYzoS_׽C}e \}9 Y^R@ЦAr E4`*0 𳙇X,W'pOu9F o ]hQ ~j-@;5]daOѫuZw]FQ8M %l6=QaOsw KFgq[ (nKxA~, R}tI Owت2dqc:. Jed5~Df.4?SnsX'F˓P'تNS=]$F~CEE478Pi.s )9,`ppMq'Z<,?S34,nKNLRKz駗,ek}wM,R嫾'{,\֩ұ;{wE sA+R_SV1w?fP?E=~/!uh3p< DdZvq5ww b0.;=M()3ş=&HÆU}\ql2`?|Yusi:ÏAk:Iu+qOG*Fo鱃nG"QcE'{dCcE>\?;ʦqdwb;Mwm4Mq5_h4* ^^#/_ҙoeWM0l'>!7ǾR:ˇy-CBM>#& vc?2U{˴J}g?v%DZ o^} @e4naԙK F[:lymʱ2Ԏ\l=fiY܏ش^tl*۴Rv] =^V,+zbPXMvgj>ofr^YegjxRzYqGݦiهFRz~3Ma]x <)+2pw 5hq"ݱeht#m}>"ᵚcZ<+"+B;.G^(n^(԰{h,zrs.?i.}Py2/vSh}[^"qpq]~drJ?VLV4׋Ûendstream endobj 164 0 obj << /Filter /FlateDecode /Length 20574 >> stream x}M%ݾv~L&0 -[LðԀ=^Z-Uqx{Vѐo0" xX.â_{ë}9.?c.P.ör<}>*=M?zEɺUzݎK9ֺ?k[.6y՟_˲oÌr" ^eo^GWӾv 1!]N汄n祵]8/j\˱3L\`[N*j`] )֗˾1L[z(*4O"e}0pA)V2 " #Eʄ2RL\RLV/2qU*9'0ׯ~wnIr}`/ľV9~\.@jlHieWZ>)5ʼnJ~.=hS9u)AZq?^~9WN@Re;$^] r\V^-r ك6.Ast>?} ( \JW)Jq9 ?g0h-O z;As~~`! fr-Ì'an-`90[Ĭ'ȧI\!?'d>oZ=H?0^FdRRtT"7> ?GY;@E>vHOq9gBuޭm4B[Rv>JT/)Z+ЋcWU)i Hie))@Y1.U9`8`#$(n(.b/軎d~z.!|}>8Kqϑ&jIbF3zpZXUhh !swm3l*78Vq4kNF3npHHPE*F'ҁ,th`]e u'ӄ>8`7$8ֲdhzLH+ Vp^DQz[AK\$Rh1rx 3$lmgpZs+!aqjY *&l^Fӝ AxB^ױ+L4! 3[vmB.5MWXc%ӌf8Hm:_g%҇0t+!aq us fӬ4 Tg:A s)tAP;qT/JֱL&ʭhBhϐz/R/EE FB„nO'% ZGJi#:+JQ0,u:߻Mj+?RmZ_6Z58-9`Ap@p/U2ph2؝# /dY74fy8ue|$vc!${Ax.VN 0%\AK8F]i;ÖV3AO !Kb`2O9t XI{ y~JvYz"_Gv4! AD}L!^lP|rJpnGz\d =eiJh žsj.f(iS售 Tڶ5[n`H!e3MO]C { 9KoC&snM@͜=~ Жo6N.TߴKAuL : F3fpFU"n8% F3tpnHq%z~MO{>]v~n(YormT۝#ǭ]Y|t=4e1L3AhAlSt8Mí 3W/?ҵwW {1·y=dUmG%O!̖ S2i:vo*bi`K<4?YL^),S\.Yx[W z.$Ǻ>)6Ěk;=]ad/ݶf=e=n_,zwgF7,#Ezm!a]5b>~` 6޼J';Yf?K2Ν}tN־HN)AwJX 3k[MemfgQjpb4$6\m2iMg+8M00p{VRu S#irc44)ע>iZKӮP`4˃+;ȗn]dNϖ. 
]dJ;p[;~v)H}]dd <~<;=[xu>*.b?p˞ y\1eBpgZ]  ?]6zB:pF0h9gB>lmeLQ[i]WyAL99l@o)($=i67kG ɫh%];u0# Ma,ӵ#F:&+!aqkdu!3/1Wrntȇ =3!?ê0!w@fw8vګZ&{'6Nf0Hm:L׃4<@3tpJHq|sC_ꦣ;Jn/]#vs4L MG."CC6Ԡ^idwXgw5U}r\րc&BhAW4 wXgw5HrdS;'d/.ZGp[^M컑fpf+oF6NfͰ`%Q}7 c}m}:8Kq|䁾XdJ;lv2w>q)%!!hӑ)퐕8K'F~JrJpS}Y,sQW<<ıjs:1!XM?elZ;MVŝ"N "ľMNO؍#Z'{L6vь0%(8ח5fwe()c~7ssGw A:^kϱOMip|vKm֏6<} tn6&m%Ц#7@^o+P?~mo6sv%ʘuL"CQ$2lr0# Mau?%8:&VǝGin>G&з1wH S@u M?dsM6BŒnmѸySyḂ;G w }蘠q&EBZNBwfϜ~$bNmm0hiێObi{,56)%M6f"Ӥȗ>T1q*+3)+Pa6|e'Ŏ)yL{Q{=%O{Q{O=hSp:zb?(1lWOǰm_p1yoij{KNӒ;8|A@/ ea7#\Ӵ& >^`~K/Rh^`Hm:x!σVѺnzXt+ndab%42P3$9T !p!t W8cv.c=Cũ2NisF $^B@,45:t'G+0Z$8^:>%sfEݛKZ Ϝ)lf7x簑5 EжMAQQ a) ϫK2]L׏ڊC'R&.C\2qu0<f - DQ@E"@E5\j[-R&:mZOR1%LNЫ; f2\ CQiVǭk8K'!wf,24:+?+]B o^fxp.aBqZ/s!f?;"}NO# ];4B?UB%8n]#z)^;ף s_-w],Yz'8I$|pǽkUMm9Cm뺏ຫLҾ봌6ԓTh4uXNu~ 5'<צj#LjIນi<8VHm*ߪ&'@5u8MȃÍ ĭ˼ȵω|Ĉ$}NOȝ#@D|3>i|90[w ZtkZ6A `[琠tMÍ ĭk@A׭( ?ь@5G!TH0>Q6 A(]d+sUs.U]3t:8KpUr焾W6Ѝfp!`qVe(Uzy,j#Umz)݇G:)B\A7\6)z=b;&PQa66+lm_fP{atk,7wW͔2R:|BNi` Bqؖٶapb/%vcH& o+cO?7I3t0cetCUKc:tU ھ(' 6 6<yPn#7\u|C":&Ⲟc-s hSAsѩ~`5+sY9&dR(>KӤnd͟ şoou :u(7k/CoܰC@{/+w:Ve~\{w$FTLW)bDJ++!a1zcdLW8 A\,hFi Rp I#UXܫ`qR"g00ثBLWfl!rV{lQhS.KXSYsQ0/+ #Uw,.V͊~rDLLWߢbJ AdaI fe]ϩĊArj 1rA#UXȫ`!Bbf `. 3يo=}#lVXY^us}CܬY_tYpYBgW .m'n'n₃vr7s6qAd@- \=+"|VpQ:? Tnpf^ +\=ݳ . _, RٺE@J_];N ]W5@$!9l $SBbN= N !~͞{pvUM>]'5G yp9$>)FN"rVBŒNad2nBB14hfwnL` B%tKtH_T8# XB(a q:8GM_҃/uLQn-3Ǹ+{йL[8Ц#}rәGU\Z&ǝ|M:)y]d2 }p86żb.i8`$$L(t?2r] |ӵul흖Q*3n_+⛠;G@ؕ1u7U0RLM;uO#$f}X9ГÀ*)r<|1t zp0χI;'oɿ4C/GJS'A2wfw nL]ͯ ۞`qR;'lp&$t)[n Ypp[Ȣݱ! 9H@|oGӗi#e;-$>=l Y+5iҴUl0FA>RX.>s"MC9Td2+xe%'fehB\Kn0aNl3VW͉+k"m XA@hD59/_IX 3>@[`QrcQ VN$F L(Y)hJ+FBb:([b1Zhd+|0`Hf0hX4,Vt'0H+7RUadf+@#V6glp:=vcV4{Uٹ,?A)6Z+e(}=Rjδ:>(˃׮]e*wo>SO񲘏GFROo|o' ^ iR>cX?N ݗo e1AA7?/z¢e*^[\zQ>§Wtݫ뗢wtP,މ_rgGtJEՇquţ7T*#DV!AkBT?}Ws3]{ L}f\ ̞t*L]KK੢2y蜨\VtoVZQ7eM'jȐzЏFh%q Bu vplpA)zOFHFON3"D `P_&R[h=RxKc*_rh\E^mo_Uϫ87?ތ1"Gw}eq8ME(cȌKru]70?A-S^S:rլo?^=.]Uͫ0~NE^uGknNS A*B=ƕBp:<D RE)/V_)iEﰜ/`]m[y/| (M3498Pi"4ϙEَ`Xu7'yE{ʋUrrDzR35u? /<\}lQNS A*z;%U4Afyg'o)/tW'>ʡ[ώDΥpzpF12wjNS1AmLXo XpXM8l[ r(fKx*RDUcV;WN-WS+g7?  np2{I"s?ל)}xi/4: [75FK"zBv=ޞrzG`ƣ7(Ϣp (L4PqReatw0?A-S^S ;O[-nRn%{9kb°xߌ}ٳ>ǗwGsY[)$k]I!NΓ7Fu\ĕm駯4ê!PU_qgDۀI "!~&sܺGP|H3o~Y^ތ8T8,OrH?duz*V1>g_,_2t9׿^??4vJ?zZJ=?_ʭ/rW+z'm-|y^<禑C,:mO1c4jLLȏ= &Oa&]ČS`M5ʽy)uYwԗ8!?D=5}hl,ƻLe)2H_Z%5xw+aewՓAn>StH3 +0Zsp@H!"1O#S'Hӌf8Hm:N3*:ffFRa*5M>o4G|m3E[:f0%g%{u~:ίs_C|.`T _ν8`tN 3[+l;JYuJ_V2l<~0 r WBNȝ&Z2^۽LDZҀGjZ1n.gIuDI M)ER[;IњVVBŒi\d(+?^pQ1>m%6їYHYpǽ81}Vy& uG>7s$:hKwPpt̾ ̀!us %Gfxȵï/EvwAa ABrK )f'F. ǭ^dӨm*R4s8`8 >8#-^P)átWt,s0E> nyBPН#+}Ȧ犯b'UVi4ABЦxzI+?8JHqowJ)j|x.D,t@5|EܩDoQth۳)Akz-t9`8`%$8nLuͺ|1y4>886{Д dfH֌BGEC +p]Œ/u'^9Ws|\΃[ Osft }r1} g[a  Q͞ !:װ<ǭc%{(v٨;MЇΔ#kj?8`%$8 ysl_rL߾~Ǥ}K~[_y'S$'c"m6_#eޅ2l=I ۿ8wE#E>5bM[&%{2,џ˔bM6 4u XM`==z 3fr:;zao:*+e0,'x?c'߳m xߖy>'[K|͇W~ 5b'X^oci  7[dBK`Tb' .V;r-GլӼtb|,$lӼ4yztF=:Oi E!qB%T8M.aFaЃ1AJs;M#Qp8@^$b6UpFp+CŒ8ZތW*brͅS?j-CA^Zbh8ǡ~0|f2Q\ )a!`b*SIk LUO/ CLBLI&$S{bHݵ jM(1Obq)\b:^fڴB Pa) x-'&\bۅJ 1.d;yj7fJ3+ٲpV rʐ?ۄ m rPt 5 3agBpLWO6?ڸ 3`yMs>~N0y}Je0a<ƥD7;i1~=(6dC0ϳS͟hY  /ZtΡæ)I:>}踓sfwzoRF CjDOb-4Ц5B 'wǝ!E:^JmhF`ЛUO*PB/M~T-Dc4TyFFlp$*wݯpp+ (1x'-1e܇w/^iKdJpuCru;6{CW)Few4Bn4#ChAț,\\}x* ++!aq{Z:xx\9K&w#[qwfwG' % &cН#9NBIJBILB4';G@w 3tUBxF*blڲ;0-Ht,w;Qfatn$t W8^xHY-<77ud΃r Aq<(/|i?8JHqd[mLW;u)JG2JΑMB:&`#N@F9p/2O] W>{[y ]G>sIMR1ܝa" <΢'aߤ2#n0i,$6}N{!'SGG pJHqy<.ng-\q]4 ~p$Dql 'x!w F3|p\0x\np@Ы6JHt:8XHm:: \Œ'|c!vbmܪx{hkWqy MTI]ɫ5zOjsIj{TIDkc2טIEڕ s;s峞>tHWT@ tаQG+SU6iNpp! gnD)%SH++!aqOf;/DŽ{@B9 y9f#`@ (sr# -g?'}q#_^l;G|vmL?kAٛ]}%8Y"읲T2Gi-L ]B:&}Ka3ۀZwf^dxFUju:X 3{ sY5YB? 
:bO{i=%6; :ηؓVBŒ(l:͂iNpp!2ht)aR pJHqܺG"/ڭ0tO=!t0HX3wRBwR %8n]_'z['z['zszX}mV V k]„^7 і ޖ  ]B74k-_c9KqܺlX&|[&|[[%|s|MU @  N}'.`q /zpH˩"0Yќ !£(Sa)aaN9msOn]6vȅ@o`Qڦկ`otp$F L(mٵ[-vp0wt8ܚ(v҈ L]nk[iڃ40F%CCC@ЦBs eHHP '%lP:!5=3E# ˬa A u9k $ah0:efE>K$j4 1$ Ewƛw "E؝!+p~L|_ֱQTVEԽ0 T0cl\RQŖ [a`w֋7g6a;CvA zo6 zk6 F`.`p+6UuY[# ;ANZf =]@Ц.N:}r{.T M[ͱa)C :w)}k֬F:umlVFejN`n\rqd,mU߈m|ICK@V[/i;W]φԇ'} [,bl6J]>.6?z taêlYF/h'KOA"8O&ۡ4U 3lXx.h+9$\9b XZyL@[M{q6CC0T"PWXt֞k[`8 A#Vά,MFy.9fR. 3ݓu,둺`=RW`Fl/ {v!Tˬ/vg.8sh]EѺZ 7f \+(^))ғq| n^Oobr'nbS"`W \+ -BvEQȮ]E1M?o͛̅xɛX܁xLNE¸[ڂh6"ّ#WIV9{H EZm2 X5tNí 3;Ku/SйhFu ML%=;Kib!F0ӝ{X:&] qX9vU4 T_\ F3|p|0ã;#Z̎4 Q=ppt= -SB9KG::|HѩVMG\7hp:gHv9^Ju >sfwzU_}]e~j)ihrpdt Iru\eTql!WG18`%$8to-k__}/_4÷g6 ;uToi8`%$8qo]6A*Tױx $0`: zAl'F3tpr 3t?R|`a̔:F`Б?2$8t[`jLC-fbL1U c ;=h^x4'x6HXfn4'RŒO?>u uM6͸wv~ tw " }=;q>NUHУ5fʦG?klν䰭TL4([}V33FB>'2hJ]4Avc>sin5L q\u(}( +9 oRoN3tpɕ 44"6,aۈWflHb8u0%HXFha4T˃\RGy}H[  EiOt*w,oG"s&=+KuK  RodW&sWt\ʔg+`;d A '4$P!V+ F 3daT15`+|]>ʪ \w;G7;CwY/qQ­ {P& ו9bX9{hbgմu>ކTu}Y5UR&]c| ]VX9}48'c5&3݌)v+6<,Wbs)8 ]u+둷r{ϻ++fөnko̕koʵ%$ZbREJݕVVBŒc:]bbvEXˍ#\łïX "sQ]G.*LŒv3tW$T`*Sv]9KQ)־ӄrQ]4uAl7veEʁârEGخ8! 4Bjeݢgʱɣv \l:^ݏ]4]avey>n ';L~l"lW4{5-?!!6Z_?.VbuuPP ~x`=I^ U_/Eoa)13~wͻ/>=>b>TY'(_OXe %_ nvivu`׵rW~eLΧ sr1[Y5W|~WCTACJ5ĚΜ'>Ń^E@,L,Й8V`0E@0Q`P`HNeOnʲ<~7WO1S\=71ylLsT-uj}ʣKG!}o Fr=#Vu~7?~j-񛯾|w=c's_0XpVQ"3WgmHӗ%:ȧ]$鍬-NL9ӣޮ/'\a$_tMקˌX_!z |>U)|u%~l tDbwG nJ+?ÿ=7pZ x˃rI,̻cҦV}}T5rͲnS$HˤHa:Z+yçiY L[iB@ЦAr ECiH,fNcP_,Rֱ@7)1/EE\2׹buż辪W[rwif;T} <( 5B/ -D~pKyT%X!)*:kW*-7>͞p.祢Ȧd5~Df.<2<CQ"d eM@Rޯz+v?C^߇}oS} {:eiMW9Q )5NT@B@ce6V) &[<Ѣ=ZK9E9MU3y{jҝ'&]zǗNqsc/RPMw&88O*h Ƅo~!&᧟ W]U_qDۀI Hg?kܺGP|H3oLeu eɒdO ҷʹj&ZsT؟}w|]tsi9ϖAt=SY' ??dvؑH TmzhLrOx4s冢AMLkV;MZeY *NE*ݥ!ǯ_љvo4r.m7n<=|:+=~֔˪rd{-<ǿR7*rhl7<{~l?DuQpoq@*`>i〫oNox\eY/u\S;kͲ MR*+g?gq?aS弲i}٦}fI V@z.YĮʙ j˲?we|ku3D?'x5dҁ/K=/V﮾KX"""cJLb0ܼmA6hoݷEcPu<u.fZJZLu۔_ aB!'qF s| lջb˥]nҟ_ɥ'ͥ/Ȯ߾g}yFǢqXvu)>JdӇ\f%=G(oqveu_?I"endstream endobj 165 0 obj << /Filter /FlateDecode /Length 27429 >> stream x]fmxQw[.T/^Yb"i3j2~ qo$JH$'?HOKz_÷ݸKOuj+e<+/ӯߥ]i]WT~޽ϻtմ2ScR^f _k/WY?ysb5/?}.^p7Αf//׊H H#2J@2HD؜A)ҚCFOObZ_X aÀ^j Xa~A[k-☱{: =X/}D$R^%GI rRa+wRȭWNaT^JNX<>}wVδc-^폄q|9aL|R=7@‡= w~Λu{Y裂륧''P礯yw?`|;&u-QV_ywB~6@S踶g$P3?% w˃e-Gnп-S7|vp?YοHw"bN7&rO/%JW x)./4iXH/u}R\\Wޢ=o̻zɼm ^Ӌ:-DYW,(xj ןϪU-W̒t+dHͧ\ưo;îgvc@*(ow9>4eP&Ct 7)޸Z{[&K~^,G Ä5 VfAҏ36X;qirq;rߓl+aLQvV$(yPNkoS)Ex*wz%/F7 Jl#j[+|t-!oƥQϫAK7E/ cYr1T8`eoöv(lݤA|0.]/\RTfM {J{ފ&~h\n2g 0pTYن š'K* -rʥSVΫh4no% >Q|0qFdCmA|0.]v^mb}'=POco#r0L8`eߪA җ)7r0]x^4~%+1N/.6wK- ܤ^/%y(n+y-G # T=:{*elj ;'a-L`e0|B++82WgʳaF.Ǘ|p{{vmhs%e=kyدb)R2@ 썼m(ձ|{Z5 0.ƒo]ڞu2pg9.R2ۈ署 4 no]b;efWb9J.R=QM,NNc7 namA-7їhWAtѮqщ) r:S?;QtpQIA|V-穻C2[Q{IA|- Ä3 ^fA|{(\eA|0.ƒ_ںľ*j{9C.xZQS!^ۊ$|?l^+ax,q`ƠQEϪi+^ii9*S2ۈ/nd.^6s_ao]̞6>/0uX[z: 6{ߓ;Q|0.]|_Oڔ|G6S5+;l-r< 5*`F.23ڮ[~gvS3g#e]YwnC߼;qq}5Uz#0TK;m=`l Yi g3(4C&6D;[z\,%5co=iȮ8hiYJ&~ڑ3Ml{gRo&of.-f {l4-O=~6V?@4&v ?@4cgebmhd`ekb=uI؟|l\ClظFfo&:q (z컿Gx,3Oc[fN?Xð3QSO{S|;|;+ڕ] gT'Ocp0=0L5 ^vCSQӷsƥRxV7^ &Y ăx-1tu Z6^\7g/z =a#ߍg}A}W a> R? 
̀ (va$u:|5a0Hph4ds ѽ:OD!)9N1=6u X&&`L2n2K]`bZi&<n{\b^tbfg]bBoL@z2'0˜dbL3sXI&`+.@`>.=[ V8WrsC` nOVf p|WL%n{cz:Vȗe]L6麽uY)DgX?H^1ZW|(w{.vH]hzdj@92q6\rMT9`%O̮x]MG`CE]fV?B'5|8rV^$!nٛPХU.mK jʇѥ_.ףui+]`F.2L݊7kƽi cF.O>R8ezU9e !+sܻs{EP+z,S0.]/ۜAtvu'3|3wj9Dw3fwp ]#fs$jrDi%(7Gxp镂o#bJ.8G* Mt1EMȡO.AtupŏH~Gpr|DN?;`+s=ޯEwk;h`C+Nz_{e(O@/+ c}ߍC~ dugqf1깺r0L.e$ORtRz8 q"|c&C&8o{rΕ I皿Z߄͑^4Gl)V D9j|L{OܓMN~nr!kQnpIAx#V H:p4YX$퍇ƱHUE59ʒFhc<ƥQ{a:zŖl7eZwNq.GEp;ƥQw#Cla>河e7 ս#z|.йZ |x%X7S-m?j9m&Q~6}"߃s;O^6\à|,˼Mcom&i-Q% -u,[ۨ{K%%Z⧮m1#֭YC;b;]0ls0 V.1=`qeK9a\]ۆU3 ;*.pRtrmKV^O[7$3x^ԋ1cy> $40 9 ˓8VHjXBV<,IܤU 9(3Dl$Diʱ }\md剎Bl}^KaC| _\K;mh5 0.]x5;Fquķ8#apFu OvbJ.[vN5 ‹#kS֨ :wɵKYAšatZ0ÂaJ.[nxNGn5 ]"(0빼 +T5 W)ܙ~s/7(^"m^z'{DW (xo\z'{:}Fwtc8si9M)2ү=wK=`:mw (x&A2(amx"ִW Nm'~駇6X;qira*[a@("Yj]At`JF,5>JJ}`9ƥQ-Ϫ q-~ѧcG|pF:Y?`mFcaA7]7ny^j| 4~zPhg1r\!.R8妡Re E2W68\qirq |܇V1ϐd9{BΠ&0 D{a§ 7 [_浇iCVHo08li!늚{iwh|Z[!@Sj<˨a1g8պdd,ݧ)TüapFʖ@Bz0h;tbF.[tr;U4(*.!MAtf;[#bJE׀ 4%)AQAtpѕ]x~;d{ Y#3o.RpaP` ?:aL*_zKׄZZ30'YIiHʜenլ cuE Vcœds6,0I#pG<6\덪r3*SW#: Gt3 C47!zsI%=Xȥs]ă/,&QМd3.܇ W}i cJ&[v^C,wxdaR V .zu+I$nAl0M#: 8]l^Ժ(Ժ(RI&#8D$O1t$0:kK\N9갱d8OMs7 ]Ǎ@K7:$m0@+FSÊAaVËv3`zNl5 ,9=Oԛ`qWp xTRukjbQ*v%Ƙ`/K ANj1Ƣ֝Ժh8_bfdZ ‘X䗘 GP "$p-Ԃ3pMx>"TF˖7@Y0td<= C+xE)HGp)E ^$w [).嵷~C #kJnlqT a/Q|/:N9I z?8ETb1~pCxrgOB[1g8vXًN AޠKXXr'H?!Zj 'g7bdZӚxk4)֪]{ThWnqۚwKɞZ뭏:9c3M4>2ӐoyTNC|-܈P?vM5sBMz[\mN[vW3Vܡ- 7I2$oֹg^ûe:J!uwȐY#RD߅6|@:}4w'#ݒR8eKu Lu(0iÒo. ø4 w9lgcrg3S ÎVF퐩Lǰu;d[G[ѴYiZ-w˷JCaD2gԳrl˩-,Y-c||7[]ߐzuW]O-sɷ˳o9VȾX~˱B-ϿN!9llLs6v2e˝#:l,sX̩O<IHX˱B.Dz\tRpyz.ݐܵCKwUG}\#ztGuH]`K^.ۂ;ALP5@D`YOYc a_shrji9ȮN)2!Na+Y9ƥQ~yV[[P×rM)4_[NnOO5OƤQIN z7rr#)2ۈWu5 "r0Kp'\ZvuGOaeS)P:&:|aa\_m nߘw );Du#F.: A|ɰį +\vw [A|]A|p]/p:^$&cDV㵒znŭ|L5Clj\ϵxJRh[Dsaʤ eN'Y$۔Δ7pwj[Hm>3Q,W0?#p~Iu$ü78civ7ˬc` a2Gϟl~[C˭H3n%Fϊbjt^n\IrWIY<˃` *{t Or4Z*5OB!c{N]KGlY|fLɉ.^|8MS ~0KTTHb\Xx =6_ 73ב("TJ&o"Kp+!eٸ-xMXBrtLc3g/,Y;&s#.h[~TUQTǓgҔ\gZϸ=s==J9=Z}(=JSf{N)WWrsb=z}Kh@T4_{Z.S)SY¤Zͤ[YZs6 (鞴\H!6p`?RhZpK ~ЬYH~NyLqKu4&: 'ϙ(v=:ad\#oЫV+(Q6v7ow'%ׁ0YU+j;8[M$(0Y4!| )piNkNO[Oϧz)%r<:)Ŧxen$*P{D|bw[OY{ůC w uю(4~o{"<)^'({EM77I#-7ԝT~Ur'~o_g :EdM/_˛,c/_?`JO5{&Bydzxdzxdc=>2u{R`=<2E]ѱ;lMU"G?<}i.d)+B}j}1p*ps pGWp7ǀ*ps pGW`}Y=SGO;#ESH1vcN0C`0/c]ǖard\trR:4 Б<!D_pEt[l2rq}wQi oEg]w?;RG_:^ ^l{@K{)DP03/o}7w}uwpJy>'lMyܪ^=)ti]{o}x6^w?L~8żU-g [ ?7{#Om, ۏI5v h߮&BF8}yv:}2,i/-//~6Ai_: \49_gIm%l/O}*6oy#o>}(l$Flapo7Na, 66Ȅ#(V&O [IZx-8vDƭO?mK8DC0<r/o17|Cˡ :xk,ׁl)l.rGPMtֺ\!?mh}!ah>S0kKŻ兣wCu2[].yM)%oC˧AY7GZr:4OO$| 5f&VwyA4XZ]n֡Fl!tyz?]^86X>]ʢտ.?Z+ա~~ MGxeC Q/,(|Eib歈vCˡ :x-.E .u8mht#E-]~.WȫCOf[>__5p9i|՗TH lj}U9>*Ȧ{`"0pǐRP"1Be38e\7wU^|/||<_.#Tm"bSc]Gʳh& ^ Z)ND6#\g?`lzMB[{=#Z_/H9_{]}̗Ͽ HHn7ll,=fo}ޚc^)=Y3雷z=K?x^숸@#$ ը=x|X%]WZU[}y/::c>nLa L70ҝ?^{L>aUw{BӏI(26uy߻50M듧C5S'NkE8~(|zn/<]o֬28h߲G7 c?p5t\#>iך>]pev_60O:/ec:K]92ȂP40m? op{ľw[Ie˄)'˭"r5d/G_VYE| r~aKi֭M]f^dYB<9@3sQ@ęd eQ@W'k1BҨ/RI5@E]WN#BnX#VR kXn?X X9#\XcV(7{8"`ՃRQ94GJ}E) rj4,ܰf+/rrה/t+7J%U1$WbzC:+3܆ #yxC>X~~ɉFJlR$+$$@<{ -13n~d,v HY|g'Π5,R|+L%^"T&4*|f8, `$_+B  Ł٩숛,!($r%]ok!3$&\3 }8j ?5"pZm`C<&ynI*rݐe,LvZw[xO`05F$ ;ϋ L/Wŝ )}C5p*y'Ӄ }K]/-6,6]ےIp^ A?MSZ2 訄+mL ]3iQ&1u]>lx!C U&r[YØ!uψ@kEV0ʮ j>RI"}J$Nh 27 r/̗3a "ɝ@LN<阜x'&?ċLi0C!M&g۟{)dU4% GgRC}WWb,Ta_]49\92}FhbC`Eҡw@Dg42HqZHJЍ*sÙrene}w'Rݐ&)+.W^gR͌yˁIVIuqg7i502~\$=yˊIWg a)ytʑ.\HZ[37 qe[=1񘫈!3(7n]TsKH9w̆LrSř =13%Ɗq{p)InTȐ !DwMwݐ :A 4݄dƺL-"bU JRO*;&纪$$wc/Ms}R5<,|M]mk -,kC$OjOF6?I ;9*8=Q⌘C@֚=}m&wCfP :qKcSt.R/fJ6=ڊ#2ڃ}yo Lvbڋ?uH:tF6}6 /.VS6 d~ljNn6ݭvk1AQX οr:dPT4L ]1z84`IV.U}$idGʿJ1=ðZ hxI?<ߤ8TW֐ݸ6 ZHPT*^)=A Yތ6ef炢J֖só)^:$ ZKm'@B^$Hm# {ˆr-_,6mY . 
2gfקhbn!/yi{Ě.2C*W t|zVK_tl>b| R91qP{3 b5:p3AF\I솛YF$@0N#qِj#2'I@Dݯ0{h# ֶ*dAdC$ѥrK`KQY*fB "ߪ_vF7di.\fB+ߪ' *+XMq$7҅OgHgRh*zDELaX"T.׈b 6< GN%g%G`&@F%$KqT}aC3F֦g+i6fC&boH7;Mt)7-Ah MNvp%`Jps;-/ (liAOup8 #Bٙϧ%c}hWP@F%3b_ݸ4F3*S T U 8RYHTސNn<#Y̦48d$s[#wν%RyOlmHUC.)*L\NsPVX;9Gtkȵ*3qrB~TkP''W.N{Qك}e&\p-&͋vēt,Ix^siȐXT|N4Uq&*8p OZ&Ʒ~.B7!8.#6$7{NG)7S^RQ*Ȉ`uQshH&];ŀt!; 8b]bSGMc^wr@Ə8Cg9BHeK<Ǖ: Ps4iq.g ~^xˮ oR3M$q) _s$]3.&ēwf}^:?,6 Ak1xG@ܗH,m\:RQ>.trms5Ӆt1]cnYCSQahdaV&~('VĦq1āA ͦPEG6br`g4Ca6#)U%)/dz.u\;xPÝO`V^ eB!:bʴZA4 * N>r9eb-pPk˴=׬m&m}]#\Jb";ͤY[:I^FB>TfAY=dH:BFQ*$7ŕqC '奦 o0VJSDߐC_nRo$N{,y]U Q`CyҔj-Ot!\%7 oj<8i$B $&HP-,4/k 1 =1Zu5@2+"#ʄʤ/t~I-!%t7ȴ$Ƹpc3 !q|17K MU2ȸ^q^l @Ɉ jH_'!/oE+X(dIp׉I^@ӖFOۧ1_i 2ҷf*Du: V07K}E,] Xw-`[2ߵm֣7[3FcO \^_6DWQ@M8Aid4Ƚz,nX̻-bZ2ثt] m^a&UMn'OM3vӽ lC2[ !s=ֽs$p^A])2CR$6YIm%2Xi1`.P2!mNÀPOZιp (jXU .OAZd@W@ =C>ˬ0]Ϊhe;SG>e/!]tK1W֢RAp,O^,N_y` "\0 D2YhV:&RnbR bR$JNj'''|Z`;gAw|yyHNJ[I(xVfaTB7wEp67U  &]/ oK@U'iNX`Y!0߸׭kCE.0aoCf,FpbEm˺΀ N}-ךdrdm]7_Dc|{@(2 kEPXRkXb:6B|x(T:m, raƥQAp\l*ʯCj'd)Ό[r+[}r`eN!1 0.]U~ن1,9ۈkrב`Z_蚊E+!s)>,kZΘpeDX7~w3&\?b>4kCP%N'cD- /DX:< &s^s̊KM̊KM̊L֣͊1=g c Q@āDثUӫ 8HE#/@1jX Xf|XjX Xf|XjX Xf|}Obz=ϙN\M 6 4 g15Z Nl{la_ Ƣֿp7Q5N@Za?ZmV>FPkS28F=8ނ7YA17q %|`E9@-[c))Xmc#(qd86XF0.]13@ev] ,V>ƃA[`eq u!Ђ~wYAq!Iė:j+n14 +chLٛScNZ>ƃ\:7CcseC9ИsyF@+CP28FngLrZ`XwPkEG#c_=FFǔz,ZFFǠ P28VF"ڣ;IL"lx0Kpff(ǃAs`eq r!)C,0.]hpJ+fo5)PӔ蕭HָW>snրxȝ׿q9[g,5AϿǧ>[Ԇ)AkCo=ܪ| \xA:Nןٟ ?gu D}[먟4zUC}Ct "-mX a!,kh)$bA=pGRRH*ȡ}ұ rBPAK!E"ӘeCHqXg VODsk^\_U*6YVqj -;#dDZd &7C fl0q նQYMaջkJ(zF5p 7`vƼZ(}Į% R艌hEH WI1!H-"a4nӈǚ8.b ~T~Xٳ%HFԱ A\-:M!1řErXgFE\mHp'c~jk/MP\(2rCd"`F:!Gu[lJ@xSK'~&qNX$dF]Q|X-WGHfq_ idgY_5YF U*ՔI~d*dp22u"bZ+ZqdC4SIU2ddZS͋iEXb$B&0`tLi4> (;KWI$8!*H:LhJN$KTiLUbT05%u19.o#SBH2"p$ 5]g_ E!+4:Xhҭ-M <8;e$*u˩f$yB'yDpO.l{^*OZcjO wNNKL%0]7I8:jm:"-0HLU:erVMS&eH)ӒWeؔɉ%M.㤛I0Yֳ$Neٺ^ft!Nf&Aĭsc͢2DRs ?r.9Q4H^=dFu凧q-j#&Ds:|cIV]pmL& sn硥ñ34Uǖ|+vn$wM i/ZJJ:gdCripB|$&;Iop #SC/;³Pcerj𳻮U# .- hgȍtD"՗\:\TL\BLI!Q(<!If iS/Y)"<fb[5yE-HFӇ㡃J:Ed=@D/= tvEdkQ 19eNevd"Q95}8=I8]AUSӇL9$72q+"]aIwjuQ:9DJe:EmCAF56䋈] >[D>dLe4]4 }B5.S.  y&kN3,JT: TP:Ez6Suژ~G븯0{e)7D5\:O: tEijc ;YG90ٗJ)җ'dn.7(k7oV D =(.O +D'E0YD]IR)M3yx*يK7sP3 OLdZXBFr'ZV~ey&O1#yo8eI8eM񡁌bhj(̔ap^NLrqLCx$aKS";ú7[dr2M7E/ATN_>'Ti澂 R(gWB,irEG!7ۆH"A\[ 9gӆ:5%`[r<)Et} /$`?~` utC7hL. 
5֐\r[p!"SX5Ⱥ .DIИ1\J6eI5FC@4I{/ײ*uХ (HPE|J*ApW_ q6s1}nvqb>A=XQԗEd)~3gBw* z笼}'D7ŐIiqHV_-c$<BT)st^VKo%B8OY 1[xFW%% !ϹZƂV j-%NN|~lo~U?!l^O1Dر\ԿPnj [`e = }?mX0KpԐ=W}҈sMeE/n2^ #!w/_D]W}饷6)*oUs*'V;;"Mi$8ΐ "Hx?Y-~/#Z r•EJܰpKIhx+͂*$vȨQP)ƮB!)wI0B'x)&#L!3ڱ r*|=`)ń\h7ȭW\<$~J:2TF<8 @sL,BsctD$b E!^&m<<,t6y㎐Ād3G_$K$v$*C]+!4!c˄5 DB eF"g (8IF{ HcVdգ3gK0.lț#ގ+kb," F`UA},L!pZA䭧DRb4_7x5/VԭN_ qXw)V~BV鐂wa11]l2'/ <Cq&@,&0U˼&%PQ&ΌWaeͳjݿQ.i?uf!4,E:y?p̔=;MG{/H0,U=aR&` QM [%ߍ ;%ۧy5!|{"ga5 }$T4DG =`w;Y 1teīm0;gHl@93KfE; uLY/kK_rhvL&TVw/IfE#` -T@ n Z/o%wz#5U%  I-NKݏ\ԹWK·rqݮ@Q gi3b;nijoųE<2KI!S-vE(塒"\* 1$}"[ {+qaStLp/ #1mE'HQ2DY`J3+>)woAC>&+ \T ޑ1ϤHcBS\<j#e%gG*e}@&{pMeG}+BxJԤOkϹb0ۂL@'f]YKB$Mu\w%WY%*W/ 8#̺hxEF=H!GMG 1ibNtcfT ٙqWR^IJ"|0k2L: ;$NQ%nR)?꽅l, c|Ivt 4*@jQ/SSddE cp1{U ]=T<4I/`4AņH @?#0r/BXg6W[EI"b3krRm$~i&*ވ{:2Y)1{c&*(/ah|AHŢ1 yP)̛$I'K\(D$JfmS"3,kx.3wyJ KqSgf` 6Sk';x\!&gWa!G JuMd0;$I!=J8pqIwq+bj5RVoVw(,y,g+:pn⯤J+;>nEfI-ANpüC!7K@NqE) E @ }4 7n8 D*!0@MI;HP@KwN$_jyAY2aSDL8m5"M@jr{A!fLD%g ^ID 2#|{xb47-D@ E?pG~&WD Gs >@?B.C"EQ0@I|bv^ |i> "J .6nMn`ơ\'`,,pZ%2ne0I&PH97d3.@$VQ@Q֖1$FU ] ILAuXSGS:jLrmqq +Kk3OdXk }ݡ^r8b UA8c-Jܨ ⯪gnd_IRj]j;/ftmZ)n3BoN?@5~2ĩQvljnckTAm9$`S8wSoM  Q.QEZ} + te3qW lФ隲h2QlMUV1B6t5KQkѨad,4_Bݘu((Ը4[hwQbSƥ闙Lv66¯BP5(TRQ 䄛fZ#\Us_FeUdT=:!е'M+x7faXmE"<^* \&dMWf/"(;]l7A|S*5LLMh l,xg3kw!joaQ:պ"^ A@cF-K^RKdԨHnQwQkfШZԨ;jKĄ̈RQJ KlSIΔl}^]Fi2Q6ʈ{3P=:uK>Ǖ|/ |2ͷa9;>{ʼnǕqwjNgGݧ.Hwr4vE&ykRO=ܲX0ve#D0/@f a+q-o&s/?_;asRH HƐBfb c]8'1?P5['㉯͝~zu*#?>_~^Ysc~U'/{[p~uщ3\߲~ _ (JBo/1m*4㍘o> ArBP౺*Zr֯+]w_/_*?zi4=4v?v l}kNy m&`+_?ga׻$v,{akm~ݺ;G .OG|4™ w_8|wV5я}ҫ'yMRoΏ}$A5?=5үG2m6*w:z$In?5ca_y qЭ'c&qF6a )GBګ@| n’'FerVHt{x {LePFA):IW֚J|PӞ_1#>bW~߿\}ܦT /g,(Ü{X>Qׯsendstream endobj 166 0 obj << /Filter /FlateDecode /Length 37780 >> stream x͎ܽ&9%'2ogSbUF{=׿sHdGezvLOH?EQ6ocߎ|ey{[e{?j,?o6\纼WMYo۷^*}ݗ}~oMKy߶ijՄޯR x֌KyVD$߿}6O{(\uF&DX"6y<R">'q$>X}_RSj_L? q \VS4idt1&#$&ouh&H\s}_QRjX77JdZץdT1{_FiLe+iVQ50!1Km\*?tc]2{GQS}ߧ*5&,[mnEHjטνŋt!A! !k[TV2Ke~ jw*^ޖ}9joq']Ʊ7I$\/QBJhLki1uGVB[E*GUM&ݙ>IgKP9ecU82bϸڇئ*3|Pئ]%9suIXIo T 铞'=2OH= Go'~"Гr^Јq@N,=9;i-ûVgY2෤"N$o'omZjשUZ%)so2un#U鳤mdt--#9};ZeZFNeXZr䀔@zCnQ>vL lhR+ 8cvAaQ㔖'`$%4O+nj}N_?66E.E.͗{<+x wH==ޫ{s{li ){]xcUi"΁/B]dK} ehڻs@J d=Z󝬱C':RxRлZPg :D00Ou^ s :=ZZwѶHkp B /,ؗTl~Ң oU V%OX03=,_.~eN\mWǥȥ}Gh :+mk8Lťrcp{TXWȮfn͙+C`D2)8 !tzξ)SԵ.Vl6YI.9D-p,˂InP wkZrlp@4 d[v{&x=ւÉUNvMY2SϗYd%m}Gf+nSv '/XgnȴaV#W6(!7ئ oPDž5a% K)ԾvVuGﱭC8?!  
j+h52~ [唅[{8B֣~-G:Ցh%Kr9QG9ZFALU{ 2,-e䀔@zOwk]TXu \-u(_@3- Dz9w(޵ץ RۛU7?t?bm)2e(E.͗dXQ,Gf1ZLb_@WJI_欳.(K]MkUyR2VǕ>g,OP=GD I9}&Mӡs2B琍}mk^Woe[o?Y{n!NY\ҌhZYzڎ3 EHPj7Gi)aB )z:0|?KƼ0ɟ2\y':Wʕ@'76B^g[h]O_VB7<ۘ|_\0f%R[&*k+{Fߠi sU[UR2TbXu +Xs{lf' &ic{(C 6[clC>j\RMAb$%Zje%l91bn ]qҢZuM,̊b7fL&y:&YӉRMNV^-7\۪;xPZ}JW]q6=^u[׽Cg'{;boww;>;Cl) RWgPFP$-֝c[wFJ k,Mgg|GdGZ }cuX߰>eKp@>dj鿾M8*>ߪ*zBXݫUiƧ]<k]lZXٍ/jjvF_;ץŹDZۭ|u}mS87:.f=-,3g Zso@ !&xd JdI d2JUD|g>k_HaENVNaj˖P -ࠔ۫qUv0Vz͙qwǮ;4+M%842~9 9Uql?e<-;~Zm-g O{*KOur:2P{>t)eh:xR!1Sh¤Xb@a&l:H b @Y0Bcl"lN)Ws bfpA3\UZsdP@z܍@`Z 5=O/B %AV]8]reh:(JH Qj^B}ǖv î`U+,g9}8N2sg3[±sF pt!88~1${[ Gþ1׽q[Mم ?tX/(J8PX}WChmZ`4%u Fˮv>~͸Pp$nKPzt vj$seD) x`-'mhOçeW!9Z55ʱcrZ'Z; ,)M]J \oP.SK7Z*|QQ₍\Q+(pmc(H(FRZŬ](|q_e9Ji$R9QlKUٸ*\h1{14_tĴFA#=(ux⮳fNaQiL%zCҠt=E kx=JqeI$AkWs{'fYihVJlS/p z;zdsH駐n*.H$RtGr J/9sNԪm߇X=~2saIǺWGqkKk}C@Zˈrup M#8 %csƋe %!FaAK6G̗8$'I^FY300X8 BQA B0`6oP p@J d=2\Wt=} AOr`hՊjUՔ7a'n&8 5c["׳T,w(Zku5?MSr$u;H|c `$u٦ku;'!X9gȥȥD[YLZEMaȣB'S.3oP{g|>BgSĭ[;lm(2tN&:gWJ\ϸ4dIuhF2c=]l"=MP׺w=(C]'ԫm=l4 R`4 AIC9ɃR`XS)xHrr\trU.sӊ꾦5=9z>NmQLo) QRkt2tuGﱟ= sK_A)y:o#g'z;tdL'=NIH"ݘn=i:cBvd;֍c?z|8%8o3?)qpohD0?AOJclt[z,叫%mS"pBQgN[~!k1󐇾uԱ B7ZFumG&Veh: .nq`Q&EZBS A-՞oq@w!tTTkU2FDb3Nm]ĸPr)=rCd*rX)" eTb3#1\J MT䀔@z =ub^:e:HZ)v)vieZBcl3[VZD/33@*@$dʊuhMHe|@R[ /[~tPPHkAѵꊮu3#H(AQrDz >pm',S];KoN]kl8N2LvxKŢ8(H]8Kň)ïQ@n~ރn?fa_.6[x5cZ(q-()J=(nxt Gq(~_+>n9\ts3h! =xg2a{}K u8\F \NsD0El]$ 4ߠvK}ߘ"41-HhF-u5VUblO>'KF (-r 181JcHUq`#\9 n$LޜW* XmJS)= 5:'^\QH^~q2-eCX4' !);X=~hbUunc\w֝V->;RtGj?'O]E]ȱz;zde/'iV6`{7X|[ Cϯj9^O@rp66CP9}BXtДPH"5A/kX3j:nwsOe>r AMx= P{LkQϺl-%Ԩ'axX ?68 %cE.1rq1*\~iaU JJSյ盕 д]uc᐀< {qĝIӃLkiNMwi˰kYL"%шG iF$XBUm*8 5w#HD#,B01`0Π 9 $c ;D֮`X)^`]eUSn [d5k2er@:-{8We}85ѐpît‡ K܋,6d7@$xɋ #] a@0(x4bp |H`tq1YG_&{\S/*b핏:&^I/4S͡+L]ޑʔrv\oT֥mtcJ 7.b&N\?j 8M`b&8Mޛ:ԛĔ=ݜ*t=^ps"[-r[=!hEAysEb㋥>3˨ȥRFt2Sp$УX:ɤCqg5.xgz;ҍ#?s zW,f9ml!Ef1֍cL:Vi Z L/u㺣O&_mj ^|TWqQAMpp>f eMgyqE} %dPCB˝Uq~]=̇Hyp%t8!0͇|^>;FjӸ!d=V}})X;<{״0C_p ;[c&!\Gf` TWB֖vCuprmu!ށ,CӮsHJ7+in'O%4i"K E Bd@Tala/I {'] yQ'qrʰq@u^ ۆ` G.U)ӰS!c{3^H!źuo /~-5߃%*5P&/{~! |/DإetNw8[FיׂK i`'kWBܐdGn5,Rdlu]|UPy"jf"23'OZZk("B'( 8mAF$1'qKbNǁ[cPb2t7!]C1ynC3k:̬hf{50lY@9*%Xǡ5X@yrVtCY:Bck,Jּ.-\7S/$;ld!cR}fIvB:VuGG/'Rvܱz;zd6/VK"o4tАxSۂEh:HIa<Du9#'8"b# zJLkT~C h$.'Dq&*nםsbsD+!%,+Tz0Ua@C&Cfx;T?3iZ:FhvAA8%)X͗,m ϪgGZ poo(ˮ]Ӯs@>dm0d 2Aڞb i#D5KifJj!{n##ŢDȟMfGHPbX>8 g &v``l_[` ulNEXS 0!ppBa8 uagfbyi؝O@c?5co؋i}97:2FIRaq`! 0<`eh<%npL,],{@T{u<>p_\~.-ՎH|cJ6ϻaVg#N=8: _kf"H6iygȒx"o.xțQ)SF<뮮i]zy ^|!Uy CwJw '@6G"}.|9ECuxkk4RĆT|Y>յ$7˥.ҜA-+؈%qjG2JZ1'2)!lӡ/d̵K)kHsD%dmE3)qɠ"+&6Sh$Kd r&#$VՙZLFL kzG?t kƚQZ{(~ҸJ!}M0́锗dȄifw+$a`ѵĽz髡򸯤 XRXH"$ r:s(BӒBB!i1<, RHݛHptP 2D$;\"˰tЕYᑁ3Ձ*U[:+8Pm@jZFTl:$A5%BvpZ¢-ZGPtm\QMGEZ2b #GjұR!g-C^Xsg]OyzgEuunEYgP7YK!-ڢm-ͭme apu@в DʰG~ZRyI eH|:w:-ᇶvJ=5Z1!~sՅ{du|=Md;p"-ȥȅQM#ܽTL{؝x,׸=O|o5Z}8|uÎfeXZU3t>#'))+& ;v.jb̸#Q|IH AH# _>NiЅhd'?`SfGmha5?<;1)6ˤR\I/r1ZZ.3b%uU4<=JEIn.=cfH(ʸqWO _w}A'U)JcUi~cpu5ig.Cl&92u/.Pu;~K䀔@z\N8$eEDܖ e eDE㊞^HEO8 RQp: !1'iB|Yl-]AYr*D@_"ӕʙLbʒR!1'K?NY䔣`u-YEx{iNfFRQgf~[KK gǦd=b?X"X:(ISHk AM1 /)7Q-j"Zؚl2^*$Vێgv12mU h/vtUNW%.t_Mm.YI\bntcv1Y7bCupP6YU{֎}2 3GXbh 31O@hYSFL9h.~YZt*()2yn.1<C>Y3z*>l]KOdRR}h8&}ۇ$d.؀w~"fKb%#FCz5S 㹩ur9,IG w}:HxIC|a}h)2k0 -b%ϋ]w+)2G1}k 1Ĝo9]E^}1?J,w.)VR_) fh/`W'^ul0@粭->@M a*VWA^-u9){{lw{@R`l[ I1kJ5r%! S!K!eҏ`%v6pvtD$ԣ5(gbΎͨ֨8wS6R,0Ԏ(L䇲BAu5r[ 32kL~LERaAl< M oI@ڢGXf@|H ͽCs&+ޯ@<' 1X a2m3"FrCrXL2lW{wށs#9e=^S_>4s دw_Sˢbo\`zҖy$64w+Y %4' p<8qwX/c `}Wl#DZE"/*?< m郣y4ؑ;020|crh|RF L=X.[&mCon4נ}oV~Mۃɡm 7oV>q(8,SG!F Mp׎<XIgo*hA͕43`#w|(.Ggc,wnFp)5R`-(imYdJ锒'i71ROBY$b#? 
robustbase/inst/doc/lmrob_simulation.R0000644000176200001440000012701414124272516017660 0ustar liggesusers### R code from vignette source 'lmrob_simulation.Rnw' ################################################### ### code chunk number 1: initial-setup ################################################### ## set options options(width=60, warn=1) # see warnings where they happen (should eliminate) ## number of workers to start if(FALSE) {## good for pkg developers options(cores= max(1, parallel::detectCores() - 2)) } else { ## CRAN allows maximum of 2: options(cores= min(2, parallel::detectCores())) } ## Number of Repetitions: N <- 1000 ## get path (= ../inst/doc/ in source pkg) robustDoc <- system.file('doc', package='robustbase') robustDta <- robustDoc ## initialize (packages, data, ...): source(file.path(robustDoc, 'simulation.init.R')) # 'xtable' ## set the amount of trimming used in calculation of average results trim <- 0.1 ################################################### ### code chunk number 2: graphics-setup ################################################### ## load required packages for graphics stopifnot(require(ggplot2), require(GGally),# for ggpairs() which replaces ggplot2::plotmatrix() require(grid), require(reshape2)) source(file.path(robustDoc, 'graphics.functions.R')) `%||%` <- function (x, orElse) if (!is.null(x)) x else orElse ## set ggplot theme theme <- theme_bw(base_size = 10) theme$legend.key.size <- unit(1, "lines")# was 0.9 in pre-v.3 ggplot2 theme$plot.margin <- unit(c(1/2, 1/8, 1/8, 1/8), "lines")# was (1/2, 0,0,0) theme_set(theme) ## old and new ggplot2: stopifnot(is.list(theme_G <- theme$panel.grid.major %||% theme$panel.grid)) ## set default sizes for lines and points update_geom_defaults("point", list(size = 4/3)) update_geom_defaults("line", list(size = 1/4)) update_geom_defaults("hline", list(size = 1/4)) update_geom_defaults("smooth", list(size = 1/4)) ## alpha value for plots with many points alpha.error <- 0.3 alpha.n <- 0.4 ## set truncation limits used by f.truncate() & g.truncate.*: trunc <- c(0.02, 0.14) trunc.plot <- c(0.0185, 0.155) f.truncate <- function(x, up = trunc.plot[2], low = trunc.plot[1]) { x[x > up] <- up x[x < low] <- low x } g.truncate.lines <- geom_hline(yintercept = trunc, color = theme$panel.border$colour) g.truncate.line <- geom_hline(yintercept = trunc[2], color = theme$panel.border$colour) g.truncate.areas <- annotate("rect", xmin=rep(-Inf,2), xmax=rep(Inf,2), ymin=c(0,Inf), ymax=trunc, fill = theme_G$colour) g.truncate.area <- annotate("rect", xmin=-Inf, xmax=Inf, ymin=trunc[2], ymax=Inf, fill = theme_G$colour) legend.mod <- list(`SMD.Wtau` = quote('SMD.W'~tau), `SMDM.Wtau` = quote('SMDM.W'~tau), `MM.Avar1` = quote('MM.'~Avar[1]), `MMqT` = quote('MM'~~q[T]), `MMqT.Wssc` = quote('MM'~~q[T]*'.Wssc'), `MMqE` = quote('MM'~~q[E]), `MMqE.Wssc` = quote('MM'~~q[E]*'.Wssc'), `sigma_S` = quote(hat(sigma)[S]), `sigma_D` = quote(hat(sigma)[D]), `sigma_S*qE` = quote(q[E]*hat(sigma)[S]), `sigma_S*qT` = quote(q[T]*hat(sigma)[S]), `sigma_robust` = quote(hat(sigma)[robust]), `sigma_OLS` = quote(hat(sigma)[OLS]), `t1` = quote(t[1]), `t3` = quote(t[3]), `t5` = quote(t[5]), `cskt(Inf,2)` = quote(cskt(infinity,2)) ) ################################################### ### code chunk number 3: tab-psi-functions ################################################### ## get list of psi functions lst <- lapply(estlist$procedures, function(x) { if (is.null(x$args)) return(list(NULL, NULL, NULL)) if (!is.null(x$args$weight)) return(list(x$args$weight[2], 
round(f.psi2c.chi(x$args$weight[1]),3), round(f.eff2c.psi(x$args$efficiency, x$args$weight[2]),3))) return(list(x$args$psi, round(if (is.null(x$args$tuning.chi)) lmrob.control(psi=x$args$psi)$tuning.chi else x$args$tuning.chi,3), round(if (is.null(x$args$tuning.psi)) lmrob.control(psi=x$args$psi)$tuning.psi else x$args$tuning.psi,3))) }) lst <- unique(lst) ## because of rounding, down from 21 to 5 ! lst <- lst[sapply(lst, function(x) !is.null(x[[1]]))] # 5 --> 4 ## convert to table tbl <- do.call(rbind, lst) tbl[,2:3] <- apply(tbl[,2:3], 1:2, function(x) { gsub('\\$NA\\$', '\\\\texttt{NA}', paste('$', unlist(x), collapse=', ', '$', sep='')) }) tbl[,1] <- paste('\\texttt{', tbl[,1], '}', sep='') colnames(tbl) <- paste('\\texttt{', c('psi', 'tuning.chi', 'tuning.psi'), '}', sep='') require("xtable") # need also print() method: print(xtable(tbl), sanitize.text.function=identity, include.rownames = FALSE, floating=FALSE) ################################################### ### code chunk number 4: fig-psi-functions ################################################### getOption("SweaveHooks")[["fig"]]() d.x_psi <- function(x, psi) { cc <- lmrob.control(psi = psi)$tuning.psi data.frame(x=x, value=Mpsi(x, cc, psi), psi = psi) } x <- seq(0, 10, length.out = 1000) tmp <- rbind(d.x_psi(x, 'optimal'), d.x_psi(x, 'bisquare'), d.x_psi(x, 'lqq'), d.x_psi(x, 'hampel')) print( ggplot(tmp, aes(x, value, color = psi)) + geom_line(lwd=1.25) + ylab(quote(psi(x))) + scale_color_discrete(name = quote(psi ~ '-function'))) ################################################### ### code chunk number 5: fgen ################################################### f.gen <- function(n, p, rep, err) { ## get function name and parameters lerrfun <- f.errname(err$err) lerrpar <- err$args ## generate random predictors ret <- replicate(rep, matrix(do.call(lerrfun, c(n = n*p, lerrpar)), n, p), simplify=FALSE) attr(ret[[1]], 'gen') <- f.gen ret } ratios <- c(1/20, 1/10, 1/5, 1/3, 1/2)## p/n lsit <- expand.grid(n = c(25, 50, 100, 400), p = ratios) lsit <- within(lsit, p <- as.integer(n*p)) .errs.normal.1 <- list(err = 'normal', args = list(mean = 0, sd = 1)) for (i in 1:NROW(lsit)) assign(paste('rand',lsit[i,1],lsit[i,2],sep='_'), f.gen(lsit[i,1], lsit[i,2], rep = 1, err = .errs.normal.1)[[1]]) ################################################### ### code chunk number 6: fig-example-design ################################################### getOption("SweaveHooks")[["fig"]]() require(GGally) colnames(rand_25_5) <- paste0("X", 1:5) # workaround new (2014-12) change in GGally ## and the 2016-11-* change needs data frames: df.r_25_5 <- as.data.frame(rand_25_5) try( ## fails with old GGally and new packageVersion("ggplot2") >= "2.2.1.9000" print(ggpairs(df.r_25_5, axisLabels="show", title = "rand_25_5: n=25, p=5")) ) ################################################### ### code chunk number 7: lmrob_simulation.Rnw:371-372 ################################################### aggrResultsFile <- file.path(robustDta, "aggr_results.Rdata") ################################################### ### code chunk number 8: simulation-run ################################################### if (!file.exists(aggrResultsFile)) { ## load packages required only for simulation stopifnot(require(robust), require(skewt), require(foreach)) if (!is.null(getOption("cores"))) { if (getOption("cores") == 1) registerDoSEQ() ## no not use parallel processing else { stopifnot(require(doParallel)) if (.Platform$OS.type == "windows") { cl <- 
makeCluster(getOption("cores")) clusterExport(cl, c("N", "robustDoc")) clusterEvalQ(cl, slave <- TRUE) clusterEvalQ(cl, source(file.path(robustDoc, 'simulation.init.R'))) registerDoParallel(cl) } else registerDoParallel() } } else registerDoSEQ() ## no not use parallel processing for (design in c("dd", ls(pattern = 'rand_\\d+_\\d+'))) { print(design) ## set design estlist$design <- get(design) estlist$use.intercept <- !grepl('^rand', design) ## add design.predict: pc estlist$design.predict <- if (is.null(attr(estlist$design, 'gen'))) f.prediction.points(estlist$design) else f.prediction.points(estlist$design, max.pc = 2) filename <- file.path(robustDta, sprintf('r.test.final.%s.Rdata',design)) if (!file.exists(filename)) { ## run print(system.time(r.test <- f.sim(estlist, silent = TRUE))) ## save save(r.test, file=filename) ## delete output rm(r.test) ## run garbage collection gc() } } } ################################################### ### code chunk number 9: str-estlist ################################################### str(estlist, 1) ################################################### ### code chunk number 10: estl-errs ################################################### estlist$errs[[1]] ################################################### ### code chunk number 11: show-errs (eval = FALSE) ################################################### ## set.seed(estlist$seed) ## errs <- c(sapply(1:nrep, function(x) do.call(fun, c(n = nobs, args)))) ################################################### ### code chunk number 12: lmrob_simulation.Rnw:449-450 ################################################### str(estlist$output[1:3], 2) ################################################### ### code chunk number 13: simulation-aggr ################################################### if (!file.exists(aggrResultsFile)) { files <- list.files(robustDta, pattern = 'r.test.final\\.') res <- foreach(file = files) %dopar% { ## get design, load r.test, initialize other stuff design <- substr(basename(file), 14, nchar(basename(file)) - 6) cat(design, ' ') load(file.path(robustDta, file)) estlist <- attr(r.test, 'estlist') use.intercept <- if (!is.null(estlist$use.intercept)) estlist$use.intercept else TRUE sel <- dimnames(r.test)[[3]] ## [dimnames(r.test)[[3]] != "estname=lm"] n.betas <- paste('beta',1:(NCOL(estlist$design)+use.intercept),sep='_') ## get design lX <- if (use.intercept) as.matrix(cbind(1, get(design))) else as.matrix(get(design)) n <- NROW(lX) p <- NCOL(lX) ## prepare arrays for variable designs and leverages if (is.function(attr(estlist$design, 'gen'))) { lXs <- array(NA, c(n, NCOL(lX), dim(r.test)[c(1, 4)]), list(Obs = NULL, Pred = colnames(lX), Data = NULL, Errstr = dimnames(r.test)[[4]])) } ## generate errors lerrs <- array(NA, c(n, dim(r.test)[c(1,4)]) , list(Obs = NULL, Data = NULL, Errstr = dimnames(r.test)[[4]])) for (i in 1:dim(lerrs)[3]) { lerrstr <- f.list2str(estlist$errs[[i]]) lerr <- f.errs(estlist, estlist$errs[[i]], gen = attr(estlist$design, 'gen'), nobs = n, npar = NCOL(lX)) lerrs[,,lerrstr] <- lerr if (!is.null(attr(lerr, 'designs'))) { ## retrieve generated designs: this returns a list of designs lXs[,,,i] <- unlist(attr(lerr, 'designs')) if (use.intercept) stop('intercept not implemented for random desings') } rm(lerr) } if (is.function(attr(estlist$design, 'gen'))) { ## calculate leverages lXlevs <- apply(lXs, 3:4, .lmrob.hat) } ## calculate fitted values from betas if (!is.function(attr(estlist$design, 'gen'))) { ## fixed design case lfitted <- 
apply(r.test[,n.betas,sel,,drop=FALSE],c(3:4), function(bhat) { lX %*% t(bhat) } ) } else { ## variable design case lfitted <- array(NA, n*prod(dim(r.test)[c(1,4)])*length(sel)) lfitted <- .C('R_calc_fitted', as.double(lXs), ## designs as.double(r.test[,n.betas,sel,,drop=FALSE]), ## betas as.double(lfitted), ## result as.integer(n), ## n as.integer(p), ## p as.integer(dim(r.test)[1]), ## nrep as.integer(length(sel)), ## n procstr as.integer(dim(r.test)[4]), ## n errstr DUP=FALSE, NAOK=TRUE, PACKAGE="robustbase")[[3]] } tdim <- dim(lfitted) <- c(n, dim(r.test)[1], length(sel),dim(r.test)[4]) lfitted <- aperm(lfitted, c(1,2,4,3)) ## calculate residuals = y - fitted.values lfitted <- as.vector(lerrs) - as.vector(lfitted) dim(lfitted) <- tdim[c(1,2,4,3)] lfitted <- aperm(lfitted, c(1,2,4,3)) dimnames(lfitted) <- c(list(Obs = NULL), dimnames(r.test[,,sel,,drop=FALSE])[c(1,3,4)]) lresids <- lfitted rm(lfitted) ## calculate lm MSE and trim trimmed MSE of betas tf.MSE <- function(lbetas) { lnrm <- rowSums(lbetas^2) c(MSE=mean(lnrm,na.rm=TRUE),MSE.1=mean(lnrm,trim=trim,na.rm=TRUE)) } MSEs <- apply(r.test[,n.betas,,,drop=FALSE],3:4,tf.MSE) li <- 1 ## so we can reconstruct where we are lres <- apply(lresids,3:4,f.aggregate.results <- { function(lresid) { ## the counter li tells us, where we are ## we walk dimensions from left to right lcdn <- f.get.current.dimnames(li, dimnames(lresids), 3:4) lr <- r.test[,,lcdn[1],lcdn[2]] ## update counter li <<- li + 1 ## transpose and normalize residuals with sigma lresid <- t(lresid) / lr[,'sigma'] if (lcdn[1] != 'estname=lm') { ## convert procstr to proclst and get control list largs <- f.str2list(lcdn[1])[[1]]$args if (grepl('lm.robust', lcdn[1])) { lctrl <- list() lctrl$psi <- toupper(largs$weight2) lctrl$tuning.psi <- f.eff2c.psi(largs$efficiency, lctrl$psi) lctrl$method <- 'MM' } else { lctrl <- do.call('lmrob.control',largs) } ## calculate correction factors ## A lsp2 <- rowSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi)^2) ## B lspp <- rowSums(lpp <- Mpsi(lresid,lctrl$tuning.psi, lctrl$psi,1)) ## calculate Huber\'s small sample correction factor lK <- 1 + rowSums((lpp - lspp/n)^2)*NCOL(lX)/lspp^2 ## 1/n cancels } else { lK <- lspp <- lsp2 <- NA } ## only calculate tau variants if possible if (grepl('args.method=\\w*(D|T)\\w*\\b', lcdn[1])) { ## SMD or SMDM ## calculate robustness weights lwgts <- Mwgt(lresid, lctrl$tuning.psi, lctrl$psi) ## function to calculate robustified leverages tfun <- if (is.function(attr(estlist$design, 'gen'))) function(i) { if (all(is.na(wi <- lwgts[i,]))) wi else .lmrob.hat(lXs[,,i,lcdn[2]],wi) } else function(i) { if (all(is.na(wi <- lwgts[i,]))) wi else .lmrob.hat(lX, wi) } llev <- sapply(1:dim(r.test)[1], tfun) ## calculate unique leverages lt <- robustbase:::lmrob.tau(list(),h=llev,control=lctrl) ## normalize residuals with tau (transpose lresid) lresid <- t(lresid) / lt ## A lsp2t <- colSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi)^2) ## B lsppt <- colSums(Mpsi(lresid,lctrl$tuning.psi, lctrl$psi,1)) } else { lsp2t <- lsppt <- NA } ## calculate raw scales based on the errors lproc <- f.str2list(lcdn[1])[[1]] q <- NA M <- NA if (lproc$estname == 'lmrob.mar' && lproc$args$type == 'qE') { ## for lmrob_mar, qE variant lctrl <- lmrob.control(psi = 'bisquare', tuning.chi=uniroot(function(c) robustbase:::lmrob.bp('bisquare', c) - (1-p/n)/2, c(1, 3))$root) se <- apply(lerrs[,,lcdn[2]],2,lmrob.mscale,control=lctrl,p=p) ltmp <- se/lr[,'sigma'] q <- median(ltmp, na.rm = TRUE) M <- mad(ltmp, na.rm = TRUE) } else if 
(!is.null(lproc$args$method) && lproc$args$method == 'SMD') { ## for D-scales se <- apply(lerrs[,,lcdn[2]],2,lmrob.dscale,control=lctrl, kappa=robustbase:::lmrob.kappa(control=lctrl)) ltmp <- se/lr[,'sigma'] q <- median(ltmp, na.rm = TRUE) M <- mad(ltmp, na.rm = TRUE) } ## calculate empirical correct test value (to yield 5% level) t.val_2 <- t.val_1 <- quantile(abs(lr[,'beta_1']/lr[,'se_1']), 0.95, na.rm = TRUE) if (p > 1) t.val_2 <- quantile(abs(lr[,'beta_2']/lr[,'se_2']), 0.95, na.rm = TRUE) ## return output: summary statistics: c(## gamma AdB2.1 = mean(lsp2/lspp^2,trim=trim,na.rm=TRUE)*n, K2AdB2.1 = mean(lK^2*lsp2/lspp^2,trim=trim,na.rm=TRUE)*n, AdB2t.1 = mean(lsp2t/lsppt^2,trim=trim,na.rm=TRUE)*n, sdAdB2.1 = sd.trim(lsp2/lspp^2*n,trim=trim,na.rm=TRUE), sdK2AdB2.1 = sd.trim(lK^2*lsp2/lspp^2*n,trim=trim,na.rm=TRUE), sdAdB2t.1 = sd.trim(lsp2t/lsppt^2*n,trim=trim,na.rm=TRUE), ## sigma medsigma = median(lr[,'sigma'],na.rm=TRUE), madsigma = mad(lr[,'sigma'],na.rm=TRUE), meansigma.1 = mean(lr[,'sigma'],trim=trim,na.rm=TRUE), sdsigma.1 = sd.trim(lr[,'sigma'],trim=trim,na.rm=TRUE), meanlogsigma = mean(log(lr[,'sigma']),na.rm=TRUE), meanlogsigma.1 = mean(log(lr[,'sigma']),trim=trim,na.rm=TRUE), sdlogsigma = sd(log(lr[,'sigma']),na.rm=TRUE), sdlogsigma.1 = sd.trim(log(lr[,'sigma']),trim=trim,na.rm=TRUE), q = q, M = M, ## beta efficiency.1 = MSEs['MSE.1','estname=lm',lcdn[2]] / MSEs['MSE.1',lcdn[1],lcdn[2]], ## t-value: level emplev_1 = mean(abs(lr[,'beta_1']/lr[,'se_1']) > qt(0.975, n - p), na.rm = TRUE), emplev_2 = if (p>1) { mean(abs(lr[,'beta_2']/lr[,'se_2']) > qt(0.975, n - p), na.rm = TRUE) } else NA, ## t-value: power power_1_0.2 = mean(abs(lr[,'beta_1']-0.2)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.2 = if (p>1) { mean(abs(lr[,'beta_2']-0.2)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.4 = mean(abs(lr[,'beta_1']-0.4)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.4 = if (p>1) { mean(abs(lr[,'beta_2']-0.4)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.6 = mean(abs(lr[,'beta_1']-0.6)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.6 = if (p>1) { mean(abs(lr[,'beta_2']-0.6)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_0.8 = mean(abs(lr[,'beta_1']-0.8)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_0.8 = if (p>1) { mean(abs(lr[,'beta_2']-0.8)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, power_1_1 = mean(abs(lr[,'beta_1']-1)/lr[,'se_1'] > t.val_1, na.rm = TRUE), power_2_1 = if (p>1) { mean(abs(lr[,'beta_2']-1)/lr[,'se_2'] > t.val_2, na.rm = TRUE) } else NA, ## coverage probability: calculate empirically ## the evaluation points are constant, but the designs change ## therefore this makes only sense for fixed designs cpr_1 = mean(lr[,'upr_1'] < 0 | lr[,'lwr_1'] > 0, na.rm = TRUE), cpr_2 = mean(lr[,'upr_2'] < 0 | lr[,'lwr_2'] > 0, na.rm = TRUE), cpr_3 = mean(lr[,'upr_3'] < 0 | lr[,'lwr_3'] > 0, na.rm = TRUE), cpr_4 = mean(lr[,'upr_4'] < 0 | lr[,'lwr_4'] > 0, na.rm = TRUE), cpr_5 = if (any(colnames(lr) == 'upr_5')) { mean(lr[,'upr_5'] < 0 | lr[,'lwr_5'] > 0, na.rm = TRUE) } else NA, cpr_6 = if (any(colnames(lr) == 'upr_6')) { mean(lr[,'upr_6'] < 0 | lr[,'lwr_6'] > 0, na.rm = TRUE) } else NA, cpr_7 = if (any(colnames(lr) == 'upr_7')) { mean(lr[,'upr_7'] < 0 | lr[,'lwr_7'] > 0, na.rm = TRUE) } else NA ) }}) ## convert to data.frame lres <- f.a2df.2(lres, split = '___NO___') ## add additional info lres$n <- NROW(lX) lres$p <- NCOL(lX) lres$nmpdn <- with(lres, (n-p)/n) lres$Design <- design ## clean up rm(r.test, lXs, lXlevs, lresids, lerrs) gc() ## return 
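        ## (sketch of the return value: 'lres' just below is the data.frame of aggregated
        ##  summary statistics for this design; foreach() collects one such data.frame
        ##  per result file into 'res')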
lres lres } save(res, trim, file = aggrResultsFile) ## stop cluster if (exists("cl")) stopCluster(cl) } ################################################### ### code chunk number 14: simulation-aggr2 ################################################### load(aggrResultsFile) ## this will fail if the file is not found (for a reason) ## set eval to TRUE for chunks simulation-run and simulation-aggr ## if you really want to run the simulations again. ## (better fail with an error than run for weeks) ## combine list elements to data.frame test.1 <- do.call('rbind', res) test.1 <- within(test.1, { Method[Method == "SM"] <- "MM" Method <- Method[, drop = TRUE] Estimator <- interaction(Method, D.type, drop = TRUE) Estimator <- f.rename.level(Estimator, 'MM.S', 'MM') Estimator <- f.rename.level(Estimator, 'SMD.D', 'SMD') Estimator <- f.rename.level(Estimator, 'SMDM.D', 'SMDM') Estimator <- f.rename.level(Estimator, 'MM.qT', 'MMqT') Estimator <- f.rename.level(Estimator, 'MM.qE', 'MMqE') Estimator <- f.rename.level(Estimator, 'MM.rob', 'MMrobust') Estimator <- f.rename.level(Estimator, 'lsq.lm', 'OLS') Est.Scale <- f.rename.level(Estimator, 'MM', 'sigma_S') Est.Scale <- f.rename.level(Est.Scale, 'MMrobust', 'sigma_robust') Est.Scale <- f.rename.level(Est.Scale, 'MMqE', 'sigma_S*qE') Est.Scale <- f.rename.level(Est.Scale, 'MMqT', 'sigma_S*qT') Est.Scale <- f.rename.level(Est.Scale, 'SMDM', 'sigma_D') Est.Scale <- f.rename.level(Est.Scale, 'SMD', 'sigma_D') Est.Scale <- f.rename.level(Est.Scale, 'OLS', 'sigma_OLS') Psi <- f.rename.level(Psi, 'hampel', 'Hampel') }) ## add interaction of Method and Cov test.1 <- within(test.1, { method.cov <- interaction(Estimator, Cov, drop=TRUE) levels(method.cov) <- sub('\\.+vcov\\.(a?)[wacrv1]*', '\\1', levels(method.cov)) method.cov <- f.rename.level(method.cov, "MMa", "MM.Avar1") method.cov <- f.rename.level(method.cov, "MMrobust.Default", "MMrobust.Wssc") method.cov <- f.rename.level(method.cov, "MM", "MM.Wssc") method.cov <- f.rename.level(method.cov, "SMD", "SMD.Wtau") method.cov <- f.rename.level(method.cov, "SMDM", "SMDM.Wtau") method.cov <- f.rename.level(method.cov, "MMqT", "MMqT.Wssc") method.cov <- f.rename.level(method.cov, "MMqE", "MMqE.Wssc") method.cov <- f.rename.level(method.cov, "OLS.Default", "OLS") ## ratio: the closest 'desired ratios' instead of exact p/n; ## needed in plots only for stat_*(): median over "close" p/n's: ratio <- ratios[apply(abs(as.matrix(1/ratios) %*% t(as.matrix(p / n)) - 1), 2, which.min)] }) ## calculate expected values of psi^2 and psi' test.1$Ep2 <- test.1$Epp <- NA for(Procstr in levels(test.1$Procstr)) { args <- f.str2list(Procstr)[[1]]$args if (is.null(args)) next lctrl <- do.call('lmrob.control',args) test.1$Ep2[test.1$Procstr == Procstr] <- robustbase:::lmrob.E(psi(r)^2, lctrl, use.integrate = TRUE) test.1$Epp[test.1$Procstr == Procstr] <- robustbase:::lmrob.E(psi(r,1), lctrl, use.integrate = TRUE) } ## drop some observations, separate fixed and random designs test.fixed <- droplevels(subset(test.1, n == 20)) ## n = 20 -- fixed design test.1 <- droplevels(subset(test.1, n != 20)) ## n !=20 -- random designs test.lm <- droplevels(subset(test.1, Function == 'lm')) # lm = OLS test.1 <- droplevels(subset(test.1, Function != 'lm')) # Rob := all "robust" test.lm$Psi <- NULL test.lm.2 <- droplevels(subset(test.lm, Error == 'N(0,1)')) # OLS for N(*) test.2 <- droplevels(subset(test.1, Error == 'N(0,1)' & Function != 'lm'))# Rob for N(*) ## subsets test.3 <- droplevels(subset(test.2, Method != 'SMDM'))# Rob, not SMDM for N(*) 
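## Overview of these subsets (as read from the inline comments above and below):
##   test.1 : all robust fits, random designs, all error distributions
##   test.2 : test.1 restricted to N(0,1) errors
##   test.3 : test.2 without the SMDM estimators;  test.4 : test.1 without SMDM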
test.4 <- droplevels(subset(test.1, Method != 'SMDM'))# Rob, not SMDM for all ################################################### ### code chunk number 15: fig-meanscale ################################################### getOption("SweaveHooks")[["fig"]]() ## ## exp(mean(log(sigma))): this looks almost identical to mean(sigma) print(ggplot(test.3, aes(p/n, exp(meanlogsigma.1), color = Est.Scale)) + stat_summary(aes(x=ratio), # <- "rounded p/n": --> median over "neighborhood" fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Psi) + ylab(quote('geometric ' ~ mean(hat(sigma)))) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(test.3$Est.Scale))) ################################################### ### code chunk number 16: fig-sdscale-1 ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(test.3, aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + ylab(quote(sd(log(hat(sigma)))*sqrt(n))) + facet_wrap(~ Psi) + geom_point (data=test.lm.2, alpha=alpha.n, aes(color = Est.Scale)) + stat_summary(data=test.lm.2, aes(x=ratio, color = Est.Scale), fun.y=median, geom='line') + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels= lab(test.3 $Est.Scale, test.lm.2$Est.Scale))) ################################################### ### code chunk number 17: fig-sdscale-all ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(test.4, aes(p/n, sdlogsigma.1*sqrt(n), color = Est.Scale)) + ylim(with(test.4, range(sdlogsigma.1*sqrt(n)))) + ylab(quote(sd(log(hat(sigma)))*sqrt(n))) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = Error), alpha = alpha.error) + facet_wrap(~ Psi) + ## "FIXME" (?): the next 'test.lm' one give warnings geom_point (data=test.lm, aes(color = Est.Scale), alpha=alpha.n) + ##-> Warning: Removed 108 rows containing missing values (geom_point). stat_summary(data=test.lm, aes(x = ratio, color = Est.Scale), fun.y=median, geom='line') + ##-> Warning: Removed 108 rows containing non-finite values (stat_summary). 
g.scale_shape(labels=lab(test.4$Error)) + scale_colour_discrete("Scale Est.", labels=lab(test.4 $Est.Scale, test.lm$Est.Scale))) ################################################### ### code chunk number 18: fig-qscale ################################################### getOption("SweaveHooks")[["fig"]]() t3est2 <- droplevels(subset(test.3, Estimator %in% c("SMD", "MMqE"))) print(ggplot(t3est2, aes(p/n, q, color = Est.Scale)) + ylab(quote(q)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Psi) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale))) ################################################### ### code chunk number 19: fig-Mscale ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(t3est2, aes(p/n, M/q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + g.scale_y_log10_0.05() + facet_wrap(~ Psi) + ylab(quote(M/q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(t3est2$Est.Scale))) ################################################### ### code chunk number 20: fig-qscale-all ################################################### getOption("SweaveHooks")[["fig"]]() t1.bi <- droplevels(subset(test.1, Estimator %in% c("SMD", "MMqE") & Psi == 'bisquare')) print(ggplot(t1.bi, aes(p/n, q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 1) + g.scale_y_log10_1() + facet_wrap(~ Error) + ## labeller missing! ylab(quote(q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(tmp$Est.Scale)), legend.mod = legend.mod) ################################################### ### code chunk number 21: fig-Mscale-all ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(t1.bi, aes(p/n, M/q, color = Est.Scale)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_point(aes(shape = factor(n)), alpha = alpha.n) + g.scale_y_log10_0.05() + facet_wrap(~ Error) + ylab(quote(M/q)) + scale_shape_discrete(quote(n)) + scale_colour_discrete("Scale Est.", labels=lab(tmp$Est.Scale)), legend.mod = legend.mod) ################################################### ### code chunk number 22: fig-efficiency ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(test.2, aes(p/n, efficiency.1, color = Estimator)) + geom_point(aes(shape = factor(n)), alpha = alpha.n) + geom_hline(yintercept = 0.95) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + facet_wrap(~ Psi) + ylab(quote('efficiency of' ~~ hat(beta))) + g.scale_shape(quote(n)) + scale_colour_discrete(name = "Estimator", labels = lab(test.2$Estimator))) ################################################### ### code chunk number 23: fig-efficiency-all ################################################### getOption("SweaveHooks")[["fig"]]() t.1xt1 <- droplevels(subset(test.1, Error != 't1')) print(ggplot(t.1xt1, aes(p/n, efficiency.1, color = Estimator)) + ylab(quote('efficiency of '~hat(beta))) + geom_point(aes(shape = Error), alpha = alpha.error) + geom_hline(yintercept = 0.95) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + g.scale_shape(values=c(16,17,15,3,7,8,9,1,2,4)[-4], 
labels=lab(t.1xt1$Error)) + facet_wrap(~ Psi) + scale_colour_discrete(name = "Estimator", labels = lab(t.1xt1$Estimator))) ################################################### ### code chunk number 24: fig-AdB2-1 ################################################### getOption("SweaveHooks")[["fig"]]() t.2o. <- droplevels(subset(test.2, !is.na(AdB2t.1))) print(ggplot(t.2o., aes(p/n, AdB2.1/(1-p/n), color = Estimator)) + geom_point(aes(shape=factor(n)), alpha = alpha.n) + geom_point(aes(y=K2AdB2.1/(1-p/n)), alpha = alpha.n) + geom_point(aes(y=AdB2t.1), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + stat_summary(aes(x=ratio, y=K2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype=2) + stat_summary(aes(x=ratio, y=AdB2t.1), fun.y=median, geom='line', linetype=3) + geom_hline(yintercept = 1/0.95) + g.scale_y_log10_1() + scale_shape_discrete(quote(n)) + scale_colour_discrete(name = "Estimator", labels = lab(t.2o.$Estimator)) + ylab(quote(mean(hat(gamma)))) + facet_wrap(~ Psi)) ################################################### ### code chunk number 25: fig-sdAdB2-1 ################################################### getOption("SweaveHooks")[["fig"]]() t.2ok <- droplevels(subset(test.2, !is.na(sdAdB2t.1))) print(ggplot(t.2ok, aes(p/n, sdAdB2.1/(1-p/n), color = Estimator)) + geom_point(aes(shape=factor(n)), alpha = alpha.n) + geom_point(aes(y=sdK2AdB2.1/(1-p/n)), alpha = alpha.n) + geom_point(aes(y=sdAdB2t.1), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + stat_summary(aes(x=ratio, y=sdK2AdB2.1/(1-p/n)), fun.y=median, geom='line', linetype= 2) + stat_summary(aes(x=ratio, y=sdAdB2t.1), fun.y=median, geom='line', linetype= 3) + g.scale_y_log10_0.05() + scale_shape_discrete(quote(n)) + scale_colour_discrete(name = "Estimator", labels=lab(t.2ok$Estimator)) + ylab(quote(sd(hat(gamma)))) + facet_wrap(~ Psi)) ################################################### ### code chunk number 26: fig-emp-level ################################################### getOption("SweaveHooks")[["fig"]]() t.2en0 <- droplevels(subset(test.2, emplev_1 != 0)) print(ggplot(t.2en0, aes(p/n, f.truncate(emplev_1), color = method.cov)) + g.truncate.lines + g.truncate.areas + geom_point(aes(shape = factor(n)), alpha = alpha.n) + scale_shape_discrete(quote(n)) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + scale_colour_discrete(name = "Estimator", labels=lab(t.2en0$method.cov)) + ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) + facet_wrap(~ Psi)) ################################################### ### code chunk number 27: fig-lqq-level ################################################### getOption("SweaveHooks")[["fig"]]() tmp <- droplevels(subset(test.1, Psi == 'lqq' & emplev_1 != 0)) print(ggplot(tmp, aes(p/n, f.truncate(emplev_1), color = method.cov)) + ylab(quote("empirical level "~ list (H[0] : beta[1] == 0) )) + g.truncate.line + g.truncate.area + geom_point(aes(shape = factor(n)), alpha = alpha.n) + stat_summary(aes(x=ratio), fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + g.scale_shape(quote(n)) + scale_colour_discrete(name = "Estimator", labels=lab(tmp$method.cov)) + facet_wrap(~ Error) , legend.mod = legend.mod ) ################################################### ### code chunk number 28: fig-power-1-0_2 ################################################### getOption("SweaveHooks")[["fig"]]() t2.25 <- droplevels(subset(test.2, n == 25))# <-- fixed n ==> 
no need for 'ratio' tL2.25 <- droplevels(subset(test.lm.2, n == 25)) scale_col_D2.25 <- scale_colour_discrete(name = "Estimator (Cov. Est.)", labels=lab(t2.25 $method.cov, tL2.25$method.cov)) print(ggplot(t2.25, aes(p/n, power_1_0.2, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.2) )) + geom_point(# aes(shape = Error), alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) ################################################### ### code chunk number 29: fig-power-1-0_4 ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(t2.25, aes(p/n, power_1_0.4, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.4) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) ################################################### ### code chunk number 30: fig-power-1-0_6 ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(t2.25, aes(p/n, power_1_0.6, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.6) )) + geom_point(# aes(shape = Error), alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + scale_col_D2.25 + facet_wrap(~ Psi) ) ################################################### ### code chunk number 31: fig-power-1-0_8 ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(t2.25, aes(p/n, power_1_0.8, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 0.8) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) ################################################### ### code chunk number 32: fig-power-1-1 ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(t2.25, aes(p/n, power_1_1, color = method.cov)) + ylab(quote("empirical power "~ list (H[0] : beta[1] == 1) )) + geom_point(alpha = alpha.error) + stat_summary(fun.y=median, geom='line') + geom_point (data=tL2.25, alpha = alpha.n) + stat_summary(data=tL2.25, fun.y=median, geom='line') + ## g.scale_shape("Error", labels=lab(t2.25$Error)) + scale_col_D2.25 + facet_wrap(~ Psi) ) ################################################### ### code chunk number 33: fig-pred-points ################################################### getOption("SweaveHooks")[["fig"]]() pp <- f.prediction.points(dd)[1:7,] ## Worked in older ggplot2 -- now plotmatrix() is gone, to be replaced by GGally::ggpairs): ## tmp <- plotmatrix(pp)$data ## tmp$label <- as.character(1:7) ## print(plotmatrix(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5)) if(FALSE) { tmp <- ggpairs(pp)$data tmp$label <- as.character(1:7) # and now? 
} ## ggpairs() + geom_text() does *NOT* work {ggpairs has own class} ## print(ggpairs(dd) + geom_text(data=tmp, color = 2, aes(label=label), size = 2.5)) try( ## fails with old GGally and new packageVersion("ggplot2") >= "2.2.1.9000" print( ggpairs(dd) )## now (2016-11) fine ) ################################################### ### code chunk number 34: fig-cpr ################################################### getOption("SweaveHooks")[["fig"]]() n.cprs <- names(test.fixed)[grep('cpr', names(test.fixed))] # test.fixed: n=20 => no 'x=ratio' test.5 <- melt(test.fixed[,c('method.cov', 'Error', 'Psi', n.cprs)]) test.5 <- within(test.5, { Point <- as.numeric(do.call('rbind', strsplit(levels(variable), '_'))[,2])[variable] }) print(ggplot(test.5, aes(Point, f.truncate(value), color = method.cov)) + geom_point(aes(shape = Error), alpha = alpha.error) + g.truncate.line + g.truncate.area + stat_summary(fun.y=median, geom='line') + geom_hline(yintercept = 0.05) + g.scale_y_log10_0.05() + g.scale_shape(labels=lab(test.5$Error)) + scale_colour_discrete(name = "Estimator (Cov. Est.)", labels=lab(test.5$method.cov)) + ylab("empirical level of confidence intervals") + facet_wrap(~ Psi) ) ################################################### ### code chunk number 35: maxbias-fn ################################################### ## Henning (1994) eq 33: g <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) rho <- function(x) Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0) integrate(function(x) rho(((1 + theta^2)/s^2*x)^2)*dchisq(x, 1, mu^2/(1 + theta^2)), -Inf, Inf)$value }) ## Martin et al 1989 Section 3.2: for mu = 0 g.2 <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) lctrl$tuning.psi <- lctrl$tuning.chi robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)}) g.2.MM <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) robustbase:::lmrob.E(chi(sqrt(1 + theta^2)/s*r), lctrl, use.integrate = TRUE)}) ## Henning (1994) eq 30, one parameter case g.3 <- Vectorize(function(s, theta, mu, ...) { lctrl <- lmrob.control(...) rho <- function(x) Mchi(x, lctrl$tuning.chi, lctrl$psi, deriv = 0) int.x <- Vectorize(function(y) { integrate(function(x) rho((y - x*theta - mu)/s)*dnorm(x)*dnorm(y),-Inf, Inf)$value }) integrate(int.x,-Inf, Inf)$value }) inv.g1 <- function(value, theta, mu, ...) { g <- if (mu == 0) g.2 else g.3 uniroot(function(s) g(s, theta, mu, ...) - value, c(0.1, 100))$root } inv.g1.MM <- function(value, theta, mu, ...) { g <- if (mu == 0) g.2.MM else g.3.MM ret <- tryCatch(uniroot(function(s) g(s, theta, mu, ...) - value, c(0.01, 100)), error = function(e)e) if (inherits(ret, 'error')) { warning('inv.g1.MM: ', value, ' ', theta, ' ', mu,' -> Error: ', ret$message) NA } else { ret$root } } s.min <- function(epsilon, ...) inv.g1(0.5/(1 - epsilon), 0, 0, ...) s.max <- function(epsilon, ...) inv.g1((0.5-epsilon)/(1-epsilon), 0, 0, ...) BS <- Vectorize(function(epsilon, ...) { sqrt(s.max(epsilon, ...)/s.min(epsilon, ...)^2 - 1) }) l <- Vectorize(function(epsilon, ...) { sigma_be <- s.max(epsilon, ...) sqrt((sigma_be/inv.g1.MM(g.2.MM(sigma_be,0,0,...) + epsilon/(1-epsilon),0,0,...))^2 - 1) }) u <- Vectorize(function(epsilon, ...) { gamma_be <- s.min(epsilon, ...) max(l(epsilon, ...), sqrt((gamma_be/inv.g1.MM(g.2.MM(gamma_be,0,0,...) 
+ epsilon/(1-epsilon),0,0,...))^2 - 1)) }) ################################################### ### code chunk number 36: max-asymptotic-bias ################################################### asymptMBFile <- file.path(robustDta, 'asymptotic.max.bias.Rdata') if (!file.exists(asymptMBFile)) { x <- seq(0, 0.35, length.out = 100) rmb <- rbind(data.frame(l=l(x, psi = 'hampel'), u=u(x, psi = 'hampel'), psi = 'Hampel'), data.frame(l=l(x, psi = 'lqq'), u=u(x, psi = 'lqq'), psi = 'lqq'), data.frame(l=l(x, psi = 'bisquare'), u=u(x, psi = 'bisquare'), psi = 'bisquare'), data.frame(l=l(x, psi = 'optimal'), u=u(x, psi = 'optimal'), psi = 'optimal')) rmb$x <- x save(rmb, file=asymptMBFile) } else load(asymptMBFile) ################################################### ### code chunk number 37: fig-max-asymptotic-bias ################################################### getOption("SweaveHooks")[["fig"]]() print(ggplot(rmb, aes(x, l, color=psi)) + geom_line() + geom_line(aes(x, u, color=psi), linetype = 2) + xlab(quote("amount of contamination" ~~ epsilon)) + ylab("maximum asymptotic bias bounds") + coord_cartesian(ylim = c(0,10)) + scale_y_continuous(breaks = 1:10) + scale_colour_hue(quote(psi ~ '-function'))) robustbase/inst/doc/estimating.functions.R0000644000176200001440000005501012737461431020454 0ustar liggesusers## Called from ./lmrob_simulation.Rnw ## ~~~~~~~~~~~~~~~~~~~~~ ########################################################################### ## Prediction ########################################################################### f.predict <- function (object, newdata = NULL, scale = sigma(object), se.fit = FALSE, df = object$df.residual, interval = c('none', 'confidence', 'prediction'), level = 0.95, type = c('response'), terms = NULL, na.action = na.pass, pred.var = res.var/weights, weights = 1, cov = covariance.matrix(object), ...) { ## Purpose: replace predict.lmrob from robustbase package ## ---------------------------------------------------------------------- ## Arguments: See ?predict.lm ## type = 'presponse' ('term' is not supported) ## terms argument is ignored ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Sept 2009, 12:16 ## take as much from predict.lm as possible ## check arguments if (!missing(terms)) stop('predict.lmrob: terms argument is ignored') ## set data tt <- terms(object) if (missing(newdata) || is.null(newdata)) { mm <- X <- model.matrix(object) mmDone <- TRUE offset <- object$offset } else { Terms <- delete.response(tt) m <- model.frame(Terms, newdata, na.action = na.action, xlev = object$xlevels) if (!is.null(cl <- attr(Terms, "dataClasses"))) .checkMFClasses(cl, m) X <- model.matrix(Terms, m, contrasts.arg = object$contrasts) offset <- rep(0, nrow(X)) if (!is.null(off.num <- attr(tt, "offset"))) for (i in off.num) offset <- offset + eval(attr(tt, "variables")[[i + 1]], newdata) if (!is.null(object$call$offset)) offset <- offset + eval(object$call$offset, newdata) mmDone <- FALSE } n <- length(object$residuals) p <- object$rank if (p < ncol(X) && !(missing(newdata) || is.null(newdata))) warning("prediction from a rank-deficient fit may be misleading") beta <- coef(object) ## ignoring piv here predictor <- drop(X %*% beta) if (!is.null(offset)) predictor <- predictor + offset interval <- match.arg(interval) type <- match.arg(type) if (se.fit || interval != "none") { res.var <- scale^2 if (type != "terms") { if (p > 0) { ## this is probably not optimal... 
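                ## (descriptive note: 'ip' computed just below is diag(X %*% V %*% t(X)) with
                ##  V = cov(beta-hat), i.e. the variance of each fitted value; the t-quantile
                ##  times sqrt(ip) gives the confidence half-width, and times
                ##  sqrt(ip + pred.var) the prediction half-width)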
## cov <- covariance.matrix(object) ## set as argument ip <- diag(X %*% tcrossprod(cov, X)) } else ip <- rep(0, n) } } if (interval != "none") { tfrac <- qt((1 - level)/2, df) hwid <- tfrac * switch(interval, confidence = sqrt(ip), prediction = sqrt(ip + pred.var)) if (type != "terms") { predictor <- cbind(predictor, predictor + hwid %o% c(1, -1)) colnames(predictor) <- c("fit", "lwr", "upr") } } if (se.fit || interval != "none") se <- sqrt(ip) if (missing(newdata) && !is.null(na.act <- object$na.action)) { predictor <- napredict(na.act, predictor) if (se.fit) se <- napredict(na.act, se) } if (se.fit) list(fit = predictor, se.fit = se, df = df, residual.scale = sqrt(res.var)) else predictor } ## predict(obj, pred, interval = 'prediction') ## f.predict(obj, pred, interval = 'prediction') predict.lmRob <- function(object, newdata = NULL, scale = NULL, ...) { ## Purpose: extend predict() functionality to lmRob objects ## ---------------------------------------------------------------------- ## Arguments: ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Sept 2009, 12:16 class(object) <- c(class(object), "lm") object$qr <- qr(sqrt(weights(object)) * model.matrix(object)) if (missing(scale)) scale <- object$scale predict.lm(object, newdata = newdata, scale = scale, ...) } ########################################################################### ## some helper functions ########################################################################### f.lmRob <- function(...) { ## Purpose: wrapper for lmRob ## ---------------------------------------------------------------------- ## Arguments: see ?lmRob ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 10:28 ## get arguments args <- list(...) ## update defaults: if (is.null(args$mxr)) args$mxr <- 2000 if (is.null(args$mxf)) args$mxf <- 500 if (is.null(args$mxs)) args$mxs <- 2000 ## get all arguments except the arguments of lmRob: uw <- c('formula', 'data', 'weights', 'subset', 'na.action', 'model', 'x', 'y', 'contrasts', 'nrep', 'genetic.control') ind <- if (is.null(names(args))) rep(FALSE, length(args)) else names(args) != '' & !names(args) %in% uw ## they go into control: control <- do.call("lmRob.control", args[ind]) ## now call lmRob do.call("lmRob", c(args[!ind], list(control = control))) } ## lmRob(y ~ x, d.data, control = lmRob.control(initial.alg = 'fast', efficiency = 0.95, weight = c('bisquare', 'bisquare'))) ## lmRob(y ~ x, d.data, initial.alg = 'fast', efficiency = 0.95, weight = c('bisquare', 'bisquare')) ## f.lmRob(y ~ x, d.data, initial.alg = 'fast', efficiency = 0.95, weight = c('bisquare', 'bisquare')) f.lmRob.S <- function(... 
, robust.control = lmRob.control()) { ## Purpose: call the S estimation procedure of lmRob ## ---------------------------------------------------------------------- ## Arguments: x: design matrix x ## y: vector of observations ## robust.control: control list of lmRob.control() ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 29 Oct 2009, 14:54 ## code: from lmRob.fit.compute, robust package version 0.3-9 robust.control$initial.alg = 'random' robust.control$estim = 'Initial' z <- lmRob(..., control = robust.control) class(z) <- 'lmrob.S' z } ## f.lmRob.S(rep(1,10), rnorm(10), lmRob.control(weight = c('bisquare', 'bisquare'))) f.eff2c.psi <- function(eff, weight='bisquare') { ## Purpose: convert lmRob efficiencies to c.psi ## ---------------------------------------------------------------------- ## Arguments: eff: lmRob efficiency ## weight: type of weight (weight argument in lmRob.control) ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 15:36 if(is.null(eff)) return(NULL) lw = casefold(weight) if (lw == 'bisquare') { if (eff == 0.95) 4.685061 else if (eff == 0.9) 3.882646 else if (eff == 0.85) 3.443689 else if (eff == 0.8) 3.136909 else NA } else if (lw == 'optimal') { if (eff == 0.95) 1.060158 else if (eff == 0.9) 0.9440982 else if (eff == 0.85) 0.8684 else if (eff == 0.8) 0.8097795 else NA } else NA } f.psi2c.chi <- function(weight) { ## Purpose: return lmRob defaults for c.chi ## ---------------------------------------------------------------------- ## Arguments: weight: type of weight ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 28 Jan 2010, 10:05 switch(weight, 'bisquare' = 1.5477, 'optimal' = 0.4047) } residuals.lmrob.S <- function(obj) obj$residuals robustness.weights <- function(x, ...) UseMethod("robustness.weights") ## Purpose: retrieve robustness weights from robust regression return ## object ## ---------------------------------------------------------------------- ## Arguments: obj: robust regression output object ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:42 robustness.weights.lmrob <- robustness.weights.default <- function(obj) naresid(obj$na.action, obj$w) robustness.weights.lm <- function(obj) { if (any(class(obj) %in% c('lmrob', 'f.lmrob'))) stop('Caution: returning incorrect weights') naresid(obj$na.action, rep(1, length(obj$resid))) } robustness.weights.rlm <- function(obj) naresid(obj$na.action, obj$w) robustness.weights.lmRob <- function(obj) { if (obj$robust.control$weight[2] != 'Optimal') { c.psi <- f.eff2c.psi(obj$robust.control$efficiency, obj$robust.control$weight[2]) rs <- obj$residuals / obj$scale obj$M.weights <- Mwgt(rs, c.psi, obj$robust.control$weight[2]) } naresid(obj$na.action, obj$M.weights) } ## t <- f.lmRob(y ~ x, d.data) ## t <- f.lmrob(y ~ x, d.data, method = 'SM') ## t <- f.lmRob(y ~ x, d.data, initial.alg = 'fast', efficiency = 0.95, weight = c('bisquare', 'bisquare')) ## t <- lmRob(y ~ x, d.data, control = lmRob.control(initial.alg = 'fast', efficiency = 0.95, weight = c('bisquare', 'bisquare'))) ## robustness.weights(t) robustness.weights.lmrob.S <- function(obj) { rstand <- resid(obj)/sigma(obj) Mwgt(rstand, obj$control$tuning.chi, obj$control$psi) } ## MM: Why on earth is this called covariance.matrix() ?? -- S and R standard is vcov() !! 
## -- For lm, they are indeed identical; for lmrob, too ## HOWEVER, the *.rlm() method of cov..matrix() *differs* from vcov.rlm() -- why? covariance.matrix <- function(x, ...) UseMethod("covariance.matrix") ## Purpose: retrieve covariance matrix from robust regression return ## object ## ---------------------------------------------------------------------- ## Arguments: obj: robust regression output object ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:42 covariance.matrix.lmrob <- covariance.matrix.default <- function(obj) obj$cov covariance.matrix.rlm <- function(obj, method = 'XtWX') summary(obj, method)$cov covariance.matrix.lm <- function(obj) { s <- summary(obj) s$cov * s$sigma^2 } sigma <- function(x, ...) UseMethod("sigma") ## Purpose: retrieve scale estimate from robust regression return ## object ## ---------------------------------------------------------------------- ## Arguments: obj: robust regression output object ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:42 sigma.lmrob <- sigma.default <- function(obj) obj$scale sigma.lm <- function(obj) summary(obj)$sigma sigma.rlm <- function(obj) obj$s converged <- function(x, ...) UseMethod("converged") ## Purpose: check convergence status of return object ## ---------------------------------------------------------------------- ## Arguments: obj: robust regression output object ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:42 converged.default <- function(obj) is.list(obj) && !is.null(obj$converged) && obj$converged converged.lm <- function(obj) if (is.null(obj$converged)) TRUE else obj$converged converged.lmRob <- function(obj) is.list(obj) && !is.null(obj$est) && obj$est == 'final' ########################################################################### ## alternative estimation methods ########################################################################### lmrob.u <- function(formula, data, subset, weights, na.action, ..., start) { ## Purpose: update lmrob object if possible ## ---------------------------------------------------------------------- ## Arguments: (lmrob arguments) ## start: object to update ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 28 Jul 2010, 08:30 args <- as.list(match.call())[-1] args$start <- NULL if (!missing(start)) { ## if start is a string, get start from parent if (is.character(start)) start <- get(start, envir=parent.frame()) if (class(start) == 'lmrob') { ## check whether we can update start easily oldargs <- as.list(start$call)[-1] if (isTRUE(all.equal(args, oldargs))) return(start) else { ret <- start ## check method argument (ignore cov argument) if (is.null(oldargs$method)) oldargs$method <- start$control$method if (oldargs$method == 'MM') oldargs$method <- 'SM' if (is.null(args$method) || args$method == 'MM') args$method <- 'SM' rest.ok <- isTRUE(all.equal(oldargs[!names(oldargs) %in% c('method', 'cov')], args[!names(args) %in% c('method', 'cov')])) if (is.null(start$x)) stop('x matrix not found. 
Use lmrob(..., x = TRUE).') if (args$method != oldargs$method && rest.ok) { ## method is different, but the rest is the same oldsteps <- strsplit(oldargs$method, "")[[1]] steps <- strsplit(args $method, "")[[1]] ## reduce start to largest common initial estimator while(length(oldsteps) > length(steps) || any(oldsteps != steps[seq_along(oldsteps)])) { elems <- c('na.action', 'offset', 'contrasts', 'xlevels', 'terms', 'model', 'x', 'y', 'degree.freedom', 'df.residual', 'call') ret <- c(ret$init, start[elems[elems %in% names(ret)]]) class(ret) <- 'lmrob' oldsteps <- oldsteps[-length(oldsteps)] } ret$call$method <- args$method steps <- steps[- seq_along(oldsteps)] if (length(steps) > 0) { ret$cov <- NULL for (step in steps) { ret <- switch(step, D = lmrob..D..fit(ret), M = lmrob..M..fit(obj = ret), N = { y <- model.response(ret$model) ## taus are standardized because otherwise ## the resulting efficiency is lower tau <- ret$tau / mean(ret$tau) tmp <- lmrob..M..fit(x = ret$x/tau, y = y/tau, obj = ret) tmp$residuals <- y - ret$x %*% ret$coef tmp$qr <- NULL tmp }, stop("only M or D steps supported")) if (!ret$converged) { warning(step, "-step did NOT converge.") break } } } else { if (is.null(ret$qr)) ret$qr <- qr(ret$x * sqrt(ret$weights)) ret$rank <- ret$qr$rank } } ## update covariance matrix if (rest.ok) { if (is.null(args$cov)) args$cov <- lmrob.control(method=ret$control$method)$cov ret$cov <- vcov(ret, args$cov) ret$control$cov <- args$cov ret$call$cov <- args$cov return(ret) } } } } ## if we're here, update failed or there was no start cl <- match.call() cl$start <- NULL cl[[1]] <- as.symbol("lmrob") eval(cl, envir = parent.frame()) } ## lmrob.u <- function(formula, data, subset, weights, na.action, ..., start) ## { ## cl <- match.call() ## cl$start <- NULL ## cl[[1]] <- as.symbol("lmrob") ## eval(cl, envir = parent.frame()) ## ## do.call('lmrob', args, envir = parent.frame()) ## } ## set.seed(0); d.data <- data.frame(y = rnorm(10), x = 1:10) ## lres <- lmrob(y ~ x, d.data, method = 'SM', psi = 'lgw', cov = '.vcov.avar1') ## obj1 <- lmrob(y ~ x, d.data, method = 'SM', psi = 'lgw', cov = '.vcov.w') ## test <- lmrob.u(y ~ x, d.data, method = 'SM', psi = 'lgw', cov = '.vcov.w', ## start = 'lres') ## all.equal(obj1, test) ## obj2 <- lmrob(y ~ x, d.data, method = 'SMD', psi = 'lgw', cov = '.vcov.w') ## test <- lmrob.u(y ~ x, d.data, method = 'SMD', psi = 'lgw', ## start = 'lres') ## all.equal(obj2, test[names(obj2)], check.attr = FALSE) ## obj3 <- lmrob(y ~ x, d.data, method = 'SMDM', psi = 'lgw', cov = '.vcov.w') ## test <- lmrob.u(y ~ x, d.data, method = 'SMDM', psi = 'lgw', ## start = 'lres') ## all.equal(obj3, test[names(obj3)], check.attr = FALSE) ## test <- lmrob.u(y ~ x, d.data, method = 'SMDM', psi = 'lgw', ## start = 'obj2') ## all.equal(obj3, test[names(obj3)], check.attr = FALSE) ## test <- lmrob.u(y ~ x, d.data, method = 'SM', psi = 'lgw', cov = '.vcov.w', ## start = obj3) ## all.equal(obj1, test[names(obj1)], check.attr = FALSE) ##' Compute the MM-estimate with corrections qE or qT as in ##' Maronna, R. A., Yohai, V. J., 2010. ##' Correcting MM estimates for "fat" data sets. ##' Computational Statistics & Data Analysis 54 (12), 3168–3173. ##' @title MM-estimate with Maronna-Yohai(2010) corrections ##' @param formula ##' @param data ##' @param subset ##' @param weights ##' @param na.action ##' @param ... 
##' @param type ##' @return ##' @author Manuel Koller lmrob.mar <- function(formula, data, subset, weights, na.action, ..., type = c("qE", "qT")) { ## get call and modify it so that ## lmrob returns the appropriate S-estimate cl <- match.call() method <- if (is.null(cl$method)) { if (!is.null(cl$control)) list(...)[["control"]]$method else 'MM' } else cl$method cl$type <- NULL cl$method <- 'S' cov <- if(!is.null(cl$cov)) cl$cov else '.vcov.w' cl$cov <- 'none' cl[[1]] <- as.symbol("lmrob") ## get S-estimate obj <- eval(cl, envir = parent.frame()) ## correct S-scale estimate according to formula n <- length(obj$resid) p <- obj$rank type <- match.arg(type) ## for type qE: adjust tuning.chi (h0) to account for different delta if (type == 'qE') { if (obj$control$psi != 'bisquare')## FIXME: "tukey" should work, too stop('lmrob.mar: type qE is only available for bisquare psi') h0 <- uniroot(function(c) robustbase:::lmrob.bp('bisquare', c) - (1-p/n)/2, c(1, 3))$root ## update scale obj$scale <- obj$scale * obj$control$tuning.chi / h0 obj$control$tuning.chi <- h0 } ## calculate q q <- switch(type, "qT" = { rs <- obj$resid / obj$scale ## \hat a = \mean \rho(r/sigma)^2 ## obj$control$tuning.chi == h_0 ahat <- mean(Mpsi(rs, obj$control$tuning.chi, obj$control$psi)^2) ## \hat b = \mean \rho''(r/sigma) bhat <- mean(Mpsi(rs, obj$control$tuning.chi, obj$control$psi, 1)) ## \hat c = \mean \rho'(r/sigma) * r/sigma chat <- mean(Mpsi(rs, obj$control$tuning.chi, obj$control$psi)*rs) ## qT: 1 + p*ahat/n/2/bhat/chat }, "qE" = 1 / (1 - (1.29 - 6.02/n)*p/n) , stop("unknown type ", type)) ## update scale obj$scale.uncorrected <- obj$scale obj$scale <- q * obj$scale ## add M step if requested if (method %in% c('MM', 'SM')) { obj$control$cov <- cov obj <- lmrob..M..fit(obj = obj) ## construct a proper lmrob object elems <- c('na.action', 'offset', 'contrasts', 'xlevels', 'terms', 'model', 'x', 'y') obj <- c(obj, obj$init.S[elems[elems %in% names(obj$init.S)]]) obj$degree.freedom <- obj$df.residual <- n - obj$rank } else if (method != 'S') stop("lmrob.mar: Only method = S, SM and MM supported.") ## update class class(obj) <- 'lmrob' ## return obj } ## summary(lmrob(y ~ x, d.data)) ## summary(lmrob.mar(y ~ x, d.data, type = 'qE')) ## summary(tmp <- lmrob.mar(y ~ x, d.data, type = 'qT')) ## this function calculates M-estimate of scale ## with constants as used for S-estimate with maximum breakdown point lmrob.mscale <- function(e, control, p = 0L) { ret <- .C("R_lmrob_S", x = as.double(e), ## this is ignored y = as.double(e), n = as.integer(length(e)), p = as.integer(p), ## divide the sum by n - p nResample = 0L, ## find scale only scale = as.double(mad(e)), coef = double(1), as.double(control$tuning.chi), as.integer(.psi2ipsi(control$psi)), as.double(control$bb), ## delta best_r = as.integer(control$best.r.s), groups = as.integer(control$groups), n.group = as.integer(control$n.group), k.fast.s = as.integer(control$k.fast.s), k.max = as.integer(control$k.max), maxit.scale = as.integer(control$maxit.scale), refine.tol = as.double(control$refine.tol), inv.tol = as.double(control$solve.tol), converged = logical(1), trace.lev = as.integer(0), mts = as.integer(control$mts), ss = robustbase:::.convSs(control$subsampling), fast.s.large.n = as.integer(length(e)+1), PACKAGE = 'robustbase') ret$scale } lmrob.dscale <- function(r, control, kappa = robustbase:::lmrob.kappa(control = control)) { tau <- rep.int(1, length(r)) w <- Mwgt(r, control$tuning.psi, control$psi) scale <- sqrt(sum(w * r^2) / kappa / sum(tau^2*w)) psi <- 
control$psi c.psi <- robustbase:::.psi.conv.cc(psi, control$tuning.psi) ret <- .C("R_find_D_scale", r = as.double(r), kappa = as.double(kappa), tau = as.double(tau), length = as.integer(length(r)), scale = as.double(scale), c = as.double(c.psi), ipsi = .psi2ipsi(psi), type = 3L, ## dt1 as only remaining option rel.tol = as.double(control$rel.tol), k.max = as.integer(control$k.max), converged = logical(1), PACKAGE = 'robustbase') ret$scale } ## sd.trim function by Gregor Gorjanc ## from http://ggorjan.blogspot.com/2008/11/trimmed-standard-deviation.html ## with added correction factor to be unbiased at the normal sd.trim <- function(x, trim=0, na.rm=FALSE, ...) { if(!is.numeric(x) && !is.complex(x) && !is.logical(x)) { warning("argument is not numeric or logical: returning NA") return(NA_real_) } if(na.rm) x <- x[!is.na(x)] if(!is.numeric(trim) || length(trim) != 1) stop("'trim' must be numeric of length one") n <- length(x) if(trim > 0 && n > 0) { if(is.complex(x)) stop("trimmed sd are not defined for complex data") if(trim >= 0.5) return(0) lo <- floor(n * trim) + 1 hi <- n + 1 - lo x <- sort.int(x, partial = unique(c(lo, hi)))[lo:hi] } corr <- if (0 < trim && trim < 0.5) { z <- qnorm(trim, lower.tail=FALSE)# = Phi^{-1}(1 - tr) sqrt(1 - 2/(1-2*trim) *z*dnorm(z)) } else 1 sd(x)/corr } robustbase/inst/doc/fastMcd-kmini.Rnw0000644000176200001440000003113213012615634017324 0ustar liggesusers\documentclass[11pt, a4paper]{article} \usepackage[a4paper, text={16cm,25cm}]{geometry} %\VignetteIndexEntry{covMcd() -- Generalizing the FastMCD} %\VignetteDepends{robustbase} \SweaveOpts{prefix.string=mcd, eps=FALSE, pdf=TRUE, strip.white=true} \SweaveOpts{width=6, height=4.1} \usepackage{amsmath} \usepackage{amsfonts}% \mathbb \usepackage{mathtools}% -> \floor, \ceil \usepackage[utf8]{inputenc} %% The following is partly R's share/texmf/Rd.sty \usepackage{color} \usepackage{hyperref} \definecolor{Blue}{rgb}{0,0,0.8} \definecolor{Red}{rgb}{0.7,0,0} \hypersetup{% hyperindex,% colorlinks={true},% pagebackref,% linktocpage,% plainpages={false},% linkcolor={Blue},% citecolor={Blue},% urlcolor={Red},% pdfstartview={Fit},% pdfview={XYZ null null null}% } \usepackage{natbib} \usepackage[noae]{Sweave} %---------------------------------------------------- \DeclarePairedDelimiter{\ceil}{\lceil}{\rceil} \DeclarePairedDelimiter{\floor}{\lfloor}{\rfloor} \DeclareMathOperator{\sign}{sign} \newcommand{\abs}[1]{\left| #1 \right|} \newtheorem{definition}{Definition} \newcommand{\byDef}{\mathrm{by\ default}} \newcommand{\R}{{\normalfont\textsf{R}}{}} \newcommand{\code}[1]{\texttt{#1}} \newcommand*{\pkg}[1]{\texttt{#1}} \newcommand*{\CRANpkg}[1]{\href{https://CRAN.R-project.org/package=#1}{\pkg{#1}}} %---------------------------------------------------- \begin{document} \setkeys{Gin}{width=0.9\textwidth} \setlength{\abovecaptionskip}{-5pt} \title{covMcd() -- Considerations about Generalizing the FastMCD} \author{Martin M\"achler} \maketitle %\tableofcontents %% %% Pison, G., Van Aelst, S., and Willems, G. (2002) %% Small Sample Corrections for LTS and MCD. %% Metrika % ~/save/papers/robust-diverse/Pison_VanAelst_Willems.pdf %% <>= # set margins for plots options(SweaveHooks=list(fig=function() par(mar=c(3,3,1.4,0.7), mgp=c(1.5, 0.5, 0))), width = 75) @ \section{Introduction} The context is robust multivariate ``location and scatter'' estimation, which corresponds to estimating the first two moments in cases they exist. 
We assume data and a model \begin{align} \label{eq:data-model} x_i & \in \mathbb{R}^p, \ \ i=1,2,\dots,n \\ x_i & \sim \mathcal{F}(\mu, \Sigma), \ \ \mathrm{i.i.d.};\ \ \mu \in \mathbb{R}^p, \ \ \Sigma \in \mathbb{R}^{p \times p}, \ \textrm{positive definite}, \end{align} where a conceptual null model is the $p$-dimensional normal distribution. One typical assumption is that $\mathcal{F}$ is a mixture with the majority component (``good data'') being $\mathcal{N}_p(\mu, \Sigma)$ and other components modeling ``the outliers''. In other words, we want estimates $\bigl(\hat{\mu}, \hat{\Sigma}\bigr)$ which should be close to the true ``good data'' $(\mu, \Sigma)$ --- and do not say more here. \section{MCD and ``the Fast'' MCD (= \textsc{fastmcd}) Algorithm} The \CRANpkg{robustbase} \R{} package has featured a function \code{covMcd()} since early on (Feb.~2006) and that has been an interface to the Fortran routine provided by the original authors and (partly) described in \citet{RouPvD99}. %% Rousseeuw, P. J. and van Driessen, K. (1999) %% A fast algorithm for the minimum covariance determinant estimator. %% Technometrics {41}, 212--223. %% >> ~/save/papers/robust-diverse/Rousseeuw_VanD-FastMCD_1999.pdf % ------------------------------------------------------------ We describe shortly how the algorithm works, partly building on the documentation provided in the source (R, S, and Fortran) codes: %% R CMD Rdconv --type=latex ../../man/covMcd.Rd > covMcd.tex The minimum covariance determinant estimator of location and scatter (MCD) implemented in \code{covMcd()} is similar to \R{} function \code{cov.mcd()} in \CRANpkg{MASS}. The (``theoretical'') MCD looks for the $h = h_\alpha (> 1/2)$ out of $n$ observations whose classical covariance matrix has the lowest possible determinant. In more detail, we will use $h = h_\alpha = h(\alpha,n,p) \approx \alpha \cdot (n+p+1)$, where as \citet{RouPvD99} mainly use (the default) $\alpha = \frac 1 2$, where $h = h(1/2, n, p) = \floor[\Big]{\frac{n+p+1}{2}}$. For general $\alpha \ge \frac 1 2$, the \R{} implementation (derived from their original S code) uses $h = h(\alpha,n,p) =$ \code{h.alpha.n(alpha,n,p)} (function in \pkg{robustbase}), which is \begin{eqnarray} \label{eq:def-h} h = h_\alpha = h(\alpha,n,p) := \floor{2 n_2 - n + 2 \alpha (n - n_2)}, \ \mathrm{where\ } n_2 := \floor[\Big]{\frac{n+p+1}{2}}% %= (n+p+1)/2 \ \ (\mathrm{\ where ``/'' denotes \emph{integer} division}) . \end{eqnarray} The fraction $\alpha \ge \frac 1 2$ can be chosen by the user, where $\alpha = \frac 1 2$ is the most robust, and indeed, $h_{1/2} = n_2 = \floor[\Big]{\frac{n+p+1}{2}}$. Even in general, as long as $n \gg p$, $\alpha$ is approximately the \emph{proportion} of the subsample size $h$ in the full sample (size $n$): \begin{equation} \label{eq:h.approx} h \approx \alpha \cdot n \iff \alpha \approx \frac{h}{n}, \end{equation} <>= require(robustbase) n <- c(5, 10, 20, 30, 50, 100, 200, 500) hmat <- function(alpha, p) cbind(n, h.alpha = h.alpha.n (alpha, n,p), h. 
= floor(alpha * (n + p + 1)), alpha.n = round(alpha * n)) hmat(alpha = 1/2, p = 3) hmat(alpha = 3/4, p = 4) @ The breakdown point (for $h > \frac{n}{2}$) then is \begin{eqnarray} \label{eq:breakdown} \epsilon_{*} = \frac{n-h+1}{n}, \end{eqnarray} which is less than but close to $\frac 1 2$ for $\alpha = \frac 1 2$, and in general, $h/n \approx \alpha$, the breakdown point is approximately, \begin{eqnarray} \label{eq:eps-approx} \epsilon_{*} = \frac{n-h+1}{n} \approx \frac{n-h}{n} = 1 - \frac{h}{n} \approx 1 - \alpha. \end{eqnarray} The raw MCD estimate of location, say $\hat{\mu}_0$, is then the average of these $h$ points, whereas the raw MCD estimate of scatter, $\hat{\Sigma}_0$, is their covariance matrix, multiplied by a consistency factor \code{.MCDcons(p, h/n)}) and (by default) a finite sample correction factor \code{.MCDcnp2(p, n, alpha)}, to make it consistent at the normal model and unbiased at small samples. %% Both rescaling factors (consistency and finite sample) are returned in the length-2 vector %% \code{raw.cnp2}. In practice, for reasonably sized $n$, $p$ and hence $h$, it is not feasible to search the full space of all $n \choose h$ $h$-subsets of $n$ observations. Rather, the implementation of \code{covMcd} uses the Fast MCD algorithm of \citet{RouPvD99} to approximate the minimum covariance determinant estimator, see Section~\ref{sec:fastMCD}. Based on these raw MCD estimates, $\bigl(\hat{\mu}_0, \hat{\Sigma}_0\bigr)$, % (unless argument \code{raw.only} is true), a reweighting step is performed, i.e., \code{V <- cov.wt(x,w)}, where \code{w} are weights determined by ``outlyingness'' with respect to the scaled raw MCD, using the ``Mahalanobis''-like, robust distances $d_i\bigl(\hat{\mu}_0, \hat{\Sigma}_0\bigr)$, see (\ref{eq:Maha}). Again, a consistency factor and %(if \code{use.correction} is true) a finite sample correction factor %(\code{.MCDcnp2.rew(p, n, alpha)}) are applied. The reweighted covariance is typically considerably more efficient than the raw one, see \citet{PisGvAW02}. The two rescaling factors for the reweighted estimates are returned in \code{cnp2}. Details for the computation of the finite sample correction factors can be found in \citet{PisGvAW02}. \section{Fast MCD Algorithm -- General notation}\label{sec:fastMCD} \paragraph{Note:} In the following, apart from the mathematical notation, we also use variable names, e.g., \code{kmini}, used in the Fortran and sometimes \R{} function code, in \R{} package \CRANpkg{robustbase}. Instead of directly searching for $h$-subsets (among ${n \choose h} \approx {n \choose n/2}$) the basic idea is to start with small subsets of size $p+1$, their center $\mu$ and covariance matrix $\Sigma$, and a corresponding $h$-subset of the $h$ observations with smallest (squared) (``Mahalanobis''-like) distances \begin{align} \label{eq:Maha} d_i = d_i(\mu,\Sigma) := (x_i - \mu)' \Sigma^{-1} (x_i - \mu), \ \ i=1,2,\dots,n, \end{align} and then use concentration steps (``C~steps'') to (locally) improve the chosen set by iteratively computing $\mu$, $\Sigma$, new distances $d_i$ and a new set of size $h$ with smallest distances $d_i(\mu,\Sigma)$. Each C~step is proven to decrease the determinant $\det(\Sigma)$ if $\mu$ and $\Sigma$ did change at all. Consequently, convergence to a local minimum is sure, as the number of $h$-subsets is finite. 
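For illustration only (the actual computation is done in the compiled Fortran code), a single
C~step can be sketched in \R{} as follows, where \code{x} is the $n \times p$ data matrix,
\code{idx} holds the row indices of the current $h$-subset, and the helper name \code{cStep}
is ours, not part of \pkg{robustbase}:
<<cstep-sketch, eval=FALSE>>=
## one concentration step: recompute (mu, Sigma) from the current subset and
## keep the h observations with smallest squared distances d_i, cf. (eq:Maha)
cStep <- function(x, idx, h) {
    mu  <- colMeans(x[idx, , drop = FALSE])
    Sig <- cov(x[idx, , drop = FALSE])
    d2  <- mahalanobis(x, center = mu, cov = Sig)
    order(d2)[1:h]
}
@
Iterating \code{cStep()} until the subset no longer changes can only decrease $\det(\Sigma)$
and hence terminates; a full implementation additionally starts from many random
$(p+1)$-subsets, as described above, and has to deal with (nearly) singular $\Sigma$.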
To make the algorithm \emph{fast} for non small sample size $n$ the data set is split into ``groups'' or ``sub-datasets'' as soon as \begin{eqnarray} \label{eq:nmini} n \ge 2 n_0, \ \mathrm{ where}\ \ n_0 := \mathtt{nmini} \ ( = 300, \byDef). \end{eqnarray} i.e., the default cutoff for ``non small'' is at $n = 600$. %% The \emph{number} of such subsets in the original algorithm is maximally 5, and we now use \begin{eqnarray} \label{eq:kmini} k_M = \code{kmini} \ (= 5, \byDef), \end{eqnarray} as upper limit. As above, we assume from now on that $n \ge 2 n_0$, and let \begin{eqnarray} \label{eq:k-def} k := \floor[\Big]{\frac{n}{n_0}} \ge 2 \end{eqnarray} and now distinguish the two cases, \begin{eqnarray} \label{eq:cases} \begin{cases} A. & k < k_M \iff n < k_M \cdot n_0 \\ B. & k \ge k_M \iff n \ge k_M \cdot n_0 \end{cases} \end{eqnarray} \begin{description} \item[In case A] $k$ (\code{= ngroup}) subsets aka ``groups'' or ``sub datasets'' are used, $k \in\{2,3,\dots,k_M-1\}$, of group sizes $n_j$, $j=1,\dots,k$ (see below). Note that case~A may be empty because of $2 \le k < k_M$, namely if $k_M=2$. Hence, in case~A, we have $k_M \ge 3$. \item[in case B] $k_M$ (\code{= ngroup}) groups each of size $n_0$ are built and in the first stage, only a \emph{subset} of $k_M \cdot n_0 \le n$ observations is used. \end{description} In both cases, the disjoint groups (``sub datasets'') are chosen at random from the $n$ observations. %% For the group sizes for case~A, $n_j$, $j=1,\dots,k$, we have \begin{align} n_1 = \; & \floor[\Big]{\frac n k} = \floor[\bigg]{\frac{n}{\floor[\big]{\frac{n}{n_0}}}} \ \ (\; \ge n_0 \label{eq:n1})\\ n_j = \; & n_1,\hspace*{2.8em} j = 2,\dots,j_* \\ n_j = \; & n_1 + 1, \ \ \ j = j_* +1,\dots,k, \label{n1-plus-1}\\ & \qquad \mathrm{where}\ \ j_* := k - r \ \in \{1,\dots,k\}, \label{jstar}\\ & \qquad \mathrm{and}\ \ r := n - k n_1 = \label{r-rest} n - k\floor[\big]{\frac n k} \in \{0,1,\dots,k-1\}, \end{align} where the range of $j_*$, $1,\dots,k$ in (\ref{jstar}) is a consequence of the range of the integer division remainder $r \in \{0,1,\dots,k-1\}$ in (\ref{r-rest}). Consequently, (\ref{n1-plus-1}) maybe empty, namely iff $r=0$ ($\iff n = k \cdot n_1$ is a multiple of $k$): $j_* = k$, and all $n_j \equiv n_1$. Considering the range of $n_j$ in case~A, the minimum $n_1 \ge n_0$ in (\ref{eq:n1}) is easy to verify. What is the maximal value of $n_j$ , i.e., an upper bound for $n_{\max} := n_1+1 \ge \max_j n_j$? \ %% %% This is all correct but not useful: %% From (\ref{eq:n1}), $ n/k - 1 < n_1 \le n/k $, and %% from (\ref{eq:k-def}), $n/n_0 - 1 < k \le n/n_0$. %% Putting these two together, we get %% \begin{eqnarray} %% \label{eq:n1-ineq} %% \frac{n^2}{n_0} - 1 \le n/k - 1 < n_1 \le n/k < \frac{n n_0}{n - n_0}, %% \end{eqnarray} %% (the first $\le$ from $\frac{1}{k} \ge \frac{n_0}{n}$; the last $<$ from %% $\frac{1}{k} < \frac 1{n/n_0 -1} = \frac{n_0}{n-n_0}$.) Also, %% from (\ref{eq:k-def}), $n \ge k n_0$ and $n-n_0 \ge (k-1)n_0$ and since we %% are in case~A, $n < n_0 k_M$, which combines to %% \begin{eqnarray} %% \label{eq:nn0} %% \frac{n n_0}{n - n_0} < \frac{(n_0 k_M) n_0}{(k-1)n_0} = \frac{n_0 k_M}{k-1}. %% \end{eqnarray} Consider $n_{1,\max}(k) = \max_{n, \mathrm{given\ } k} n_1 = \max_{n, \mathrm{given\ } k} \floor{\frac n k}$. Given $k$, the maximal $n$ still fulfilling $\floor[\big]{\frac{n}{n_0}} = k$ is $n = (k+1)n_0 - 1$ where $\floor[\big]{\frac{n}{n_0}} = k + \floor[\big]{1 - \frac{1}{n_0}} = k$. 
Hence, $n_{1,\max}(k) =\floor[\big]{\frac{(k+1)n_0 - 1}{k}} = n_0 + \floor[\big]{\frac{n_0 - 1}{k}}$,
and as $k \ge 2$, the maximum is at $k=2$,
$\max n_1 = \max_k n_{1,\max}(k) = n_0 + \floor[\big]{\frac{n_0 - 1}{2}} = \floor[\big]{\frac{3 n_0 - 1}{2}}$.
Taken together, as $n_j = n_1+1$ is possible, we have
\begin{align} \label{eq:nj-range}
  n_0 \le & n_1 \le \floor[\Big]{\frac{3 n_0 - 1}{2}} \nonumber\\
  n_0 \le & n_j \le \floor[\Big]{\frac{3 n_0 + 1}{2}}, \ \ j \ge 2.
\end{align}
Note that indeed, $\floor[\big]{\frac{3 n_0 + 1}{2}}$ is the length of the auxiliary vector
\code{subndex} in the Fortran code.

\bibliographystyle{chicago}
\bibliography{robustbase}
\end{document}
robustbase/inst/doc/asymptotic.max.bias.Rdata0000644000176200001440000000774112321016053021021 0ustar liggesusers
robustbase/inst/doc/psi_functions.Rnw0000644000176200001440000004152413012615634017527 0ustar liggesusers\documentclass[11pt, a4paper]{article}
\usepackage[a4paper, text={16cm,25cm}]{geometry}
%\VignetteIndexEntry{Definitions of Psi-Functions Available in Robustbase}
%\VignetteDepends{robustbase}
\SweaveOpts{prefix.string=psi, eps=FALSE, pdf=TRUE, strip.white=true}
\SweaveOpts{width=6, height=4.1, echo=FALSE, fig=TRUE} %% --------------------- !!
\usepackage{amsmath}
\usepackage{amsfonts}% \mathbb
\usepackage{natbib}
\usepackage[utf8]{inputenc}
\newcommand{\abs}[1]{\left| #1 \right|}
\DeclareMathOperator{\sign}{sign}
\newcommand{\R}{\mathbb{R}}
\newcommand{\code}[1]{\texttt{#1}}
\newcommand*{\pkg}[1]{\texttt{#1}}
\newtheorem{definition}{Definition}
%% The following is R's share/texmf/Rd.sty
\usepackage{color}
\usepackage{hyperref}
\definecolor{Blue}{rgb}{0,0,0.8}
\definecolor{Red}{rgb}{0.7,0,0}
\hypersetup{%
hyperindex,%
colorlinks={true},%
pagebackref,%
linktocpage,%
plainpages={false},%
linkcolor={Blue},%
citecolor={Blue},%
urlcolor={Red},%
pdfstartview={Fit},%
pdfview={XYZ null null null}%
}
<>=
# set margins for plots
options(SweaveHooks=list(fig=function() par(mar=c(3,3,1.4,0.7), mgp=c(1.5, 0.5, 0))))
## x axis for plots: x.
<- seq(-5, 10, length=1501) require(robustbase) <>= source(system.file("xtraR/plot-psiFun.R", package = "robustbase", mustWork=TRUE)) @% = ../inst/xtraR/plot-psiFun.R --> p.psiFun() --> robustbase:::matPlotPsi() {for nice legends; lines ..} \begin{document} \setkeys{Gin}{width=0.9\textwidth} \setlength{\abovecaptionskip}{-5pt} \title{Definitions of $\psi$-Functions Available in Robustbase} \author{Manuel Koller and Martin M\"achler} \maketitle \tableofcontents \section*{Preamble} Unless otherwise stated, the following definitions of functions are given by \citet[p. 31]{MarRMY06}, however our definitions differ sometimes slightly from theirs, as we prefer a different way of \emph{standardizing} the functions. To avoid confusion, we first define $\psi$- and $\rho$-functions. \begin{definition}\label{def.psi} A \emph{$\psi$-function} is a piecewise continuous function $\psi: \R \to \R$ such that \begin{enumerate} \item $\psi$ is odd, i.e., \ $\psi(-x) = -\psi(x) \: \forall x$, \item $\psi(x) \ge 0$ for $x \ge 0$, and $\psi(x) > 0$ for $0 < x < x_r := \sup\{\tilde x : \psi(\tilde x) > 0\}$ \ \ ($x_r > 0$, possibly $x_r = \infty$). \item[3*] Its slope is $1$ at $0$, i.e., $\displaystyle \psi'(0) = 1$. \end{enumerate} Note that `3*' is not strictly required mathematically, but we use it for standardization in those cases where $\psi$ is continuous at 0. Then, it also follows (from 1.) that $\psi(0) = 0$, and we require $\psi(0)=0$ also for the case where $\psi$ is discontinuous in 0, as it is, e.g., for the M-estimator defining the median. \end{definition} \begin{definition} A \emph{$\rho$-function} can be represented by the following % definite integral of a $\psi$-function, \begin{equation}\label{def.rho} \rho(x) = \int_0^x \psi(u) du\;, \end{equation} which entails that $\rho(0) = 0$ and $\rho$ is an even function. \end{definition} A $\psi$-function is called \emph{redescending} if $\psi(x) = 0$ for all $x \ge x_r$ for $x_r < \infty$, and $x_r$ is often called \emph{rejection point}. Corresponding to a redescending $\psi$-function, we define the function $\tilde\rho$, a version of $\rho$ standardized such as to attain maximum value one. Formally, \begin{equation} \label{eq:tilde-rho} \tilde\rho(x) = \rho(x)/\rho(\infty). \end{equation} Note that $\rho(\infty) = \rho(x_r) \equiv \rho(x) \ \forall \abs{x} >= x_r$. $\tilde\rho$ is a $\rho$-function as defined in \citet{MarRMY06} and has been called $\chi$ function in other contexts. For example, in package \pkg{robustbase}, \code{Mchi(x, *)} computes $\tilde\rho(x)$, whereas \code{Mpsi(x, *, deriv=-1)} (``(-1)-st derivative'' is the primitive or antiderivative)) computes $\rho(x)$, both according to the above definitions. \textbf{Note:} An alternative slightly more general definition of \emph{redescending} would only require $\rho(\infty) := \lim_{x\to\infty}\rho(x)$ to be finite. E.g., \texttt{"Welsh"} does \emph{not} have a finite rejection point, but \emph{does} have bounded $\rho$, and hence well defined $\rho(\infty)$, and we \emph{can} use it in \texttt{lmrob()}.\footnote{E-mail Oct.~18, 2014 to Manuel and Werner, proposing to change the definition of ``redescending''.} %% \section{Weak Redescenders} %% \subsection{t_nu score functions} %% t_1 (=Cauchy) has been propagated as "Lorentzian merit function" %% regression for outlier detection \paragraph{Weakly redescending $\psi$ functions.}\ Note that the above definition does require a finite rejection point $x_r$. 
Consequently, e.g., the score function $s(x) = -f'(x)/f(x)$ for the Cauchy ($= t_1$)
distribution, $s(x) = 2x/(1+x^2)$, is non-monotone and ``re-descends'' to 0 for
$x\to \pm\infty$; the rescaled version $\psi_C(x) := s(x)/2$ also fulfills
${\psi_C}'(0) = 1$, but it has $x_r=\infty$ and hence $\psi_C()$ is \emph{not} a
redescending $\psi$-function in our sense.
As such functions appear, e.g., in the MLE for $t_\nu$, we call $\psi$-functions
fulfilling $\lim_{x\to\infty}\psi(x) = 0$ \emph{weakly redescending}.
Note that they naturally fall into two subcategories, namely those with a
\emph{finite} $\rho$-limit, i.e., $\rho(\infty) := \lim_{x\to\infty}\rho(x) < \infty$,
and those, such as the $t_\nu$ score functions above, for which $\rho(x)$ is
unbounded even though $\rho' = \psi$ tends to zero.
%% --> ../../TODO section 'Psi/Rho/Chi/Wgt Functions'
%%                         ~~~~~~~~~~
%%
%% FIXME: where?? MM: can no longer find it in Hampel et al(1986) \citet{hamfrrs86}.
%% FIXME: 0) Mention our psi_func class // and the C interface for "the other" functions
%%   ----- i.e., we currently have *both* and in addition there is all
%%         the (to be *deprecated* !) ../R/biweight-funs.R (& ../man/tukeyChi.Rd & ../man/tukeyPsi1.Rd)
%%
%% FIXME: 1) explain plot() {the plot method of psi_func}
%% FIXME: 2) Show how to compute asymptotic efficiency and breakdown point:
%%    -------
%%   a) end of ../../tests/psi-rho-etc.R has aeff.P() and bp.P() and chkP()
%%      which now uses the psi_func class to compute these *analytically*
%%   b) Of course, Manuel had used the numeric integration only,
%%      in ../../R/lmrob.MM.R, lmrob.efficiency(psi, cc, ...) and lmrob.bp(psi, cc, ...)
%%                             ~~~~~~~~~~~~~~~~~~
%%   c) *REALLY* nice general solution is via PhiI() in ../../R/psi-rho-funs.R
%%      for all piecewise polynomial psi()/rho()       ~~~~~~~~~~~~~~~~~~~~~~
%%\clearpage
\section{Monotone $\psi$-Functions}
Monotone $\psi$-functions lead to convex $\rho$-functions such that the corresponding
M-estimators are defined uniquely.

Historically, the ``Huber function'' was the first $\psi$-function, proposed by
Peter Huber in \citet{HubP64}.

\clearpage
\subsection{Huber}
The family of Huber functions is defined as,
\begin{align*}
  \rho_k(x) = {}& \left\{ \begin{array}{ll}
    \frac{1}{2} x^2 & \mbox{ if } \abs{x} \leq k \\
    k(\abs{x} - \frac{k}{2})& \mbox{ if } \abs{x} > k
  \end{array} \right. \;,\\
  \psi_k(x) = {} & \left\{ \begin{array}{ll}
    x & \mbox{ if } \abs{x} \leq k \\
    k \ \sign(x)& \mbox{ if } \abs{x} > k
    %% -k & \mbox{ if } x < -k \\
    %%  k & \mbox{ if } x > k
  \end{array} \right. \;.
\end{align*}
The constant $k$ for $95\%$ efficiency of the regression estimator is $1.345$.
\begin{figure}[h]
  \centering
<>=
plot(huberPsi, x., ylim=c(-1.4, 5), leg.loc="topright", main=FALSE)
@
\caption{Huber family of functions using tuning parameter $k = 1.345$.}
\end{figure}
\bigskip
\section{Redescenders}
For the MM-estimators and their generalizations available via \texttt{lmrob()}
(and for some methods of \texttt{nlrob()}), the $\psi$-functions are all
redescending, i.e., with finite ``rejection point''
$x_r = \sup\{t; \psi(t) > 0\} < \infty$.
From \texttt{lmrob}, the psi functions are available via \texttt{lmrob.control},
or more directly, \texttt{.Mpsi.tuning.defaults},
<>=
names(.Mpsi.tuning.defaults)
@ %$
and their $\psi$, $\rho$, $\psi'$, and weight function $w(x) := \psi(x)/x$, are
all computed efficiently via C code, and are defined and visualized in the
following subsections.
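Before turning to the individual families, the relations above can be checked
numerically with the exported functions \code{Mpsi()}, \code{Mchi()}, \code{Mwgt()},
and \code{MrhoInf()}; the following chunk is only an illustrative sanity check and is
not evaluated when the vignette is built:
<<psi-relations, echo=TRUE, eval=FALSE, fig=FALSE>>=
xx <- seq(-5, 10, length.out = 1501)
x0 <- xx[xx != 0]                     # avoid 0/0 in the psi(x)/x check below
cc <- .Mpsi.tuning.default("bisquare")
stopifnot(
  all.equal(Mchi(xx, cc, "bisquare"),                   # rho~(x) = rho(x)/rho(Inf)
            Mpsi(xx, cc, "bisquare", deriv = -1) / MrhoInf(cc, "bisquare")),
  all.equal(Mwgt(x0, cc, "bisquare"),                   # w(x) = psi(x)/x
            Mpsi(x0, cc, "bisquare") / x0),
  all.equal(Mpsi(0, cc, "bisquare", deriv = 1), 1))     # standardization psi'(0) = 1
@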
\clearpage \subsection{Bisquare} Tukey's bisquare (aka ``biweight'') family of functions is defined as, \begin{equation*} \tilde\rho_k(x) = \left\{ \begin{array}{cl} 1 - \bigl(1 - (x/k)^2 \bigr)^3 & \mbox{ if } \abs{x} \leq k \\ 1 & \mbox{ if } \abs{x} > k \end{array} \right.\;, \end{equation*} with derivative ${\tilde\rho_k}'(x) = 6\psi_k(x) / k^2$ where, \begin{equation*} \psi_k(x) = x \left( 1 - \left(\frac{x}{k}\right)^2\right)^2 \cdot I_{\{\abs{x} \leq k\}}\;. \end{equation*} The constant $k$ for $95\%$ efficiency of the regression estimator is $4.685$ and the constant for a breakdown point of $0.5$ of the S-estimator is $1.548$. Note that the \emph{exact} default tuning constants for M- and MM- estimation in \pkg{robustbase} are available via \code{.Mpsi.tuning.default()} and \code{.Mchi.tuning.default()}, respectively, e.g., here, % \begin{small} <>= print(c(k.M = .Mpsi.tuning.default("bisquare"), k.S = .Mchi.tuning.default("bisquare")), digits = 10) @ % \end{small} and that the \code{p.psiFun(.)} utility is available via %\begin{small} <>= <> @ %\end{small} %\enlargethispage{3ex} \begin{figure}[h] \centering <>= p.psiFun(x., "biweight", par = 4.685) @ \caption{Bisquare family functions using tuning parameter $k = 4.685$.} \end{figure} \clearpage \subsection{Hampel} The Hampel family of functions \citep{hamfrrs86} is defined as, \begin{align*} \tilde\rho_{a, b, r}(x) ={}& \left\{ \begin{array}{ll} \frac{1}{2} x^2 / C & \abs{x} \leq a \\ \left( \frac{1}{2}a^2 + a(\abs{x}-a)\right) / C & a < \abs{x} \leq b \\ \frac{a}{2}\left( 2b - a + (\abs{x} - b) \left(1 + \frac{r - \abs{x}}{r-b}\right) \right) / C & b < \abs{x} \leq r \\ 1 & r < \abs{x} \end{array} \right. \;, \\ \psi_{a, b, r}(x) ={}& \left\{ \begin{array}{ll} x & \abs{x} \leq a \\ a \ \sign(x) & a < \abs{x} \leq b \\ a \ \sign(x) \frac{r - \abs{x}}{r - b}& b < \abs{x} \leq r \\ 0 & r < \abs{x} \end{array} \right.\;, \end{align*} where $ C := \rho(\infty) = \rho(r) = \frac{a}{2}\left( 2b - a + (r - b) \right) = \frac{a}{2}(b-a + r)$. As per our standardization, $\psi$ has slope $1$ in the center. The slope of the redescending part ($x\in[b,r]$) is $-a/(r-b)$. If it is set to $-\frac 1 2$, as recommended sometimes, one has \begin{equation*} r = 2a + b\;. \end{equation*} Here however, we restrict ourselves to $a = 1.5 k$, $b = 3.5 k$, and $r = 8k$, hence a redescending slope of $-\frac 1 3$, and vary $k$ to get the desired efficiency or breakdown point. The constant $k$ for $95\%$ efficiency of the regression estimator is $0.902$ (0.9016085, to be exact) and the one for a breakdown point of $0.5$ of the S-estimator is $0.212$ (i.e., 0.2119163). %% --> ../R/lmrob.MM.R, .Mpsi.tuning.defaults .Mchi.tuning.defaults \begin{figure}[h] \centering <>= ## see also hampelPsi p.psiFun(x., "Hampel", par = ## Default, but rounded: round(c(1.5, 3.5, 8) * 0.9016085, 1)) @ \caption{Hampel family of functions using tuning parameters $0.902 \cdot (1.5, 3.5, 8)$.} \end{figure} \clearpage \subsection{GGW}\label{ssec:ggw} The Generalized Gauss-Weight function, or \emph{ggw} for short, is a generalization of the Welsh $\psi$-function (subsection \ref{ssec:Welsh}). In \citet{ks2011} it is defined as, \begin{equation*} %% \label{eq:ggw} \psi_{a, b, c}(x) = \left\{ \begin{array}{ll} x & \abs{x} \leq c \\ \exp\left(-\frac{1}{2}\frac{(\abs{x} - c)^b}{a}\right)x & \abs{x} > c \end{array} \right. \;. 
\end{equation*} Our constants, fixing $b=1.5$, and minimial slope at $- \frac 1 2$, for $95\%$ efficiency of the regression estimator are $a = 1.387$, $b = 1.5$ and $c = 1.063$, and those for a breakdown point of $0.5$ of the S-estimator are $a = 0.204$, $b = 1.5$ and $c = 0.296$: <>= cT <- rbind(cc1 = .psi.ggw.findc(ms = -0.5, b = 1.5, eff = 0.95 ), cc2 = .psi.ggw.findc(ms = -0.5, b = 1.5, bp = 0.50)); cT @ Note that above, \code{cc*[1]}$= 0$, \code{cc*[5]}$ = \rho(\infty)$, and \code{cc*[2:4]}$ = (a, b, c)$. To get this from $(a,b,c)$, you could use <>= ipsi.ggw <- .psi2ipsi("GGW") # = 5 ccc <- c(0, cT[1, 2:4], 1) integrate(.Mpsi, 0, Inf, ccc=ccc, ipsi=ipsi.ggw)$value # = rho(Inf) @ \begin{figure}[h] \centering <>= p.psiFun(x., "GGW", par = c(-.5, 1, .95, NA)) @ \caption{GGW family of functions using tuning parameters $a=1.387$, $b=1.5$ and $c=1.063$.} \end{figure} \clearpage \subsection{LQQ} The ``linear quadratic quadratic'' $\psi$-function, or \emph{lqq} for short, was proposed by \citet{ks2011}. It is defined as, \begin{equation*} \psi_{b,c,s}(x) = \left\{ \begin{array}{ll} x & \abs{x} \leq c \\ \sign(x)\left(\abs{x} - \frac{s}{2b}\left(\abs{x} - c\right)^2 \right) & c < \abs{x} \leq b + c \\ \sign(x)\left(c+b-\frac{bs}{2} + \frac{s-1}{a} \left(\frac{1}{2}\tilde x^2 - a\tilde x\right) \right) & b + c < \abs{x} \leq a + b + c \\ 0 & \mbox{otherwise,} \end{array} \right. \end{equation*} where \begin{equation} \tilde x := \abs{x} - b - c \ \ \mathrm{and}\ \ a := (2c + 2b - bs)/(s-1).\label{lqq.a} \end{equation} The parameter $c$ determines the width of the central identity part. The sharpness of the bend is adjusted by $b$ while the maximal rate of descent is controlled by $s$ ($s = 1 - \min_x\psi'(x) > 1$). From (\ref{lqq.a}), the length $a$ of the final descent to $0$ is a function of $b$, $c$ and $s$. <>= cT <- rbind(cc1 = .psi.lqq.findc(ms= -0.5, b.c = 1.5, eff=0.95, bp=NA ), cc2 = .psi.lqq.findc(ms= -0.5, b.c = 1.5, eff=NA , bp=0.50)) colnames(cT) <- c("b", "c", "s"); cT @ If the minimal slope is set to $-\frac 1 2$, i.e., $s = 1.5$, and $b/c = 3/2 = 1.5$, the constants for $95\%$ efficiency of the regression estimator are $b=1.473$, $c=0.982$ and $s=1.5$, and those for a breakdown point of $0.5$ of the S-estimator are $b=0.402$, $c=0.268$ and $s=1.5$. \begin{figure}[h] \centering <>= p.psiFun(x., "LQQ", par = c(-.5,1.5,.95,NA)) @ \caption{LQQ family of functions using tuning parameters $b=1.473$, $c=0.982$ and $s=1.5$.} \end{figure} \clearpage \subsection{Optimal} The optimal $\psi$ function as given by \citet[Section~5.9.1]{MarRMY06}, \begin{equation*} \psi_c(x) = \sign(x)\left(-\frac{\varphi'(\abs{x}) + c} {\varphi(\abs{x})}\right)_+\;, \end{equation*} where $\varphi$ is the standard normal density, $c$ is a constant and $t_+ := \max(t, 0)$ denotes the positive part of $t$. Note that the \pkg{robustbase} implementation uses rational approximations originating from the \pkg{robust} package's implementation. That approximation also avoids an anomaly for small $x$ and has a very different meaning of $c$. The constant for $95\%$ efficiency of the regression estimator is $1.060$ and the constant for a breakdown point of $0.5$ of the S-estimator is $0.405$. 
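Such efficiency and breakdown-point statements can be verified numerically for any of the
$\psi$-functions in this vignette.
The following sketch is not evaluated here, and the helper names \code{effPsi()} and
\code{bpPsi()} are ours, not part of \pkg{robustbase}; it uses the classical expressions
$\bigl(\mathrm{E}[\psi'(X)]\bigr)^2 / \mathrm{E}[\psi(X)^2]$ for the asymptotic efficiency
and $\mathrm{E}[\tilde\rho(X)]$ for the breakdown point of the S-estimator, with $X$
standard normal:
<<eff-bp-check, echo=TRUE, eval=FALSE, fig=FALSE>>=
E.norm <- function(f) integrate(function(x) f(x) * dnorm(x), -Inf, Inf)$value
effPsi <- function(cc, psi) { # asymptotic efficiency (E[psi'])^2 / E[psi^2]
    E.norm(function(x) Mpsi(x, cc, psi, deriv = 1))^2 /
        E.norm(function(x) Mpsi(x, cc, psi)^2)
}
bpPsi <- function(cc, psi)    # breakdown point of the S-estimator, E[rho~(X)]
    E.norm(function(x) Mchi(x, cc, psi))
effPsi(1.060, "optimal")  # should be close to 0.95
bpPsi (0.405, "optimal")  # should be close to 0.50
@
Analogous checks apply to the other families and tuning constants quoted above.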
\begin{figure}[h] \centering <>= p.psiFun(x., "optimal", par = 1.06, leg.loc="bottomright") @ \caption{`Optimal' family of functions using tuning parameter $c = 1.06$.} \end{figure} \clearpage \subsection{Welsh}\label{ssec:Welsh} The Welsh $\psi$ function is defined as, %% FIXME: REFERENCE MISSING %\def\xk{\frac{x}{k}} \def\xk{x/k} %\def\xkdt{-\frac{1}{2}\left(\xk\right)^2} \def\xkdt{- \left(\xk\right)^2 / 2} \begin{align*} \tilde\rho_k(x) ={}& 1 - \exp\bigl(\xkdt\bigr) \\ \psi_k(x) ={}& k^2\tilde\rho'_k(x) = x\exp\bigl(\xkdt\bigr) \\ \psi'_k(x) ={}& \bigl(1 - \bigl(\xk\bigr)^2\bigr) \exp\bigl(\xkdt\bigr) \end{align*} The constant $k$ for $95\%$ efficiency of the regression estimator is $2.11$ and the constant for a breakdown point of $0.5$ of the S-estimator is $0.577$. Note that GGW (subsection \ref{ssec:ggw}) is a 3-parameter generalization of Welsh, matching for $ b = 2 $, $ c = 0 $, and $ a = k^2$ (see R code there): <>= ccc <- c(0, a = 2.11^2, b = 2, c = 0, 1) (ccc[5] <- integrate(.Mpsi, 0, Inf, ccc=ccc, ipsi = 5)$value) # = rho(Inf) stopifnot(all.equal(Mpsi(x., ccc, "GGW"), ## psi[ GGW ](x; a=k^2, b=2, c=0) == Mpsi(x., 2.11, "Welsh")))## psi[Welsh](x; k) @ \begin{figure}[h] \centering <>= p.psiFun(x., "Welsh", par = 2.11) @ \caption{Welsh family of functions using tuning parameter $k = 2.11$.} \end{figure} \bibliographystyle{chicago} \bibliography{robustbase} \end{document} robustbase/inst/doc/simulation.functions.R0000644000176200001440000014752012174500122020466 0ustar liggesusers## Called from ./lmrob_simulation.Rnw ## ~~~~~~~~~~~~~~~~~~~~~ ########################################################################### ## 1. simulation helper functions ########################################################################### f.estname <- function(est = 'lmrob') ## Purpose: translate between 'estname' and actual function name, ## defaults to 'lmrob' ## f.lmRob is just a wrapper for lmRob, since there are some ## problems with the weight and weights arguments ## ---------------------------------------------------------------------- ## Arguments: est: name of estimator ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:36 switch(est, lm.rbase = 'lmrob', lm.robust = 'f.lmRob', rlm = 'rlm', lm = 'lm', est) f.errname <- function(err, prefix = 'r') ## Purpose: translate between natural name of distribution and ## R (r,p,q,d)-name ## ---------------------------------------------------------------------- ## Arguments: err: name of distribution ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:36 paste(prefix, switch(err,normal="norm", t="t", cauchy="cauchy",cnormal="cnorm", err),sep = '') f.requires.envir <- function(estname) ## Purpose: returns indicator on whether estname requires envir argument ## ---------------------------------------------------------------------- ## Arguments: estname: name of estimating function ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 7 Oct 2009, 09:34 switch(estname, f.lmrob.local = TRUE, FALSE) f..paste..list <- function(lst) if (length(lst) == 0) return("") else paste(names(lst),lst,sep='=',collapse=', ') f..split..str <- function(str) { litems <- strsplit(str,', ') lst <- lapply(litems, function(str) strsplit(str,'=')) rlst <- list() for (llst in lst) { lv <- vector() for (litem in llst) lv[litem[1]] <- litem[2] rlst <- c(rlst, list(lv)) } rlst } f.list2str 
<- function(lst, idx) ## Purpose: convert a list into a string that identifies the ## function and parameter configuration ## ---------------------------------------------------------------------- ## Arguments: lst: list or list of lists ## idx: only take the elements in idx ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 7 Oct 2009, 10:03 f..paste..list(if(missing(idx)) unlist(lst) else unlist(lst)[idx]) f.as.numeric <- function(val) { ## Purpose: convert value to numeric if possible ## ---------------------------------------------------------------------- ## Arguments: vec: value to convert ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 26 Oct 2009, 12:10 r <- suppressWarnings(as.numeric(val)) if (is.na(r)) { ## is character, try to convert to TRUE and FALSE return(switch(casefold(val), "true" = TRUE, "false" = FALSE, val)) } else return(r) } f.as.numeric.vectorized <- function(val) sapply(val, f.as.numeric) f.as.integer <- function(val) { ## Purpose: convert value to numeric if possible ## ---------------------------------------------------------------------- ## Arguments: vec: value to convert ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 26 Oct 2009, 12:10 r <- suppressWarnings(as.integer(val)) if (is.na(r)) { ## is character, try to convert to TRUE and FALSE return(switch(casefold(val), "true" = TRUE, "false" = FALSE, val)) } else return(r) } f.str2list <- function(str, splitchar = '\\.') { ## Purpose: inverse of f.list2str ## ---------------------------------------------------------------------- ## Arguments: str: string or list of strings produced with f.list2str ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 14:20 ## split input string or strings into a list of vectors lst <- f..split..str(as.character(str)) rlst <- list() ## walk list for (lv in lst) { lrlst <- list() ## for each element of the vector for (ln in names(lv)) { ## split lnames <- strsplit(ln, splitchar)[[1]] ## set either directly if (length(lnames) == 1) lrlst[ln] <- f.as.numeric(lv[ln]) ## or, if it contains a dot, as a sublist else { if (is.null(lrlst[[lnames[1]]])) lrlst[[lnames[1]]] <- list() lrlst[[lnames[1]]][paste(lnames[-1],collapse='.')] <- f.as.numeric(lv[ln]) } } rlst <- c(rlst, list(lrlst)) } rlst } f.round.numeric <- function(num, digits = 0) { ## round only numeric values in list idx <- sapply(num, is.numeric) ret <- num ret[idx] <- lapply(num[idx],round,digits=digits) ret } f.errs2str <- function(errs) { ## Purpose: convert list of errors into pretty strings ## ---------------------------------------------------------------------- ## Arguments: errs: estlist element errs ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 14:51 rv <- vector() for (lerr in errs) { rv <- c(rv, switch(lerr$err, normal = paste("N(",lerr$args$mean,",", lerr$args$sd,")", sep=""), set =, t = paste("t",lerr$args$df,sep=""), paste(lerr$err,"(",paste(f.round.numeric(lerr$args,2), collapse=","),")",sep=""))) } rv } f.procedures2str <- function(procs) { ## Purpose: convert procedures element in estlist to pretty data.frame ## ---------------------------------------------------------------------- ## Arguments: proc: estlist element procedures ## ---------------------------------------------------------------------- ## 
Author: Manuel Koller, Date: 8 Oct 2009, 14:57 rdf <- rep(" ",7) for (lproc in procs) { method <- if(is.null(lproc$args$method)) switch(lproc$estname, lm = 'lsq', "SM") else lproc$args$method cov <- switch(lproc$estname, ## lm.robust, rlm, lmrob: set default arguments lm.robust = list(cov = 'Default', cov.corrfact = 'empirical', cov.xwx = TRUE, cov.resid = 'trick', cov.hubercorr = TRUE, cov.dfcorr = 1), rlm = list(cov = 'Default', cov.corrfact = 'empirical', cov.xwx = FALSE, cov.resid = 'final', cov.hubercorr = TRUE, cov.dfcorr = 1), ## lmrob = list(cov = 'f.avar1', ## method .vcov.MM equals f.avar1 ## cov.resid = 'final'), lmrob = do.call('lmrob.control', ## get default arguments from lmrob.control lproc$args)[c('cov', 'cov.corrfact', 'cov.xwx', 'cov.resid', 'cov.hubercorr', 'cov.dfcorr')], if (is.null(lproc$args)) list(cov = 'Default') else lproc$args) if (is.null(lproc$args$psi)) { psi <- switch(lproc$estname, rlm =, lmrob = 'bisquare', lm.robust = { if (is.null(lproc$args$weight)) { if (is.null(lproc$args$weight2)) 'optimal' else lproc$args$weight2 } else lproc$args$weight[2] }, "NA") } else { psi <- lproc$args$psi ## test if tuning.psi is the default one if (!is.null(lproc$args$tuning.psi) && isTRUE(all.equal(lproc$args$tuning.psi, .Mpsi.tuning.default(psi)))) psi <- paste(psi, lproc$args$tuning.psi) } D.type <- switch(lproc$estname, lmrob.u =, lmrob = if (is.null(lproc$args$method) || lproc$args$method %in% c('SM', 'MM')) 'S' else 'D', lmrob.mar = if (is.null(lproc$args$type)) 'qE' else lproc$args$type, rlm = 'rlm', lm.robust = 'rob', lm = 'lm', 'NA') rdf <- rbind(rdf,c(lproc$estname, method, f.args2str(lproc$args), cov$cov, f.cov2str(cov), psi, D.type)) } colnames(rdf) <- c("Function", "Method", "Tuning", "Cov", "Cov.Tuning", "Psi", "D.type") if (NROW(rdf) == 2) t(rdf[-1,]) else rdf[-1,] } f.chop <- function(str,l=1) ## Purpose: chop string by l characters ## ---------------------------------------------------------------------- ## Arguments: str: string to chop ## l: number of characters to chop ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 15:19 substr(str,1,nchar(str)-l) fMpsi2str <- function(psi) { ## Purpose: make pretty M.psi and D.chi, etc. ## ---------------------------------------------------------------------- ## Arguments: M.psi: M.psi argument ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 15:28 if (is.null(psi)) psi else if (psi == "tukeyPsi1" || psi == "tukeyChi") "bisquare" else if (grepl("Psi1$",psi)) f.chop(psi,4) else if (grepl("Chi$",psi)) f.chop(psi,3) else psi } f.c.psi2str <- function(c.psi) { ## Purpose: make pretty tuning.psi and D.tuning.chi, etc. 
## ---------------------------------------------------------------------- ## Arguments: c.psi: tuning.psi argument ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 15:34 if (is.null(c.psi)) return(NULL) round(as.numeric(c.psi),2) } f.args2str <- function(args) { ## Purpose: convert args element in procedures element of estlist ## to a pretty string ## ---------------------------------------------------------------------- ## Arguments: args: args element in procedures element of estlist ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 15:11 lst <- list() lst$psi <- if (!is.null(args$weight)) args$weight[2] else if (!is.null(args$weight2)) args$weight2 else args$psi lst$c.psi <- if (!is.null(args$efficiency)) round(f.eff2c.psi(args$efficiency, lst$psi),2) else f.c.psi2str(args$tuning.psi) if (!is.null(args$method) && grepl("D",args$method)) { lst$D <- if (!is.null(args$D.type)) args$D.type else NULL lst$tau <- args$tau } f..paste..list(lst) } f.cov2str <- function(args) { ## Purpose: convert cov part in args element in procedures element of ## estlist to a pretty string ## ---------------------------------------------------------------------- ## Arguments: args: args element in procedures element of estlist ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 15:39 lst <- list() if (!is.null(args$cov) && !args$cov %in% c('Default','f.avarwh')) lst$cov <- sub('^f\\.', '', args$cov) else { lst$hc <- args$cov.hubercorr lst$dfc <- args$cov.dfcorr lst$r <- args$cov.resid lst$rtau <- args$cov.corrfact lst$xwx <- args$cov.xwx } ## convert logical to numeric lst <- lapply(lst, function(x) if (is.logical(x)) as.numeric(x) else x) f..paste..list(lst) } f.procstr2id <- function(procstrs, fact = TRUE) { ## Purpose: create short identifiers of procstrs ## ---------------------------------------------------------------------- ## Arguments: procstrs: vector of procstrs ## fact: convert to factor or not ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 3 Nov 2009, 08:58 lst0 <- f.str2list(procstrs) r <- sapply(lst0, function(x) { paste(c(x$estname, if (is.null(x$args$method)) NULL else x$args$method, substr(c(x$args$psi,x$args$weight2, x$args$weight[2]), 1, 3)), collapse = '.') }) if (fact) ru <- unique(r) if (fact) factor(r, levels = ru, labels = ru) else r } f.splitstrs <- function(strs, split = '_', ...) { ## Purpose: split vector of strings by split and convert the list into ## a data.frame with columns type and id ## ---------------------------------------------------------------------- ## Arguments: strs: vector of strings ## split: character vector to use for splitting ## ...: arguments to strsplit, see ?strsplit ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 19 Oct 2009, 08:46 lstr <- strsplit(strs, split, ...) 
ldf <- t(as.data.frame(lstr)) rownames(ldf) <- NULL as.data.frame(ldf, stringsAsFactors = FALSE) } f.abind <- function(arr1,arr2, along = ndim) { ## Purpose: like abind, but less powerful ## ---------------------------------------------------------------------- ## Arguments: arr1, arr2: arrays to bind ## along: dimension along to bind to, ## defaults to last dimension ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 20 Oct 2009, 11:33 ## if along =! last dimension: permutate array ndim <- length(dim(arr1)) if (along != ndim) { arr1 <- aperm(arr1, perm = c((1:ndim)[-along],along)) arr2 <- aperm(arr2, perm = c((1:ndim)[-along],along)) } ldmn1 <- dimnames(arr1) ldmn2 <- dimnames(arr2) ld1 <- dim(arr1) ld2 <- dim(arr2) if (length(ld1) != length(ld2)) stop('f.abind: Dimensions must be identical') if (!identical(ldmn1[-ndim],ldmn2[-ndim])) stop('f.abind: Dimnames other than in the along dimension must match exactly') if (any(ldmn1[[ndim]] %in% ldmn2[[ndim]])) stop('f.abind: Dimnames in along dimension must be unique') ldmn3 <- ldmn1 ldmn3[[ndim]] <- c(ldmn1[[ndim]], ldmn2[[ndim]]) ld3 <- ld1 ld3[ndim] <- ld1[ndim] + ld2[ndim] ## build array arr3 <- array(c(arr1, arr2), dim = ld3, dimnames = ldmn3) ## permutate dimensions back if (along != ndim) { lperm <- 1:ndim lperm[along] <- ndim lperm[(along+1):ndim] <- along:(ndim-1) arr3 <- aperm(arr3, perm = lperm) } arr3 } f.abind.3 <- function(...) f.abind(..., along = 3) f.rename.level <- function(factor, from, to) { ## Purpose: rename level in a factor ## ---------------------------------------------------------------------- ## Arguments: factor: factor variable ## from: level to be changed ## to: value ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 18 Aug 2010, 14:45 levels(factor)[levels(factor) == from] <- to factor } ########################################################################### ## 2. 
main simulation functions ########################################################################### f.sim <- function(estlist, .combine = 'f.abind', .combine.2 = 'f.abind.3', ## hack for foreach silent = TRUE) { ## Purpose: perform simulation according to estlist entry ec ## ---------------------------------------------------------------------- ## Arguments: ec: estlist, list consisting of: ## - design: data frame of design ## - nrep: number of repetitions ## - errs: list of error distributions including arguments ## - err: name of error distribution ## - args: list of arguments (to be passed to do.call() ## - procedures: list of parameter configurations and ## procedures to call ## - estname: name of estimation procedure ## - args: arguments that define the call ## silent: silent argument to try ## ---------------------------------------------------------------------- ## Author: Werner Stahel / Manuel Koller, Date: 21 Aug 2008, 07:55 ## get designs ldd <- estlist$design use.intercept <- if(is.null(estlist$use.intercept)) TRUE else estlist$use.intercept nobs <- NROW(ldd) npar <- NCOL(ldd) + use.intercept nrep <- estlist$nrep nlerrs <- nobs*nrep ## initialize: lestlist <- estlist ## 'evaluate' estlist$procedure list lprocs <- c() for (i in 1:length(estlist$procedures)) { ## generate lprocstr (identification string) lprocs[i] <- estlist[['procedures']][[i]][['lprocstr']] <- f.list2str(estlist[['procedures']][[i]]) } ## find all error distributions lerrs <- unique(sapply(lestlist$errs, f.list2str)) ## walk estlist$output to create output column names vector ## store result into lnames, it is used in f.sim.process lnames <- c() for (i in 1:length(estlist$output)) { llnames <- estlist[['output']][[i]][['lnames']] <- eval(estlist[['output']][[i]][['names']]) lnames <- c(lnames, llnames) } ## get different psi functions lpsifuns <- unlist(unique(lt <- sapply(estlist$procedures, function(x) x$args$psi))) ## get entries without psi argument lrest <- sapply(lt, is.null) if (sum(lrest) > 0) lpsifuns <- c(lpsifuns, '__rest__') ## Walk error distributions res <- foreach(lerrlst = estlist$errs, .combine = .combine) %:% foreach(lpsifun = lpsifuns, .combine = .combine.2) %dopar% { ## filter for psi functions lidx <- if (lpsifun == '__rest__') lrest else unlist(sapply(estlist$procedures, function(x) !is.null(x$args$psi) && x$args$psi == lpsifun)) cat(f.errs2str(list(lerrlst)), lpsifun, " ") ## get function name and parameters lerrfun <- f.errname(lerrlst$err) lerrpar <- lerrlst$args lerrstr <- f.list2str(lerrlst) ## --- initialize array lres <- array(NA, dim=c(nrep, ## data dimension length(lnames), ## output type dimension sum(lidx), ## estimation functions and arguments dimension 1), ## error distributions dimension dimnames = list(Data = NULL, Type = lnames, Procstr = lprocs[lidx], Errstr = lerrstr)) ## set seed set.seed(estlist$seed) ## generate errors: seperately for each repetition lerrs <- c(sapply(1:nrep, function(x) do.call(lerrfun, c(n = nobs, lerrpar)))) ## if estlist$design has an attribute 'gen' ## then this function gen will generate designs ## and takes arguments: n, p, rep ## and returns the designs in a list if (is.function(attr(ldd, 'gen'))) { ldds <- attr(ldd, 'gen')(nobs, npar - use.intercept, nrep, lerrlst) } ## Walk repetitions for (lrep in 1:nrep) { if (lrep%%100 == 0) cat(" ", lrep) lerr <- lerrs[(1:nobs)+(lrep-1)*nobs] if (exists('ldds')) { ldd <- ldds[[lrep]] ## f.sim.reset.envirs() } ## Walk estimator configurations for (lproc in estlist$procedures[lidx]) { ## call estimating 
procedure lrr <- tryCatch(do.call(f.estname(lproc$estname), c(if(use.intercept) list(lerr ~ . , data = ldd) else list(lerr ~ . - 1, data = ldd), lproc$args)), error=function(e)e) ERR <- inherits(lrr, 'error') if (ERR && !silent) { print(lproc$lprocstr) print(lrr) } if (!silent && !converged(lrr)) { print(lproc$lprocstr) browser() ## <<< } ## check class: if procedure failed: if (ERR) next ## check convergence of estimator if (!converged(lrr)) next ## process output for (lov in estlist$output) { llnames <- lov$lnames ret <- tryCatch(lres[lrep,llnames,lproc$lprocstr,lerrstr] <- eval(lov$fun), error= function(e)e) if (!silent && inherits(ret, 'error')) { cat('Error', dQuote(ret$message), 'in repetition',lrep, '\n for:',llnames,'procstr:',lproc$lprocstr,'\n') browser() ## <<< print(lov$fun) print(try(eval(lov$fun))) } } } } ## print debug information if requested if (!silent) str(lres) lres } ## restore original order of lprocs res <- res[,,match(lprocs, dimnames(res)[[3]]),,drop=FALSE] ## set attributes attr(res, 'estlist') <- lestlist cat("\n") res } ########################################################################### ## build estlist ########################################################################### f.combine <- function(..., keep.list = FALSE) { ## Purpose: creates a list of all combinations of elements given as ## arguments, similar to expand.grid. ## Arguments can be named. ## If an argument is a list, then its elements are considered ## as fixed objects that should not be recombined. ## if keep.list = TRUE, these elements are combined ## as a list with argument. ## ---------------------------------------------------------------------- ## Arguments: collection of lists or vectors with argument names ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 7 Oct 2009, 11:13 ## convert arguments into a big list args <- list(...) 
## if more than two arguments, call recursively if (length(args) > 2) lst <- do.call("f.combine", c(args[-1], list(keep.list=keep.list))) else { ## if just two arguments, create list of second argument ## if this is a list, then there's nothing to do if (!keep.list && is.list(args[[2]])) lst <- args[[2]] ## else convert to a list of one-elements lists with proper name else { lst <- list() for (lelem in args[[2]]) { llst <- list(lelem) if (!is.null(names(args)[2])) names(llst)[1] <- names(args)[2] lst <- c(lst, list(llst)) } } } ## ok, now we can add the first element to all elements of lst lst2 <- list() if (keep.list && is.list(args[[1]])) args[[1]] <- lapply(args[[1]], list) for (lelem in args[[1]]) { for (relem in lst) { llst <- c(lelem, relem) if (nchar(names(llst)[1]) == 0 && nchar(names(args)[1])>0) names(llst)[1] <- names(args)[1] lst2 <- c(lst2, list(llst)) } } lst2 } ## some fragments to build estlist ## errors .errs.normal.1 <- list(err = 'normal', args = list(mean = 0, sd = 1)) .errs.normal.2 <- list(err = 'normal', args = list(mean = 0, sd = 2)) .errs.t.13 <- list(err = 't', args = list(df = 13)) .errs.t.11 <- list(err = 't', args = list(df = 11)) .errs.t.10 <- list(err = 't', args = list(df = 10)) .errs.t.9 <- list(err = 't', args = list(df = 9)) .errs.t.8 <- list(err = 't', args = list(df = 8)) .errs.t.7 <- list(err = 't', args = list(df = 7)) .errs.t.5 <- list(err = 't', args = list(df = 5)) .errs.t.3 <- list(err = 't', args = list(df = 3)) .errs.t.1 <- list(err = 't', args = list(df = 1)) ## skewed t distribution .errs.skt.Inf.2 <- list(err = 'cskt', args = list(df = Inf, gamma = 2)) .errs.skt.5.2 <- list(err = 'cskt', args = list(df = 5, gamma = 2)) ## log normal distribution .errs.lnrm <- list(err = 'lnorm', args = list(meanlog = 0, sdlog = 0.6936944)) ## laplace distribution .errs.laplace <- list(err = 'laplace', args = list(location = 0, scale = 1/sqrt(2))) ## contaminated normal .errs.cnorm..1.0.10 <- list(err = 'cnorm', args = list(epsilon = 0.1, meanc = 0, sdc = sqrt(10))) .errs.cnorm..1.4.1 <- list(err = 'cnorm', args = list(epsilon = 0.1, meanc = 4, sdc = 1)) .errs.test <- list(.errs.normal.1 ,.errs.t.5 ,.errs.t.3 ,.errs.t.1 ) ## arguments .args.final <- f.combine(psi = c('optimal', 'bisquare', 'lqq', 'hampel'), seed = 0, max.it = 500, k.max = 2000, c(list(list(method = 'MM', cov = '.vcov.avar1')), list(list(method = 'MM', cov = '.vcov.w', start = 'lrr')), f.combine(method = c('SMD', 'SMDM'), cov = '.vcov.w', start = 'lrr'))) ## use fixInNamespace("lmrob.fit", "robustbase") ## insert: ## N = { ## tmp <- lmrob..M..fit(x = x/init$tau, y = y/init$tau, obj = ## init) ## tmp$qr <- NULL ## tmp ## }, ## .args.final <- f.combine(psi = c('optimal', 'bisquare', 'ggw', 'lqq'), ## seed = 0, ## max.it = 500, ## k.max = 2000, ## c(list(list(method = "SMDM", cov = '.vcov.w')), ## list(list(method = "SMDN", cov = '.vcov.w', ## start = 'lrr')))) ## standard for lmRob .args.bisquare.lmRob.0 <- list(## initial.alg = 'random', efficiency = 0.95 ,weight = c('bisquare', 'bisquare'), trace = FALSE ) .args.optimal.lmRob.0 <- list(## initial.alg = 'random', efficiency = 0.95 ,weight = c('optimal', 'optimal'), trace = FALSE) .procedures.final <- c(list(list(estname = 'lm')), f.combine(estname = 'lmrob.u', args = .args.final, keep.list = TRUE), f.combine(estname = 'lmrob.mar', args = f.combine(psi = 'bisquare', seed = 0, max.it = 500, k.max = 2000, cov = '.vcov.w', type = c('qT', 'qE')), keep.list = TRUE), f.combine(estname = 'lm.robust', args = list(.args.bisquare.lmRob.0, 
.args.optimal.lmRob.0), keep.list = TRUE)) ## output .output.sigma <- list(sigma = list( names = quote("sigma"), fun = quote(sigma(lrr)))) .output.beta <- list(beta = list( names = quote(paste('beta',1:npar,sep='_')), fun = quote(coef(lrr)))) .output.se <- list(se = list( names = quote(paste('se',1:npar,sep='_')), fun = quote(sqrt(diag(covariance.matrix(lrr)))))) .output.sumw <- list(sumw = list( names = quote("sumw"), fun = quote(sum(robustness.weights(lrr))))) .output.nnz <- list(nnz = list( names = quote("nnz"), fun = quote(sum(robustness.weights(lrr) < 1e-3)))) ########################################################################### ## simulation results processing functions ########################################################################### ## use apply to aggregate data ## use matplot(t(result)) to plot aggregated data f.apply <- function(res, items = dimnames(res)[[2]], FUN, ..., swap = FALSE) { ## Purpose: similar to apply, return data not as matrix, but ## as data.frame ## ---------------------------------------------------------------------- ## Arguments: res: simulation results array ## items: items to use in apply ## FUN: function to apply ## ...: additional arguments to FUN ## swap: if TRUE: swap first two columns ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 8 Oct 2009, 13:39 ## aggregate data lz <- apply(res[,items,,,drop=FALSE], 2:4, FUN, ...) ## if return object has four dimensions (multidim output of FUN) ## rotate first three dimensions if (length(dim(lz)) == 4 && swap) aperm(lz, perm=c(2,1,3,4)) else lz } f.dimnames2df <- function(arr, dm = dimnames(arr), page = TRUE, err.on.same.page = TRUE, value.col = ndim - 2, procstr.col = ndim - 1, errstr.col = ndim, procstr.id = TRUE, split = '_') { ## Purpose: create data frame from dimnames: ## len_1 .. len_100, cpr_1 .. cpr_100 ## will yield a data frame with column id from 1 .. 
100 ## column type with cpr and len and columns procstr and errstr ## It is assumed, that the max number (100) is the same for all ## output value types ## ---------------------------------------------------------------------- ## Arguments: arr: 3 or more dim array (optional) ## dm: dimnames to be used ## page: add a column page to simplify plots ## err.on.same.page: whether all errs should be on the same ## page ## value.col: index of value column (set to NULL for none) ## the values in this column are split name_id ## and put into two columns in the data frame ## procstr.col: index of procedure column ## (both: or NULL for not to be converted) ## errstr.col: index of error string column ## procstr.id: create procstr id ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 19 Oct 2009, 08:41 if (!is.list(dm)) stop('f.dimnames2df: dm must be a list') ## remove 'NULL' dimensions dm <- dm[!sapply(dm,is.null)] ndim <- length(dm) if (ndim == 0) stop('f.dimnames2df: dimnames all null') ldims <- sapply(dm, length) ## split and convert types into data.frame if (!is.null(value.col)) { ldf <- f.splitstrs(dm[[value.col]], split = split) lid <- NCOL(ldf) == 2 if (lid) lids <- unique(as.numeric(ldf[,2])) ## convert ids into numeric ## we do not need to repeat over different types of values, only ids ldims[value.col] <- ldims[value.col] / length(unique(ldf[,1])) } ## merge into one large data.frame: for each distribution rdf <- list() for (ld in 1:ndim) { lname <- if (is.null(lname <- names(dm)[ld])) length(rdf)+1 else lname ltimes <- if (ld == ndim) 1 else prod(ldims[(ld+1):ndim]) leach <- if (ld == 1) 1 else prod(ldims[1:(ld-1)]) if (!is.null(value.col) && ld == value.col) { if (lid) rdf[[paste(lname,'Id')]] <- rep(lids,times=ltimes,each=leach) ## value ids ## no else: the values will be added in the a2df procedures } else if (!is.null(procstr.col) && ld == procstr.col) { ## convert procstrs to data.frame with pretty names lprdf <- data.frame(f.procedures2str(f.str2list(dm[[ld]])), Procstr = factor(dm[[ld]], levels = dm[[ld]], labels = dm[[ld]])) if (procstr.id) lprdf$PId <- f.procstr2id(dm[[ld]]) ## repeat lprdf <- if (ltimes == 1 && leach == 1) lprdf else apply(lprdf,2,rep,times=ltimes,each=leach) lprdf <- as.data.frame(lprdf, stringsAsFactors=FALSE) ## convert all into nice factors (with the original ordering) for (lk in colnames(lprdf)) { luniq <- unique(lprdf[[lk]]) lprdf[[lk]] <- factor(lprdf[[lk]], levels = luniq, labels = luniq) } rdf <- c(rdf, lprdf) } else if (!is.null(errstr.col) && ld == errstr.col) { ## convert errstrs to data.frame with pretty names ledf <- f.errs2str(f.str2list(dm[[ld]])) ## repeat and convert to factor with correct ordering rdf[[lname]] <- factor(rep(dm[[ld]],times=ltimes,each=leach), levels = dm[[ld]], labels = dm[[ld]]) rdf[['Error']] <- factor(rep(ledf,times=ltimes,each=leach), levels = ledf, labels = ledf) } else { ## no conversion necessary rdf[[lname]] <- rep(dm[[ld]],times=ltimes,each=leach) } } ## add page argument if (page && !is.null(procstr.col)) { ltpf <- if (!is.null(errstr.col) && !err.on.same.page) interaction(rdf[['Procstr']],rdf[['Error']]) else interaction(rdf[['Procstr']]) rdf[['Page']] <- as.numeric(factor(ltpf, unique(ltpf))) } rdf <- as.data.frame(rdf) if (!is.null(value.col)) attr(rdf, 'Types') <- unique(ldf[,1]) rdf } f.a2df.2 <- function(arr, dm = dimnames(arr), err.on.same.page = FALSE, ...) 
{ ## Purpose: convert arr to data.frame ## uses f.dimnames2df and adds a column to contain the values ## if ndim == 4 and dimnames NULL: assumes first dimension is ## data dimension which is ignored by f.dimnames2df ## add counter ## ---------------------------------------------------------------------- ## Arguments: arr: array to convert ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 23 Oct 2009, 12:29 ## ndim == 2 ?? ndim <- length(dim(arr)) ## if ndim == 4: check if dimnames of dim 1 are NULL if (ndim == 4 && is.null(dm[[1]])) dm[[1]] <- 1:dim(arr)[1] rdf <- f.dimnames2df(dm=dm, ...) ## just add values for all 'Types', possibly including Type.ID if (ndim > 2) for (lvt in attr(rdf, 'Types')) { llvt <- if (is.null(rdf$Type.Id)) lvt else paste(lvt,unique(rdf$Type.Id),sep='_') rdf[[lvt]] <- as.vector(switch(ndim, stop('wrong number of dimensions'), ## 1 arr, ## 2 arr[llvt,,], ## 3 arr[,llvt,,])) ## 4 } else rdf$values <- as.vector(arr) rdf } f.dimnames2pc.df <- function(arr, dm = dimnames(arr), npcs = NCOL(estlist$design.predict), ...) { ## Purpose: create data frame to be used in plotting of pc components ## calls f.dimnames2df and adds an additional column for ## identifying the principal components ## ---------------------------------------------------------------------- ## Arguments: arr, dm: see f.dimnames.df ## npcs: number of principal components ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 23 Oct 2009, 11:51 if (missing(npcs) && !is.null(attr(estlist$design.predict, 'npcs'))) npcs <- attr(estlist$design.predict, 'npcs') ## convert into data.frame rdf <- f.dimnames2df(dm = dm, ...) ## calculate number of points per principal component npts <- (length(unique(rdf$Type.Id)) - 1) / npcs ## add new column pc rdf$PC <- 1 if (npcs > 1) for (li in 2:npcs) { lids <- (1:npts + npts*(li-1) + 1) rdf$PC[rdf$Type.Id %in% lids] <- li ## fixme: center is not repeated } rdf$PC <- factor(rdf$PC, levels = 1:npcs, labels = paste('PC',1:npcs,sep=' ')) rdf } f.a2pc.df <- function(arr, ...) { ## Purpose: convert arr to data.frame ## uses f.dimnames2pc.df and adds a column to contain the values ## ---------------------------------------------------------------------- ## Arguments: arr: array to convert ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 23 Oct 2009, 12:29 ## convert dimnames rdf <- f.dimnames2pc.df(arr, err.on.same.page = FALSE,...) ## add values for (lvt in attr(rdf, 'Types')) rdf[[lvt]] <- as.vector(arr[paste(lvt,unique(rdf$Type.Id),sep='_'),,]) ## repeat values: only PC_1 has center value, add it for other PCs ## build index idx <- 1:NROW(rdf) rpc <- as.character(rdf$PC) for (lerr in levels(rdf$Error)) { for (lprc in levels(rdf$Procstr)) { for (lpc in levels(rdf$PC)) { if (lpc == 'PC 1') next ## get first entry of this PC lmin <- min(which(rdf$Error == lerr & rdf$Procstr == lprc & rdf$PC == lpc)) ## where is this in idx? 
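## (note: the center point of the prediction design is stored only once,
##  under 'PC 1'; the index splicing below re-inserts the center's row just
##  before the first row of the current PC, so that after 'rdf <- rdf[idx,]'
##  every principal component gets its own copy of the center)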
lwm <- min(which(lmin == idx)) ## get first entry of PC_1 lmin1 <- min(which(rdf$Error == lerr & rdf$Procstr == lprc & rdf$PC == 'PC 1')) ## update idx idx <- c(idx[1:(lwm-1)], lmin1, idx[lwm:length(idx)]) ## update PC column of result rpc <- c(rpc[1:(lwm-1)], lpc, rpc[lwm:length(rpc)]) } } } ## repeat centers rdf <- rdf[idx,] ## update PC column rdf$PC <- factor(rpc) ## return rdf } f.calculate <- function(expr,arr,dimname = as.character(expr)) { ## Purpose: calculate formula and return as conformable array ## ---------------------------------------------------------------------- ## Arguments: expr: expression to calculate (string is also ok) ## arr: array (from f.sim) ## dimname: name of the calculated dimension ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 9 Oct 2009, 10:15 if (!is.expression(expr)) expr <- as.expression(expr) lnams <- dimnames(arr)[[2]] lst <- list() for (lnam in lnams) expr <- gsub(paste(lnam,'\\b',sep=''), paste("arr[,",lnam,",,,drop=FALSE]",sep='"'), expr) r <- eval(parse(text = expr)) dimnames(r)[[2]] <- dimname r ## maybe use abind to merge the two arrays? } f.calculate.many <- function(expr, arr, dimname = dims, dims) { ## Purpose: calculate formula and abind into array ## supply expr as string with # symbols to be replaced ## dimname can also contain # symbols ## ---------------------------------------------------------------------- ## Arguments: same as f.calculate and ## dims: vector of items to replace # with ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 14 Oct 2009, 10:11 for (i in 1:length(dims)) { lexpr <- gsub("#",dims[i],expr) ldimname <- if (length(dimname) > 1) dimname[i] else gsub("#",dims[i],dimname) if (i == 1) rarr <- f.calculate(lexpr,arr,ldimname) else rarr <- abind(rarr, f.calculate(lexpr,arr,ldimname), along=2) } rarr } f.errs <- function(estlist, err, rep, gen = NULL, nobs, npar) { ## Purpose: generate and return errors of specified repetition ## or, if missing, all errors as a matrix ## ---------------------------------------------------------------------- ## Arguments: estlist: estlist ## err: error distribution (estlist$errs[1] for example) ## rep: desired repetition (optional) ## gen: function to generate designs (optional) ## nobs: nr. rows, npap: nr. 
predictors (both optional) ## --------------------------------------------------------------------- ## Author: Manuel Koller, Date: 13 Oct 2009, 11:21 nobs <- NROW(estlist$design) nrep <- estlist$nrep nlerrs <- nobs*nrep npred <- NROW(estlist$design.predict) ## get function name and parameters lerrfun <- f.errname(err$err) lerrpar <- err$args lerrstr <- f.list2str(err) ## set seed set.seed(estlist$seed) ## generate errors: seperately for each repetition lerrs <- c(sapply(1:nrep, function(x) do.call(lerrfun, c(n = nobs, lerrpar)))) ## lerrs <- do.call(lerrfun, c(n = nlerrs, lerrpar)) ## to get to the same seed state as f.sim(.default) ## generate also the additional errors ## calculate additional number of errors for (i in 1:length(estlist$output)) { if (!is.null(estlist[['output']][[i]][['nlerrs']])) nlerrs <- nlerrs + eval(estlist[['output']][[i]][['nlerrs']]) } if (length(lerrs) < nlerrs) nowhere <- do.call(lerrfun, c(n = nlerrs - length(lerrs), lerrpar)) ## generate designs if (!is.null(gen) && is.function(gen)) { ldds <- gen(nobs, npar, nrep, err) } ## return errors ret <- if (!missing(rep)) lerrs[1:nobs+(rep-1)*nobs] else matrix(lerrs, nobs) if (exists('ldds')) attr(ret, 'designs') <- if (!missing(rep)) ldds[[i]] else ldds ret } f.selection <- function(procstrs = dimnames(r.test)[[3]], what = c('estname', 'args.method', 'args.psi', 'args.tuning.psi', 'args.type', 'args.weight2', 'args.efficiency'), restr = '') { ## Purpose: get selection of results: first one of the specified estimates ## ---------------------------------------------------------------------- ## Arguments: procstrs: what is the selection ## what: named vector to use in grep ## restr: do not select estimators with procstr ## that match this regexp parameters ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 2 Nov 2009, 09:06 ## match restrictions lrestr <- -(lall <- 1:length(procstrs)) ## no restrictions if (!missing(restr)) { lrestr <- grep(restr, procstrs) if (length(lrestr) == 0) lrestr <- -lall procstrs <- procstrs[-lrestr] } ## procstr2list, but do not split into sublists lproclst <- f.str2list(procstrs, splitchar='_____') ## helper function: select only items that occur what tfun <- function(x) x[what] lproclst <- lapply(lproclst, tfun) ## convert back to string lprocstr <- sapply(lproclst, f.list2str) ## get all unique combinations and the first positions lidx <- match(unique(lprocstr), lprocstr) r <- procstrs[lidx] attr(r, 'idx') <- lall[-lrestr][lidx] r } f.get.current.dimnames <- function(i,dn,margin) { ## Purpose: get current dimnames in the margins of array ## we're applying on ## ---------------------------------------------------------------------- ## Arguments: i: counter ## dn: dimnames ## margin: margin argument to apply ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 16 Apr 2010, 10:44 ## pos <- integer(0) lcdn <- character(0) for (lm in margin) { ## get length of current margin llen <- length(dn[[lm]]) ## i modulo llen gives the current position in this dimension lpos <- (if (i > 0) i-1 else 0) %% llen + 1 ## update pos ## pos <- c(pos, lpos) ## update lcdn lcdn <- c(lcdn, dn[[lm]][lpos]) ## update i: subtract lpos and divide by llen i <- (i - lpos) / llen + 1 } lcdn } f.n <- Vectorize(function(design) { ## Purpose: get n obs of design ## ---------------------------------------------------------------------- ## Arguments: design: design to get n of ## 
---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 19 Apr 2010, 11:19 NROW(get(design)) }) f.p <- Vectorize(function(design) { ## Purpose: get p par of design ## ---------------------------------------------------------------------- ## Arguments: design: design to get p of ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 19 Apr 2010, 11:19 NCOL(get(design)) + 1 }) f.which.min <- function(x, nr = 1) { ## Purpose: get the indices of the minimal nr of observations ## ---------------------------------------------------------------------- ## Arguments: x: vector of values ## nr: number of indices to return ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 4 May 2010, 12:18 match(sort(x)[1:nr], x) } f.which.max <- function(x, nr = 1) f.which.min(-x, nr) ## f.get.scale <- function(procstr, proclst = f.str2list(procstr)) ## { ## ## Purpose: get scale estimate used for procstrs ## ## ---------------------------------------------------------------------- ## ## Arguments: procstr: procstrs (dimnames(r.test)[[3]]) as output by ## ## f.list2str() ## ## proclst: list of procedures, as in estlist$procedures ## ## ---------------------------------------------------------------------- ## ## Author: Manuel Koller, Date: 9 Sep 2010, 13:52 ## ret <- list() ## for (lproc in proclst) { ## if (lproc$estname == 'lm') { ## ## least squares ## ret <- c(ret, list(list(fun='f.lsq'))) ## } else { ## ## default (S-scale): ## fun <- 'lmrob.mscale' ## lidx <- names(lproc$args)[na.omit(match(c('psi', 'tuning.chi', 'seed'), ## names(lproc$args)))] ## if (!is.null(lproc$args$method) && ## substr(lproc$args$method,1,3) == 'SMD') { ## ## D-scale ## fun <- 'lmrob.dscale' ## lidx <- names(lproc$args)[na.omit(match(c('psi', 'tuning.psi'), ## names(lproc$args)))] ## } else if (lproc$estname == 'lmrob.mar' ### continue here ## ret <- c(ret, list(list(fun=fun, args=lproc$args[lidx]))) ## } ## }) ########################################################################### ## functions related to prediction ########################################################################### f.prediction.points <- function(design, type = c('pc', 'grid'), length.out = 4*NCOL(design), f = 0.5, direction = +1, max.pc = 5) { ## Purpose: generate prediction points for design ## generate four points along the second principal component ## in the center, 2 intermediate distances and long distance ## (from the center) ## ---------------------------------------------------------------------- ## Arguments: design: design matrix ## type: type of prediction points: grid / principal components ## length.out: approximate number of prediction points ## f: extend range by f (like extendrange()) ## direction: +1 or -1: which direction to go from the center ## max.pc: maximum number of principal components to use ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 9 Oct 2009, 16:48 ## match type argument type = match.arg(type) ## get ranges lrange <- apply(design, 2, range) ## extend range by f lrange <- data.frame(apply(lrange, 2, extendrange, f = f)) switch(type, pc = { ## calculate robust covariance matrix rob <- covMcd(design) ## and use it to calculate the principal components rpc <- princomp(covmat = rob$cov) ## get corner with maximum distance from rob$center lidx <- apply(abs(lrange - rob$center),2,which.max) lcr <- 
diag(as.matrix(lrange[lidx,])) ## create grid points: rdf <- rob$center ## for each principal component for (id in 1:min(NCOL(rpc$loadings),max.pc)) { ## calculate factor to reach each boundary lfct <- (lcr - rob$center) / rpc$loadings[,id] ## calculate distances to boundaries and take the minimal one lmin <- which.min(sapply(lfct, function(x) sum((rpc$loadings[,id] * x)^2))) ## create sequence of multiplicands lmult <- seq(0,lfct[lmin],length.out=length.out/NCOL(rpc$loadings)) rdf <- rbind(rdf, rep(rob$center,each=length(lmult)-1) + direction*lmult[-1] %*% t(rpc$loadings[,id])) } }, grid = { ## generate sequences for every dimension lval <- as.data.frame(apply(lrange,2,f.seq, length.out = round(length.out^(1/NCOL(design))) )) ## return if 1 dimension, otherwise create all combinations rdf <- if (NCOL(design) > 1) t(as.data.frame(do.call('f.combine', lval))) else lval }) rdf <- as.data.frame(rdf) rownames(rdf) <- NULL colnames(rdf) <- colnames(design) if (type == 'pc') attr(rdf, 'npcs') <- id rdf } ## ## plot with ## require(rgl) ## plot3d(design) ## points3d(f.prediction.points(design), col = 2) ## d.data <- data.frame(y = rnorm(10), x = 1:10) ## pred <- f.prediction.points(d.data[,-1,drop=FALSE]) ## obj <- f.lmrob.local(y ~ x, d.data) ## f.predict(obj, pred, interval = 'prediction') ## as.vector(t(cbind(rnorm(4), f.predict(obj, pred, interval = 'prediction')))) ## estlist for prediction: ## start with .output.test ## we only need sigma .output.prediction <- c(.output.sigma,.output.beta,.output.se,.output.sumw,.output.nnz) .output.prediction$predict <- list(names = quote({ npred <- NROW(estlist$design.predict) paste(c('fit', 'lwr', 'upr', 'se.fit', 'cpr'), rep(1:npred,each = 5), sep = '_')}), fun = quote({ lpr <- f.predict(lrr, estlist$design.predict, interval = 'prediction', se.fit = TRUE) ##, df = 16) lpr <- cbind(lpr$fit, lpr$se.fit) lqf <- f.errname(lerrlst$err, 'p') lcpr <- do.call(lqf, c(list(lpr[,'upr']), lerrpar)) - do.call(lqf, c(list(lpr[,'lwr']), lerrpar)) as.vector(t(cbind(lpr,lcpr)))})) .estlist.prediction <- list(design = dd, nrep = 200, errs = .errs.test, seed = 0, procedures = .procedures.final, design.predict = f.prediction.points(dd), output = .output.prediction, use.intercept = TRUE) ## predict confidence intervals instead of prediction intervals .estlist.confint <- .estlist.prediction .estlist.confint$output$predict$fun <- parse(text=gsub('prediction', 'confidence', deparse(.output.prediction$predict$fun))) ########################################################################### ## Generate designs - function ########################################################################### f.gen <- function(n, p, rep, err) { ## get function name and parameters lerrfun <- f.errname(err$err) lerrpar <- err$args ## generate random predictors ret <- lapply(1:rep, function(...) data.frame(matrix(do.call(lerrfun, c(n = n*p, lerrpar)), n, p))) attr(ret[[1]], 'gen') <- f.gen ret } .output.sigmaE <- list(sigmaE = list( names = quote("sigmaE"), fun = quote({ ## estimate scale using current scale estimate. 
## this amounts to recalculating the estimate ## with just an intercept llargs <- lproc$args llestname <- lproc$estname ## save time and just calculate S-estimate and no covariance matrix if (grepl('^lmrob', llestname)) { llestname <- 'lmrob' llargs$cov <- 'none' llargs$envir <- NULL ## drop envir argument if (llargs$method %in% c('MM', 'SM')) llargs$method <- 'S' if (grepl('M$', llargs$method)) llargs$method <- f.chop(llargs$method) } else if (lproc$estname == 'lm.robust') { llargs$estim <- 'Initial' } llrr <- tryCatch(do.call(f.estname(lproc$estname), c(list(lerr ~ 1), llargs)), error = function(e)e) ## check class: if procedure failed: class == 'try-error' if (inherits(llrr, 'error')) NA ## check convergence of estimator else if (lproc$estname != 'lm.robust' && !converged(llrr)) NA else sigma(llrr) }))) robustbase/inst/doc/graphics.functions.R0000644000176200001440000005027412023604053020102 0ustar liggesusers## some additional functions to help plotting ## g.drop.labels <- function(breaks, labels) { ## ind <- breaks %in% labels ## breaks <- as.character(breaks) ## breaks[!ind] <- '' ## breaks ## } g.scale_y_log10_0.05 <- function(breaks = c(0.00001, 0.0001, 0.001, 0.01, 0.02, 0.03, 0.05, 0.07, 0.1, 0.14, 0.2, 0.4, 0.8), minor_breaks = seq(0,1,by = 0.01), ...) ## Purpose: add nice breaks and labels ## ---------------------------------------------------------------------- ## Arguments: just like scale_y_log10 ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 11 Nov 2009, 11:52 scale_y_log10(breaks = breaks, minor_breaks = minor_breaks, ...) ## the same for lattice: g.scale_y_log10_0.05_lattice <- list(at = log10(c(seq(0.1, 0.01, by = -0.01), 0.001, 0.0001, 0.00001)), labels = c("", 0.09, "", 0.07, "", 0.05, "", 0.03, "", 0.01, 0.001, 0.0001, 0.00001)) g.scale_y_log10_1 <- function(breaks = c(seq(0,1,by=0.1), seq(1.2, 3.5,by=0.2)), minor_breaks = seq(0,10,by = 0.1), ...) ## Purpose: add nice breaks and labels ## ---------------------------------------------------------------------- ## Arguments: just like scale_y_log10 ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 11 Nov 2009, 11:52 scale_y_log10(breaks = breaks, minor_breaks = minor_breaks, ...) g.scale_y_log10_1_l <- function(breaks = c(seq(0,.4,by=0.1), seq(0.6,1.4,by=0.2), seq(1.6, 3.4, by = 0.4)), minor_breaks = seq(0,10,by = 0.1), ...) ## Purpose: add nice breaks and labels ## ---------------------------------------------------------------------- ## Arguments: just like scale_y_log10 ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 11 Nov 2009, 11:52 scale_y_log10(breaks = breaks, minor_breaks = minor_breaks, ...) 
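## Example (not run): how these y-scale helpers are meant to be used.
## 'dat' is an illustrative data.frame with columns x and y; ggplot2 must be
## attached for the g.scale_y_* wrappers, lattice for the *_lattice list:
##   ggplot(dat, aes(x, y)) + geom_point() + g.scale_y_log10_0.05()
## g.scale_y_log10_0.05_lattice is not a function but a list(at, labels),
## intended for lattice's 'scales' argument, e.g.
##   xyplot(log10(y) ~ x, dat, scales = list(y = g.scale_y_log10_0.05_lattice))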
g.scale_shape_defaults = c(16, 17, 15, 3, 7, 8) g.scale_shape_defaults2 = c(g.scale_shape_defaults,9,1,2,4) g.scale_linetype_defaults = c("solid", "22", "42", "44", "13", "1343", "73", "2262", "12223242", "F282", "F4448444", "224282F2", "F1") g.scale_shape <- function(..., values=g.scale_shape_defaults2) scale_shape_manual(..., values = values) g.get_colors <- function(n, h=c(0,360) + 15, l=65, c=100, start=0, direction = 1) { rotate <- function(x) (x + start) %% 360 * direction if ((diff(h) %% 360) < 1) { h[2] <- h[2] - 360 / n } grDevices::hcl(h = rotate(seq(h[1], h[2], length = n)), c = c, l = l) } g.get_colors_brewer <- function(n, name='Dark2') { idx <- 1:n if (name=='Dark2') { idx <- c(6,2:5,1,7,8)[idx] } RColorBrewer::brewer.pal(n, name)[idx] } g.scale_colour <- function(..., n=8, values=g.get_colors_brewer(n=n)) scale_colour_manual(..., values=values) ########################################################################### ## some useful helper functions ########################################################################### f.range.xy <- function(x,...) UseMethod("f.range.xy") ## Purpose: get plot range for x and y axis and return as a data.frame ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 6 Oct 2009, 13:42 f.range.xy.default <- function(x, ...) data.frame(x = range(x$x), y = range(x$y)) f.range.xy.data.frame <- function(x, names = c('x','y'), ...) sapply(x[,names],range) f.range.xy.matrix <- function(x, names = c('x','y'), ...) sapply(x[,names],range) f.range.xy.list <- function(x,...) data.frame(x = range(sapply(x, function(x) x$x)), y = range(sapply(x, function(x) x$y))) f.range.xy.histogram <- function(x,...) data.frame(x = range(sapply(x, function(x) x$breaks)), y = range(sapply(x, function(x) x$counts))) f.trim <- function(data, trim = 0.05) { ## Purpose: trim alpha observations ## ---------------------------------------------------------------------- ## Arguments: data and trim ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 19 Nov 2009, 14:33 n <- length(data) lo <- floor(n * trim) + 1 hi <- n + 1 - lo sort.int(data, partial = unique(c(lo, hi)))[lo:hi] } f.seq <- function(x, ...) ## Purpose: make seq callable with an vector x = c(from, to) ## ---------------------------------------------------------------------- ## Arguments: x: vector of length two (from, to) ## ...: other arguments to seq ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 20 Nov 2009, 08:55 seq(from = x[1], to = x[2],...) curves <- function(expr, from = NULL, to = NULL, n = 101, add = FALSE, type = "l", ylab = 'values', xlab = 'x', log = NULL, xlim = NULL, xcol = NULL, geom = geom_path, wrap = TRUE, ...) 
{ ## Purpose: curves: does the same as curve, but for multivariate output ## ---------------------------------------------------------------------- ## Arguments: same as curve ## xcol: column of data.frame to use for x instead of default ## geom: what geom function to use, defaults to geom_path ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 15 Jan 2010, 08:48 sexpr <- substitute(expr) if (is.name(sexpr)) { fcall <- paste(sexpr, "(x)") expr <- parse(text = fcall) if (is.null(ylab)) ylab <- fcall } else { if (!(is.call(sexpr) && match("x", all.vars(sexpr), nomatch = 0L))) stop("'expr' must be a function or an expression containing 'x'") expr <- sexpr if (is.null(ylab)) ylab <- deparse(sexpr) } if (is.null(xlim)) delayedAssign("lims", { pu <- par("usr")[1L:2L] if (par("xaxs") == "r") pu <- extendrange(pu, f = -1/27) if (par("xlog")) 10^pu else pu }) else lims <- xlim if (is.null(from)) from <- lims[1L] if (is.null(to)) to <- lims[2L] lg <- if (length(log)) log else paste(if (add && par("xlog")) "x", if (add && par("ylog")) "y", sep = "") if (length(lg) == 0) lg <- "" x <- if (lg != "" && "x" %in% strsplit(lg, NULL)[[1L]]) { if (any(c(from, to) <= 0)) stop("'from' and 'to' must be > 0 with log=\"x\"") exp(seq.int(log(from), log(to), length.out = n)) } else seq.int(from, to, length.out = n) y <- eval(expr, envir = list(x = x), enclos = parent.frame()) ## up this was an exact copy of curve if (length(dim(y)) == 1) { ydf <- data.frame(x = x, values = y) gl <- geom(data = ydf, aes(x = x, y = values), ...) ret <- if (add) gl else ggplot(ydf) + gl } else { ## check whether we have to transpose y if (NCOL(y) != n) { if (NROW(y) == n) y <- t(y) else stop(paste('output should have n =',n,' columns')) } ## add dimnames dm <- dimnames(y) if (is.null(dm)) dm <- list(1:NROW(y), 1:NCOL(y)) if (is.null(names(dm))) names(dm) <- c('rows', 'cols') if (is.null(dm[[1]])) dm[[1]] <- 1:NROW(y) if (is.null(dm[[2]])) dm[[2]] <- 1:NCOL(y) dimnames(y) <- dm ## restructure the output matrix to a data.frame ydf <- melt(y) ## un-factor the first two columns for (i in 1:2) { if (is.factor(ydf[[i]])) ydf[[i]] <- f.as.numeric.vectorized(levels(ydf[[i]]))[ydf[[i]]] } ## add x column ydf$x <- rep(x, each = NROW(y)) if (is.null(xcol)) { xcol <- 'x' } else { ## get desired x column lx <- ydf[idx <- ydf[,1] == xcol,3] ## remove it from the values ydf <- ydf[!idx,] ## add as additional column ydf[[xcol]] <- rep(lx, each = NROW(y) - 1) if (missing(xlab)) xlab <- xcol } if (wrap) { ## use facet wrap, or assume it was used before ## there seems to be a bug in ggplot that requires sorting for the rows variable ydf <- ydf[order(ydf[,1],ydf[,2]),] gl <- geom(data = ydf, aes_string(x = xcol, y = 'value'), ...) ret <- if (add) gl else ggplot(ydf) + gl + xlab(xlab) + facet_wrap(substitute(~ rows, list(rows = as.name(names(dm)[1])))) } else { ## factor 'rows' again ydf[, 1] <- factor(ydf[, 1], levels = unique(ydf[, 1])) ret <- if (add) geom(data = ydf, aes_string(x = xcol, y = 'value', color = names(dm)[1]), ...) else ggplot(ydf) + geom(aes_string(x = xcol, y = 'value', linetype = names(dm)[1]), ...) 
+ xlab(xlab) } } if (!add && !is.null(log)) { ret <- ret + switch(log, xy = coord_trans(x = 'log', y='log'), x = coord_trans(x = 'log'), y = coord_strans(y = 'log'), list() ) } ret } f.get.range <- function(p, axis) { ## Purpose: get range of axis from ggplot object ## ---------------------------------------------------------------------- ## Arguments: p: ggplot return object ## axis: 'x' or 'y' ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 27 Jan 2010, 09:37 lr <- NULL ## get range in from base mapping, if available if (!is.null(p$mapping[[axis]])) lr <- range(p$data[[as.character(p$mapping[[axis]])]], na.rm = TRUE) ## walk layers for (llayer in p$layers) { lvar <- as.character(llayer$mapping[[axis]]) if (!is.null(lvar) && length(lvar) > 0) { ## check if the variable is available in custom data.frame if (!is.null(llayer$data) && NCOL(llayer$data) > 0 && lvar %in% colnames(llayer$data)) ## if so, update range lr <- range(c(llayer$data[[lvar]], lr), na.rm = TRUE) else ## try to update range with data from global data.frame if (!is.null(p$data) && NCOL(p$data) > 0 && lvar %in% colnames(p$data)) ## if so, update range lr <- range(c(p$data[[lvar]], lr), na.rm = TRUE) } } lr } ## makeFootnote: add footnote to plot (like stamp) ## from: http://www.r-bloggers.com/r-good-practice-%E2%80%93-adding-footnotes-to-graphics/ makeFootnote <- function(footnoteText= format(Sys.time(), "%d %b %Y"), size= .7, color="black") { ## require(grid) pushViewport(viewport()) grid.text(label= footnoteText , x = unit(1,"npc") - unit(2, "mm"), y= unit(2, "mm"), just=c("right", "bottom"), gp=gpar(cex= size, col=color)) popViewport() } ## ## Example ## ## plot(1:10) ## makeFootnote(footnote) ## using this and multicore results in segmentation fault ## print.ggplot <- function(..., footnote) ## { ## ## Purpose: print ggplot and add a footnote ## ## ---------------------------------------------------------------------- ## ## Arguments: see ?print.ggplot ## ## footnote: text to be added as footnote ## ## ---------------------------------------------------------------------- ## ## Author: Manuel Koller, Date: 25 Jan 2010, 16:32 ## ggplot2::print.ggplot(...) ## ## if (!missing(footnote)) grid.text(footnote, x = unit(1, 'npc') - unit(2, 'mm'), ## ## y = unit(2, 'mm'), ## ## just = c('right', 'bottom'), ## ## gp=gpar(cex=.7, col=grey(.5))) ## if (!missing(footnote)) makeFootnote(footnote)o ## } ## ## modify print.ggplot: update legend automatically ## print.ggplot <- function (x, newpage = is.null(vp), vp = NULL, ...) ## { ## set_last_plot(x) ## lg <- ggplotGrob(x, ...) 
## ## edit grob: change legends ## ## get all legend texts ## lls <- getGrob(lg, gPath='legend.text.text', grep = TRUE, global = TRUE) ## for(le in lls) { ## print(le$label) ## if (!is.expression(le$label) && le$label %in% names(legend.mod)) { ## lg <- editGrob(lg, gPath=le$name, label = legend.mod[[le$label]]) ## } ## } ## if (newpage) ## grid.newpage() ## if (is.null(vp)) { ## grid.draw(lg) ## } ## else { ## if (is.character(vp)) ## seekViewport(vp) ## else pushViewport(vp) ## grid.draw(lg) ## upViewport() ## } ## } require(grid) print.ggplot <- function(x, newpage = is.null(vp), vp = NULL, ..., footnote = NULL, footnote.col = 'black', footnote.size = .7, footnote.just = c("right", "bottom"), legend.mod = NULL) { ## Purpose: print ggplot and add footnote ## ---------------------------------------------------------------------- ## Arguments: x, newpage, vp, ...: see ?print.ggplot ## footnote: text to be added as footnote ## footnote.col: color of footnote ## .size: size of footnote text (cex) ## .just: justification of footnote ## legend.mod: named list on what legend entries to replace ## by value ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 26 Jan 2010, 09:01 if ((missing(footnote) && missing(legend.mod)) || packageVersion("ggplot2") > "0.9.1") return(ggplot2:::print.ggplot(x, newpage, vp, ...)) ## this is mostly a copy of ggplot2::print.ggplot ggplot2:::set_last_plot(x) if (newpage) grid.newpage() grob <- ggplotGrob(x, ...) if (!missing(legend.mod)) { ## edit grob: change legends and strip text lls <- getGrob(grob, gPath='(xlab-|ylab-|title-|label-|legend.text.text|strip.text.x.text|strip.text.y.text)', grep=TRUE, global=TRUE) ## walk all legend texts for(le in lls) { if (!is.null(le$label) && !is.expression(le$label) && length(le$label) > 0 && le$label %in% names(legend.mod)) { grob <- editGrob(grob, gPath=le$name, label = legend.mod[[le$label]]) } } ## also: remove alpha in legend key points lls <- getGrob(grob, gPath='key.points', grep=TRUE, global=TRUE) for (le in lls) { if (is.character(le$gp$col) && grepl('^\\#', le$gp$col)) { lgp <- le$gp lgp$col <- substr(lgp$col, 1, 7) grob <- editGrob(grob, gPath=le$name, gp=lgp) } } ## also: change spacing of legends grob$children$legends$framevp$layout$heights <- grob$children$legends$framevp$layout$heights * .91 } if (missing(footnote)) grid.draw(grob) else { if (is.null(vp)) { ## add footnote to grob grob$children$footnote <- grid.text(label=footnote, x = unit(1, "npc") - unit(2, "mm"), y = unit(2, "mm"), just = footnote.just, gp=gpar(cex = footnote.size, col = footnote.col), draw = FALSE) llen <- length(grob$childrenOrder) grob$childrenOrder[llen+1] <- 'footnote' grid.draw(grob) } else { if (is.character(vp)) seekViewport(vp) else pushViewport(vp) grid.draw(grob) upViewport() ## add footnote to plot (from makeFootnote) pushViewport(viewport()) grid.text(label=footnote, x = unit(1, "npc") - unit(2, "mm"), y = unit(2, "mm"), just = footnote.just, gp=gpar(cex = footnote.size, col = footnote.col)) popViewport() } } } ## guide_legends_box <- function (scales, layers, default_mapping, horizontal = FALSE, ## theme) ## { ## print('hello') ## legs <- guide_legends(scales, layers, default_mapping, theme = theme) ## n <- length(legs) ## if (n == 0) ## return(zeroGrob()) ## if (!horizontal) { ## width <- do.call("max", lapply(legs, widthDetails)) ## heights <- do.call("unit.c", lapply(legs, function(x) heightDetails(x) * ## 10)) ## fg <- frameGrob(grid.layout(nrow = n, 1, widths = width, 
## heights = heights, just = "centre"), name = "legends") ## for (i in 1:n) { ## fg <- placeGrob(fg, legs[[i]], row = i) ## } ## } ## else { ## height <- do.call("sum", lapply(legs, heightDetails)) ## widths <- do.call("unit.c", lapply(legs, function(x) widthDetails(x) * ## 1.1)) ## fg <- frameGrob(grid.layout(ncol = n, 1, widths = widths, ## heights = height, just = "centre"), name = "legends") ## for (i in 1:n) { ## fg <- placeGrob(fg, legs[[i]], col = i) ## } ## } ## fg ## } ### viewport test ## data <- data.frame(x = 1:10, y = 1:10) ## tg <- ggplot(data, aes(x, y)) + geom_line() + ## geom_text(data=data.frame(x=10, y=1), label='test') ## print(tg) ## tgrob2 <- ggplotGrob(tg) ## str(tgrob, max.level = 2) ## str(tgrob2, max.level = 2) ## tgrob$children$footnote <- grid.text(label= 'test haha2', x = unit(1,"npc") - unit(2, "mm"), ## y= unit(2, "mm"), just=c("right", "bottom"), ## gp=gpar(cex= .7, col=grey(.5)), draw=FALSE) ## tgrob$childrenOrder[7] <- 'footnote' ## grid.draw(tgrob) ## print(tg, footnote = 'footnote test text') ########################################################################## ## ggplot 0.8.7 bugfix ########################################################################## ## require(ggplot2) ## data <- data.frame(x = 1:10, y = exp(0:9)) ## ggplot(data, aes(x, y)) + geom_point() + ## geom_hline(yintercept = 9) + geom_vline(xintercept = 2) ## last_plot() + coord_trans(y = 'log', x = 'sqrt') ## GeomVline$draw <- function(., data, scales, coordinates, ...) { ## data$y <- if(coordinates$objname=="trans" && ## coordinates$ytr$objname%in%c("log", "sqrt")) 0 else -Inf ## data$yend <- Inf ## GeomSegment$draw(unique(data), scales, coordinates) ## } ## GeomHline$draw <- function(., data, scales, coordinates, ...) { ## data$x <- if(coordinates$objname=="trans" && ## coordinates$xtr$objname%in%c("log", "sqrt")) 0 else -Inf ## data$xend <- Inf ## GeomSegment$draw(unique(data), scales, coordinates) ## } ## Coord$munch_group <- function(., data, details, npieces=50) { ## n <- nrow(data) ## if(n==2 && (all(data$x==c(-Inf,Inf)) || all(data$y==c(-Inf,Inf)))) npieces=1 ## x <- approx(data$x, n = npieces * (n - 1) + 1)$y ## y <- approx(data$y, n = npieces * (n - 1) + 1)$y ## cbind( ## .$transform(data.frame(x=x, y=y), details), ## data[c(rep(1:(n-1), each=npieces), n), setdiff(names(data), c("x", "y"))] ## ) ## } cs <- function(x, y, ..., if.col) { ## Purpose: make aes dependent on global variable color ## ---------------------------------------------------------------------- ## Arguments: same arguments as for aes ## if.col: list of arguments that are only applied if ## color = TRUE ## ---------------------------------------------------------------------- ## Author: Manuel Koller, Date: 7 Sep 2010, 08:36 aes <- structure(as.list(match.call()[-1]), class = "uneval") if (globalenv()$color) { aes2 <- as.list(aes$if.col[-1]) for (item in names(aes2)) { aes[[item]] <- aes2[[item]] } } aes$if.col <- NULL rename_aes(aes) } ## replace levels by legend.mod lab <- function(..., lm=legend.mod) { factors <- list(...) 
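## (sketch of the intended behaviour, not run; object names are illustrative:
##  with legend.mod <- list(lmrob = "MM-estimate"), lab(dat$Procstr) returns
##  the levels of dat$Procstr as a list, with the level "lmrob" replaced by
##  "MM-estimate"; levels not named in legend.mod are kept unchanged)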
lev <- unlist(lapply(factors, levels)) if (length(factors) > 1) lev <- sort(lev) ret <- as.list(lev) idx <- lev %in% names(lm) ret[idx] <- lm[lev[idx]] ret } ## my labeller mylabel <- function(name, value, lm) { str(name) str(value) if (value %in% names(lm)) lm[[value]] else value } robustbase/inst/doc/error.distributions.R0000644000176200001440000000271311535441110020320 0ustar liggesusers## error.distributions.R: additional error distributions for use in simulations ## require(skewt) ## loaded in vignette if required ## centered skewed t distribution Eskt <- function(nu, gam) { M <- if (is.infinite(nu)) sqrt(2/pi) else gamma((nu+1)/2)/sqrt(nu*pi)/gamma(nu/2)*2*nu/(nu-1) M*(gam^2-1/gam^2)/(gam + 1/gam) } dcskt <- function(x, df, gamma=2) { ncp <- Eskt(df, gamma) dskt(x + ncp, df, gamma) } pcskt <- function(q, df, gamma=2) { ncp <- Eskt(df, gamma) pskt(q + ncp, df, gamma) } qcskt <- function(p, df, gamma=2) { ncp <- Eskt(df, gamma) qskt(p, df, gamma) - ncp } rcskt <- function(n, df, gamma=2) { ncp <- Eskt(df, gamma) rskt(n, df, gamma) - ncp } #################################################################################### ## contaminated normal #################################################################################### rcnorm <- function (n,mean=0,sd=1,epsilon=0.1,meanc=mean,sdc=sqrt(10)*sd) { e <- rnorm(n,mean,sd) nc <- floor(epsilon*n) idx <- sample(1:n,nc) e[idx] <- rnorm(nc,meanc,sdc) e } ## ignore other arguments for the moment pcnorm <- function(q,mean=0,sd=1,lower.tail=TRUE,log.p=FALSE,...) pnorm(q,mean,sd,lower.tail,log.p) ## ignore other arguments for the moment qcnorm <- function(p,mean=0,sd=1,lower.tail=TRUE,log.p=FALSE,...) qnorm(p,mean,sd,lower.tail,log.p) ## ignore other arguments for the moment dcnorm <- function(x,mean=0,sd=1,log=FALSE,...) dnorm(x,mean,sd,log) robustbase/inst/xtraR/0000755000176200001440000000000014124272431014500 5ustar liggesusersrobustbase/inst/xtraR/plot-psiFun.R0000644000176200001440000001127713310763444017060 0ustar liggesusers#### Functions to plot and check psi-functions #### ----------------------------------------- ## used in ../../tests/lmrob-psifns.R, ## ../../tests/psi-rho-etc.R ## and ../../vignettes/psi_functions.Rnw vignette ## Original Author of functions: Martin Maechler, Date: 13 Aug 2010, 10:17 p.psiFun <- function(x, psi, par, main=FALSE, ...) { m.psi <- cbind(rho = Mpsi(x, par, psi,deriv=-1), psi = Mpsi(x, par, psi,deriv= 0), Dpsi = Mpsi(x, par, psi,deriv= 1), wgt = Mwgt(x, par, psi)) robustbase:::matplotPsi(x, m.psi, psi=psi, par=par, main=main, ...) ## -> cbind(x, m.psi) } p.psiFun2 <- function(x, psi, par, main="short", ...) p.psiFun(x, psi, par, main=main, leg.loc= "bottomright", ylim = c(-2.2, 6)) ## for psi_func class objects: simply use plot() method. mids <- function(x) (x[-1]+x[-length(x)])/2 ##' is 'psi' the name of redescending psi (i.e. with *finite* rejection point) isPsi.redesc <- function(psi) { psi != "Huber" ## <- must be adapted when we introduce more } ##' @title Check consistency of psi/chi/wgt/.. functions ##' @param m.psi matrix as from p.psiFun() ##' @param tol ##' @return concatenation of \code{\link{all.equal}} results ##' @author Martin Maechler chkPsiDeriv <- function(m.psi, tol = 1e-4) { stopifnot(length(tol) > 0, tol >= 0, is.numeric(psi <- m.psi[,"psi"]), is.numeric(dx <- diff(x <- m.psi[,"x"]))) if(length(tol) < 2) tol[2] <- 10*tol[1] xn0 <- abs(x) > 1e-5 ## need to jitter here, as for "huber" the mostly equal cases are not counted ## (=> use all.equal(*, countEQ=TRUE) in future). 
Save & restore r.seed if needed: if(hasRS <- exists(".Random.seed", envir=.GlobalEnv)) RS <- .Random.seed set.seed(8) dpsidx <- (diff(psi)/dx) * (1 + 1e-7*rnorm(dx)) # jitter if(hasRS) assign(".Random.seed", RS, envir=.GlobalEnv) c(all.equal(mids(psi), diff(m.psi[,"rho"])/dx, tolerance=tol[1]), # rho' == psi all.equal(mids(m.psi[,"Dpsi"]), dpsidx, tolerance= tol[2]), # psi' == psip all.equal(m.psi[xn0,"wgt"], (psi/x)[xn0], tolerance= tol[1]/10))# psi/x == wgt } ##' This version "starts from scratch" instead of from p.psiFun() result: ##' ##' @title Check consistency of psi/chi/wgt/.. functions ##' @param x range or vector of abscissa values ##' @param psi psi() function spec., passed to M.psi() etc ##' @param par tuning parameter, passed to M.psi() etc ##' @param tol tolerance for equality checking of numeric derivatives ##' @return concatenation of \code{\link{all.equal}} results ##' @author Martin Maechler chkPsi.. <- function(x, psi, par, tol = 1e-4, doD2, quiet=FALSE) { stopifnot(length(tol) > 0, tol >= 0, is.numeric(x), is.finite(x)) is.redesc <- isPsi.redesc(psi) if(length(x) == 2) ## it is a *range* -> produce vector x <- seq(x[1], x[2], length = 1025L) dx <- diff(x) x0 <- sort(x) x <- c(-Inf, Inf, NA, NaN, x0) ## if(is.redesc) rho <- Mpsi(x, par, psi, deriv=-1) psix <- Mpsi(x, par, psi, deriv= 0) Dpsi <- Mpsi(x, par, psi, deriv= 1) wgt <- Mwgt(x, par, psi) chi <- Mchi(x, par, psi) if(is.redesc) { chi1 <- Mchi(x, par, psi, deriv=1) chi2 <- Mchi(x, par, psi, deriv=2) } rho.Inf <- MrhoInf(par, psi) stopifnot(all.equal(rep(rho.Inf,2), rho[1:2])) if(is.redesc) stopifnot(all.equal(rep(rho.Inf,2), rho[1:2]), all.equal(chi, rho / rho.Inf), all.equal(chi1,psix / rho.Inf), all.equal(chi2,Dpsi / rho.Inf) ) else { ## check any here? From ../src/lmrob.c : ## chi = C-function rho(x) which is unscaled rho <- chi # for checks below } D2psi <- tryCatch(Mpsi(x, par, psi, deriv= 2), error=function(e)e) has2 <- !inherits(D2psi, "error") doD2 <- if(missing(doD2)) has2 else doD2 && has2 if(!quiet & !doD2) message("Not checking psi''() := Mpsi(*, deriv=2)") stopifnot(is.numeric(psix), ## check NA / NaN : identical5(x[3:4], chi[3:4], psix[3:4], Dpsi[3:4], wgt[3:4]), if(has2) identical(x[3:4], D2psi[3:4]) else TRUE) if(length(tol) < 2) tol[2] <- 16*tol[1] if(length(tol) < 3) tol[3] <- tol[1]/10 if(length(tol) < 4) tol[4] <- 8*tol[2] i <- 5:length(x) # leaving away the first 4 (+-Inf, NA..) xn0 <- is.finite(x) & abs(x) > 1e-5 c("rho' = psi" = all.equal(mids(psix[i]), diff(rho [i])/dx, tolerance=tol[1]), "psi' = psip"= all.equal(mids(Dpsi[i]), diff(psix[i])/dx, tolerance=tol[2]), "psi/x= wgt" = all.equal( wgt[xn0], (psix/x)[xn0], tolerance=tol[3]), "psi''=D2psi"= if(doD2) all.equal(mids(D2psi[i]), diff(Dpsi[i])/dx,tolerance=tol[4]) else NA) } robustbase/inst/xtraR/test_MCD.R0000644000176200001440000003056313710236747016306 0ustar liggesusers#### Utility functions for testing covMcd() #### -------------------------------------- ../tests/tmcd.R ## "workhorse" -- by default *passed* to and called from doMCDdata(): domcd1 <- function(x, xname, nrep = 1, ## These are all got from doMCDdata() [yuck!] method = get("method", parent.frame()), # compromise time = get("time", parent.frame()), # compromise short = get("short", parent.frame()), # compromise full = get("full", parent.frame()), # compromise lname = 20) { if(short && full) stop("you should not set both 'full' and 'short' to TRUE") force(xname)# => evaluate when it is a data(<>, ..) 
call n <- dim(x)[1] p <- dim(x)[2] if(method == "MASS") { mcd <- MASS::cov.mcd(x) mcd$quan <- (n + p + 1) %/% 2 #default: floor((n+p+1)/2) } else if(method == "DetMCD") { mcd <- covMcd(x, nsamp="deterministic") # trace = FALSE } else { mcd <- covMcd(x) # trace = FALSE } if(full) { header <- get("header", parent.frame()) header(time) } xres <- sprintf("%*s %3d %3d %3d %12.6f", lname, xname, n, p, mcd$quan, mcd$crit) if(time) { xtime <- system.time(repMCD(x, nrep, method))[1]/nrep xres <- sprintf("%s %10.1f", xres, 1000 * xtime) } cat(xres, "\n") if(!short) { cat("Best subsample: \n") print(mcd$best) ibad <- which(unname(mcd$mcd.wt) == 0) nbad <- length(ibad) cat("Outliers: ",nbad, if(nbad > 0)":", "\n") if(nbad > 0) print(ibad) if(full) { cat("------------- *MCD() result: --------------------------\n") print(mcd) } cat("--------------------------------------------------------\n") } }## {domcd1} ##' Test the function covMcd() on the literature datasets: ##' ##' Call covMcd() for "all" datasets in robustbase / rrcov and print: ##' - execution time (if time is true) ##' - objective function ##' - best subsample found (if short is false) ##' - outliers identified (with cutoff 0.975) (if short is false) ##' - estimated center and covarinance matrix if full is true) ##' ##' @param nrep : [integer] number of repetitions to use for estimating the ##' (average) execution time ##' @param method : [character] select a method: one of (FASTMCD, MASS) ##' @param time : [logical] whether to evaluate the execution time ##' @param short : [logical] whether to do short output (i.e. only the ##' objective function value). If short == FALSE, ##' the best subsample and the identified outliers are ##' printed. See also the parameter full below ##' @param full : [logical] whether to print the estimated cente and covariance matrix ##' @param digits ##' @param domcd workhorse function, to be called e.g. as ##' @examples domcd(starsCYG, data(starsCYG), nrep) ##' @author Valentin Todorov; tweaks by Martin Maechler ##' @note Is called from ../../demo/determinMCD.R and ../../tests/tmcd.R doMCDdata <- function(nrep = 1, method = c("FASTMCD", "MASS", "DetMCD"), time = nrep >= 3, short = time, full = !short, digits=5, domcd = domcd1) { stopifnot(is.function(domcd), length(formals(domcd)) >= 3) options(digits = digits) method <- match.arg(method) # *is* then accessed from domcd(.) 
stopifnot(require("robustbase")) # all data() which do not specify package data(Animals, package = "MASS") brain <- Animals[c(1:24, 26:25, 27:28),] data(list = c("fish", "pottery", "rice", "un86", "wages"), package = "rrcov") tmp <- sys.call() cat("\nCall: ", deparse(substitute(tmp)),"\n") header <- function(time) { ## the string length here require 'lname <- 20' {FIXME} ## 1 2 ## 1 3 5 7 901 3 5 7 90 2 4 cat("Data Set n p h(alf) LOG(obj)",if(time)" Time [ms]","\n", "=============================================",if(time)"===========","\n", sep="") } if(full) { ## header() is called in each domcd() } else ## here header(time) domcd(bushfire, data(bushfire), nrep) domcd(heart[, 1:2], data(heart), nrep) domcd(starsCYG, data(starsCYG), nrep) domcd(stack.x, data(stackloss), nrep) domcd(data.matrix(subset(phosphor, select= -plant)),data(phosphor), nrep) domcd(data.matrix(subset(coleman, select = -Y)), data(coleman), nrep) domcd(data.matrix(subset(salinity, select = -Y)), data(salinity), nrep) domcd(data.matrix(subset(wood, select = -y)), data(wood), nrep) domcd(data.matrix(subset(hbk, select = -Y)), data(hbk), nrep) domcd(brain, "Animals", nrep) domcd(milk, data(milk), nrep) domcd(lactic, data(lactic), nrep) domcd(pension, data(pension), nrep) domcd(pilot, data(pilot), nrep) ## This is for CovMcdBig .... ## domcd(radarImage, data(radarImage), nrep) ## domcd(NOxEmissions, data(NOxEmissions), nrep) domcd(data.matrix(subset(vaso, select = -Y)), data(vaso), nrep) domcd(data.matrix(subset(wagnerGrowth, select = -Period)), data(wagnerGrowth), nrep) ## Obs 14 has missing, column 7 is categorical domcd(fish[-14,-7], data(fish, package="rrcov"), nrep) domcd(pottery[,-7], data(pottery, package="rrcov"), nrep) domcd(rice, data(rice, package="rrcov"), nrep) domcd(un86, data(un86, package="rrcov"), nrep) ## there are missing values domcd(wages[-c(29, 31, 38),-9], data(wages, package="rrcov"), nrep) cat("========================================================\n") } ## {doMCDdata} if(FALSE){ data(mortality, package = "riv") mm <- as.data.frame(lapply(mortality, signif, 3)) for(j in c(1,2,6,7)) mm[,j] <- mm[,j] * 10 mm[,5] <- mm[,5] * 1000 mm[,8] <- mm[,8] / 100 mort3 <- mm dput(mort3) } ## which gives the equivalent of mort3 <- data.frame(MO70 = c(140, 101, 86, 102, 115, 121, 118, 76.6, 131, 112, 111, 112, 117, 118, 123, 122, 81.7, 108, 111, 109, 92.5, 83.9, 93.8, 135, 124, 126, 122, 120, 127, 115, 156, 95.1, 127, 129, 116, 82.3, 115, 106, 134, 94.9, 119, 111, 131, 85.6, 135, 126, 141, 152, 137, 151, 93.6, 84.2, 78, 50.2, 81.3, 112, 80.1, 125, 120, 143), MAGE = c(297, 277, 275, 268, 296, 327, 314, 258, 342, 278, 278, 313, 284, 272, 296, 277, 271, 296, 286, 250, 280, 270, 246, 301, 279, 287, 293, 271, 291, 295, 314, 267, 275, 307, 259, 251, 324, 285, 288, 254, 278, 287, 316, 287, 326, 309, 334, 369, 321, 311, 261, 272, 260, 244, 248, 277, 240, 295, 319, 346), CI68 = c(137, 137, 129, 129, 151, 157, 157, 157, 157, 202, 202, 202, 138, 160, 190, 191, 191, 191, 159, 159, 146, 146, 203, 203, 182, 166, 203, 203, 167, 167, 165, 153, 149, 149, 149, 157, 152, 183, 183, 183, 183, 183, 183, 111, 171, 148, 148, 148, 192, 160, 160, 172, 172, 172, 172, 101, 173, 173, 144, 181), MDOC = c(142, 80.4, 148, 167, 230, 187, 240, 149, 240, 195, 327, 377, 203, 160, 161, 68.7, 141, 120, 176, 105, 128, 112, 98.9, 160, 209, 200, 153, 126, 157, 157, 145, 160, 158, 102, 195, 188, 250, 143, 157, 186, 114, 129, 129, 143, 186, 207, 144, 112, 157, 121, 168, 155, 144, 144, 120, 194, 93.6, 231, 185, 89.7), DENS = c(37, 37, 27, 32, 17, 13, 23, 
19, 27, 29, 15, 15, 48, 34, 26, 47, 17, 10, 10, 18, 11, 13, 26, 19, 55, 17, 16, 7, 10, 17, 44, 13, 18, 26, 40, 22, 29, 7, 28, 10, 15, 1, 11, 10, 8, 13, 13, 6, 10, 26, 49, 28, 32, 18, 62, 15, 21, 18, 10, 12), NONW = c(4.22, 3.36, 0.67, 0.52, 2.51, 0.82, 4.07, 1.11, 2.86, 2.92, 2.74, 1.05, 7.23, 5.16, 3.44, 2.84, 1.84, 1.47, 0.62, 0.03, 0.96, 1.07, 1.74, 2.41, 0.45, 4.7, 4.45, 1.2, 0.64, 2.28, 4.13, 1.06, 4.02, 2.22, 5.6, 0.43, 2.34, 1.78, 2.81, 1.9, 3.09, 1.43, 2.58, 1.34, 0.78, 3.44, 2.07, 0.68, 1, 3.6, 3.92, 2.58, 2.66, 0.05, 0.86, 0.32, 3.02, 4.24, 1.26, 1.08), EDUC = c(454, 516, 601, 631, 565, 620, 661, 653, 661, 591, 568, 499, 685, 534, 539, 536, 560, 542, 680, 546, 648, 632, 601, 469, 458, 446, 521, 540, 661, 601, 480, 627, 506, 363, 551, 662, 518, 556, 484, 607, 562, 517, 521, 582, 629, 506, 534, 433, 459, 476, 492, 548, 517, 517, 468, 685, 483, 471, 678, 528), IN69 = c(86.9, 99.3, 113, 99.2, 104, 118, 113, 117, 125, 100, 104, 115, 122, 107, 135, 101, 123, 114, 114, 113, 108, 109, 100, 99.8, 102, 100, 110, 112, 111, 113, 92.7, 116, 86.3, 103, 86.4, 109, 116, 112, 104, 108, 103, 116, 99.3, 116, 114, 104, 105, 97, 102, 83.4, 101, 125, 117, 118, 90.3, 108, 92.4, 106, 126, 109)) ###'------*Generate* data for benchmarking ---------------------------------------- ##' Generates a location contaminated multivariate ##' normal sample of n observations in p dimensions ##' (1-eps) * N_p(0, I_p) + eps * N_(m,I_p) ##' where ##' m = (b,b,...,b) ##' Defaults: eps=0 and b=10 ##' @title Generate n x p location contaminated MV data ##' @param n number of observations ##' @param p number of variables ##' @param eps amount of contamination ##' @param b mean of "outliers" gendata <- function(n,p, eps=0, b=10) { if(missing(n) || missing(p)) stop("Please specify (n,p)") if(!is.numeric(eps) || length(eps) != 1 || eps < 0 || eps >= 0.5) stop("eps must be in [0,0.5)") X <- matrix(rnorm(n*p), n, p) nbad <- as.integer(eps * n) if(nbad > 0) { b <- rep(b, length = p) # recycle to p-vector ## = E[.] of bad obs. 
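## pick nbad rows at random and shift them by the location vector b,
## turning them into the "bad" (location-contaminated) observations: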
xind <- sample(n,nbad) X[xind,] <- X[xind, , drop=FALSE] + rep(b, each=nbad) } list(X=X, xind=if(nbad > 0) xind) } ##' Repeated calls to different MCD algorithms for timing purposes *only* repMCD <- function(x, nrep = 1, method = "FASTMCD") { stopifnot(length(nrep) == 1, nrep >= 1) switch(method, "FASTMCD" = replicate(nrep, covMcd(x)), "bestMCD" = replicate(nrep, covMcd(x, nsamp= "best")), "exactMCD" = replicate(nrep, covMcd(x, nsamp= "exact")), "DetMCD" = replicate(nrep, covMcd(x, nsamp="deterministic")), "MASS.best" = replicate(nrep, MASS::cov.mcd(x)),# uses nsamp = "best" ==> up to 5000 ## rrcov.control()$nsamp == 500 : "MASS.500" = replicate(nrep, MASS::cov.mcd(x, nsamp = 500)), ## otherwise: stop(gettextf("Method '%s' not yet implemented", method))) } repMCD.meths <- function() { switch.expr <- body(repMCD)[[3]] m <- names(switch.expr) m[m != ""] } if(FALSE) repMCD.meths() ## [1] "FASTMCD" "bestMCD" "DetMCD" "MASS.best" "MASS.500" ##' calls gendata(), repMCD() dogen <- function(nrep=1, eps=0.49, method = repMCD.meths(), ## "FASTMCD" is first p.set = c(2, 5, 10, 20, 30), n.set = c(100, 500, 1000, 10000, 50000), n.p.ratio = 5, seed = 1234) { domcd <- function(x, nrep=1){ ## system.time() *does* gc() xtime <- system.time(repMCD(x, nrep, method))[1]/nrep cat(sprintf("%6d %3d %12.2f\n", dim(x)[1], dim(x)[2], xtime)) xtime } set.seed(seed) method <- match.arg(method) mkL <- function(ch,m) paste(ch,m,sep="=") ans <- matrix(NA, length(n.set), length(p.set), dimnames = list(mkL("n",n.set), mkL("p",p.set))) cat(sprintf("Method: %-12s; nrep = %d\n", method, nrep), "------------------------------\n", " n p Time\n", "=======================\n", sep="") for(n in n.set) { n. <- mkL("n",n) for(p in p.set) { if(n.p.ratio * p <= n) { xx <- gendata(n, p, eps) ans[n., mkL("p",p)] <- domcd(xx$X, nrep) } } } cat("=======================\n") cat(sprintf("Total time: %11.2f\n", nrep * sum(ans, na.rm=TRUE))) structure(ans, nrep = nrep, method=method) }## {dogen} ###' ------------------ These can only be used with rrcov :: CovMcd() -------------- docheck <- function(n, p, eps, ...) { xx <- gendata(n,p,eps) mcd <- CovMcd(xx$X, ...) check(mcd, xx$xind) } ##' check if mcd is robust w.r.t xind, i.e. 
check how many of xind ##' did not get zero weight check <- function(mcd, xind){ mymatch <- xind %in% which(mcd@wt == 0) length(xind) - sum(mymatch) } robustbase/inst/xtraR/test_LTS.R0000644000176200001440000001033311757675047016347 0ustar liggesusers#### Utility functions for testing ltsReg() #### -------------------------------------- ../tests/tlts.R repLTS <- function(form, data, nrep = 1, method = c("FASTLTS","MASS")) { if(method == "MASS") ## MASS::lqs(x,y,control=list(psamp = NA, nsamp= "best", adjust= FALSE)) for(i in 1:nrep) MASS::lqs(form, data = data, method = "lts") else ## set mcd=FALSE - we want to time only the LTS algorithm for(i in 1:nrep) ltsReg(form, data = data, mcd = FALSE) } doLTSdata <- function(nrep = 1, time = nrep >= 3, short = time, full = !short, method = c("FASTLTS", "MASS")) { ##@bdescr ## Test the function ltsReg() on the literature datasets: ## ## Call ltsReg() for "all" regression datasets available in robustbase ## and print: ## - execution time (if time) ## - objective function ## - best subsample found (if not short) ## - outliers identified (with cutoff 0.975) (if not short) ## - estimated coeficients and scale (if full) ## ##@edescr ## ##@in nrep : [integer] number of repetitions to use for estimating the ## (average) execution time ##@in time : [boolean] whether to evaluate the execution time ##@in short : [boolean] whether to do short output (i.e. only the ## objective function value). If short == FALSE, ## the best subsample and the identified outliers are ## printed. See also the parameter full below ##@in full : [boolean] whether to print the estimated coeficients and scale ##@in method : [character] select a method: one of (FASTLTS, MASS) dolts <- function(form, dname, dataset, nrep = 1) { if(missing(dataset)) { data(list = dname) dataset <- get(dname) } else if(missing(dname)) dname <- deparse(substitute(dataset)) environment(form) <- environment() ## !?! x <- model.matrix(form, model.frame(form, data = dataset)) dx <- dim(x) - 0:1 # not counting intercept if(method == "MASS") { lts <- MASS::lqs(form, data = dataset, method = "lts") quan <- (dx[1] + (dx[2] + 1) + 1)/2 #default: (n+p+1)/2 } else { lts <- ltsReg(form, data = dataset, mcd = FALSE) quan <- lts$quan } xres <- sprintf("%*s %3d %3d %3d %12.6f", lname, dname, dx[1], dx[2], as.integer(quan), lts$crit) if(time) { xtime <- system.time(repLTS(form, data = dataset, nrep, method))[1] xres <- sprintf("%s %10.1f", xres, 1000 * xtime / nrep) } cat(xres, "\n") if(!short) { cat("Best subsample: \n") print(lts$best) ibad <- which(lts$lts.wt == 0) names(ibad) <- NULL nbad <- length(ibad) cat("Outliers: ",nbad,"\n") if(nbad > 0) print(ibad) if(full) { cat("-------------\n") print(lts) print(summary(lts)) } cat("--------------------------------------------------------\n") } } method <- match.arg(method) data(heart) data(starsCYG) data(phosphor) data(stackloss) data(coleman) data(salinity) data(aircraft) data(delivery) data(wood) data(hbk) cll <- sys.call() cat("\nCall: ", deparse(substitute(cll)),"\n") cat("========================================================\n") cat("Data Set n p Half obj Time [ms]\n") cat("========================================================\n") ## 1 3 5 7 9.1 3 5 7 9. 123 123 lname <- 20 ## --^ dolts(clength ~ . , "heart", nrep = nrep) dolts(log.light ~ log.Te , "starsCYG", nrep = nrep) dolts(plant ~ . , "phosphor", nrep = nrep) dolts(stack.loss ~ . , "stackloss", nrep = nrep) dolts(Y ~ . , "coleman", nrep = nrep) dolts(Y ~ . , "salinity") dolts(Y ~ . 
, "aircraft") dolts(delTime ~ . , "delivery") dolts(y ~ . , "wood", nrep = nrep) dolts(Y ~ . , "hbk", nrep = nrep) cat("========================================================\n") } robustbase/inst/xtraR/m-s_fns.R0000644000176200001440000001340613177452122016175 0ustar liggesusers#### Testing M-S estimator --- self-contained utility functions --- #### ## Exercised from ../../tests/m-s-estimator.R ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~ ## Test subsampling algorithm m_s_subsample <- function(x1, x2, y, control, orthogonalize=TRUE) { storage.mode(x1) <- "double" storage.mode(x2) <- "double" storage.mode(y) <- "double" z <- .C(robustbase:::R_lmrob_M_S, x1, x2, y, res=double(length(y)), n=length(y), p1=NCOL(x1), p2=NCOL(x2), nResample=as.integer(control$nResample), max_it_scale=as.integer(control$maxit.scale), scale=double(1), b1=double(NCOL(x1)), b2=double(NCOL(x2)), tuning_chi=as.double(control$tuning.chi), ipsi=.psi2ipsi(control$psi), bb=as.double(control$bb), K_m_s=as.integer(control$k.m_s), max_k=as.integer(control$k.max), rel_tol=as.double(control$rel.tol), inv_tol=as.double(control$solve.tol), scale_tol=as.double(control$scale.tol), converged=FALSE, trace_lev=as.integer(control$trace.lev), orthogonalize=as.logical(orthogonalize), subsample=TRUE, descent=FALSE, # and hence no 'convergence' here .. mts = 0L, ss = 1L) z[c("b1", "b2", "scale")] } ## Test descent algorithm m_s_descent <- function(x1, x2, y, control, b1, b2, scale) { storage.mode(x1) <- "double" storage.mode(x2) <- "double" storage.mode(y) <- "double" z <- .C(robustbase:::R_lmrob_M_S, X1=x1, X2=x2, y=y, res=double(length(y)), n=length(y), p1=NCOL(x1), p2=NCOL(x2), nResample=as.integer(control$nResample), max_it_scale=as.integer(control$maxit.scale), scale=as.double(scale), b1=as.double(b1), b2=as.double(b2), tuning_chi=as.double(control$tuning.chi), ipsi=.psi2ipsi(control$psi), bb=as.double(control$bb), K_m_s=as.integer(control$k.m_s), max_k=as.integer(control$k.max), rel_tol=as.double(control$rel.tol), inv_tol=as.double(control$solve.tol), scale_tol=as.double(control$scale.tol), converged=logical(1), trace_lev=as.integer(control$trace.lev), orthogonalize=FALSE, subsample=FALSE, descent=TRUE, ## ----- mts = 0L, ss = 1L) z[c("b1", "b2", "scale", "res", "converged")] } find_scale <- function(r, s0, n, p, control) { c.chi <- robustbase:::.psi.conv.cc(control$psi, control$tuning.chi) b <- .C(robustbase:::R_lmrob_S, x = double(1), y = as.double(r), n = as.integer(n), p = as.integer(p), nResample = 0L, # <- only.scale=TRUE , now in lmrob.S() scale = as.double(s0), coefficients = double(p), as.double(c.chi), .psi2ipsi(control$psi), as.double(control$bb), best_r = 0L, groups = 0L, n.group = 0L, k.fast.s = 0L, k.iter = 0L, maxit.scale = as.integer(control$maxit.scale), refine.tol = as.double(control$refine.tol), inv.tol = as.double(control$solve.tol), scale_tol=as.double(control$scale.tol), converged = logical(1), trace.lev = 0L, mts = 0L, ss = 1L, fast.s.large.n = as.integer(n+1) )[c("coefficients", "scale", "k.iter", "converged")] b$scale } ## m_s_descent()--R-only--version : m_s_descent_Ronly <- function(x1, x2, y, control, b1, b2, scale) { stopifnot(is.list(control), is.numeric(control$k.max)) n <- length(y) p1 <- ncol(x1) p2 <- ncol(x2) p <- p1+p2 t2 <- b2 t1 <- b1 rs <- drop(y - x1 %*% b1 - x2 %*% b2) sc <- scale ## do refinement steps ## do maximally control$k.max iterations ## stop if converged ## stop after k.fast.m_s step of no improvement if (control$trace.lev > 4) cat("scale:", scale, "\n") if (control$trace.lev > 4) cat("res:", 
rs, "\n") n.imp <- nnoimprovement <- nref <- 0L; conv <- FALSE while((nref <- nref + 1) <= control$k.max && !conv && nnoimprovement < control$k.m_s) { ## STEP 1: UPDATE B2 y.tilde <- y - x1 %*% t1 w <- Mwgt(rs / sc, control$tuning.chi, control$psi) if (control$trace.lev > 4) cat("w:", w, "\n") z2 <- lm.wfit(x2, y.tilde, w) t2 <- z2$coef if (control$trace.lev > 4) cat("t2:", t2, "\n") rs <- y - x2 %*% t2 ## STEP 2: OBTAIN M-ESTIMATE OF B1 z1 <- lmrob.lar(x1, rs, control) t1 <- z1$coef if (control$trace.lev > 4) cat("t1:", t1, "\n") rs <- z1$resid ## STEP 3: COMPUTE THE SCALE ESTIMATE sc <- find_scale(rs, sc, n, p, control) if (control$trace.lev > 4) cat("sc:", sc, "\n") ## STEP 4: CHECK FOR CONVERGENCE ##... FIXME ## STEP 5: UPDATE BEST FIT if (sc < scale) { scale <- sc b1 <- t1 b2 <- t2 nnoimprovement <- 0L n.imp <- n.imp + 1L } else nnoimprovement <- nnoimprovement + 1L } ## STEP 6: FINISH if (nref == control$k.max) warning("M-S estimate: maximum number of refinement steps reached.") ## if we'd really check for convergence above : ## if (nnoimprovement == control$k.m_s) ## warning("M-S estimate: maximum number of no-improvements reached.") list(b1=b1, b2=b2, scale=scale, res=rs, nref=nref, n.improve = n.imp)#, converged=conv, nnoimprovement=nnoimprovement) } robustbase/inst/xtraR/platform-sessionInfo.R0000644000176200001440000001074113465050054020751 0ustar liggesusers## <---> sync with ~/R/Pkgs/CLA/inst/xtraR/platform-sessionInfo.R ## ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ ##' return 'x' unless it is NULL where you'd use 'orElse' `%||%` <- function(x, orElse) if(!is.null(x)) x else orElse ##' not %in% : `%nin%` <- function (x, table) is.na(match(x, table)) ##' Derive more sessionInfo() like information, notably about BLAS, LAPACK, arithmetic, etc moreSessionInfo <- function(print. = FALSE) { .M <- .Machine if(print.) str(.M[grep("^sizeof", names(.M))]) ## differentiate long-double.. b64 <- .M$sizeof.pointer == 8 onWindows <- .Platform$OS.type == "windows" ## Do we have 64bit but no-long-double ? arch <- Sys.info()[["machine"]] b64nLD <- (arch == "x86_64" && .M$sizeof.longdouble != 16) if(b64nLD) arch <- paste0(arch, "--no-long-double") if(print.) cat(sprintf("%d bit platform type '%s' ==> onWindows: %s\narch: %s\n", if(b64) 64 else 32, .Platform$OS.type, onWindows, arch)) sInfo <- sessionInfo() if(!exists("osVersion")) osVersion <- sInfo$running if(print.) cat("osVersion (0):", osVersion, "\n") if(is.null(osVersion)) osVersion <- "Fedora" # very last resort if(!length(BLAS.is.LAPACK <- sInfo$BLAS == sInfo$LAPACK)) BLAS.is.LAPACK <- NA # R versions <= 3.3.x ## A cheap check (that works on KH's debian-gcc setup, 2019-05): if(!length(BLAS.is.openBLAS <- grepl("openblas", sInfo$BLAS, ignore.case=TRUE))) BLAS.is.openBLAS <- NA if(!length(Lapack.is.openBLAS <- grepl("openblas", sInfo$LAPACK, ignore.case=TRUE))) Lapack.is.openBLAS <- NA if(print.) cat("osVersion:", osVersion, "\n" ,'+ BLAS "is" Lapack:', BLAS.is.LAPACK , '| BLAS=OpenBLAS:', BLAS.is.openBLAS , '| Lapack=OpenBLAS:', Lapack.is.openBLAS , "\n") ## NB: sessionInfo() really gets these: if(getRversion() >= "3.4") local({ is.BLAS.LAPACK <- exists("La_library", mode="function") && ## R 3.4.0 and newer identical(La_library(), extSoftVersion()[["BLAS"]]) stopifnot(isTRUE(is.BLAS.LAPACK == BLAS.is.LAPACK)) }) ## also TRUE for Windows [since both are "" !!] 
## Find out if we are running Micrsoft R Open is.MS.Ropen <- { file.exists(Rpr <- file.path(R.home("etc"), "Rprofile.site")) && length(lnsRpr <- readLines(Rpr)) && ## length(grep("[Mm]icrosoft", lnsRpr)) > 3 # MRO 3.5.1 has '20' times "[Mm]icrosoft" length(grep("Microsoft R Open", lnsRpr, fixed=TRUE, value=TRUE)) > 0 ## MRO 3.5.1 has it twice } if(print. && is.MS.Ropen) cat("We are running 'Microsoft R Open'\n") ## I'd really would want to know which of (OpenBLAS | ATLAS | MKL | R's own BLAS+LAPACK) ## ## Next best, I'd really like ## ## strictR <- we_are_using_Rs_own_BLAS_and_Lapack() [ ==> BLAS != Lapack ] ## ## Actually the following aims to be equivalent to {and *is* for MM on Fedora, 2019-03} ## strictR <- !(using ATLAS || OpenBLAS || MKL ) if(TRUE) { strictR <- !BLAS.is.LAPACK && !is.MS.Ropen && !BLAS.is.openBLAS && !Lapack.is.openBLAS && TRUE } else { ## workaround: strictR <- print(Sys.info()[["user"]]) == "maechler"# actually ## but not when testing with /usr/bin/R [OpenBLAS on Fedora!] (as "maechler"): if(strictR && substr(osVersion, 1,6) == "Fedora" && R.home() == "/usr/lib64/R") strictR <- FALSE } if(print.) cat("strictR:", strictR, "\n") structure(class = "moreSessionInfo", list( arch = arch , b64 = b64 # 64-bit (:<==> sizeof.pointer == 8 ) , b64nLD = b64nLD # 64-bit, but --no-long-double (sizeof.longdouble != 16) , BLAS.is.LAPACK = BLAS.is.LAPACK , BLAS.is.openBLAS = BLAS.is.openBLAS , Lapack.is.openBLAS = Lapack.is.openBLAS , is.MS.Ropen = is.MS.Ropen # is R a version of Microsoft R Open (==> MKL-linked BLAS) , onWindows = onWindows , osVersion = osVersion , strictR = strictR # are BLAS & Lapack from R's source, and "otherwise known safe platform" )) } if(getRversion() < "3.4.0") withAutoprint <- function(x, ...) x if(isTRUE(getOption("chk.moreSessionInfo"))) withAutoprint({ ms1 <- moreSessionInfo() ms. <- moreSessionInfo(print. = TRUE) stopifnot(is.list(ms1), length(ms1) > 1, identical(ms1, ms.) ) }) robustbase/inst/xtraR/subsample-fns.R0000644000176200001440000001273212271657124017416 0ustar liggesusers### Mainly used for source package checking in ../../tests/subsample.R ### however, available (for reproducible research, confirmation) as ### part of the robustbase package. ## R version of LU decomposition in subsample() in lmrob.c ## Modified from Golub G. H., Van Loan, C. 
F., Matrix Computations, 3rd edition LU.gaxpy <- function(A, pivot=TRUE, tol = 1e-7, verbose = FALSE) { A <- as.matrix(A) ## m x n matrix, n >= m >= 1 stopifnot((n <- ncol(A)) >= (m <- nrow(A)), m >= 1) ## precondition: cf0 <- max(abs(A)) A <- A / cf0 v <- double(m) ## work matrix ## these matrices will contain the results L <- diag(m) U <- matrix(0, m, m) p <- integer(m-1) ## pivots idc <- 1L:n ## which columns of A are used idr <- 1L:m ## how rows of A are permuted for(j in 1L:m) { sing <- TRUE while(sing) { if (length(idc) < j) break if (j == 1L) { v[j:m] <- A[idr[j:m], idc[j]] } else { rows <- 1L:(j-1L) z <- forwardsolve(L[rows, rows, drop=FALSE], A[idr[rows], idc[j]]) U[rows, j] <- z v[j:m] <- A[idr[j:m], idc[j]] - L[j:m, rows, drop=FALSE] %*% z if(verbose) cat("Step", j, "z=", sapply(z, function(x) sprintf("%.15f", x)), "\n v=", v, "\n") } if (j < m) { mu <- j mu <- if (pivot) which.max(abs(v[j:m])) + j - 1L else j if(verbose) ## debug possumDiv example cat(sprintf("R-Step: %i: ", j), round(abs(v[j:m]), 6), "\n", mu, v[mu], "\n") if (abs(v[mu]) >= tol) { ## singular: can stop here already p[j] <- mu if (pivot) { tmp <- v[j]; v[j] <- v[mu]; v[mu] <- tmp tmp <- idr[j]; idr[j] <- idr[mu]; idr[mu] <- tmp } L[(j+1L):m, j] <- v[(j+1L):m]/v[j] if (pivot && j > 1) { ## swap rows L[j,] <-> L[mu,] tmp <- L[j, rows]; L[j, rows] <- L[mu, rows]; L[mu, rows] <- tmp } } } U[j, j] <- v[j] if (abs(v[j]) < tol) { if(verbose) cat("* singularity detected in step ", j, "; candidate ", idc[j],"\n") idc <- idc[-j] } else sing <- FALSE }## {while} }## {for} list(L = L, U = U * cf0, p = p, idc = idc[1L:m], singular = sing) } Rsubsample <- function(x, y, mts=0, tolInverse = 1e-7) { if(!is.matrix(x)) x <- as.matrix(x) stopifnot((n <- length(y)) == nrow(x)) p <- ncol(x) storage.mode(x) <- "double" .C(robustbase:::R_subsample, x=x, y=as.double(y), n=as.integer(n), m=as.integer(p), beta=double(p), ind_space=integer(n), idc = integer(n), ## elements 1:p: chosen subsample idr = integer(n), lu = matrix(double(1), p,p), v=double(p), pivot = integer(p-1), Dr=double(n), Dc=double(p), rowequ=integer(1), colequ=integer(1), status=integer(1), sample = FALSE, ## set this to TRUE for sampling mts = as.integer(mts), ss = as.integer(mts == 0), tolinv = as.double(tolInverse), solve = TRUE) } ##' Simple version, just checking (non)singularity conformance tstSubsampleSing <- function(X, y) { lX <- X[sample(nrow(X)), ] ## C version zc <- Rsubsample(lX, y) ## R version zR <- LU.gaxpy(t(lX)) if (as.logical(zc$status)) { ## singularity in C detected if (!zR$singular) stop("singularity in C but not in R") } else { ## no singularity detected if (zR$singular) stop("singularity in R but not in C") } zR$singular } ##' Sophisticated version tstSubsample <- function(x, y=rnorm(n), compareMatrix = TRUE, lu.tol = 1e-7, lu.verbose=FALSE, tolInverse = lu.tol, eq.tol = .Machine$double.eps^0.5) { x0 <- x <- as.matrix(x) n <- nrow(x) p <- ncol(x) if(p <= 1) stop("wrong 'x': need at least two columns for these tests") stopifnot(length(y) == n) z <- Rsubsample(x, y, tolInverse=tolInverse) ## ---------- ## convert idc, idr and p to 1-based indexing: idr <- z$idr + 1L idc <- z$idc[1:p] + 1L pivot <- z$pivot + 1L ## get L and U L <- U <- LU <- matrix(z$lu, p, p) L[upper.tri(L, diag=TRUE)] <- 0 diag(L) <- 1 U[lower.tri(U, diag=FALSE)] <- 0 ## test solved parameter if (z$status == 0) { stopifnot(all.equal(z$beta, unname(solve(x[idc, ], y[idc])), tol=eq.tol)) } if (z$rowequ) x <- diag(z$Dr) %*% x if (z$colequ) x <- x %*% diag(z$Dc) if (z$rowequ || 
z$colequ) cat(sprintf("kappa before equilibration = %g, after = %g\n", kappa(x0), kappa(x))) LU. <- LU.gaxpy(t(x), tol=lu.tol, verbose=lu.verbose) ## -------- if (!isTRUE(all.equal(LU.$p, pivot, tolerance=0))) { cat("LU.gaxpy() and Rsubsample() have different pivots:\n") print(LU.$p) print(pivot) cat(" ... are different at indices:\n ") print(which(LU.$p != pivot)) } else { stopifnot(all.equal(LU.$L, L, tol=eq.tol), all.equal(LU.$U, U, tol=eq.tol), LU.$p == pivot, ## only compare the indices selected before stopping LU.$idc[seq_along(LU.$p)] == idc[seq_along(pivot)]) } ## compare with Matrix result if (compareMatrix && z$status == 0) { xsub <- x[idc, ] stopifnot(require("Matrix")) tmp <- lu(t(xsub)) ## idx <- upper.tri(xsub, diag=TRUE) stopifnot(all.equal(tmp@x, as.vector(z$lu), tol=eq.tol)) } invisible(z) } robustbase/inst/xtraR/lmrob-trace_lev.R0000644000176200001440000000452212154325275017711 0ustar liggesusers## testing trace_lev settings require(robustbase) ## fit a model with categorical, continuous and mixed variables selDays <- c( ## days ranked according to number of outliers: "403", "407", "693", "405", "396", "453", "461", ## "476", "678", "730", "380", "406", "421", "441" ## ,"442", "454", "462", "472", "480", "488" ## some other days ## "712", "503", "666", "616", "591", "552", "624", "522", "509", "388", "606", "580", "573", "602", "686", "476", "708", "600", "567") contr <- list(julday=contr.sum) ## using this seed and the default configuration options, ## the fast_S algorithm stops with some "local exact fits", ## i.e., coefficients with std. error 0. set.seed(711) lseed <- .Random.seed r1 <- lmrob(LNOx ~ (LNOxEm + sqrtWS)*julday, NOxEmissions, julday %in% selDays, contrasts=contr, seed=lseed, max.it=10000, nResample=5, trace.lev=1) ## change best.r.s to 11 and it works properly ## (for this seed at least) res <- update(r1, k.max=10000, best.r.s = 3, nResample=1000, trace.lev=2) ##### ## fast_S (non-large strategy) ## test non-convergence warnings / trace output: res <- update(r1, max.it = 1) res <- update(r1, k.max = 1) ## test trace_levs: res <- update(r1, trace.lev = 0) res <- update(r1, trace.lev = 1) res <- update(r1, trace.lev = 2) res <- update(r1, trace.lev = 3) res <- update(r1, trace.lev = 4) res <- update(r1, trace.lev = 5) ##### ## M-S estimator r2 <- update(r1, init="M-S", split.type="fi", subsampling="simple", mts=10000) ## test non-convergence warnings / trace output: res <- update(r2, max.it = 1) res <- update(r2, k.m_s = 1) ## does not converge anyway ## test trace_levs: res <- update(r2, trace.lev = 0) res <- update(r2, trace.lev = 1) res <- update(r2, trace.lev = 2) res <- update(r2, trace.lev = 3) res <- update(r2, trace.lev = 4) ## this produces _a_lot_ of output: ## res <- update(r2, trace.lev = 5) ##### ## fast_S (large-n strategy) ## need to use continuous only design r3 <- update(r1, LNOx ~ LNOxEm + sqrtWS, subset=NULL, contrasts=NULL) ## test non-convergence warnings / trace output: res <- update(r3, max.it = 1) res <- update(r3, k.max = 1) ## test trace_levs: res <- update(r3, trace.lev = 0) res <- update(r3, trace.lev = 1) res <- update(r3, trace.lev = 2) res <- update(r3, trace.lev = 3) res <- update(r3, trace.lev = 4) ## (there is no level 5) robustbase/inst/xtraR/mcnaive.R0000644000176200001440000000443413325654361016262 0ustar liggesusersmcNaive <- function (x, method = c("h.use", "simple"), low = FALSE, high = FALSE) { ## Purpose: naive implementation of mc() ## ---------------------------------------------- ## (low, high) - as in mad() - 
for choosing the (lo/hi)-median with even n ## ## Author: Martin Maechler, Date: 21 Jul 2007 n <- length(x) if(n <= 2) return(0) x <- sort(x) stopifnot(is.finite(m <- median(x)))# <==> no NAs in x[] x <- x - m n1 <- length(xL <- x[x <= 0]) # both contain all (if any) median values n2 <- length(xR <- x[x >= 0]) n.n <- as.double(n1)*n2 if(n.n > 1e8)# 1e8 < .Machine$integer.max stop("\"simple\" method not sensible here: would need too much memory: n.n=", n.n) Mmedian <- { if ((low || high) && n.n %% 2 == 0) { if (low && high) stop("'low' and 'high' cannot be both TRUE") N2 <- n.n %/% 2 + as.integer(high) function(x) sort(x, partial = N2)[N2] } else median } method <- match.arg(method) switch(method, "simple" = { r <- outer(xR, xL, "+") / outer(xR, xL, "-") r[is.na(r)] <- 0 # simple -- ## ok only when the median-observations are "in the middle", ## e.g. *not* ok for c(-5, -1, 0, 0, 0, 1) Mmedian(r) }, "h.use" = { # use h() = h_kern() .. treating obs. coinciding with median k <- sum(x == 0) ## the number of obs coinciding with median() irep <- rep.int(n1, n2) if(k > 0) { ## have some obs. == median ( == 0) h <- function(xl,xr, i,j) { ## must parallelize (!) eq <- xl == xr r <- xl xr <- xr[!eq] xl <- xl[!eq] r [eq] <- sign(i[eq]+j[eq]-1-k) r[!eq] <- (xr + xl)/(xr - xl) r } i <- integer(n1) j <- integer(n2) i[(n1-k+1):n1] <- j[1:k] <- 1:k i <- rep(i, times = n2) j <- rep(j, irep) } else { ## k == 0: h <- function(xl,xr, i,j) (xr + xl)/(xr - xl) i <- j <- NULL } ## build outer(xL, xR, FUN= h) manually, such that ## we can pass (i,j) properly : Mmedian(h(xl = rep(xL, times = n2), xr = rep(xR, irep), i, j)) }) } robustbase/inst/xtraR/ex-funs.R0000644000176200001440000000510314004270422016202 0ustar liggesusers## These two fail when length(x) == 0 {but are short and nice otherwise} himed <- function(x) { n2 <- 1 + length(x) %/% 2; sort(x, partial = n2)[n2] } lomed <- function(x) { n2 <- (1+ length(x))%/% 2; sort(x, partial = n2)[n2] } ## From package 'limma' : ~/R/BioCore/madman/Rpacks/limma/R/weightedmedian.R weighted.median <- function (x, w, na.rm = FALSE, low = FALSE, high = FALSE) { ## Weighted median ## Gordon Smyth ## 30 June 2005 ## improved by MMaechler: 'low' and 'high' as with 'mad()'; 21 Nov 2005 if (missing(w)) w <- rep.int(1, length(x)) else { if(length(w) != length(x)) stop("'x' and 'w' must have the same length") if(any(is.na(w))) stop("NA weights not allowed") ## Note that sometimes the estimate would be well-defined even ## with some NA weights! if(any(w < 0)) stop("Negative weights not allowed") if(is.integer(w)) w <- as.numeric(w) } if(any(nax <- is.na(x))) { if(na.rm) { w <- w[i <- !nax] x <- x[i] } else return(NA) } if(all(w == 0)) { warning("All weights are zero") return(NA) } ## otherwise, have sum(w) > 0 if(is.unsorted(x)) { o <- order(x) x <- x[o] w <- w[o] } p <- cumsum(w)/sum(w) k <- sum(p < 0.5) + 1:1 if(p[k] > 0.5 || low) x[k] else if(high) x[k+1] else (x[k] + x[k+1])/2 } ##' pure-R naive version of Qn() ==> slow and large memory for large n ##' >>>>>> keep in sync with ../../man/Qn.Rd 's \examples{} <<<<<< Qn0R <- function(x, k = choose(n %/% 2 + 1, 2)) { n <- length(x <- sort(x)) if(n == 0) return(NA) else if(n == 1) return(0.) stopifnot(is.numeric(k), k == as.integer(k), 1 <= k, k <= n*(n-1)/2) m <- outer(x,x,"-")# abs not needed as x[] is sorted sort(m[lower.tri(m)], partial = k)[k] } ##' pure-R all-k version of Qn() "all-k" is O(n^2) cannot use large n QnAll.k <- function(x) { n <- length(x <- sort(x)) if(n == 0) return(NA) else if(n == 1) return(0.) 
m <- outer(x,x, `-`) sort(m[lower.tri(m)]) } Sn0R <- function(x) { ## `R only' naive version of Sn() ==> slow and large memory for large n if((n <- length(x)) == 0) return(NA) else if(n == 1) return(0.) lomed(apply(abs(outer(x,x,"-")), 2, himed)) } ## Tol = 2e-7 : higher than usual is.all.equal <- function(x,y, tol = 2e-7, scale = 1) { ## scale = 1: ensures 'absolute error' in all cases ## scale = x: ensures `relative error' in all cases is.logical(r <- all.equal(x,y, tolerance = tol, scale = scale)) && r } robustbase/inst/include/0000755000176200001440000000000014124272431015023 5ustar liggesusersrobustbase/inst/include/robustbase.h0000644000176200001440000000541113177447140017356 0ustar liggesusers#ifndef __robustbase_h__ #define __robustbase_h__ #include // for SEXP #include #ifdef __cplusplus extern "C" { #endif SEXP C_psifun(SEXP x_, SEXP c_, SEXP ipsi_, SEXP deriv_) { static SEXP(*fun)(SEXP, SEXP, SEXP, SEXP) = NULL; if (fun == NULL) fun = (SEXP(*)(SEXP, SEXP, SEXP, SEXP)) R_GetCCallable("robustbase","R_psifun"); return fun(x_, c_, ipsi_, deriv_); } SEXP C_chifun(SEXP x_, SEXP c_, SEXP ipsi_, SEXP deriv_) { static SEXP(*fun)(SEXP, SEXP, SEXP, SEXP) = NULL; if (fun == NULL) fun = (SEXP(*)(SEXP, SEXP, SEXP, SEXP)) R_GetCCallable("robustbase","R_chifun"); return fun(x_, c_, ipsi_, deriv_); } SEXP C_wgtfun(SEXP x_, SEXP c_, SEXP ipsi_) { static SEXP(*fun)(SEXP, SEXP, SEXP) = NULL; if (fun == NULL) fun = (SEXP(*)(SEXP, SEXP, SEXP)) R_GetCCallable("robustbase","R_wgtfun"); return fun(x_, c_, ipsi_); } double C_rho(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase","rho"); return fun(x_, c_, ipsi_); } double C_psi(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase","psi"); return fun(x_, c_, ipsi_); } double C_psip(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase","psip"); return fun(x_, c_, ipsi_); } double C_psi2(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase","psi2"); return fun(x_, c_, ipsi_); } double C_wgt(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase","wgt"); return fun(x_, c_, ipsi_); } double C_rho_inf(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase", "rho_inf"); return fun(x_, c_, ipsi_); } double C_normcnst(double x_, const double* c_, int ipsi_) { static double(*fun)(double, const double[], int) = NULL; if (fun == NULL) fun = (double(*)(double, const double[], int)) R_GetCCallable("robustbase", "normcnst"); return fun(x_, c_, ipsi_); } #ifdef __cplusplus } #endif #endif robustbase/inst/CITATION0000644000176200001440000000340213713564014014540 0ustar liggesuserscitHeader("To cite robustbase in publications use:") if(!exists("meta") || is.null(meta)) meta <- packageDescription("robustbase") year <- sub("-.*", "", meta$Date) note <- sprintf("R 
package version %s", meta$Version)
fullTitle <- paste(meta$Package, meta$Title, sep = ": ")
authors <- eval(parse(text = meta$`Authors@R`))
aut.gf  <- lapply(unclass(authors), `[`, c("given", "family"))
authNms <- sapply(aut.gf, paste, collapse = " ")
collNms <- function(nms)
    paste(paste(nms[-length(nms)], collapse = ", "), "and", nms[length(nms)])

citEntry(entry = "Manual",
         title = fullTitle,
         author = authors,
         year = year,
         note = note,
         url = "http://robustbase.r-forge.r-project.org/",
         url = "http://CRAN.R-project.org/package=robustbase",
         textVersion = paste(collNms(authNms), sprintf("(%s).", year), fullTitle,
                             paste(note, ".", sep = ""),
                             "URL http://CRAN.R-project.org/package=robustbase"),
         header = paste("To cite package", sQuote(meta$Package), "in publications use:")
         )

citEntry(entry = "Article",
         title = "An Object-Oriented Framework for Robust Multivariate Analysis",
         author = personList(as.person("Valentin Todorov"), as.person("Peter Filzmoser")),
         journal = "Journal of Statistical Software",
         year = "2009",
         volume = "32",
         number = "3",
         pages = "1--47",
         url = "https://www.jstatsoft.org/article/view/v032i03/",
         textVersion = paste("Valentin Todorov, Peter Filzmoser (2009).",
                             "An Object-Oriented Framework for Robust Multivariate Analysis.",
                             "Journal of Statistical Software, 32(3), 1-47.",
                             "URL https://www.jstatsoft.org/article/view/v032i03/."),
         header = "To cite the multivariate class/methods framework use:"
         )
robustbase/inst/external/0000755000176200001440000000000014124272431015222 5ustar liggesusers
robustbase/inst/external/d1k27.rda0000644000176200001440000035650511762754374016564 0ustar liggesusers
[binary payload: RDX2 (R serialization, version 2) image of the data frame 'd1k27' -- 27 variables named y, a, tb, tf, A, r2-r9 and t01-t14, 22 of which are stored as two-level factors with levels "0" and "1"; the raw bytes cannot be rendered as text and are omitted here]
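## --- Editorial note (not part of the original archive) ----------------------
## A minimal sketch of how a file shipped under inst/external/ would typically
## be read -- it is *not* a lazy-loaded dataset.  This assumes the serialized
## object is indeed named 'd1k27', as the RDX2 header above suggests.
rdaFile <- system.file("external", "d1k27.rda", package = "robustbase")
load(rdaFile)   # creates the data frame 'd1k27' in the current environment
str(d1k27)      # inspect the 27 variables described above
## -----------------------------------------------------------------------------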
@%muO7v@'::э&@'N-b8<@'Q@(X@&*A c @'`V.@(\C@(v'|E˼@)b"@(e@'x*")@( @&LB@)1&y@& @'W7@'p*@(^ F@#ef@*ݕW@'W@'> @&X@&7@&Zqx@'28@(U;'@$.2@%HӮh^@(u"@'Bw\@(¹M@'/Y@&!-w2@'ޭՐ@'$s@'ߏG0@'@ =q@&NΚ,@&ha@&vݭ@'8Ck@)%@)͉/@&l@@'g @'ѷX@'@( /@'|ϕ@(NzVC@'28@'- @&P]@&^ N@&i}@&lsG@'EwUq@')A!la@(5H@%!Z@$$/@'2<64@' gw@&'!Wi@'c@(008@(K/V@'FJ@'[\|ؘ@&A(9-@%@'Bk΅4@&8K@&FIm@(R@({@)i>BZ@'D Ri@(s@&u^޾@&޳"@)6;%o@)Kq @'klC@(@*_@&6;%p@&^ N@&bM@&ha@'Ew@&WCcH@&Y@&o@&A(9-@&@gS@'28@&6;%p@'_@@'4@&P@&^lLYt@&ܜM@(AH@)fSP -@(#@'FJ@&?z:@%nY@'ϕ@' '76@&6;%p@&e+a@''(b@(]yg@(i;yS@&c^J8@'|ϕ@'.y@&>W@&Zqx@%Ԇ-@&OD)@'ϕ@#TEJ@';D=G@(ԊԿ@'B؃@(I^5?@*j?@&@'L@(v'|E˼@&^lLYt@&D@'W7@&(9-@#Ε*@&JL@%Ԇ-@'B؃@'@(TS@'҈p@%o@%+@'K@(lj@&z\|ؙ@)9XdE@'=@'7k;<@&E˼+@&5inX@'X:S@(}^_1@$xW@&lsG@)͉/@%0եb@'[ @(ӀP@'K@'āoh@' gw@)fWx@(I^5?@'g @(_ô@$@եb@&+@'x*")@$D<@'@ =q@&aQR@&?g@*|hs@'P@'om@(@%HӮh^@'27@&ЫNP@&ѢY@+],{ Z@&A c @(@&@&ha@'nCz@(ͳ|@'^ @&>B@&^ N@(եc@&>W@&@~@&@'FJ@%^"S@(ŗN@'.y@' ǎk@&FIm@%y k~@'N-b8<@'c @'EwUq@'-@&"S @%r@'L@'7@&i}@(O?R&W@'\(@')A!la@&@% $ @'^ @'N-b8<@&z\|ؙ@%$@&`d@&P]@&T ǎ@(@4m9@&]ce@'| (x@(Iԕ*@' @'r/Y@';D=G@(DZ1@'f3@&쪸@){J#9@&P]@'ߤ?@'>Y@(X@&sRC@'0h3@(-@&sRC@&D@'_@@%y k~@(6fffff@($')@(1;@'r ě@(}@(\C@&Zqx@(L/{J@)͉/@'4@'RwkQ@&@'X:S@'gwZ@'s-@$h<ϕ@&T ǎ@'Y/n@'Jw@(AH@&^lLYt@'>Y@(>p@%y k~@(L/{J@$.2@(tqq@(3B¤@'B؃@&P]@(SE @)e;K@'ɯ@'lnm@) 27@( /@'7k;<@%!Z@(1;@&ha@)#ᆘ5@'FJ@'c@)7ޓ@(NzVC@*/ M;@$.2@&wFz@'SE @#{`I@'FJ@'1@'7@'ce@%1y|ß@'ۋq @'W7@&rq6@%M@'ѷX@'>WS@&}w1@'wkP@(ioiD@'28@&@'GM3H,@'N-b8<@(UoiDg@&hb@%r@&c^J8@)3PH@&*A c @&lsG@&D@'PfW@'|ϕ@)'n@&Y@'q Eb@'n<`@&g@(eK@'aR~R@((zc@'@&ha@'7k;<@'S@&LB@&]ce@&u!S@&ha@'|ϕ@&P@&NΚ,@'#@'9K@&ZڹY@'ߏG0@'2<64@'FJ@'x*")@&ЫNP@'< oTK@'E1L@)H>BZc@(WE84@'Ë@&NΚ,@'>Y@'ZpU@&hW @'䎊q@'ߏG0@'< oTK@'p*@(ɯ@(Hh@)JE@''=["@& @&u@(%#M@&T ǎ@'҈p@%DSMj@&P]@#Ε*@'Bk΅4@'IA!la@%˒:)@*AJM+@(O?R&W@(rd2n@&A c @'ܩiu@%r@& `V. 
@&"&r@'ߏG0@&쪸@&*8@%HӮh^@).H@(+@&9 @'.y@)s")@(L/{J@(_*ZFs@'h73@'28@'@ =q@&- @&e{z@'c@(i_@&6;%p@'nCz@'5@&9 @&6@'z$'@(,<@&ɃQX(@&P]@&LB@(\C@&*A c @&X@'P3:~@'N-b8<@(UoiDg@'x*")@&ha@(i;yS@&!-w2@&j@(@4m9@( @& (@'B؃@''(b@&=`N@'RwkQ@&E˼+@)KCW@'hr!@''(b@(AH@&f3@( 6@%!Z@&gM3H@'Bk΅4@((zc@(AH@&E˼+@&P]@)F@'hH@(L/{J@&Zqx@'7@(O?R&W@)m8Y@( @(O?R&W@'5@&6;%p@'|ϕ@'h73@'tW}W@)*cP@#Rş@'āoh@#Rş@)Z'(b@'7@'wk@(1;@)fWx@&o]@'䎊q@&*A c @*EwU@(3B¤@(a@N@)*cP@*_Ë@'@ =q@(@'+S@' ǎk@'lK[@&ЫNP@(3H+@'c@&&`V.@(L/{J@'2<64@'- @&o@&Zqx@&)Dg8~@(v'|E˼@%w#d@)KCW@&>B@)*cP@'EwUq@(:!B@%I@(U=@'.y@(As@')A!la@&g@'jS|@%v5B@&j@)Ad2@'c@(a@O@*>LX@%!Z@&A(9-@(s@)>3@'-@'6/e@(K/V@&_I@(F]c@*UDR@&1߹8R@'hA_@'Ë@(v'|E˼@'[\|ؘ@'7k;<@$6}2f@'fWx@&*A c @&@%Б}kf@)l@(¹M@'҈p@&+@&*A c @&@'*o?R@&>"@'2<64@'N-b8<@'d@'d@%!Z@& k;=@'@ =q@(K/V@&P@'@ =q@&z\|ؙ@#L7@'&]cf@'d@'[\|ؘ@&u%F @'G@%0H@@(WE84@(ӀP@&6@'EwUq@' Fd@&N@'2#@%+@'ޭՐ@&|ßa@'%@%DSMj@&?g@&e{z@'2#@(L/{J@'[\|ؘ@):=B@%>< @&޳"@&Zqx@&9 @'-@&Y >@'76g@'C]@(o;dZ@'EwUq@&@N@'7k;<@'>-b9@("S@(c 4@%Pa@&ѢY@')A!la@'&]cf@#Ε*@'g @%1y|ß@%Jt}^_@*!$Jb#@&gM3H@(L/{J@'28@'Jw@'x*")@'Q@#;Jm3@'7k;<@)+jg@'L@'!5`@'E1L@'B؃@&h`@$h<ϕ@'IW@'2<64@&@(;@' qiC@%Y@'7k;<@'EwUq@&^ N@'ѷX@(^ F@'B؃@&YE@'x*")@'3cHk@&g@&H߹8@'2<64@%y k~@*ȊG@&x@&6@'7k;<@&$Jb$@'8Ck@(Ma@&ZڹY@'- @'_@@(L/{J@'_@@(#@'< oTK@%Ԇ-@("S@&sRC@'Q@' ǎk@'W7@*GK]d@(E@&P]@'z$'@&NΚ,@&6;%p@)͉/@' :@(&V@@)^)@'̸@'VWx- <(%VW2 JP1 2?/%Z)b 4 >'1eB 2$.")+T+$I+A(4/-T-(-+P=,**,O-)   K9=d>'+;#)9+A,#*'U@ %$(-O *&+#cA&$K5c51*!3&0+1$KxX '(=YY(G(7**`,DD)"-)& d # ><#K431;"<1E 5 2(#+?$?"e6) +:*3 0 Z(4,2$',<+(6+>5N%V"71 >+*1XJ#!H&2- 0c,2!,8+:<( 4!I @7 K30,&d?-316++$'6 ,#(Kb7(-" +*%*D_ @$ 4K-5A8.# 0,,% _($-Z<<,B'd0kd(+K!3Y0+9 +-,+6PK+*E0K4='K#-)-!-4'-)* U A*-7,79K2 -(b+!-.">P 9=+UY !K.?d# b!Y,!5Z(KT##"-d$P)dG)2'" "2+)3$)+ #,7 :$$> P .(<)20R3!L&dN J//#"&.3 I3J3d.2G-'d/<1J'K( '")H"8d(% #< =,'1+_/Ld,!'Y P0(= :#$?)!56L+2*":c2,>K d9S< 2*!=32L*5eK )+046122"P',-F2 .K"6/7221:@,$M$(2-G.-#-)(T.c2 +1Fdc2*) )5!A9.P%03d,1BL)-*$XF.+( 4&P"@53$#??@??@??@ @@@@@@?@?@@ ?@ ?@@?@ ??@?@?@@???@??@?@@?@@@@ ?@@@?@@??@@@?@ @@@?@?@???@@?@??@??@?@@??@@@??@@@@?@???@ ?@ @@@???@???@?@@@ ?@???@??@@@@ @@?@??@@???@??@ ?@??@@@@@ ?@@@??@ ???@ @@??@@@@?@?@ ?@?@@????@??@?@???????@@?@?@@@ ??@???@@@@?@?@@@?@@@@@@??@@@@?@@??@?@??@@??@??@@@?@@????@??@@@@@ ???@?@????@@@@@ @@@@@ @@??@?@???@?@@@?@?@@ @@ ????@@@??@@@@@@ @@ ?@@@?@ @@??@?@??@ @@@@@?@@???@@@??@@@ ?@@@??@?@?@??@???@@@@??@??@@@@@ ?@@@ ?@@???@??????@??@???@@ @ ???@?@??@@ @@@?@??@?@@@@@?@@???????@@ @ @@?@@?@@?@?@@??@?@@?@?@@?@??@?@?@@ ?@@?@@?@????@@@??@@????@?@???@@???@@?@@???@@?????@@@@@@@?@?@ ?@?@@?@?@??@@@?@@@@ @@?@@@@??@@@??@?@?@?@?@@??@ ?@?@?@ @?@ @@ ?@???@@@?@???@??@@??@?@??@@ ????@@@@??@@???@ ?@??@?@@@?@????@??@@?@@?@?@?@@@?@@?@ @@?@@@ ???@?@????@?@@@?@???@???@ @?@@@@@@?@??@@@@@@@@??@@??@?@@@@@?@@ ?@@@@@@?@@?@???@@??@@@@?@@ @@@?@@@@?@?@ ?@@@ @??@ @@@@ ?@@@@@@??@@@???@ ?@@@ @@?@?@?@@?@@@@@??@@@@@?@@?@@@@??@???@@ @??@@?@@@@???@????@@@???@@??@???@?@?@??@@@ ????@?@?@@??@@@???@@???@ ?@@@???@@@@?@?????@??@@?@@ @?@@??@?@ ????@@?@@@@@@@@?@@?@@?@@?@@@@ @@ 4 ``  "L 5 xY u  8lrL ^ D  _k: L| n<  a D Me %/?i @x)  Y $8Q#0 .2 RLT  8thL 4F" .5SL ~@N%O a rZWt  L /O lre . X ]j eL ` PFd lrRL  Z 6fb  HOp1=XX%j~ t`te  rl'tx{ K@ :  x 0Bl H Gr~8   hALf LLhoe Z Y bi@l z: : ` KJL  i 4  Fd.e J {T D4T(dLV ( ` @'y HC"xL,$L 6@R^jt  : `, T#c . ? Y T|] # dJ x f  /x(F CH 7 r OHy=hL9Y T 1 V M~C  t.\=@ = >r` F 0@ (lmoFy5 {    @ZYL LYWe@ ~ 9( c $ f,T4  T> `r  i dX "#4 c*: ` w! L ;g KD,r$ ] Z~ G  s:4-l\ L< lTxKDzRx +-  LIfAFD|au 8I s}9q  T| H 5 ^ 1?4 z 4Lt i$'r@FUjU xp nY! 8f~ p T? =p ? 
=p ?p =q?(\)?Q?QR?Q?Q?(\)?Gz?zG{?p =q?zG{?zG{?\(?QR?p =q?Gz?Gz?ffffff??У =p?\(?333333?ə?(\)?QR?Q?ə?\(?ə?ҏ\(??Q?Q?Gz?\(?(\)?\(?\(?333333? =p ?333333?У =p?ə?(\)@ =p ?У =p?\(?ҏ\(???\(\?ə?\(?GzH?Gz?(\)?zG{?333333?θQ? =p ?zG{?޸Q?Q? =p ?(\)?p =q?QR? =p ?\(?zG{?zG{?zG{?Gz?p =q?Q?ٙ?Gz? =p ?? =p ?zG{?p =q? =p?(\)?Gz?QR?\(?ə?Q?QR?Q?Gz?QR??Gz?Q?(\)?=p =?Q?\(? =p ?Q?ə?ҏ\(?p =q??\(?Q?333333?zG{?zG{?333333?(\)?Q?\(?333333?Gz?ffffff?ffffff??(\)?ə?ə?zG{?Q?zG? =p ?У =p??У =p?=p =?ٙ?p =q?(\)?׮zG?θQ?У =p? =p?Gz?Q?У =p?(\)?QR?p =q?333333?ٙ?\(?=p =?θQ?ə?zG{?Q?zG{?Gz?GzH??\(?ə?\(?У =p?QR??\(?(\)?(\)?(\)?Gz?Q?Q?Gz?ffffff?333333??Q?QR? =p ?QR??Q?\(?\(?θQ?ҏ\(?QR?\(?Q?zG{?Gz?333333?(\)?ə?Q? =p?zG{?p =q???Q?333333?QR?Q??p =q?333333?333333?Gz?Q?ٙ?zG{?ҏ\(? =p ? =p ?Q?Q?333333?θQ?(\)?zG{?Q? =p ?333333???QR?(\)?Q?zG{?(\)?Q?Gz?Gz??Q?(\)?GzH?Q??ə? =p?333333?zG{?(\)? =p ?Gz?θQ?zG?Q?Gz?333333?θQ?\(?\(?? =p ?=p =?\(?(\)@Q?zG{?Q?zG{?Gz?\(?θQ? =p?QR? =p ?Gz?333333?Gz? =p ?333333?zG{?Q?Q?Gz?(\)? =p?Gz?ə?ə?zG{?333333?Q?\(?(\)?333333?QR?(\)? =p?θQ?׮zG?(\)?GzH??Q? =p?(\)?zG{? =p ?333333?ə?(\)?Q?p =q? =p??ə?GzH? =p?QR?ᙙ?ҏ\(???Gz?p =q?QR?\(?p =q? =p ?p =q? =p ? =p?p =q?\(?GzH?(\)?=p =?QR?zG{?Gz?θQ?QR?Gz? =p@zG???У =p?(\)?Q?Gz?Gz?Q? =p?QR?θQ?zG{?\(?Gz?zG{?(\)?p =q?У =p?Q?zG{?(\)?\(?Q? =p ?Gz?θQ?ə?\(?p =q?Q?333333?(\)?GzH? =p?ə?p =q?(\)?޸Q?p =q?Gz?zG{?? =p ?333333?Q?p =q?GzH?Gz?zG{?Gz?ҏ\(?? =p ? =p ?\(?Gz?zG{? =p ?zG{?GzH?=p =?(\)?θQ?333333?Q??zG?GzH?Q?333333?333333?zG{?Q?ə? =p?(\)? =p?QR?333333?ᙙ?У =p? =p?Gz?Q?ۅQ?\(?\(?Gz?Gz? =p ?Gz?Q?\(? =p ?У =p?ҏ\(?=p =?\(?(\)?zG{?(\)?Q??ҏ\(??Q? =p ?p =q?ə?Q?QR? =p ?Q?? =p ?Gz?Q?QR??(\)? =p ?Q?zG{?\(?У =p?Q?(\)?Gz?޸Q?θQ? =p ? =p ?\(?ə?\(?Q?(\)??ə?Gz?Q?Gz? =p ?(\)??zG{?=p =?333333?Gz? =p?Q?333333?Q? =p ??zG{?\(?GzH? =p? =p ?zG{?(\)?Q?GzH?p =q?=p =?Q??ə?(\)?Gz?zG?Q?Q?Gz?Q?zG{?p =q?Gz?zG{? =p ?Q??ffffff??Q?(\)?У =p? =p@QR?\(?zG{?GzH?Q?θQ?(\)?GzH?Q?p =q??ə??\(? =p?(\)?Gz?zG{?Gz?θQ?QR? =p ?Q? =p ?p =q?333333?Gz?zG{?QR?Q?GzH?(\)?Q?Gz? =p ?p =q?ə?QR?zG{? =p ?ə?(\)?zG{?p =q@\(\?Gz?zG{?ə?Q?QR??zG{?(\)?Gz?׮zG?zG{??\(?333333? =p ?zG{?333333?zG{?zG{?QR?޸Q?Q?ə?(\)??QR?ٙ?333333? =p?ҏ\(? =p ? =p ??ҏ\(?Gz?\(?(\)?(\)??=p =?\(?(\)?У =p?\(?Q?333333?333333? =p ?Q? =p ?ҏ\(?ə? =p?QR?Gz?(\)? =p ?(\)?p =q?zG{?θQ?Gz?ə???Q?zG{?Q?p =q? =p ?QR?\(?ə? =p?p =q?Gz?zG{? =p ? =p ?=p =?(\)?333333?p =q??333333?333333?p =q??p =q?Gz?p =q?Q?p =q? =p ?Q?ҏ\(? =p ?ə?Q? =p?Gz?\(?Gz?ə?\(?Q?(\)?zG?\(?Gz?\(? =p?Q?Gz?θQ?\(?p =q?(\)?zG{?Q?ffffff?Gz?Gz?p =q?Gz?zG{?QR?p =q??p =q?Q?θQ?Q?\(?Q?\(?Gz?Gz?陙?Gz?θQ??(\)?Q?Q?Q?\(?p =q?zG{???ٙ? =p?\(?GzH?\(?zG{?333333??zG{? =p?(\)?GzH?QR?\(?Gz?θQ??Gz?\(?GzH?ҏ\(?zG{?333333?? =p?\(?(\)? =p ?QR?Gz?Gz?QR??Q?GzH?Q?zG{? =p ?Q?У =p? =p ?\(?Q?ə?Q?θQ?\(?Gz? =p?ۅQ?θQ?Gz?\(?\(?У =p? =p ?θQ?Q?\(??ٙ?Q?У =p?ffffff?У =p??zG{?(\)?zG{?Q?Q?Q?Q?ҏ\(?θQ?zG{?? =p?zG{??У =p? =p?У =p?333333?ٙ?zG?333333?QR??p =q?QR?У =p?У =p?zG{?Q? =p ??Gz?ҏ\(? =p ??zG{??ffffff?p =q?=p =?GzH?zG{? =p?333333?(\)??Q? =p?(\)? =p ?QR?\(?(\)? =p ?\(?Q?θQ?p =q?У =p? =p?zG{?p =q?\(?zG{??QR?Q?\(?޸Q?У =p?QR? =p ?(\)? =p ? =p ?zG{?Q?陙?Q?333333?\(?GzH?ə?У =p?\(? =p ?333333?ə?333333?(\)?(\)?Q?zG{?GzH?333333?Q?333333?\(? =p ?(\)?p =q? =p ? =p?GzH?(\)?(\)?Gz? =p?p =q?(\)?Gz?333333?\(? =p ?\(? =p?ffffff?p =q?θQ?zG{? =p ?Q? =p?(\)?ə?Gz?\(?=p =?Gz?θQ? =p ?zG{?(\)?zG{?ۅQ?p =q?ə?Gz?333333?Q?Q?zG{?zG{??GzH?У =p? =p?zG{?Q?Gz?Q?333333?޸Q?\(?QR?Q? =p???ə??Q??p =q?(\)?ə?? 
robustbase/inst/NEWS.Rd0000644000176200001440000003742614124272367014463 0ustar liggesusers% Check from R:
% news(db = tools:::.build_news_db_from_package_NEWS_Rd("~/R/Pkgs/robustbase/inst/NEWS.Rd"))
\name{NEWS}
\title{News for \R Package \pkg{robustbase}}
\encoding{UTF-8}
% --- 1.0-0 if we fix adjOutlyingness() properly !
% ===== *and* fix mc() nicely according to Lukas Graz' MSc thesis !!
\section{CHANGES in robustbase VERSION 0.93-9 (2021-09-27, svn r888)}{
  \subsection{BUG FIXES}{
    \itemize{
      \item \code{Qn(x)} and \code{Sn(x)} gain an \code{na.rm = FALSE} option and now
      work when \code{x} has \code{NA}s and contains \eqn{\pm}{+/-} \code{Inf}.
    }
  }
  \subsection{Misc}{
    \itemize{
      \item No longer include \file{Rdefines.h} as it is somewhat deprecated.
      \item In \file{src/lmrob.c} add \code{USE_FC_LEN_T} and use \code{FCONE} rather
      than the older FCLEN approach.
    }
  }
}
\section{CHANGES in robustbase VERSION 0.93-8 (2021-06-01, svn r879)}{
  \subsection{NEW FEATURES}{
    \itemize{
      \item \code{scaleTau2()} gets new optional \code{iter = 1} and \code{tol.iter}
      arguments; mostly experimentally to see if or when iteration makes sense.
      \item \code{Qn(x, *)} gets new optional \code{k = .} to indicate the
      \dQuote{quantile} i.e., order statistic to be computed (with default as
      previously hard-coded).\cr
      Experimentally to try for cases where more than n/2 observations coincide
      (with the median), i.e., \code{x[i] == x0 == median(x[])}, and hence
      \code{Qn(x)} and \code{mad(x)} are zero.
      \item \code{adjOutlyingness()} gets new option \code{IQRtype = 7}.
    }
  }
  \subsection{Tweaks}{
    \itemize{
      \item For tests: *again* differences found in the non-sensical
      \code{adjOutlyingness()} example (with large p/n, hence many "random" values
      in the order of 1e15).  Disable the test for now (and record the result in *.Rout).
    }
  }
  \subsection{BUG FIXES}{
    \itemize{
      \item The \code{test()} utility in \file{tests/lmrob-ex12.R} no longer calls
      \code{matrix(x, n,4)} where the length of x does not match \code{4n}.
      Similar change in \file{tests/mc-strict.R}
    }
  }
}
\section{CHANGES in robustbase VERSION 0.93-7 (2021-01-03, svn r865)}{
  \subsection{NEW FEATURES}{
    \itemize{
      \item Use \CRANpkg{.} in most places, providing web links to the respective
      CRAN package page.
      \item \code{adjOutlyingness()} now gains optional parameters to be passed to \code{mc()}.
    }
  }
  \subsection{BUG FIXES}{
    \itemize{
      \item update the internal man page, so new \code{checkRdContents()} is happy.
      \item fix several \samp{\\url{.}}'s that now are diagnosed as \sQuote{moved}.
      \item \code{adjOutlyingness()} finally works with \code{p.samp > p}.
      \item \code{scaleTau2()} now works with \code{Inf} and very large values, and
      obeys new \code{na.rm = FALSE} argument.
      \item add \code{check.environment=FALSE} to some of the \code{all.equal()} calls
      (for 'R-devel', i.e., future R 4.1.x).
      \item \code{wgt.himedian(numeric())} now returns \code{NA} instead of occasionally
      seg.faulting or inf.looping.  Ditto for a case when called from \code{Qn()}.
} } } \section{CHANGES in robustbase VERSION 0.93-6 (2020-03-20, svn r854)}{ \subsection{NEW FEATURES}{ \itemize{ \item \code{splitFrame()} now treats \code{character} columns also as categorical (the same as \code{factor}s). } } \subsection{Tweaks}{ \itemize{ \item Small updates, also in checks for newer compiler settings, e.g., \code{FCLEN} macro; also F77_*() etc, in order to fix 'LTO' issues. \item More careful or \emph{less} calling \code{intpr()}: correct "Rank" of array (for gfortran/gcc 10, when \code{-fallow-argument-mismatch} is not set). } } } \section{CHANGES in robustbase VERSION 0.93-5 (2019-05-09, svn r840)}{ \subsection{Tweaks}{ \itemize{ \item For tests: try harder in \code{moreSessionInfo()} to detect openBLAS (should work for KH's debian openBLAS setup). } } } \section{CHANGES in robustbase VERSION 0.93-4 (2019-03-18, svn r839)}{ \subsection{BUG FIXES}{ \itemize{ \item \env{R_CHECK_LENGTH_1*} fix in default value for \code{ask} in plot methods for \code{mcd} and \code{lts} \item \file{src/rowMedians.c} gets better PROTECT()ion thanks to Tomas Kalibera's "rchk"s. } } \subsection{Tweaks}{ \itemize{ \item losen the strict tests more, convergence issues on all non-standard BLAS platforms. Now modularized the partly platform dependent testing into \code{moreSessionInfo()} in \file{./xtraR/platform-sessionInfo.R}. Further, for now use \code{RNGversion("3.5.0")} e.g. in \file{../tests/mc-strict.R}. } } } \section{CHANGES in robustbase VERSION 0.93-3 (2018-09-06, svn r827)}{ \subsection{Tweaks}{ \itemize{ \item losen the strict tests, notably in \file{tests/nlregrob-tst.R} to work around \code{nls()} convergence issues on all non-standard BLAS platforms. } } } \section{CHANGES in robustbase VERSION 0.93-2 (2018-07-26, svn r825)}{ \subsection{BUG FIXES}{ \itemize{ \item Update \code{lmrob()} to the change in R (incl 3.5.1 patched). \item \code{mc()} gets new optional \code{doScale} argument, and \emph{increased} defaults for the tolerances \preformatted{ eps1 = 1e-14, eps2 = 1e-15} such that it should converge by default in more cases. \item A \code{na.action} is now kept in \code{summary(lmrob(*))}, and when \code{print()}ing it, a note about omitted observations, e.g., because of \code{NA}'s, is made as for \code{lm()}. \item Internal \code{lmrob.weights()}: more "resistant" in case scale=0, using na.rm=TRUE (report only, no reprex). \item \code{lmrob(*, trace.lev >= 2)} now shows some information about the number of \code{find_scale()} iterations used (as these are now stored C internally).% TODO: return the maximum used to R? } } } \section{CHANGES in robustbase VERSION 0.93-1-1 (2018-07-18, svn r818)}{ \subsection{BUG FIXES}{ \itemize{ \item \file{src/robustbase.h}: \code{is_redescender} now \code{\bold{static} inline}, needed for some compilers, e.g., on ubuntu 18.04. Fixing R-forge bug(s) 6588 (and 6590, 6593), \url{https://r-forge.r-project.org/tracker/index.php?func=detail&aid=6588&group_id=59&atid=302} } } } \section{CHANGES in robustbase VERSION 0.93-1 (2018-06-20, svn r815)}{ \subsection{NEW FEATURES}{ \itemize{ \item The \sQuote{Usage:}s in the data set help pages now say \code{data(<..>, package="robustbase")}. \item The \file{lmrob_simulation} vignette now should continue to work with upcoming package \CRANpkg{ggplot2}. } } \subsection{BUG FIXES}{ \itemize{ \item \code{Mpsi(x, c, psi="huber", deriv=-1)} now gives rho(x) instead of mostly \code{Inf}. 
} } } \section{CHANGES in robustbase VERSION 0.93-0 (2018-04-21, svn r805)}{ \subsection{NEW FEATURES}{ \itemize{ \item \code{.psi.const(*, "lqq")} now also gives a \code{"constants"} attribute. \item more examples and help on Mpsi() etc functions and tuning constants. \item The S estimator \code{lmrob.S()} and M-S estimator now both make use of the new \code{lmrob.control()} argument \code{scale.tol} which defaults to \code{1e-10}, its formerly hardwired value. \item \code{lmrob.S()} further gets a new option \code{only.scale = FALSE}, which when true allows to only compute the S scale estimate. In that case, but also generally, \code{trace.lev = 3} or larger also produces output showing the C level \code{find_scale()} iterations. \item (By Manuel Koller) There's now a small C API to call our \code{Mpsi()} etc from C code in other packages, as \code{C_psi()}, etc; using new \file{../inst/include/robustbase.h}. \item \code{nlrob()$call$algorithm} now always contains the algorithm used as a \code{\link{character}} string, compatibly with \code{\link{nls}()}. \item new data set \code{steamUse}. %% which also depicts the \dQuote{non-robust} behavior of \code{lmrob(setting="KS2014")} \item Vignette \file{lmrob_simulation.Rnw}: fixed the wrong \dQuote{emprical power} plots; with faster \CRANpkg{ggplot2}, remove all \code{eval=FALSE} for plots and longer store the \file{*.pdf}s. \code{nlrob()} gets \code{model} option to ask for the \code{model.frame} to be returned. } } \subsection{BUG FIXES}{ \itemize{ \item \code{lmrob(..., method = "S")} no longer necessarily produces a warning in \code{.vcov.w()}. \item \code{nlrob()} returns a correct \code{dataClasses} component. } } \subsection{Tests Tweaks}{ \itemize{ \item For use in non-R-internal BLAS/Lapack libraries, several \file{tests/*.R} examples have been tweaked. } } } \section{CHANGES in robustbase VERSION 0.92-8 (2017-10-30, svn r778)}{ \subsection{NEW FEATURES}{ \itemize{ \item \code{fullRank()} utility for \code{adjOutlyingness}: \item \code{adjOutlyingness()}, gets new options \code{p.samp} and \code{trace.lev}, and when it fails to find enough good directions, now checks the rank and mentions \code{fullRank()} in case the matrix is (QR-)rank deficient. \item The \code{"lmrob"} method for \code{vcov()} gets optional argument \code{complete = TRUE}, where only \emph{complete = FALSE} is back compatible. \item improved (error) messages in singular case in \code{.vcov.avar1()}. \item \code{.psi.const()} is exported as well, and help is improved about using and setting non-default psi tuning constants. } } \subsection{BUG FIXES}{ \itemize{ \item loosened some regression test tolerances (for alternatives to BLAS) in \file{tests/(mc-strict|poisson-ex)}. \item \code{scaleTau2(x, *)} gains an optional \code{sigma0} argument and now returns zero instead of \code{NaN} when \code{sigma0 == 0}. } } } \section{CHANGES in robustbase VERSION 0.92-7 (2016-11-23, svn r742)}{ \subsection{NEW FEATURES}{ \itemize{ \item \code{vignette(psi_functions)} \item \code{adjOutlyingness(..., maxit.mult = max(100, p))} new option, allowing more iterations for finding \dQuote{good} projection directions. } } \subsection{BUG FIXES}{ \itemize{ \item \code{summary(nlrob(*, method = "CM"))} now works. \item \code{lmrob..MM..fit()} now works again when \code{x} and \code{y} are specified, but neither \code{method} nor \code{obj} is. 
} } } \section{CHANGES in robustbase VERSION 0.92-6 (2016-05-28, svn r717)}{ \subsection{NEW FEATURES}{ \itemize{ \item Now provide an \code{"lmrob"} method for the standard \R generic function \code{hatvalues()}, and also export its lower level workhorse \code{.lmrob.hat()} (formerly hidden \code{lmrob.leverages()}), which now by default has \code{names(.)}. } } \subsection{BUG FIXES}{ \itemize{ \item \code{.lmrob.hat()} (formerly \code{lmrob.leverages()}) has been corrected for the rank-deficient case. \item \code{classPC(m)} now also works for a 1-column matrix. } } } \section{CHANGES in robustbase VERSION 0.92-5 (2015-07-21, svn r698)}{ \subsection{NEW FEATURES}{ \itemize{ \item \emph{Hidden} \code{print()} methods \code{print.summary.lmrob()} and \code{print.lmrob.S()} get a \code{showAlgo = TRUE} argument which the user can set to FALSE in order to suppress printing of the \dQuote{Algorithmic parameters}. \item import (remaining parts) from "base" packages. \item \code{summary()} now also prints a summary on the residuals. } } \subsection{BUG FIXES}{ \itemize{ \item \code{summary(lmrob(.))}'s variance-covariance matrix is now called \code{cov} instead of \code{cov.unscaled} (because it \emph{is} scaled). Code which has been using \code{vcov()} or \code{ $ cov}, or even \code{ $ cov} is not affected. } } } \section{CHANGES in robustbase VERSION 0.92-4 (2015-06-04, svn r696)}{ \subsection{NEW FEATURES}{ \itemize{ \item Started this \file{NEWS.Rd} file, to eventually replace the \file{ChangeLog} \item \code{plot.lmrob()} also identifies largest residuals as \code{plot.lm()}. Also gets new argument \code{panel}, and \code{add.smooth=TRUE} behavior. \item adapt to the fact that R 3.3.0 will have its own \code{sigma()} S3 generic. \item setup for having message translations (volunteers sought!). } } \subsection{BUG FIXES}{ \itemize{ \item more careful in \file{../src/mc.c} (valgrind, thanks to Brian) \item add missing documentation, better examples for \code{predict.lmrob} \item \code{warn.limit.*} checks in \code{lmrob*()} } } } \section{CHANGES in robustbase VERSION 0.92-3 (2015-01-14, svn r679)}{ \subsection{NEW FEATURES}{ \itemize{ \item The \sQuote{Co-Median} \code{covComed()} from Maria Anna, tweaked by Valentin and modified considerably by Martin. \item Also document (and export) \code{r6pack()} utility. \item New \code{smoothWgt()} function --- \dQuote{Biweight on a Stick} --- to be used as \code{wgtFUN} option for \code{covMcd()} or \code{covComed()}. \item New utility \code{colMedians()} and \code{rowMedians}, as we use columnwise medians in so many places. } } \subsection{BUG FIXES}{ \itemize{ \item Tweaks to \code{medcouple()}, after detecting flaws -- which may be inherent and have \emph{not} been removed. \item Improved its documentation and the \code{adjOutlyingness()} one, notably its \dQuote{central} case. } } } \section{CHANGES in robustbase VERSION 0.92-2 (2014-11-22, svn r660)}{ \subsection{BUG FIXES}{ \itemize{ \item \code{covMcd()} with new options (\code{kmini}, \code{nmini}) now ok (sometimes wrong in 0.92-1). } } } \section{CHANGES in robustbase VERSION 0.92-1 (2014-11-18)}{ \subsection{NEW FEATURES}{ \itemize{ \item The deterministic MCD, via \code{covMcd(..., nsamp="deterministic")}. } } \subsection{BUG FIXES}{ \itemize{ \item \code{adjOutlyingness()}: reverse the defaults of \code{clower} and \code{cupper} and fix an \dQuote{eternal} erronous \eqn{\pm}{+/-} swap; see new note in \file{man/adjOutlyingness.Rd}. 
} } } \section{CHANGES in robustbase VERSION 0.92-0 (2014-11-18)}{ \subsection{NEW FEATURES}{ \itemize{ \item \code{nlrob()} now works with indexed vector parameters. \item new \code{outlierStats()} (Manuel). } } \subsection{BUG FIXES}{ \itemize{ \item got rid of Fortran compiler warnings about \bold{ancient} style. \item \code{nlrob(*, weigths)}, fixing R-forge bug #5988. \item \code{covMcd()} fix for \dQuote{MAD = 0} case (new \code{exactfit} code 3). } } } \section{CHANGES in robustbase VERSION 0.91-1 (2014-05-01)}{ \subsection{BUG FIXES}{ \itemize{ \item . } } } \section{CHANGES in robustbase VERSION 0.91-0 (2014-04-24)}{ \subsection{NEW FEATURES}{ \itemize{ \item . } } \subsection{BUG FIXES}{ \itemize{ \item . } } } \section{CHANGES in robustbase VERSION 0.90-1 (2014-01-30)}{ \subsection{NEW FEATURES}{ \itemize{ \item . } } \subsection{BUG FIXES}{ \itemize{ \item . } } } \section{CHANGES in robustbase VERSION 0.9-8 (2013-06-14)}{ \subsection{NEW FEATURES}{ \itemize{ \item . } } \subsection{BUG FIXES}{ \itemize{ \item . } } } robustbase/inst/po/0000755000176200001440000000000014124272431014016 5ustar liggesusersrobustbase/inst/po/de/0000755000176200001440000000000014124272431014406 5ustar liggesusersrobustbase/inst/po/de/LC_MESSAGES/0000755000176200001440000000000014124272431016173 5ustar liggesusersrobustbase/inst/po/de/LC_MESSAGES/robustbase.mo0000644000176200001440000000513114107540744020707 0ustar liggesusersl6G`(v4- 1?7q-M0%$V{  %##GWcO <$=a!3#;3U-^5 (L #u $ $ # ) '1     Problem determining optimal block size, using minimum Skipping design matrix equilibration (DGEEQU): row %i is exactly zero.'deriv'=%d is invalidArgument '%s' must be numeric or integerArgument '%s' must be numeric or integer of length 1Argument 'cc' must be numericArgument 'naRm' must be either TRUE or FALSE.Argument 'x' must be a matrix.Argument 'x' must be numeric (integer or double).DGEEQU: column %i of the design matrix is exactly zero.Error from Rdqags(psi_ggw*, k, ...): ier = %iFast S large n strategy failed. Use control parameter 'fast.s.large.n = Inf'.m_s_subsample() stopped prematurely (scale < 0).normcnst(): ipsi=%d not implemented.psi(): ipsi=%d not implemented.psi2(): ipsi=%d not implemented.psip(): ipsi=%d not implemented.rho(): ipsi=%d not implemented.rho_ggw(): case (%i) not implemented.rho_inf(): ipsi=%d not implemented.Project-Id-Version: robustbase 0.93-9 Report-Msgid-Bugs-To: PO-Revision-Date: 2021-08-19 22:26+0200 Last-Translator: FULL NAME Language-Team: LANGUAGE Language: de MIME-Version: 1.0 Content-Type: text/plain; charset=utf-8 Content-Transfer-Encoding: 8bit Es gab ein Problem beim Bestimmen der optimalen Blockgrösse, wir verwenden das MinimumÄquilibrierung der Designmatrix weggelassen (DGEEQU): Zeile %i ist exakt Null.'deriv'=%d ist ungültigArgument '%s' muss numerisch ('numeric' oder 'integer') seinArgument '%s' muss numerisch oder Ganzzahle der Länge 1 seinArgument 'cc' muss numerisch seinArgument 'naRm' muss entweder TRUE oder FALSE sein.Argument 'x' muss eine Matrix sein.Argument 'x' muss numerisch ('integer' oder 'double') sein.DGEEQU: Spalte %i der Design Matrix ist exact Null.Fehler von Rdqags(psi_ggw*, k, ...): ier = %iDie 'Fast S large n' Strategie hat versagt. 
robustbase/inst/po/de/LC_MESSAGES/R-robustbase.mo0000644000176200001440000000630114107540744021106 0ustar liggesusers[binary gettext message catalog: compiled German R-level messages; source is po/R-de.po below]
robustbase/inst/po/en@quot/0000755000176200001440000000000014124272431015431 5ustar liggesusers
robustbase/inst/po/en@quot/LC_MESSAGES/0000755000176200001440000000000014124272431017216 5ustar liggesusers
robustbase/inst/po/en@quot/LC_MESSAGES/robustbase.mo0000644000176200001440000001025214107540744021732 0ustar liggesusers[binary gettext message catalog: auto-generated en@quot C-level messages; see po/robustbase.pot below]
robustbase/inst/po/en@quot/LC_MESSAGES/R-robustbase.mo0000644000176200001440000011104214107540744022130 0ustar liggesusers[binary gettext message catalog: auto-generated en@quot R-level messages; see po/R-robustbase.pot below]
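The binary catalogs under inst/po/ above are compiled from the PO sources in po/ below; for this package that step is driven by po/update-me.sh through tools::update_pkg_po(). As a minimal hand-run sketch only -- msgfmt is the standard gettext compiler used behind the scenes, not a script shipped in this archive, and the paths are simply the ones listed in this tarball:

# compile the German R-level catalog into its installed location (illustrative)
msgfmt -c -o inst/po/de/LC_MESSAGES/R-robustbase.mo po/R-de.po
# then try the translations interactively, as po/update-me.sh suggests
LANGUAGE=de R --no-environ --no-save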
robustbase/po/0000755000176200001440000000000014124272431013041 5ustar liggesusers
robustbase/po/update-me.sh0000754000176200001440000000152612533543452015272 0ustar liggesusers
#!/bin/sh
#
#__>> Keep in sync with ~/R/Pkgs/Matrix/po/update-me.sh <<__
#
## Script for updating package-specific *.pot files
## written such that it should work for any package
#
R=${R:-R}
thisdir=`dirname $0` ; cd $thisdir; thisdir=`pwd`
echo "R = '$R' (`$R --version | head -1`) preliminary thisdir='$thisdir'"
pkgDIR=`dirname $thisdir`
pkg=`basename $pkgDIR`
echo ' --> pkgDIR='$pkgDIR' ; pkg='$pkg
echo "require('tools'); update_pkg_po('$pkgDIR')" | $R --slave 2>&1 | tee update.log
## -------------------------------- as of R 3.0.0
echo 'end{make pkg-update}' ; echo ''
echo 'Test with (e.g.)'
echo ' LANGUAGE=de R --no-environ --no-save' ; echo ''
echo 'and then something like'
echo ' ... [TO BE ADDED] ...'; echo ''
echo 'Commit with something like'
echo " svn ci -m'translation updates' po inst/po"; echo ''
robustbase/po/robustbase.pot0000644000176200001440000000706114107540744015750 0ustar liggesusers
# SOME DESCRIPTIVE TITLE.
# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
# This file is distributed under the same license as the robustbase package.
# FIRST AUTHOR , YEAR.
#
#, fuzzy
msgid ""
msgstr ""
"Project-Id-Version: robustbase 0.93-9\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2021-08-19 20:45+0200\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME \n"
"Language-Team: LANGUAGE \n"
"Language: \n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=CHARSET\n"
"Content-Transfer-Encoding: 8bit\n"

#: lmrob.c:225
msgid " Problem determining optimal block size, using minimum"
msgstr ""

#: lmrob.c:262
#, c-format
msgid "DGELS: illegal argument in %i. argument."
msgstr ""

#: lmrob.c:269
#, c-format
msgid ""
"DGELS: weighted design matrix not of full rank (column %d).\n"
"Use control parameter 'trace.lev = 4' to get diagnostic output."
msgstr ""

#: lmrob.c:290
#, c-format
msgid "DGEEQ: illegal argument in %i. argument"
msgstr ""

#: lmrob.c:293
msgid ""
"Fast S large n strategy failed. Use control parameter 'fast.s.large.n = Inf'."
msgstr ""

#: lmrob.c:295
#, c-format
msgid "DGEEQU: column %i of the design matrix is exactly zero."
msgstr "" #: lmrob.c:300 #, c-format msgid " Skipping design matrix equilibration (DGEEQU): row %i is exactly zero." msgstr "" #: lmrob.c:460 msgid "m_s_subsample() stopped prematurely (scale < 0)." msgstr "" #: lmrob.c:560 lmrob.c:561 lmrob.c:599 lmrob.c:600 lmrob.c:627 lmrob.c:628 #: rob-utils.c:62 #, c-format msgid "Argument '%s' must be numeric or integer" msgstr "" #: lmrob.c:582 lmrob.c:613 #, c-format msgid "'deriv'=%d is invalid" msgstr "" #: lmrob.c:642 msgid "Argument 'cc' must be numeric" msgstr "" #: lmrob.c:643 msgid "Argument 'ipsi' must be integer" msgstr "" #: lmrob.c:655 #, c-format msgid "rho_inf(): ipsi=%d not implemented." msgstr "" #: lmrob.c:685 #, c-format msgid "normcnst(): ipsi=%d not implemented." msgstr "" #: lmrob.c:715 #, c-format msgid "rho(): ipsi=%d not implemented." msgstr "" #: lmrob.c:734 #, c-format msgid "psi(): ipsi=%d not implemented." msgstr "" #: lmrob.c:752 #, c-format msgid "psip(): ipsi=%d not implemented." msgstr "" #: lmrob.c:774 #, c-format msgid "psi2(): ipsi=%d not implemented." msgstr "" #: lmrob.c:1204 #, c-format msgid "rho_ggw(): case (%i) not implemented." msgstr "" #: lmrob.c:1245 #, c-format msgid "Error from Rdqags(psi_ggw*, k, ...): ier = %i" msgstr "" #: lmrob.c:2176 #, c-format msgid "S refinements did not converge (to refine.tol=%g) in %d (= k.max) steps" msgstr "" #: lmrob.c:2335 #, c-format msgid "m_s_descent(): Problem in RLLARSBI (RILARS). KODE=%d. Exiting." msgstr "" #: lmrob.c:2379 msgid " M-S estimate: maximum number of refinement steps reached." msgstr "" #: lmrob.c:2468 msgid "subsample(): could not find non-singular subsample." msgstr "" #: lmrob.c:2517 msgid "" "Too many singular resamples. Aborting subsample().\n" " See parameter 'subsampling; in help of lmrob.config()." msgstr "" #: lmrob.c:2576 #, c-format msgid "find_scale(*, initial_scale = %g) -> final scale = 0" msgstr "" #: lmrob.c:2591 #, c-format msgid "" "find_scale() did not converge in '%s' (= %d) iterations with tol=%g, last " "rel.diff=%g" msgstr "" #: rob-utils.c:63 rob-utils.c:64 #, c-format msgid "Argument '%s' must be numeric or integer of length 1" msgstr "" #: rowMedians.c:42 msgid "Argument 'x' must be a matrix." msgstr "" #: rowMedians.c:46 msgid "Argument 'naRm' must be either TRUE or FALSE." msgstr "" #: rowMedians.c:71 msgid "Argument 'x' must be numeric (integer or double)." msgstr "" robustbase/po/R-de.po0000644000176200001440000005506314107540744014207 0ustar liggesusersmsgid "" msgstr "" "Project-Id-Version: robustbase 0.93-9\n" "POT-Creation-Date: 2021-08-19 22:34\n" "PO-Revision-Date: 2021-08-19 22:39+0200\n" "Last-Translator: Martin Maechler \n" "Language-Team: LANGUAGE \n" "Language: \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" msgid "some terms will have NAs due to the limits of the method" msgstr "Einige Terme werden NAs enthalten wegen Beschränkung der Methode" msgid "y is not onedimensional" msgstr "y ist nicht eindimensional" msgid "Number of observations in x and y not equal" msgstr "" msgid "no intercept in the model" msgstr "Kein Achsenabschnitt ('intercept') im Modell" msgid "All observations have missing values!" msgstr "Alle Beobachtungen haben fehlende Werte!" msgid "Implosion: sigma1=%g became too small" msgstr "Implosion: sigma1=%g ist zu klein geworden" msgid "Convergence Achieved" msgstr "Konvergenz erreicht" msgid "No convergence in %d steps." msgstr "Keine Konvergenz in %d Schritten." 
msgid "maximum number of \"kstep\" iterations must be > 0" msgstr "Maximale Anzahl von \"kstep\" Iterationen muss > 0 sein" msgid "maximal number of *inner* step halvings must be > 0" msgstr "Maximale Anzahl von *inneren* Schritthalbierungen muss > 0 sein" msgid "value of the tuning constant c ('const') must be > 0" msgstr "Die Tuning Konstante c ('const') muss positiv sein" msgid "non-trivial prior 'weights' are not yet implemented for \"BY\"" msgstr "nicht-triviale apriori Gewichte ('weights') sind noch nicht implementiert for \"BY\"" msgid "'start' cannot yet be passed to glmrobBY()" msgstr "" msgid "'offset' is not yet implemented for \"BY\"" msgstr "" msgid "'weights.on.x = \"%s\"' is not implemented" msgstr "" msgid "glm(.) {inner subsample} error:" msgstr "" msgid "betaExacto glm(.) error:" msgstr "" msgid "value of acc must be > 0" msgstr "" msgid "number of subsamples must be > 0" msgstr "" msgid "maximum number of iterations must be > 0" msgstr "" msgid "value of the tuning constant c (cw) must be > 0" msgstr "" msgid "Currently, only family 'poisson' is supported for the \"MT\" estimator" msgstr "Momentan ist nur die 'poisson' Familie unterstützt beim \"MT\" Schätzer" msgid "All weights must be positive" msgstr "Alle Gewichte müssen positiv sein" msgid "non-trivial 'offset' is not yet implemented" msgstr "nicht-trivialer 'offset' ist noch nicht implementiert" msgid "NAs in V(mu)" msgstr "NAs in V(mu)" msgid "0s in V(mu)" msgstr "Nullen ('0') in V(mu)" msgid "'start' must be an initial estimate of beta, of length %d" msgstr "'start' muss eine Startschätzung sein von beta, der Länge %d" msgid "optim(.) non-convergence:" msgstr "" msgid "'X' must have at least two columns" msgstr "'X' muss mindestens zwei Spalten haben" msgid "sigma0 =" msgstr "" msgid "==> scaleTau2(.) = 0" msgstr "" msgid "invalid first argument" msgstr "ungültiges erstes Argument" msgid "'formula' missing or incorrect" msgstr "'formula' fehlt oder ist ungültig" msgid "'coef' must not be negative" msgstr "'coef' darf nicht negativ sein" msgid "More dimensions than observations, currently not implemented" msgstr "Mehr Dimensionen als Beobachtungen; momentan nicht implementiert" msgid "More dimensions than observations: not yet implemented" msgstr "Mehr Dimensionen als Beobachtungen; das ist noch nicht implementiert" msgid "Matrix 'x' is not of full rank: rankM(x) =" msgstr "" msgid "< p =" msgstr "" msgid "Use fullRank(x) instead" msgstr "Verwende stattdessen fullRank(x)" msgid "" "** direction sampling iterations were not sufficient. Maybe try increasing " "'maxit.mult'" msgstr "" msgid "the following arguments to 'anova.glmrob' are invalid and" msgstr "" msgid "dropped:" msgstr "" msgid "anova.glmrob() only works for 'glmrob' objects" msgstr "" msgid "'Anova Table' for a single model object not yet implemented" msgstr "" msgid "Not the same response used in the fitted models" msgstr "Nicht dieselbe Zielvariable verwendet in den angepassten Modellen" msgid "models were not all fitted to the same size of dataset" msgstr "Modelle wurden nicht alle auf die gleich grossen Datensätze angepasst" msgid "Not the same method used for fitting the models" msgstr "Nicht die gleiche Methode verwendet um die Modelle anzupassen" msgid "Not the same tuning constant c used in the robust fits" msgstr "" msgid "Models are not nested!" 
msgstr "" msgid "Models are not strictly nested" msgstr "" msgid "This family is not implemented" msgstr "" msgid "some eigenvalues are negative" msgstr "" msgid "non-implemented test method:" msgstr "" msgid "for fitting method" msgstr "" msgid "the following arguments to 'anova.lmrob' are invalid and" msgstr "" msgid "For test = 'Deviance', the estimator chain has to end with 'M'" msgstr "" msgid "The first object does not contain the largest model" msgstr "" msgid "All models are refitted except the largest one" msgstr "" msgid "anova.lmrob() only works for 'lmrob' objects" msgstr "" msgid "'Anova Table' for a single model not yet implemented" msgstr "" msgid "invalid 'test'" msgstr "" msgid "Please fit the nested models by lmrob" msgstr "" msgid "NA coefs in full and reduced model do not match" msgstr "" msgid "test" msgstr "" msgid "not yet implemented" msgstr "" msgid "deriv must be in {0,1,2}" msgstr "" msgid "deriv must be in {-1,0,1,2}" msgstr "" msgid "'x' must be a numeric matrix" msgstr "" msgid "The sample size must be greater than 1 for svd" msgstr "" msgid "'wgtFUN' must be a function or a string specifying such a function" msgstr "" msgid "invalid 'seed'. Must be compatible with .Random.seed !" msgstr "" msgid "Invalid number of trials nsamp =" msgstr "" msgid "!" msgstr "" msgid "n <= p -- you can't be serious!" msgstr "" msgid "n == p+1 is too small sample size for MCD" msgstr "" msgid "n < 2 * p, i.e., possibly too small sample size" msgstr "" msgid "Sample size n < h(alpha; n,p) := size of \"good\" subsample" msgstr "" msgid "subsample size\t h < n/2 may be too small" msgstr "" msgid "'wgtFUN' must be a function or one of the strings %s." msgstr "" msgid "Unexpected 'exactfit' code" msgstr "" msgid ". Please report!" msgstr "" msgid "illegal 'singularity$kind'" msgstr "" msgid "Options 'best' and 'exact' not allowed for n greater than 2*nmini-1 =" msgstr "" msgid "" ".\n" "Using default." msgstr "" msgid "'nsamp = \"best\"' allows maximally" msgstr "" msgid "" "subsets;\n" "computing these" msgstr "" msgid "This may take a" msgstr "" msgid "very" msgstr "" msgid "long time!" msgstr "" msgid "Invalid number of trials nsamp=%s. Using default nsamp=%d." msgstr "" msgid "nsamp > i_max := maximal integer -- not allowed;" msgstr "" msgid "set to i_max =" msgstr "" msgid "Use only with 'mcd' objects" msgstr "" msgid "x is not a numeric dataframe or matrix." msgstr "" msgid "argument 'm.cov' must have numeric components 'center' and 'cov'" msgstr "" msgid "Data set and provided center have different dimensions!" msgstr "" msgid "The covariance matrix is singular!" msgstr "" msgid "id.n" msgstr "" msgid "must be in {1,..," msgstr "" msgid "}" msgstr "" msgid "For tolerance ellipses the dimension 'p' must be 2!" msgstr "" msgid "'hsets.init' must be a h' x L matrix (h' >= h) of observation indices" msgstr "" msgid "'full.h' is true, but 'hsets.init' has less than n rows" msgstr "" msgid "'hsets.init' must be in {1,2,...,n}; n =" msgstr "" msgid "More than h of the observations lie on a hyperplane." msgstr "" msgid "" "original detmcd() wrongly declared c-step convergence (obj=%g, i=%d, j=%d)" msgstr "" msgid "Invalid scalefn='%s': must be function or a valid string" msgstr "" msgid "'%s' must be a function, numeric vector of length p, or NULL" msgstr "" msgid "provide better scale; must be all positive" msgstr "" msgid "More than half of the observations lie on a hyperplane." 
msgstr "" msgid "'%s' is not a valid family (see ?family)" msgstr "" msgid "Robust GLM fitting not yet implemented for family %s" msgstr "" msgid "'weights' must be non-negative" msgstr "" msgid "" "Number of offsets is %d, should rather equal %d (number of observations)" msgstr "" msgid "'weights.on.x' must be a string, function, list or numeric n-vector" msgstr "" msgid "'start' must be a numeric vector, NULL, or a character string" msgstr "" msgid "invalid 'start' string" msgstr "" msgid "For method 'cubif', use glmRob() from package 'robust'" msgstr "" msgid "method='%s' is only applicable for binomial family, but family=\"\"" msgstr "" msgid "invalid 'method':" msgstr "" msgid "setting 'y = FALSE' has no longer any effect" msgstr "" msgid "" "No weights defined for this object. Use type=\"robustness\" argument to get " "robustness weights." msgstr "" msgid "need non-robust working residuals for this model type" msgstr "" msgid "invalid 'variant':" msgstr "" msgid "Weighting method" msgstr "" msgid "is not implemented" msgstr "" msgid "" "if a list, weights.on.x must contain a covariance function such as covMcd()" msgstr "" msgid "weights.on.x needs %d none-negative values" msgstr "" msgid "All weights.on.x must be none negative" msgstr "" msgid "'offset' not fully implemented" msgstr "" msgid "illegal 'family' argument" msgstr "" msgid "start 'theta' has still NA's .. badly singular x" msgstr "" msgid "Cannot find valid starting values: You need help" msgstr "" msgid "family '%s' not yet implemented" msgstr "" msgid "NAs in d(mu)/d(eta)" msgstr "" msgid "Non-finite coefficients at iteration" msgstr "" msgid "Algorithm did not converge" msgstr "" msgid "fitted probabilities numerically 0 or 1 occurred" msgstr "" msgid "fitted rates numerically 0 occurred" msgstr "" msgid "Only 'test.acc = \"coef\"' is currently implemented" msgstr "" msgid "value of the tuning constant c (tcc) must be > 0" msgstr "" msgid "Std.error computation not yet available for the case of 'weights'" msgstr "" msgid "negative scale 's'" msgstr "" msgid "scale 's' is zero -- returning initial 'mu'" msgstr "" msgid "scale MAD is zero for this sample" msgstr "" msgid "calculations stopped prematurely in rllarsbi" msgstr "" msgid "(probably because of rounding errors)." msgstr "" msgid "unknown split type" msgstr "" msgid "No categorical variables found in model. Reverting to S-estimator." msgstr "" msgid "No continuous variables found in model. Reverting to L1-estimator." msgstr "" msgid "'psi' should be one of %s" msgstr "" msgid "invalid 'psi'=%s; possibly use .regularize.Mpsi(%s)" msgstr "" msgid "Unknown setting '" msgstr "" msgid "'. Using defaults." msgstr "" msgid "'mf' is unused and deprecated" msgstr "" msgid "Initial estimator '%s' not supported; using S-estimator instead" msgstr "" msgid "" ".vcov.avar1 can only be used when initial estimator is S; using .vcov.w " "instead" msgstr "" msgid "only M and D are steps supported after \"init\" computation" msgstr "" msgid "%s-step did NOT converge. 
Returning unconverged %s-estimate" msgstr "" msgid ":.vcov.w: cov.hubercorr must be logical (or NULL)" msgstr "" msgid ":.vcov.w: cov.corrfact must be one of" msgstr "" msgid ":.vcov.w: cov.dfcorr must be one of" msgstr "" msgid "ignoring cov.resid == 'final' since est != final" msgstr "" msgid "cov.resid must be one of" msgstr "" msgid ":.vcov.w: cov.xwx must be logical (or NULL)" msgstr "" msgid "parameter psi is not defined" msgstr "" msgid "parameter 'tuning.psi' is not numeric" msgstr "" msgid ":.vcov.w: scale missing, using D scale" msgstr "" msgid "option 'cov.hubercorr' is ignored for cov.corrfact = \"asympt\"" msgstr "" msgid ":.vcov.w: unsupported psi function" msgstr "" msgid "(tau / hybrid / tauold): tau not found in 'obj'" msgstr "" msgid ":.vcov.w: Caution. Some psi'() are NA" msgstr "" msgid ":.vcov.w: unsupported psi for \"hybrid\" correction factor" msgstr "" msgid "invalid 'cov.dfcorr':" msgstr "" msgid ".vcov.avar1() supports only SM or MM estimates" msgstr "" msgid "" "initial estimate residuals length differs from final ones. Typically must " "refit w/ lmrob()" msgstr "" msgid "X'WX is almost singular. Consider using cov = \".vcov.w\"" msgstr "" msgid "X'WX is singular. Rather use cov = \".vcov.w\"" msgstr "" msgid "fixing" msgstr "" msgid "negative eigen([" msgstr "" msgid "])values" msgstr "" msgid "" ".vcov.avar1: negative diag() fixed up; consider 'cov=\".vcov.w.\"' " "instead" msgstr "" msgid "invalid 'posdef.meth':" msgstr "" msgid "'control$n.group' must be larger than 'p' for 'large_n' algorithm" msgstr "" msgid "'groups * n.group' must be smaller than 'n' for 'large_n' algorithm" msgstr "" msgid "'control$n.group' is not much larger than 'p', probably too small" msgstr "" msgid "C function R_lmrob_S() exited prematurely" msgstr "" msgid "S-estimated scale == 0: Probably exact fit; check your data" msgstr "" msgid "lmrob..D..fit: control is missing" msgstr "" msgid "lmrob..D..fit: prior estimator did not converge, stopping" msgstr "" msgid "lmrob..D..fit: robustness weights undefined" msgstr "" msgid "lmrob..D..fit: residuals undefined" msgstr "" msgid "lmrob..D..fit: parameter psi is not defined" msgstr "" msgid "lmrob..D..fit: parameter tuning.psi is not numeric" msgstr "" msgid "control is missing" msgstr "" msgid "'control' is missing" msgstr "" msgid "internal logic error in psi() function name:" msgstr "" msgid "Please report!" msgstr "" msgid "argument 'psi' must be a string (denoting a psi function)" msgstr "" msgid "tuning constant 'cc' is not numeric" msgstr "" msgid "Coefficients for" msgstr "" msgid "function incorrectly specified." msgstr "" msgid "" "Use c(minimal slope, b, efficiency, breakdown point) [6 hard-coded special " "cases]" msgstr "" msgid "or c(0, a,b,c, max_rho) as from .psi.const(" msgstr "" msgid ", cc)." msgstr "" msgid "Use c(minimal slope, b, efficiency, breakdown point) [2 special cases]" msgstr "" msgid "or c(b, c, s) as from .psi.const(" msgstr "" msgid "Coef. for Hampel psi function not of length 3" msgstr "" msgid "Coef. 
for psi function" msgstr "" msgid "not of length 1" msgstr "" msgid "tuning constants for ggw psi: both eff and bp specified, ignoring bp" msgstr "" msgid "neither breakdown point 'bp' nor efficiency 'eff' specified" msgstr "" msgid "unable to find constants for \"ggw\" psi function: %s" msgstr "" msgid "" "tuning constants for \"lqq\" psi: both 'eff' and 'bp' specified, ignoring " "'bp'" msgstr "" msgid "Error: neither breakdown point nor efficiency specified" msgstr "" msgid "unable to find constants for \"lqq\" psi function: %s" msgstr "" msgid "method for psi function" msgstr "" msgid "not implemented" msgstr "" msgid "tuning parameter (chi/psi) is not numeric" msgstr "" msgid "need non-negative number of nodes" msgstr "" msgid "unknown setting for 'subsampling': %s" msgstr "" msgid "" "'epsw' must be numeric(1) or a function of nobs(obj.) which returns a " "numeric(1)" msgstr "" msgid "number of rows in 'x' and length of 'object$rweights' must be the same" msgstr "" msgid "" "'epsx' must be numeric(1) or a function of max(abs(x)) which returns a " "numeric(1)" msgstr "" msgid "Detected possible local breakdown of" msgstr "" msgid "-estimate in" msgstr "" msgid "coefficients" msgstr "" msgid "coefficient" msgstr "" msgid "'" msgstr "" msgid "," msgstr "" msgid "." msgstr "" msgid "KS2014" msgstr "" msgid "Use lmrob argument 'setting=\"KS2014\"' to avoid this problem." msgstr "" msgid "arguments .. in" msgstr "" msgid ")$" msgstr "" msgid "^list\\(" msgstr "" msgid "are disregarded." msgstr "" msgid "Maybe use lmrob(*, control=lmrob.control(....) with all these." msgstr "" msgid "'weights' must be a numeric vector" msgstr "" msgid "number of offsets is %d, should equal %d (number of observations)" msgstr "" msgid "The 'method' argument is different from 'control$method'" msgstr "" msgid "Using the former, method =" msgstr "" msgid "incompatible dimensions" msgstr "" msgid "missing or negative weights not allowed" msgstr "" msgid "singular fit encountered" msgstr "" msgid "init must be \"S\", \"M-S\", function or list" msgstr "" msgid "" "Length of initial coefficients vector does not match rank of singular design " "matrix x" msgstr "" msgid "unknown init argument" msgstr "" msgid "arguments" msgstr "" msgid "are disregarded in" msgstr "" msgid "" "Failed to compute robust Mahalanobis distances, reverting to robust " "leverages." msgstr "" msgid "lmrob object does not have a proper 'qr' component. Rank zero?" msgstr "" msgid "lmrob object does not have a proper 'qr' component. Rank must be zero" msgstr "" msgid "invalid 'lmrob' object: no terms component" msgstr "" msgid "unsupported psi function -- should not happen" msgstr "" msgid "weights should not be both close to 0 and close to 1!" msgstr "" msgid "You should use different 'eps' and/or 'eps1'" msgstr "" msgid "calling predict.lm() ..." 
msgstr "" msgid "prediction from a rank-deficient fit may be misleading" msgstr "" msgid "Predictions on current data refer to _future_ responses" msgstr "" msgid "" "Assuming prediction variance inversely proportional to weights used for " "fitting" msgstr "" msgid "Assuming constant prediction variance even though model fit is weighted" msgstr "" msgid "'weights' as formula should be one-sided" msgstr "" msgid "Use only with 'lts' objects" msgstr "" msgid "Index plot of standardized residuals is not avalable if scale = 0" msgstr "" msgid "" "Standardized residuals vs Fitted values plot is not avalable if scale = 0" msgstr "" msgid "" "Diagnostic plot is not available for univar\n" "iate location and scale estimation" msgstr "" msgid "Regression Diagnostic plot is not avalable if scale = 0" msgstr "" msgid "" "Regression Diagnostic plot is not avalable: option mcd=F was set in ltsReg()." msgstr "" msgid "The MCD covariance matrix was singular." msgstr "" msgid "'id.n' must be in {1,..," msgstr "" msgid "alpha not inside [1/2, 1]" msgstr "" msgid "unknown 'wgtFUN' specification:" msgstr "" msgid "'wgtFUN' must be a function or a string specifying one" msgstr "" msgid "y is not a numeric" msgstr "" msgid "There is at least one constant column. Remove it and set intercept=TRUE" msgstr "" msgid "'qr.out = TRUE' for univariate location is disregarded" msgstr "" msgid "Need more than twice as many observations as variables." msgstr "" msgid "x is singular" msgstr "" msgid "no valid subsample found in LTS - set 'nsamp' or rather use lmrob.S()" msgstr "" msgid "NA coefficient (at %s) from \"best\" subset" msgstr "" msgid "Invalid number of trials nsamp=" msgstr "" msgid "! Using default." msgstr "" msgid "'nsamp' options 'best' and 'exact' not allowed for n greater than" msgstr "" msgid ". Will use default." msgstr "" msgid "Maximum 5000 subsets allowed for option 'best'." msgstr "" msgid "Computing 5000 subsets of size" msgstr "" msgid "out of" msgstr "" msgid "mc(): not 'converged'" msgstr "" msgid "in" msgstr "" msgid "iter" msgstr "" msgid "iterations" msgstr "" msgid "*and*" msgstr "" msgid "'reflect part' in" msgstr "" msgid "iter2" msgstr "" msgid "; try enlarging eps1, eps2 !?" msgstr "" msgid "optimx" msgstr "" msgid "package must be loaded in order to" msgstr "" msgid "use" msgstr "" msgid "optimizer=\"optimx\"" msgstr "" msgid "couldn't find optimizer function" msgstr "" msgid "non-function specified as optimizer" msgstr "" msgid "optimizer function must use (at least) formal parameters" msgstr "" msgid "Provide 'upper' or 'lower' with names()" msgstr "" msgid "parameter names must appear in 'formula'" msgstr "" msgid "lower must be either of length %d, or length 1" msgstr "" msgid "upper must be either of length %d, or length 1" msgstr "" msgid "'%s' must be character string or function, but is \"%s\"" msgstr "" msgid "Psi function '%s' not supported yet" msgstr "" msgid "Initialization 'init = \"%s\"' not supported (yet)" msgstr "" msgid "unable to find constants for psi function" msgstr "" msgid "" "As \"sigma\" is in 'pnames', do not use it as variable or parameter name in " "'formula'" msgstr "" msgid "Method" msgstr "" msgid "not correctly supported yet" msgstr "" msgid "'formula' should be a formula of the type 'y ~ f(x, alpha)'" msgstr "" msgid "specifying both 'acc' and 'tol' is invalid" msgstr "" msgid "The argument 'acc' has been renamed to 'tol'; do adapt your code." 
msgstr "" msgid "" "'start' or 'lower' or 'upper' must be fully named (list or numeric vector)" msgstr "" msgid "specifying 'weights' is not yet supported for method" msgstr "" msgid "For method = \"%s\", currently 'psi' must be specified via 'control'" msgstr "" msgid "'start' must be a named list or numeric vector" msgstr "" msgid "'scale' must be NULL or a positive number" msgstr "" msgid "Do not use '%s' as a variable name or as a parameter name" msgstr "" msgid "'weights' must be nonnegative and not contain NAs" msgstr "" msgid "could not compute scale of residuals" msgstr "" msgid "failed to converge in" msgstr "" msgid "steps" msgstr "" msgid "type 'pearson' is not yet implemented" msgstr "" msgid "invalid 'type'" msgstr "" msgid "'cov' must be a p x p matrix" msgstr "" msgid "for method='" msgstr "" msgid "', 'parm' must be specified as an integer" msgstr "" msgid "" "profile() method not yet implemented for \"nlrob\" objects.\n" " Use method = \"Wald\"." msgstr "" msgid "Computing profile confidence intervals ..." msgstr "" msgid "" "\"boot\" method not yet implemented for \"nlrob\" objects.\n" " Use confint(*, method = \"Wald\")." msgstr "" msgid "Use only with 'lmrob' objects" msgstr "" msgid "'which' must be in 1:5" msgstr "" msgid "'id.n' must be in {1,..,%d}" msgstr "" msgid "recomputing robust Mahalanobis distances" msgstr "" msgid "need 'model' or 'x' component for robust Mahalanobis distances" msgstr "" msgid "saving the robust distances 'MD' as part of" msgstr "" msgid "Tolerance range must be between 0% to 100%" msgstr "" msgid "arguments of function '" msgstr "" msgid "' are (" msgstr "" msgid ") but should be (" msgstr "" msgid ")." msgstr "" msgid "invalid tuning parameter names:" msgstr "" msgid "instead of" msgstr "" msgid "You cannot specify both 'main' and the deprecated 'shortMain'" msgstr "" msgid "'shortMain' is deprecated and will get defunct." msgstr "" msgid "Use 'main = \"short\"' instead of 'shortMain = TRUE'" msgstr "" msgid "not yet implemented for large vectors" msgstr "" msgid "finite sample corrections are not corrected for non-default 'k'" msgstr "" msgid "'weights' must have same length as 'x'" msgstr "" msgid "Dimension {= ncol(x)} must be 2!" msgstr "" msgid "Initial set" msgid_plural "Initial sets" msgstr[0] "" msgstr[1] "" msgid "For method = \"%s\", argument %s is not made use of" msgid_plural "For method = \"%s\", arguments %s are not made use of" msgstr[0] "" msgstr[1] "" robustbase/po/de.po0000644000176200001440000001104414107540744013777 0ustar liggesusers# Uebersetzung von robustbase.pot auf deutsch. # Copyright (C) 2021 Martin Maechler # This file is distributed under the same license as the robustbase package. # Martin Maechler , 2021. #, fuzzy msgid "" msgstr "" "Project-Id-Version: robustbase 0.93-9\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2021-08-19 20:45+0200\n" "PO-Revision-Date: 2021-08-19 22:26+0200\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "Language: de\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" #: lmrob.c:225 msgid " Problem determining optimal block size, using minimum" msgstr "Es gab ein Problem beim Bestimmen der optimalen Blockgrösse, wir verwenden das Minimum" #: lmrob.c:262 #, c-format msgid "DGELS: illegal argument in %i. argument." msgstr "" #: lmrob.c:269 #, c-format msgid "" "DGELS: weighted design matrix not of full rank (column %d).\n" "Use control parameter 'trace.lev = 4' to get diagnostic output." 
msgstr "" #: lmrob.c:290 #, c-format msgid "DGEEQ: illegal argument in %i. argument" msgstr "" #: lmrob.c:293 msgid "" "Fast S large n strategy failed. Use control parameter 'fast.s.large.n = Inf'." msgstr "Die 'Fast S large n' Strategie hat versagt. Verwende Kontrollparameter 'fast.s.large.n = Inf'." #: lmrob.c:295 #, c-format msgid "DGEEQU: column %i of the design matrix is exactly zero." msgstr "DGEEQU: Spalte %i der Designmatrix ist exakt Null." #: lmrob.c:300 #, c-format msgid " Skipping design matrix equilibration (DGEEQU): row %i is exactly zero." msgstr "Äquilibrierung der Designmatrix weggelassen (DGEEQU): Zeile %i ist exakt Null." #: lmrob.c:460 msgid "m_s_subsample() stopped prematurely (scale < 0)." msgstr "m_s_subsample() hat vorzeitig angehalten (scale < 0)." #: lmrob.c:560 lmrob.c:561 lmrob.c:599 lmrob.c:600 lmrob.c:627 lmrob.c:628 #: rob-utils.c:62 #, c-format msgid "Argument '%s' must be numeric or integer" msgstr "Argument '%s' muss numerisch ('numeric' oder 'integer') sein" #: lmrob.c:582 lmrob.c:613 #, c-format msgid "'deriv'=%d is invalid" msgstr "'deriv'=%d ist ungültig" #: lmrob.c:642 msgid "Argument 'cc' must be numeric" msgstr "Argument 'cc' muss numerisch sein" #: lmrob.c:643 msgid "Argument 'ipsi' must be integer" msgstr "" #: lmrob.c:655 #, c-format msgid "rho_inf(): ipsi=%d not implemented." msgstr "rho_inf(): ipsi=%d nicht implementiert." #: lmrob.c:685 #, c-format msgid "normcnst(): ipsi=%d not implemented." msgstr "normcnst(): ipsi=%d nicht implementiert." #: lmrob.c:715 #, c-format msgid "rho(): ipsi=%d not implemented." msgstr "rho(): ipsi=%d nicht implementiert." #: lmrob.c:734 #, c-format msgid "psi(): ipsi=%d not implemented." msgstr "psi(): ipsi=%d nicht implementiert." #: lmrob.c:752 #, c-format msgid "psip(): ipsi=%d not implemented." msgstr "psip(): ipsi=%d nicht implementiert." #: lmrob.c:774 #, c-format msgid "psi2(): ipsi=%d not implemented." msgstr "psi2(): ipsi=%d nicht implementiert." #: lmrob.c:1204 #, c-format msgid "rho_ggw(): case (%i) not implemented." msgstr "rho_ggw(): Fall (%i) nicht implementiert." #: lmrob.c:1245 #, c-format msgid "Error from Rdqags(psi_ggw*, k, ...): ier = %i" msgstr "Fehler von Rdqags(psi_ggw*, k, ...): ier = %i" #: lmrob.c:2176 #, c-format msgid "S refinements did not converge (to refine.tol=%g) in %d (= k.max) steps" msgstr "" #: lmrob.c:2335 #, c-format msgid "m_s_descent(): Problem in RLLARSBI (RILARS). KODE=%d. Exiting." msgstr "" #: lmrob.c:2379 msgid " M-S estimate: maximum number of refinement steps reached." msgstr "" #: lmrob.c:2468 msgid "subsample(): could not find non-singular subsample." msgstr "" #: lmrob.c:2517 msgid "" "Too many singular resamples. Aborting subsample().\n" " See parameter 'subsampling; in help of lmrob.config()." msgstr "" #: lmrob.c:2576 #, c-format msgid "find_scale(*, initial_scale = %g) -> final scale = 0" msgstr "" #: lmrob.c:2591 #, c-format msgid "" "find_scale() did not converge in '%s' (= %d) iterations with tol=%g, last " "rel.diff=%g" msgstr "" #: rob-utils.c:63 rob-utils.c:64 #, c-format msgid "Argument '%s' must be numeric or integer of length 1" msgstr "Argument '%s' muss numerisch oder Ganzzahl der Länge 1 sein" #: rowMedians.c:42 msgid "Argument 'x' must be a matrix." msgstr "Argument 'x' muss eine Matrix sein." #: rowMedians.c:46 msgid "Argument 'naRm' must be either TRUE or FALSE." msgstr "Argument 'naRm' muss entweder TRUE oder FALSE sein." #: rowMedians.c:71 msgid "Argument 'x' must be numeric (integer or double)."
msgstr "Argument 'x' muss numerisch ('integer' oder 'double') sein." robustbase/po/R-robustbase.pot0000644000176200001440000005230314107540744016146 0ustar liggesusersmsgid "" msgstr "" "Project-Id-Version: robustbase 0.93-9\n" "POT-Creation-Date: 2021-08-19 22:34\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=CHARSET\n" "Content-Transfer-Encoding: 8bit\n" msgid "some terms will have NAs due to the limits of the method" msgstr "" msgid "y is not onedimensional" msgstr "" msgid "Number of observations in x and y not equal" msgstr "" msgid "no intercept in the model" msgstr "" msgid "All observations have missing values!" msgstr "" msgid "Implosion: sigma1=%g became too small" msgstr "" msgid "Convergence Achieved" msgstr "" msgid "No convergence in %d steps." msgstr "" msgid "maximum number of \"kstep\" iterations must be > 0" msgstr "" msgid "maximal number of *inner* step halvings must be > 0" msgstr "" msgid "value of the tuning constant c ('const') must be > 0" msgstr "" msgid "non-trivial prior 'weights' are not yet implemented for \"BY\"" msgstr "" msgid "'start' cannot yet be passed to glmrobBY()" msgstr "" msgid "'offset' is not yet implemented for \"BY\"" msgstr "" msgid "'weights.on.x = \"%s\"' is not implemented" msgstr "" msgid "glm(.) {inner subsample} error:" msgstr "" msgid "betaExacto glm(.) error:" msgstr "" msgid "value of acc must be > 0" msgstr "" msgid "number of subsamples must be > 0" msgstr "" msgid "maximum number of iterations must be > 0" msgstr "" msgid "value of the tuning constant c (cw) must be > 0" msgstr "" msgid "Currently, only family 'poisson' is supported for the \"MT\" estimator" msgstr "" msgid "All weights must be positive" msgstr "" msgid "non-trivial 'offset' is not yet implemented" msgstr "" msgid "NAs in V(mu)" msgstr "" msgid "0s in V(mu)" msgstr "" msgid "'start' must be an initial estimate of beta, of length %d" msgstr "" msgid "optim(.) non-convergence:" msgstr "" msgid "'X' must have at least two columns" msgstr "" msgid "sigma0 =" msgstr "" msgid "==> scaleTau2(.) = 0" msgstr "" msgid "invalid first argument" msgstr "" msgid "'formula' missing or incorrect" msgstr "" msgid "'coef' must not be negative" msgstr "" msgid "More dimensions than observations, currently not implemented" msgstr "" msgid "More dimensions than observations: not yet implemented" msgstr "" msgid "Matrix 'x' is not of full rank: rankM(x) =" msgstr "" msgid "< p =" msgstr "" msgid "Use fullRank(x) instead" msgstr "" msgid "** direction sampling iterations were not sufficient. Maybe try increasing 'maxit.mult'" msgstr "" msgid "the following arguments to 'anova.glmrob' are invalid and" msgstr "" msgid "dropped:" msgstr "" msgid "anova.glmrob() only works for 'glmrob' objects" msgstr "" msgid "'Anova Table' for a single model object not yet implemented" msgstr "" msgid "Not the same response used in the fitted models" msgstr "" msgid "models were not all fitted to the same size of dataset" msgstr "" msgid "Not the same method used for fitting the models" msgstr "" msgid "Not the same tuning constant c used in the robust fits" msgstr "" msgid "Models are not nested!" 
msgstr "" msgid "Models are not strictly nested" msgstr "" msgid "This family is not implemented" msgstr "" msgid "some eigenvalues are negative" msgstr "" msgid "non-implemented test method:" msgstr "" msgid "for fitting method" msgstr "" msgid "the following arguments to 'anova.lmrob' are invalid and" msgstr "" msgid "For test = 'Deviance', the estimator chain has to end with 'M'" msgstr "" msgid "The first object does not contain the largest model" msgstr "" msgid "All models are refitted except the largest one" msgstr "" msgid "anova.lmrob() only works for 'lmrob' objects" msgstr "" msgid "'Anova Table' for a single model not yet implemented" msgstr "" msgid "invalid 'test'" msgstr "" msgid "Please fit the nested models by lmrob" msgstr "" msgid "NA coefs in full and reduced model do not match" msgstr "" msgid "test" msgstr "" msgid "not yet implemented" msgstr "" msgid "deriv must be in {0,1,2}" msgstr "" msgid "deriv must be in {-1,0,1,2}" msgstr "" msgid "'x' must be a numeric matrix" msgstr "" msgid "The sample size must be greater than 1 for svd" msgstr "" msgid "'wgtFUN' must be a function or a string specifying such a function" msgstr "" msgid "invalid 'seed'. Must be compatible with .Random.seed !" msgstr "" msgid "Invalid number of trials nsamp =" msgstr "" msgid "!" msgstr "" msgid "n <= p -- you can't be serious!" msgstr "" msgid "n == p+1 is too small sample size for MCD" msgstr "" msgid "n < 2 * p, i.e., possibly too small sample size" msgstr "" msgid "Sample size n < h(alpha; n,p) := size of \"good\" subsample" msgstr "" msgid "subsample size\t h < n/2 may be too small" msgstr "" msgid "'wgtFUN' must be a function or one of the strings %s." msgstr "" msgid "Unexpected 'exactfit' code" msgstr "" msgid ". Please report!" msgstr "" msgid "illegal 'singularity$kind'" msgstr "" msgid "Options 'best' and 'exact' not allowed for n greater than 2*nmini-1 =" msgstr "" msgid ".\nUsing default." msgstr "" msgid "'nsamp = \"best\"' allows maximally" msgstr "" msgid "subsets;\ncomputing these" msgstr "" msgid "This may take a" msgstr "" msgid "very" msgstr "" msgid "long time!" msgstr "" msgid "Invalid number of trials nsamp=%s. Using default nsamp=%d." msgstr "" msgid "nsamp > i_max := maximal integer -- not allowed;" msgstr "" msgid "set to i_max =" msgstr "" msgid "Use only with 'mcd' objects" msgstr "" msgid "x is not a numeric dataframe or matrix." msgstr "" msgid "argument 'm.cov' must have numeric components 'center' and 'cov'" msgstr "" msgid "Data set and provided center have different dimensions!" msgstr "" msgid "The covariance matrix is singular!" msgstr "" msgid "id.n" msgstr "" msgid "must be in {1,..," msgstr "" msgid "}" msgstr "" msgid "For tolerance ellipses the dimension 'p' must be 2!" msgstr "" msgid "'hsets.init' must be a h' x L matrix (h' >= h) of observation indices" msgstr "" msgid "'full.h' is true, but 'hsets.init' has less than n rows" msgstr "" msgid "'hsets.init' must be in {1,2,...,n}; n =" msgstr "" msgid "More than h of the observations lie on a hyperplane." msgstr "" msgid "original detmcd() wrongly declared c-step convergence (obj=%g, i=%d, j=%d)" msgstr "" msgid "Invalid scalefn='%s': must be function or a valid string" msgstr "" msgid "'%s' must be a function, numeric vector of length p, or NULL" msgstr "" msgid "provide better scale; must be all positive" msgstr "" msgid "More than half of the observations lie on a hyperplane." 
msgstr "" msgid "'%s' is not a valid family (see ?family)" msgstr "" msgid "Robust GLM fitting not yet implemented for family %s" msgstr "" msgid "'weights' must be non-negative" msgstr "" msgid "Number of offsets is %d, should rather equal %d (number of observations)" msgstr "" msgid "'weights.on.x' must be a string, function, list or numeric n-vector" msgstr "" msgid "'start' must be a numeric vector, NULL, or a character string" msgstr "" msgid "invalid 'start' string" msgstr "" msgid "For method 'cubif', use glmRob() from package 'robust'" msgstr "" msgid "method='%s' is only applicable for binomial family, but family=\"\"" msgstr "" msgid "invalid 'method':" msgstr "" msgid "setting 'y = FALSE' has no longer any effect" msgstr "" msgid "No weights defined for this object. Use type=\"robustness\" argument to get robustness weights." msgstr "" msgid "need non-robust working residuals for this model type" msgstr "" msgid "invalid 'variant':" msgstr "" msgid "Weighting method" msgstr "" msgid "is not implemented" msgstr "" msgid "if a list, weights.on.x must contain a covariance function such as covMcd()" msgstr "" msgid "weights.on.x needs %d none-negative values" msgstr "" msgid "All weights.on.x must be none negative" msgstr "" msgid "'offset' not fully implemented" msgstr "" msgid "illegal 'family' argument" msgstr "" msgid "start 'theta' has still NA's .. badly singular x" msgstr "" msgid "Cannot find valid starting values: You need help" msgstr "" msgid "family '%s' not yet implemented" msgstr "" msgid "NAs in d(mu)/d(eta)" msgstr "" msgid "Non-finite coefficients at iteration" msgstr "" msgid "Algorithm did not converge" msgstr "" msgid "fitted probabilities numerically 0 or 1 occurred" msgstr "" msgid "fitted rates numerically 0 occurred" msgstr "" msgid "Only 'test.acc = \"coef\"' is currently implemented" msgstr "" msgid "value of the tuning constant c (tcc) must be > 0" msgstr "" msgid "Std.error computation not yet available for the case of 'weights'" msgstr "" msgid "negative scale 's'" msgstr "" msgid "scale 's' is zero -- returning initial 'mu'" msgstr "" msgid "scale MAD is zero for this sample" msgstr "" msgid "calculations stopped prematurely in rllarsbi" msgstr "" msgid "(probably because of rounding errors)." msgstr "" msgid "unknown split type" msgstr "" msgid "No categorical variables found in model. Reverting to S-estimator." msgstr "" msgid "No continuous variables found in model. Reverting to L1-estimator." msgstr "" msgid "'psi' should be one of %s" msgstr "" msgid "invalid 'psi'=%s; possibly use .regularize.Mpsi(%s)" msgstr "" msgid "Unknown setting '" msgstr "" msgid "'. Using defaults." msgstr "" msgid "'mf' is unused and deprecated" msgstr "" msgid "Initial estimator '%s' not supported; using S-estimator instead" msgstr "" msgid ".vcov.avar1 can only be used when initial estimator is S; using .vcov.w instead" msgstr "" msgid "only M and D are steps supported after \"init\" computation" msgstr "" msgid "%s-step did NOT converge. 
Returning unconverged %s-estimate" msgstr "" msgid ":.vcov.w: cov.hubercorr must be logical (or NULL)" msgstr "" msgid ":.vcov.w: cov.corrfact must be one of" msgstr "" msgid ":.vcov.w: cov.dfcorr must be one of" msgstr "" msgid "ignoring cov.resid == 'final' since est != final" msgstr "" msgid "cov.resid must be one of" msgstr "" msgid ":.vcov.w: cov.xwx must be logical (or NULL)" msgstr "" msgid "parameter psi is not defined" msgstr "" msgid "parameter 'tuning.psi' is not numeric" msgstr "" msgid ":.vcov.w: scale missing, using D scale" msgstr "" msgid "option 'cov.hubercorr' is ignored for cov.corrfact = \"asympt\"" msgstr "" msgid ":.vcov.w: unsupported psi function" msgstr "" msgid "(tau / hybrid / tauold): tau not found in 'obj'" msgstr "" msgid ":.vcov.w: Caution. Some psi'() are NA" msgstr "" msgid ":.vcov.w: unsupported psi for \"hybrid\" correction factor" msgstr "" msgid "invalid 'cov.dfcorr':" msgstr "" msgid ".vcov.avar1() supports only SM or MM estimates" msgstr "" msgid "initial estimate residuals length differs from final ones. Typically must refit w/ lmrob()" msgstr "" msgid "X'WX is almost singular. Consider using cov = \".vcov.w\"" msgstr "" msgid "X'WX is singular. Rather use cov = \".vcov.w\"" msgstr "" msgid "fixing" msgstr "" msgid "negative eigen([" msgstr "" msgid "])values" msgstr "" msgid ".vcov.avar1: negative diag() fixed up; consider 'cov=\".vcov.w.\"' instead" msgstr "" msgid "invalid 'posdef.meth':" msgstr "" msgid "'control$n.group' must be larger than 'p' for 'large_n' algorithm" msgstr "" msgid "'groups * n.group' must be smaller than 'n' for 'large_n' algorithm" msgstr "" msgid "'control$n.group' is not much larger than 'p', probably too small" msgstr "" msgid "C function R_lmrob_S() exited prematurely" msgstr "" msgid "S-estimated scale == 0: Probably exact fit; check your data" msgstr "" msgid "lmrob..D..fit: control is missing" msgstr "" msgid "lmrob..D..fit: prior estimator did not converge, stopping" msgstr "" msgid "lmrob..D..fit: robustness weights undefined" msgstr "" msgid "lmrob..D..fit: residuals undefined" msgstr "" msgid "lmrob..D..fit: parameter psi is not defined" msgstr "" msgid "lmrob..D..fit: parameter tuning.psi is not numeric" msgstr "" msgid "control is missing" msgstr "" msgid "'control' is missing" msgstr "" msgid "internal logic error in psi() function name:" msgstr "" msgid "Please report!" msgstr "" msgid "argument 'psi' must be a string (denoting a psi function)" msgstr "" msgid "tuning constant 'cc' is not numeric" msgstr "" msgid "Coefficients for" msgstr "" msgid "function incorrectly specified." msgstr "" msgid "Use c(minimal slope, b, efficiency, breakdown point) [6 hard-coded special cases]" msgstr "" msgid "or c(0, a,b,c, max_rho) as from .psi.const(" msgstr "" msgid ", cc)." msgstr "" msgid "Use c(minimal slope, b, efficiency, breakdown point) [2 special cases]" msgstr "" msgid "or c(b, c, s) as from .psi.const(" msgstr "" msgid "Coef. for Hampel psi function not of length 3" msgstr "" msgid "Coef. 
for psi function" msgstr "" msgid "not of length 1" msgstr "" msgid "tuning constants for ggw psi: both eff and bp specified, ignoring bp" msgstr "" msgid "neither breakdown point 'bp' nor efficiency 'eff' specified" msgstr "" msgid "unable to find constants for \"ggw\" psi function: %s" msgstr "" msgid "tuning constants for \"lqq\" psi: both 'eff' and 'bp' specified, ignoring 'bp'" msgstr "" msgid "Error: neither breakdown point nor efficiency specified" msgstr "" msgid "unable to find constants for \"lqq\" psi function: %s" msgstr "" msgid "method for psi function" msgstr "" msgid "not implemented" msgstr "" msgid "tuning parameter (chi/psi) is not numeric" msgstr "" msgid "need non-negative number of nodes" msgstr "" msgid "unknown setting for 'subsampling': %s" msgstr "" msgid "'epsw' must be numeric(1) or a function of nobs(obj.) which returns a numeric(1)" msgstr "" msgid "number of rows in 'x' and length of 'object$rweights' must be the same" msgstr "" msgid "'epsx' must be numeric(1) or a function of max(abs(x)) which returns a numeric(1)" msgstr "" msgid "Detected possible local breakdown of" msgstr "" msgid "-estimate in" msgstr "" msgid "coefficients" msgstr "" msgid "coefficient" msgstr "" msgid "'" msgstr "" msgid "," msgstr "" msgid "." msgstr "" msgid "KS2014" msgstr "" msgid "Use lmrob argument 'setting=\"KS2014\"' to avoid this problem." msgstr "" msgid "arguments .. in" msgstr "" msgid ")$" msgstr "" msgid "^list\\(" msgstr "" msgid "are disregarded." msgstr "" msgid "Maybe use lmrob(*, control=lmrob.control(....) with all these." msgstr "" msgid "'weights' must be a numeric vector" msgstr "" msgid "number of offsets is %d, should equal %d (number of observations)" msgstr "" msgid "The 'method' argument is different from 'control$method'" msgstr "" msgid "Using the former, method =" msgstr "" msgid "incompatible dimensions" msgstr "" msgid "missing or negative weights not allowed" msgstr "" msgid "singular fit encountered" msgstr "" msgid "init must be \"S\", \"M-S\", function or list" msgstr "" msgid "Length of initial coefficients vector does not match rank of singular design matrix x" msgstr "" msgid "unknown init argument" msgstr "" msgid "arguments" msgstr "" msgid "are disregarded in" msgstr "" msgid "Failed to compute robust Mahalanobis distances, reverting to robust leverages." msgstr "" msgid "lmrob object does not have a proper 'qr' component. Rank zero?" msgstr "" msgid "lmrob object does not have a proper 'qr' component. Rank must be zero" msgstr "" msgid "invalid 'lmrob' object: no terms component" msgstr "" msgid "unsupported psi function -- should not happen" msgstr "" msgid "weights should not be both close to 0 and close to 1!" msgstr "" msgid "You should use different 'eps' and/or 'eps1'" msgstr "" msgid "calling predict.lm() ..." 
msgstr "" msgid "prediction from a rank-deficient fit may be misleading" msgstr "" msgid "Predictions on current data refer to _future_ responses" msgstr "" msgid "Assuming prediction variance inversely proportional to weights used for fitting" msgstr "" msgid "Assuming constant prediction variance even though model fit is weighted" msgstr "" msgid "'weights' as formula should be one-sided" msgstr "" msgid "Use only with 'lts' objects" msgstr "" msgid "Index plot of standardized residuals is not avalable if scale = 0" msgstr "" msgid "Standardized residuals vs Fitted values plot is not avalable if scale = 0" msgstr "" msgid "Diagnostic plot is not available for univar\niate location and scale estimation" msgstr "" msgid "Regression Diagnostic plot is not avalable if scale = 0" msgstr "" msgid "Regression Diagnostic plot is not avalable: option mcd=F was set in ltsReg()." msgstr "" msgid "The MCD covariance matrix was singular." msgstr "" msgid "'id.n' must be in {1,..," msgstr "" msgid "alpha not inside [1/2, 1]" msgstr "" msgid "unknown 'wgtFUN' specification:" msgstr "" msgid "'wgtFUN' must be a function or a string specifying one" msgstr "" msgid "y is not a numeric" msgstr "" msgid "There is at least one constant column. Remove it and set intercept=TRUE" msgstr "" msgid "'qr.out = TRUE' for univariate location is disregarded" msgstr "" msgid "Need more than twice as many observations as variables." msgstr "" msgid "x is singular" msgstr "" msgid "no valid subsample found in LTS - set 'nsamp' or rather use lmrob.S()" msgstr "" msgid "NA coefficient (at %s) from \"best\" subset" msgstr "" msgid "Invalid number of trials nsamp=" msgstr "" msgid "! Using default." msgstr "" msgid "'nsamp' options 'best' and 'exact' not allowed for n greater than" msgstr "" msgid ". Will use default." msgstr "" msgid "Maximum 5000 subsets allowed for option 'best'." msgstr "" msgid "Computing 5000 subsets of size" msgstr "" msgid "out of" msgstr "" msgid "mc(): not 'converged'" msgstr "" msgid "in" msgstr "" msgid "iter" msgstr "" msgid "iterations" msgstr "" msgid "*and*" msgstr "" msgid "'reflect part' in" msgstr "" msgid "iter2" msgstr "" msgid "; try enlarging eps1, eps2 !?" msgstr "" msgid "optimx" msgstr "" msgid "package must be loaded in order to" msgstr "" msgid "use" msgstr "" msgid "optimizer=\"optimx\"" msgstr "" msgid "couldn't find optimizer function" msgstr "" msgid "non-function specified as optimizer" msgstr "" msgid "optimizer function must use (at least) formal parameters" msgstr "" msgid "Provide 'upper' or 'lower' with names()" msgstr "" msgid "parameter names must appear in 'formula'" msgstr "" msgid "lower must be either of length %d, or length 1" msgstr "" msgid "upper must be either of length %d, or length 1" msgstr "" msgid "'%s' must be character string or function, but is \"%s\"" msgstr "" msgid "Psi function '%s' not supported yet" msgstr "" msgid "Initialization 'init = \"%s\"' not supported (yet)" msgstr "" msgid "unable to find constants for psi function" msgstr "" msgid "As \"sigma\" is in 'pnames', do not use it as variable or parameter name in 'formula'" msgstr "" msgid "Method" msgstr "" msgid "not correctly supported yet" msgstr "" msgid "'formula' should be a formula of the type 'y ~ f(x, alpha)'" msgstr "" msgid "specifying both 'acc' and 'tol' is invalid" msgstr "" msgid "The argument 'acc' has been renamed to 'tol'; do adapt your code." 
msgstr "" msgid "'start' or 'lower' or 'upper' must be fully named (list or numeric vector)" msgstr "" msgid "specifying 'weights' is not yet supported for method" msgstr "" msgid "For method = \"%s\", currently 'psi' must be specified via 'control'" msgstr "" msgid "'start' must be a named list or numeric vector" msgstr "" msgid "'scale' must be NULL or a positive number" msgstr "" msgid "Do not use '%s' as a variable name or as a parameter name" msgstr "" msgid "'weights' must be nonnegative and not contain NAs" msgstr "" msgid "could not compute scale of residuals" msgstr "" msgid "failed to converge in" msgstr "" msgid "steps" msgstr "" msgid "type 'pearson' is not yet implemented" msgstr "" msgid "invalid 'type'" msgstr "" msgid "'cov' must be a p x p matrix" msgstr "" msgid "for method='" msgstr "" msgid "', 'parm' must be specified as an integer" msgstr "" msgid "profile() method not yet implemented for \"nlrob\" objects.\n Use method = \"Wald\"." msgstr "" msgid "Computing profile confidence intervals ..." msgstr "" msgid "\"boot\" method not yet implemented for \"nlrob\" objects.\n Use confint(*, method = \"Wald\")." msgstr "" msgid "Use only with 'lmrob' objects" msgstr "" msgid "'which' must be in 1:5" msgstr "" msgid "'id.n' must be in {1,..,%d}" msgstr "" msgid "recomputing robust Mahalanobis distances" msgstr "" msgid "need 'model' or 'x' component for robust Mahalanobis distances" msgstr "" msgid "saving the robust distances 'MD' as part of" msgstr "" msgid "Tolerance range must be between 0% to 100%" msgstr "" msgid "arguments of function '" msgstr "" msgid "' are (" msgstr "" msgid ") but should be (" msgstr "" msgid ")." msgstr "" msgid "invalid tuning parameter names:" msgstr "" msgid "instead of" msgstr "" msgid "You cannot specify both 'main' and the deprecated 'shortMain'" msgstr "" msgid "'shortMain' is deprecated and will get defunct." msgstr "" msgid "Use 'main = \"short\"' instead of 'shortMain = TRUE'" msgstr "" msgid "not yet implemented for large vectors" msgstr "" msgid "finite sample corrections are not corrected for non-default 'k'" msgstr "" msgid "'weights' must have same length as 'x'" msgstr "" msgid "Dimension {= ncol(x)} must be 2!" msgstr "" msgid "Initial set" msgid_plural "Initial sets" msgstr[0] "" msgstr[1] "" msgid "For method = \"%s\", argument %s is not made use of" msgid_plural "For method = \"%s\", arguments %s are not made use of" msgstr[0] "" msgstr[1] ""