DHARMa/0000755000176200001440000000000014704441036011251 5ustar liggesusersDHARMa/tests/0000755000176200001440000000000014704246644012423 5ustar liggesusersDHARMa/tests/manualTests/0000755000176200001440000000000014665273541014725 5ustar liggesusersDHARMa/tests/manualTests/DHARMa-rhub.R0000644000176200001440000000056714665273541017012 0ustar liggesusers library(rhub) # validate_email(email = "florian.hartig@ur.de") rhub::platforms() pltfms = c("ubuntu-rchk") rhub::check(path = "./DHARMa", platform = pltfms) # submitting to winbuilder packageFile = "./DHARMa/" devtools::check_win_devel(packageFile, quiet = T) devtools::check_win_release(packageFile, quiet = T) devtools::check_win_oldrelease(packageFile, quiet = T) DHARMa/tests/manualTests/readme.txt0000644000176200001440000000014014665273541016716 0ustar liggesusersmanual tests moved to https://github.com/florianhartig/DHARMa/tree/master/Code/DHARMaManualTestsDHARMa/tests/testthat/0000755000176200001440000000000014704441036014253 5ustar liggesusersDHARMa/tests/testthat/testModelTypes.R0000644000176200001440000007236714703461527017410 0ustar liggesusers skip_on_cran() #skip_on_ci() set.seed(1234) doPlots = F # Test functions -------------------------------------------------------------- checkOutput <- function(simulationOutput){ # print(simulationOutput) if(any(simulationOutput$scaledResiduals < 0)) stop() if(any(simulationOutput$scaledResiduals > 1)) stop() if(any(is.na(simulationOutput$scaledResiduals))) stop() if(length(simulationOutput$scaledResiduals) != length(simulationOutput$observedResponse)) stop() if(length(simulationOutput$fittedPredictedResponse) != length(simulationOutput$observedResponse)) stop() } expectDispersion <- function(x, answer = T){ res <- simulateResiduals(x) if (answer) expect_lt(testDispersion(res, plot = doPlots)$p.value, 0.05) else expect_gt(testDispersion(res, plot = doPlots)$p.value, 0.05) } runEverything = function(fittedModel, testData, DHARMaData = T, phy = NULL, expectOverdispersion = F){ t = getObservedResponse(fittedModel) expect_true(is.vector(t)) expect_true(is.numeric(t)) x = getSimulations(fittedModel, 1) expect_true(is.matrix(x)) expect_true(ncol(x) == 1) x = getSimulations(fittedModel, 2) expect_true(is.numeric(x)) expect_true(is.matrix(x)) expect_true(ncol(x) == 2) x = getSimulations(fittedModel, 1, type = "refit") expect_true(is.data.frame(x)) x = getSimulations(fittedModel, 2, type = "refit") expect_true(is.data.frame(x)) fittedModel2 = getRefit(fittedModel,x[[1]]) # expect_false(any(getFixedEffects(fittedModel) - # getFixedEffects(fittedModel2) > 0.5)) # doesn't work for some models simulationOutput <- simulateResiduals(fittedModel = fittedModel, n = 200) checkOutput(simulationOutput) if(doPlots) plot(simulationOutput, quantreg = F) expect_gt(testOutliers(simulationOutput, plot = doPlots)$p.value, 0.001) expect_gt(testDispersion(simulationOutput, plot = doPlots)$p.value, 0.001) expect_gt(testUniformity(simulationOutput = simulationOutput, plot = doPlots)$p.value, 0.001) expect_gt(testZeroInflation(simulationOutput = simulationOutput, plot = doPlots)$p.value, 0.001) expect_gt(testTemporalAutocorrelation(simulationOutput = simulationOutput, time = testData$time, plot = doPlots)$p.value, 0.001) expect_gt(testSpatialAutocorrelation(simulationOutput = simulationOutput, x = testData$x, y = testData$y, plot = F)$p.value, 0.001) simulationOutput <- recalculateResiduals(simulationOutput, group = testData$group) expect_gt(testDispersion(simulationOutput, plot = doPlots)$p.value, 0.001) simulationOutput2 
<- simulateResiduals(fittedModel = fittedModel, refit = T, n = 100) checkOutput(simulationOutput2) if(doPlots) plot(simulationOutput2, quantreg = F) # note that the pearson test is biased, therefore have to test greater #expect_gt(testDispersion(simulationOutput2, plot = doPlots, alternative = "greater")$p.value, 0.001) x = testDispersion(simulationOutput2, plot = doPlots) simulationOutput3 <- recalculateResiduals(simulationOutput2, group = testData$group) #expect_gt(testDispersion(simulationOutput3, plot = doPlots, alternative = "greater")$p.value, 0.001) x = testDispersion(simulationOutput3, plot = doPlots) } # testData -------------------------------------------------------------------- testData = list() testData$lm = createData(sampleSize = 200, fixedEffects = c(1,0), overdispersion = 0, randomEffectVariance = 0, family = gaussian()) testData$lmm = createData(sampleSize = 200, overdispersion = 0, randomEffectVariance = 0.5, family = gaussian()) testData$binomial_10 = createData(sampleSize = 200, randomEffectVariance = 0, family = binomial()) testData$binomial_yn = createData(sampleSize = 200, fixedEffects = c(1,0), overdispersion = 0, randomEffectVariance = 0, family = binomial(), factorResponse = T) testData$binomial_nk_matrix = createData(sampleSize = 200, overdispersion = 0, randomEffectVariance = 0, family = binomial(), binomialTrials = 20) testData$binomial_nk_weights = createData(sampleSize = 200, overdispersion = 0, randomEffectVariance = 0, family = binomial(), binomialTrials = 20) testData$binomial_nk_weights$prop = testData$binomial_nk_weights$observedResponse1 / 20 testData$binomial_nk_weights2 = createData(sampleSize = 200, overdispersion = 1, randomEffectVariance = 0, family = binomial(), binomialTrials = 20) testData$binomial_nk_weights2$prop = testData$binomial_nk_weights2$observedResponse1 / 20 testData$poisson1 = createData(sampleSize = 500, overdispersion = 0, randomEffectVariance = 0.000, family = poisson()) testData$poisson2 = createData(sampleSize = 200, overdispersion = 2, randomEffectVariance =0.000, family = poisson()) testData$poisson3 = createData(sampleSize = 500, overdispersion = 0.5, randomEffectVariance = 0.000, family = poisson()) testData$poisson_weights = createData(sampleSize = 200, overdispersion = 0.5, randomEffectVariance = 0.5, family = poisson()) testData$weights = rep(c(1,1.1), each = 100) testData$poisson_weights$weights = testData$weights # stats::lm ---------------------------------------------------------------------- test_that("lm works", { fittedModel <- lm(observedResponse ~ Environment1 + Environment2 , data = testData$lm) runEverything(fittedModel, testData = testData$lm) # lm weights are considered in simulate(), should not throw warning fittedModel <- lm(observedResponse ~ Environment1, data = testData$lm, weights = testData$weights) expect_s3_class(simulateResiduals(fittedModel), "DHARMa") } ) # stats::glm ---------------------------------------------------------------------- test_that("glm works", { fittedModel <- glm(observedResponse ~ Environment1, data = testData$lm) runEverything(fittedModel, testData = testData$lm) fittedModel <- glm(observedResponse ~ Environment1, family = "binomial", data = testData$binomial_10) runEverything(fittedModel, testData$binomial_10) fittedModel <- glm(observedResponse ~ Environment1 + Environment2, family = "binomial", data = testData$binomial_yn) runEverything(fittedModel, testData$binomial_yn) fittedModel <- glm(cbind(observedResponse1,observedResponse0) ~ Environment1, family = "binomial", data 
= testData$binomial_nk_matrix) runEverything(fittedModel, testData$binomial_nk_matrix) fittedModel <- glm(prop ~ Environment1, family = "binomial", data = testData$binomial_nk_weights, weights = rep(20,200)) runEverything(fittedModel, testData$binomial_nk_weights) fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData$poisson1) runEverything(fittedModel, testData$poisson1) fittedModel2 <- glm(observedResponse ~ Environment1, family = "poisson", data = testData$poisson2) expectDispersion(fittedModel2) ### Weights checks # glm gaussian still the same fittedModel <- glm(observedResponse ~ Environment1, data = testData$lm, weights = testData$weights) expect_s3_class(simulateResiduals(fittedModel), "DHARMa") # glm behaves nice, throws a warning that simulate ignores weights for poisson fittedModel <- glm(observedResponse ~ Environment1, weights = testData$weights, data = testData$poisson2, family = "poisson") expect_warning(simulateResiduals(fittedModel)) } ) # MASS::glm.nb -------------------------------------------------------------- test_that("glm.nb works", { fittedModel <- MASS::glm.nb(observedResponse ~ Environment1, data = testData$poisson3) runEverything(fittedModel, testData$poisson3) # glm.nb does not warn, does not seem to simulate according to weights fittedModel <- MASS::glm.nb(observedResponse ~ Environment1, data = testData$poisson2, weights = testData$weights) expect_warning(simulateResiduals(fittedModel)) } ) # mgcv::gam -------------------------------------------------------------- test_that("mgcv gam works", { fittedModel <- mgcv::gam(observedResponse ~ Environment1, data = testData$lm) runEverything(fittedModel, testData$lm) fittedModel <- mgcv::gam(observedResponse ~ s(Environment1), data = testData$lm) runEverything(fittedModel, testData$lm) fittedModel <- mgcv::gam(observedResponse ~ Environment1, family = "binomial", data = testData$binomial_10) runEverything(fittedModel, testData$binomial_10) fittedModel <- mgcv::gam(observedResponse ~ Environment1, family = "binomial", data = testData$binomial_yn) runEverything(fittedModel, testData = testData$binomial_yn) fittedModel <- mgcv::gam(cbind(observedResponse1,observedResponse0) ~ Environment1, family = "binomial", data = testData$binomial_nk_matrix) runEverything(fittedModel, testData = testData$binomial_nk_matrix) fittedModel <- mgcv::gam(prop ~ Environment1, family = "binomial", data = testData$binomial_nk_weights, weights = rep(20,200)) runEverything(fittedModel, testData$binomial_nk_weights) fittedModel <- mgcv::gam(observedResponse ~ Environment1, family = "poisson", data = testData$poisson1) runEverything(fittedModel, testData$poisson1) fittedModel2 <- mgcv::gam(observedResponse ~ Environment1, family = "poisson", data = testData$poisson2) expectDispersion(fittedModel2) # mgcv::gam warns about weights fittedModel <- mgcv::gam(observedResponse ~ Environment1, weights = testData$weights, data = testData$poisson_weights, family = "poisson") expect_warning(simulateResiduals(fittedModel)) } ) # lme4::lmer -------------------------------------------------------------- test_that("lme4:lmer works", { fittedModel <- lme4::lmer(observedResponse ~ Environment1 + (1|group), data = testData$lmm) suppressMessages(runEverything(fittedModel, testData$lmm)) # lmer warns! 
fittedModel <- lme4::lmer(observedResponse ~ Environment1 + (1|group), data = testData$lm, weights = testData$weights) expect_warning(simulateResiduals(fittedModel)) } ) # lme4::glmer -------------------------------------------------------------- test_that("lme4:glmer works", { fittedModel <- lme4::glmer(observedResponse ~ Environment1 + (1|group), family = "binomial", data = testData$binomial_10) suppressMessages(runEverything(fittedModel, testData$binomial_10)) fittedModel <- lme4::glmer(observedResponse ~ Environment1 + (1|group), family = "binomial", data = testData$binomial_yn) suppressMessages( runEverything(fittedModel, testData$binomial_yn)) fittedModel <- lme4::glmer(cbind(observedResponse1,observedResponse0) ~ Environment1 + (1|group), family = "binomial", data = testData$binomial_nk_matrix) suppressMessages(runEverything(fittedModel, testData$binomial_nk_matrix)) fittedModel <- lme4::glmer(prop ~ Environment1 + (1|group), family = "binomial", data = testData$binomial_nk_weights, weights = rep(20,200)) suppressMessages(runEverything(fittedModel, testData$binomial_nk_weights)) fittedModel <- lme4::glmer(observedResponse ~ Environment1 + (1|group) + (1|ID), family = "poisson", data = testData$poisson1, control = lme4::glmerControl(optCtrl = list( maxfun = 20000))) suppressMessages(runEverything(fittedModel, testData$poisson1)) fittedModel2 <- lme4::glmer(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData$poisson2, control = lme4::glmerControl(optCtrl = list( maxfun = 20000))) expectDispersion(fittedModel2) fittedModel <- lme4::glmer.nb(observedResponse ~ Environment1 + (1|group), data = testData$poisson1, control = lme4::glmerControl( optimizer = "bobyqa", optCtrl = list(maxfun=20000))) suppressMessages(runEverything(fittedModel, testData$poisson1)) # lme4::glmer warns, OK fittedModel <- lme4::glmer(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData$poisson_weights, weights = testData$weights) expect_warning(simulateResiduals(fittedModel)) # lme4::glmer.nb warns fittedModel <- lme4::glmer.nb(observedResponse ~ Environment1 + (1|group), data = testData$poisson_weights, weights = testData$weights) expect_warning(simulateResiduals(fittedModel)) } ) # glmmTMB -------------------------------------------------------------- test_that("glmmTMB works", { fittedModel <- glmmTMB::glmmTMB(observedResponse ~ Environment1 + (1|group), data = testData$lmm) runEverything(fittedModel, testData$lmm) fittedModel <- glmmTMB::glmmTMB(observedResponse ~ Environment1 + (1|group), family = "binomial", data = testData$binomial_10) runEverything(fittedModel, testData$binomial_10) fittedModel <- glmmTMB::glmmTMB(observedResponse ~ Environment1 + (1|group), family = "binomial", data = testData$binomial_yn) runEverything(fittedModel, testData$binomial_yn) fittedModel <- glmmTMB::glmmTMB(cbind(observedResponse1, observedResponse0) ~ Environment1 + (1|group), family = "binomial", data = testData$binomial_nk_matrix) runEverything(fittedModel, testData$binomial_nk_matrix) fittedModel <- glmmTMB::glmmTMB(cbind(observedResponse1, observedResponse0) ~ Environment1 + (1|group), family = glmmTMB::betabinomial(), data = testData$binomial_nk_matrix) runEverything(fittedModel, testData$binomial_nk_matrix) fittedModel <- glmmTMB::glmmTMB(prop ~ Environment1 + (1|group), family = "binomial", data = testData$binomial_nk_weights, weights = rep(20,200)) runEverything(fittedModel, testData$binomial_nk_weights) fittedModel <- glmmTMB::glmmTMB(prop ~ Environment1 + 
(1|group), family = glmmTMB::betabinomial(), data = testData$binomial_nk_weights2, weights = rep(20,200)) runEverything(fittedModel, testData$binomial_nk_weights2) # glmmTMB does not warn, implemented warning in DHARMa fittedModel <- glmmTMB::glmmTMB(observedResponse ~ Environment1, family = "poisson", data = testData$poisson_weights, weights = testData$weights) expect_warning(simulateResiduals(fittedModel)) } ) # spaMM::HLfit -------------------------------------------------------------- test_that("spaMM::HLfit works", { fittedModel <- spaMM::HLfit(observedResponse ~ Environment1 + (1|group), data = testData$lm) runEverything(fittedModel, testData$lm) fittedModel <- spaMM::HLfit(observedResponse ~ Environment1 + (1|group), family = "binomial", data = testData$binomial_10) runEverything(fittedModel, testData$binomial_10) fittedModel <- spaMM::HLfit(observedResponse ~ Environment1 + (1|group), family = "binomial", data = testData$binomial_yn) runEverything(fittedModel, testData$binomial_yn) fittedModel <- spaMM::HLfit(cbind(observedResponse1,observedResponse0) ~ Environment1 + (1|group), family = "binomial", data = testData$binomial_nk_matrix) runEverything(fittedModel, testData$binomial_nk_matrix) # spaMM doesn't support binomial k/n via weights expect_error(spaMM::HLfit(prop ~ Environment1 + (1|group), family = "binomial", data = testData$binomial_nk_weights, prior.weights = rep(20,200))) fittedModel <- spaMM::HLfit(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData$poisson1) runEverything(fittedModel, testData$poisson1) fittedModel <- spaMM::HLfit(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData$poisson2) expectDispersion(fittedModel) # spaMM does not warn, but seems to be simulating with correct (heteroskedastic) variance. 
# weights cannot be fit with poisson, because spaMM directly interprets weights as variance
# doesn't throw error / warning, but seems intended, see https://github.com/florianhartig/DHARMa/issues/175
expect_error(spaMM::HLfit(observedResponse ~ Environment1 + (1|group), family = negbin(1), data = testData$poisson_weights, prior.weights = weights))
expect_s3_class(simulateResiduals(fittedModel), "DHARMa")
}
)

# GLMMadaptive --------------------------------------------------------------

test_that("GLMMadaptive works", {

  # GLMMadaptive does not support gaussian
  expect_error(GLMMadaptive::mixed_model(fixed = observedResponse ~ Environment1, random = ~ 1 | group, data = testData$lmm, family = gaussian()))

  fittedModel <- GLMMadaptive::mixed_model(fixed = observedResponse ~ Environment1, random = ~ 1 | group, data = testData$binomial_10, family = binomial())
  runEverything(fittedModel, testData$binomial_10)

  fittedModel <- GLMMadaptive::mixed_model(fixed = observedResponse ~ Environment1, random = ~ 1 | group, data = testData$binomial_yn, family = binomial())
  runEverything(fittedModel, testData$binomial_yn)

  fittedModel <- GLMMadaptive::mixed_model(fixed = cbind(observedResponse1, observedResponse0) ~ Environment1, random = ~ 1 | group, data = testData$binomial_nk_matrix, family = binomial())
  # Does not work yet
  #runEverything(fittedModel, testData)

  # GLMMadaptive doesn't support binomial k/n via weights
  #fittedModel <- GLMMadaptive::mixed_model(fixed = observedResponse ~ Environment1, random = ~ 1 | group, data = testData, family = binomial(), weights = rep(20,200)) # does not yet work
  # runEverything(fittedModel, testData)

  fittedModel <- GLMMadaptive::mixed_model(fixed = observedResponse ~ Environment1, random = ~ 1 | group, data = testData$poisson1, family = poisson())
  runEverything(fittedModel, testData$poisson1)

  # GLMMadaptive requires weights according to groups
  weights = rep(c(1,1.1), each = 5)
  fittedModel <- GLMMadaptive::mixed_model(fixed = observedResponse ~ Environment1, random = ~ 1 | group, data = testData$poisson_weights, family = poisson(), weights = weights)
  expect_warning(simulateResiduals(fittedModel))
}
)

rm(testData)

# phylolm::phylolm/phyloglm -----------------------------------------------

# OBS: somehow, the base function update() in getSimulations.phylolm() and
# getRefit.phylolm() searches for the objects in the global env, so the
# test_that wasn't working. A workaround was to put the objects of the tree and
# the dataset in the global env.
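# A minimal, commented-out sketch of that workaround (kept as comments so it does
# not run during the test suite; all object names below are hypothetical, not part
# of the tests): update() re-evaluates the original model call, so the objects
# referenced in that call must be visible where update() looks them up, here the
# global environment via `<<-`.
#   sketchData <<- data.frame(y = rnorm(10), x = rnorm(10))
#   sketchFit  <- lm(y ~ x, data = sketchData)
#   update(sketchFit, . ~ 1)  # succeeds because sketchData is found in the global env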
test_that("phylolm works", { # LM set.seed(123456) tre1 <<- ape::rcoal(60) # global env taxa = sort(tre1$tip.label) b0 = 0; b1 = 1; x <- phylolm::rTrait(n = 1, phy = tre1, model = "BM", parameters = list(ancestral.state = 0, sigma2 = 10)) y <- b0 + b1*x + phylolm::rTrait(n = 1, phy = tre1, model = "lambda", parameters = list(ancestral.state = 0, sigma2 = 1, lambda = 0.5)) testData <<- data.frame(trait = y[taxa], predictor = x[taxa], x = runif(length(y)), y= runif(length(y))) fittedModel = phylolm::phylolm(trait ~ predictor, data = testData, phy = tre1, model = "lambda") runEverything(fittedModel, testData = testData, phy = tre1) }) test_that("phyloglm works", { #GLM set.seed(123456) tre <<- ape::rtree(50) # global env x = phylolm::rTrait(n = 1, phy = tre) X = cbind(rep(1, 50), x) y = phylolm::rbinTrait(n = 1, phy = tre, beta = c(-1,0.5), alpha = 1, X = X) testData <<- data.frame(trait = y, predictor = x, x = runif(length(y)), y = runif(length(y))) fittedModel = phylolm::phyloglm(trait ~ predictor, phy = tre, data = testData) runEverything(fittedModel, testData = testData, phy = tre) }) # isNA works? ------------------------------------------------------------------ # test_that("isNA works", # { # skip_on_cran() # # testData = createData(sampleSize = 200, overdispersion = 0.5, randomEffectVariance = 0.5, family = poisson(), hasNA = T) # # fittedModel <- lm(observedResponse ~ Environment1, data = testData) # expect_true(hasNA(fittedModel)) # # fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData) # expect_true(hasNA(fittedModel)) # # fittedModel <- gam(observedResponse ~ Environment1, family = "poisson", data = testData) # expect_true(hasNA(fittedModel)) # # fittedModel <- lme4::glmer(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData) # expect_true(hasNA(fittedModel)) # # fittedModel <- lme4::glmer.nb(observedResponse ~ Environment1 + (1|group), data = testData) # expect_true(hasNA(fittedModel)) # # fittedModel <- glm.nb(observedResponse ~ Environment1, data = testData) # expect_true(hasNA(fittedModel)) # # fittedModel <- glmmTMB(observedResponse ~ Environment1, family = "poisson", data = testData) # expect_true(hasNA(fittedModel)) # # fittedModel <- HLfit(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData) # expect_true(hasNA(fittedModel)) # # # now without NA # # testData = createData(sampleSize = 200, overdispersion = 0.5, randomEffectVariance = 0.5, family = poisson(), hasNA = F) # # fittedModel <- lm(observedResponse ~ Environment1, data = testData) # expect_false(hasNA(fittedModel)) # # fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData) # expect_false(hasNA(fittedModel)) # # fittedModel <- gam(observedResponse ~ Environment1, family = "poisson", data = testData) # expect_false(hasNA(fittedModel)) # # fittedModel <- lme4::glmer(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData) # expect_false(hasNA(fittedModel)) # # fittedModel <- lme4::glmer.nb(observedResponse ~ Environment1 + (1|group), data = testData) # expect_false(hasNA(fittedModel)) # # fittedModel <- glm.nb(observedResponse ~ Environment1, data = testData) # expect_false(hasNA(fittedModel)) # # fittedModel <- glmmTMB(observedResponse ~ Environment1, family = "poisson", data = testData) # expect_false(hasNA(fittedModel)) # # fittedModel <- HLfit(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData) # expect_false(hasNA(fittedModel)) # # # 
} # ) #
DHARMa/tests/testthat/testSimulateResiduals.R0000644000176200001440000000126114703461527020743 0ustar liggesusers
test_that("Rotation of residuals works", {

  testData = createData(family = gaussian())
  fittedModel <- lm(observedResponse ~ Environment1 , data = testData)

  expect_no_error(res1 <- simulateResiduals(fittedModel))
  expect_no_error(res2 <- simulateResiduals(fittedModel, rotation = diag(x = rep(1,100))))
  expect_equal(res1$scaledResiduals, res2$scaledResiduals)

  expect_no_error(res3 <- recalculateResiduals(res1, group = testData$group))
  expect_no_error(res4 <- recalculateResiduals(res2, group = testData$group, rotation = diag(x = rep(1,10))))
  expect_equal(res3$scaledResiduals, res4$scaledResiduals)
})
DHARMa/tests/testthat/testDharmaClass.R0000644000176200001440000001130714703461527017470 0ustar liggesusers
test_that("createDHARMa works", {

  testData = createData(sampleSize = 200, family = poisson(), randomEffectVariance = 0, numGroups = 5)
  fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData)
  simulationOutput <- simulateResiduals(fittedModel = fittedModel)

  truth = testData$observedResponse
  pred = simulationOutput$fittedPredictedResponse
  simulatedResponse = simulationOutput$simulatedResponse

  sim = createDHARMa(simulatedResponse = simulatedResponse, observedResponse = truth, fittedPredictedResponse = pred, integerResponse = T)
  expect_match(class(sim), "DHARMa")

  expect_message(sim2 <- createDHARMa(simulatedResponse = simulatedResponse, observedResponse = truth), "No fitted predicted")
  expect_match(class(sim2), "DHARMa")
})

test_that("recalculateResiduals works", {

  testData = createData(sampleSize = 200, family = poisson(), numGroups = 20, randomEffectVariance = 1)
  fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData)
  simulationOutput = simulateResiduals(fittedModel)

  ##### only grouping #####
  simulationOutput2 = recalculateResiduals(simulationOutput, group = testData$group)
  expect_true(length(residuals(simulationOutput2)) == simulationOutput2$nGroups)

  ##### only selection #####
  # error
  expect_error(recalculateResiduals(simulationOutput, sel = 1))

  # more than nObs in selection
  simulationOutput3 = recalculateResiduals(simulationOutput, sel = 1:400)
  expect_true(length(residuals(simulationOutput3)) == simulationOutput3$nObs)

  # less than nObs in selection
  expect_true(length(residuals(recalculateResiduals(simulationOutput, sel = 1:2))) == 2)
  expect_true(length(residuals(recalculateResiduals(simulationOutput, sel = c(1,6,8)))) == 3)
  expect_true(length(residuals(recalculateResiduals(simulationOutput, sel = testData$group == 1))) == 10)

  ##### selection and grouping #####
  # selection by different variables
  simulationOutput4 = recalculateResiduals(simulationOutput, sel = testData$group)
  expect_true(length(residuals(simulationOutput4)) == nlevels(simulationOutput4$sel))

  # selecting = float - wrong dimensions (Sel has length 0)
  expect_true(length(residuals(recalculateResiduals(simulationOutput, sel = 1.0:20.0))) == 20)

  # just one group within the selection
  expect_error(recalculateResiduals(simulationOutput, group = testData$group, sel = 1.0:10.0))
  expect_true(length(residuals(recalculateResiduals(simulationOutput, sel = 1.5 :10.7))) == 10) # not checking float
  # simulationOutput2 = recalculateResiduals(simulationOutput, group = testData$group, sel = 1.5:10.6) # not working, doesn't have to work

  # selecting = negative values
  expect_error(length(residuals(recalculateResiduals(simulationOutput, sel = -10 : 5))))
expect_true(length(residuals(recalculateResiduals(simulationOutput, sel = -10:-5))) == 194) # checking NULL functions simulationOutput3 = recalculateResiduals(simulationOutput, sel = NULL) expect_true(length(residuals(simulationOutput3)) == simulationOutput3$nObs) simulationOutput3 = recalculateResiduals(simulationOutput, group = NULL, sel = NULL) expect_true(length(residuals(simulationOutput3)) == simulationOutput3$nObs) simulationOutput3 = recalculateResiduals(simulationOutput, group = NULL, sel = c(1:5,8,9), method = "PIT") simulationOutput3 = recalculateResiduals(simulationOutput, group = NULL, sel = c(1:5,8,9), method = "traditional") expect_true(length(residuals(simulationOutput3)) == length(simulationOutput3$sel)) # checking for expect_no_error(recalculateResiduals(simulationOutput, group = 1:simulationOutput$nObs, sel = simulationOutput$nObs/2:simulationOutput$nObs, method = "traditional")) expect_no_error(recalculateResiduals(simulationOutput, group = 1:simulationOutput$nObs, sel = simulationOutput$nObs/3.5:simulationOutput$nObs, method = "PIT")) }) DHARMa/tests/testthat/testTests.R0000644000176200001440000002402714703461527016413 0ustar liggesusers test_that("overdispersion recognized", { set.seed(123) testData = createData(sampleSize = 200, overdispersion = 3, pZeroInflation = 0.4, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) x = testUniformity(simulationOutput, plot = F) expect_true( x$p.value < 0.05) x = testOutliers(simulationOutput, plot = F) expect_true( x$p.value < 0.05) x = testDispersion(simulationOutput, alternative = "greater", plot = F) expect_true( x$p.value < 0.05) x = testZeroInflation(simulationOutput, alternative = "greater", plot = F) expect_true( x$p.value < 0.05) }) test_that("tests work", { set.seed(123) # creating test data testData = createData(sampleSize = 200, overdispersion = 0.5, pZeroInflation = 0, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) ###### Distribution tests ##### expect_snapshot(testUniformity(simulationOutput, plot = FALSE)) expect_snapshot(testUniformity(simulationOutput, plot = FALSE, alternative = "less")) expect_snapshot(testUniformity(simulationOutput, plot = FALSE, alternative = "greater")) ###### Dispersion tests ####### expect_snapshot(testDispersion(simulationOutput, plot = FALSE)) expect_snapshot(testDispersion(simulationOutput, plot = FALSE, alternative = "less")) expect_snapshot(testDispersion(simulationOutput, plot = FALSE, alternative = "greater")) expect_snapshot(testDispersion(simulationOutput, plot = FALSE, alternative = "two.sided")) expect_message(testOverdispersion(simulationOutput)) expect_message(testOverdispersionParametric(simulationOutput)) ###### Both together########### expect_snapshot(testResiduals(simulationOutput, plot = FALSE)) expect_message(testSimulatedResiduals(simulationOutput)) ###### zero-inflation ########## # testing zero inflation expect_snapshot(testZeroInflation(simulationOutput, plot = FALSE)) expect_snapshot(testZeroInflation(simulationOutput, plot = FALSE, alternative = "less")) # testing generic summaries # testing for number of 1s countOnes <- function(x) sum(x == 1) expect_snapshot(testGeneric(simulationOutput, summary = countOnes, plot = FALSE)) # 1-inflation expect_snapshot(testGeneric(simulationOutput, summary = countOnes, 
plot = FALSE, alternative = "less")) # 1-deficit # testing if mean prediction fits means <- function(x) mean(x) expect_snapshot(testGeneric(simulationOutput, summary = means, plot = FALSE)) # testing if mean sd fits spread <- function(x) sd(x) expect_snapshot(testGeneric(simulationOutput, summary = spread, plot = FALSE)) ################################################################## # grouped simulationOutput <- recalculateResiduals(simulationOutput, group = testData$group) ###### Distribution tests ##### expect_snapshot(testUniformity(simulationOutput, plot = FALSE)) expect_snapshot(testUniformity(simulationOutput, plot = FALSE, alternative = "less")) expect_snapshot(testUniformity(simulationOutput, plot = FALSE, alternative = "greater")) ###### Dispersion tests ####### expect_snapshot(testDispersion(simulationOutput, plot = FALSE)) expect_snapshot(testDispersion(simulationOutput, plot = FALSE, alternative = "less")) expect_snapshot(testDispersion(simulationOutput, plot = FALSE, alternative = "greater")) expect_snapshot(testDispersion(simulationOutput, plot = FALSE, alternative = "two.sided")) expect_message(testOverdispersion(simulationOutput)) expect_message(testOverdispersionParametric(simulationOutput)) ###### Both together########### expect_snapshot(testResiduals(simulationOutput, plot = FALSE)) expect_message(testSimulatedResiduals(simulationOutput)) ###### zero-inflation ########## # testing zero inflation expect_snapshot(testZeroInflation(simulationOutput, plot = FALSE)) expect_snapshot(testZeroInflation(simulationOutput, plot = FALSE, alternative = "less")) # testing generic summaries # testing for number of 1s countOnes <- function(x) sum(x == 1) expect_snapshot(testGeneric(simulationOutput, summary = countOnes, plot = FALSE)) # 1-inflation expect_snapshot(testGeneric(simulationOutput, summary = countOnes, plot = FALSE, alternative = "less")) # 1-deficit # testing if mean prediction fits means <- function(x) mean(x) expect_snapshot(testGeneric(simulationOutput, summary = means, plot = FALSE)) # testing if mean sd fits spread <- function(x) sd(x) expect_snapshot(testGeneric(simulationOutput, summary = spread, plot = FALSE)) ###### Refited ############## # if model is refitted, a different test will be called simulationOutput <- simulateResiduals(fittedModel = fittedModel, refit = T) expect_snapshot(testDispersion(simulationOutput, plot = FALSE)) }) ###### Correlation tests ##### test_that("correlation tests work", { set.seed(123) testData = createData(sampleSize = 200, overdispersion = 0.5, pZeroInflation = 0, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # grouped simulationOutputGrouped <- recalculateResiduals(simulationOutput, group = testData$group) ###### testSpatialAutocorrelation ##### # Standard use expect_snapshot(testSpatialAutocorrelation(simulationOutput, x = testData$x, y = testData$y, plot = FALSE)) expect_snapshot(testSpatialAutocorrelation(simulationOutput, x = testData$x, y = testData$y, plot = FALSE, alternative = "two.sided")) # If x and y is not provided, random values will be created expect_error(testSpatialAutocorrelation(simulationOutput)) # Alternatively, one can provide a distance matrix dM = as.matrix(dist(cbind(testData$x, testData$y))) expect_snapshot(testSpatialAutocorrelation(simulationOutput, distMat = dM, plot = FALSE)) expect_snapshot(testSpatialAutocorrelation(simulationOutput, distMat = dM, plot = FALSE, 
alternative = "two.sided")) # testting when x and y have different length # Error different length expect_snapshot(testSpatialAutocorrelation(simulationOutput, plot = FALSE, x = testData$x[1:10], y = testData$y[1:9]), error = TRUE) # x and y have equal length but unequal to simulationOutput expect_snapshot(testSpatialAutocorrelation(simulationOutput[1:10], plot = FALSE, x = testData$x[1:10], y = testData$y[1:10]), error=TRUE) # see Issue #190 'https://github.com/florianhartig/DHARMa/issues/190' # testing distance matrix and an extra x or y argument expect_snapshot(testSpatialAutocorrelation(simulationOutput, distMat = dM, plot = FALSE, x = testData$x)) expect_snapshot(testSpatialAutocorrelation(simulationOutput, distMat = dM, plot = FALSE, y = testData$y)) ###### testTemporalAutocorrelation ##### # Standard use expect_snapshot(testTemporalAutocorrelation(simulationOutput, plot = FALSE, time = testData$time)) expect_snapshot(testTemporalAutocorrelation(simulationOutput, plot = FALSE, time = testData$time, alternative = "greater")) # error if time is forgotten expect_error(testTemporalAutocorrelation(simulationOutput)) }) ### Test phylogenetic autocorrelation test_that("test phylogenetic autocorrelation", { set.seed(123) tre <<- ape::rcoal(60) b0 = 0; b1 = 1; x <- runif(length(tre$tip.label), 0,1) y <- b0 + b1*x + phylolm::rTrait(n = 1, phy = tre, model = "BM", parameters = list(ancestral.state = 0, sigma2 = 10)) dat <<- data.frame(trait = y, pred = x) fit = lm(trait ~ pred, data = dat) res = simulateResiduals(fit, plot = F) restest <- testPhylogeneticAutocorrelation(res, tree = tre) expect_snapshot(restest) expect_true(restest$p.value <= 0.05) fit2 = phylolm::phylolm(trait ~ pred, data = dat, phy = tre, model = "BM") res2 = simulateResiduals(fit2, plot = F, rotation = "estimated") restest2 <- testPhylogeneticAutocorrelation(res2, tree = tre) expect_snapshot(restest2) expect_true(restest2$p.value > 0.05) }) # Test Outliers test_that("testOutliers", { set.seed(123) testData = createData(sampleSize = 1000, overdispersion = 0, pZeroInflation = 0, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) expect_snapshot(testOutliers(simulationOutput, plot = F, margin = "lower")) expect_snapshot(testOutliers(simulationOutput, plot = F, alternative = "two.sided", margin = "lower")) expect_snapshot(testOutliers(simulationOutput, plot = F, margin = "upper")) }) DHARMa/tests/testthat/testHelper.R0000644000176200001440000000474714703461527016537 0ustar liggesusers test_that("ensureDHARMa", { set.seed(123) testData = createData(sampleSize = 200, overdispersion = 3, pZeroInflation = 0.4, randomEffectVariance = 0) pred = testData$Environment1 fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) expect_error(getSimulations(fittedModel, 1, type = "refdt")) simulationOutput <- simulateResiduals(fittedModel = fittedModel) expect_s3_class(DHARMa:::ensureDHARMa(simulationOutput), "DHARMa") expect_error(DHARMa:::ensureDHARMa(simulationOutput$scaledResiduals), "DHARMa") expect_error(DHARMa:::ensureDHARMa(fittedModel), "DHARMa") expect_s3_class(DHARMa:::ensureDHARMa(simulationOutput, convert = T), "DHARMa") expect_s3_class(DHARMa:::ensureDHARMa(simulationOutput$scaledResiduals, convert = T), "DHARMa") expect_s3_class(DHARMa:::ensureDHARMa(fittedModel, convert = T), "DHARMa") expect_error(DHARMa:::ensureDHARMa(matrix(rnorm(100), nrow = 4), convert = T)) 
expect_error(DHARMa:::ensureDHARMa(list(c = 1), convert = T)) expect_s3_class(DHARMa:::ensureDHARMa(fittedModel, convert = "Model"), "DHARMa") expect_error(DHARMa:::ensureDHARMa(simulationOutput$scaledResiduals, convert = "Model")) expect_error(DHARMa:::ensureDHARMa(matrix(rnorm(100))), "DHARMa") DHARMa:::ensurePredictor(simulationOutput, predictor = pred) DHARMa:::ensurePredictor(simulationOutput) DHARMa:::ensurePredictor(simulationOutput, predictor = testData$observedResponse) expect_error(DHARMa:::ensurePredictor(simulationOutput, predictor = c(1,2,3))) # testResiduals tests distribution, dispersion and outliers expect_error(testQuantiles(simulationOutput$scaledResiduals)) }) test_that("randomSeed", { runif(1) # testing the function in standard settings currentSeed = .Random.seed x = getRandomState(123) runif(1) x$restoreCurrent() expect_true(all(.Random.seed == currentSeed)) # if no seed was set in env, this will also be restored rm(.Random.seed, envir = globalenv()) # now, there is no random seed x = getRandomState(123) expect_true(exists(".Random.seed")) # TRUE runif(1) x$restoreCurrent() expect_false(exists(".Random.seed")) # False runif(1) # re-create a seed # with seed = false currentSeed = .Random.seed x = getRandomState(FALSE) runif(1) x$restoreCurrent() expect_false(all(.Random.seed == currentSeed)) # with seed = NULL currentSeed = .Random.seed x = getRandomState(NULL) runif(1) x$restoreCurrent() expect_true(all(.Random.seed == currentSeed)) }) DHARMa/tests/testthat/_snaps/0000755000176200001440000000000014703461527015544 5ustar liggesusersDHARMa/tests/testthat/_snaps/testTests.md0000644000176200001440000004054014703473302020065 0ustar liggesusers# tests work Code testUniformity(simulationOutput, plot = FALSE) Output Asymptotic one-sample Kolmogorov-Smirnov test data: simulationOutput$scaledResiduals D = 0.035577, p-value = 0.9619 alternative hypothesis: two-sided --- Code testUniformity(simulationOutput, plot = FALSE, alternative = "less") Output Asymptotic one-sample Kolmogorov-Smirnov test data: simulationOutput$scaledResiduals D^- = 0.035577, p-value = 0.6027 alternative hypothesis: the CDF of x lies below the null hypothesis --- Code testUniformity(simulationOutput, plot = FALSE, alternative = "greater") Output Asymptotic one-sample Kolmogorov-Smirnov test data: simulationOutput$scaledResiduals D^+ = 0.034418, p-value = 0.6226 alternative hypothesis: the CDF of x lies above the null hypothesis --- Code testDispersion(simulationOutput, plot = FALSE) Output DHARMa nonparametric dispersion test via sd of residuals fitted vs. simulated data: simulationOutput dispersion = 1.2578, p-value = 0.096 alternative hypothesis: two.sided --- Code testDispersion(simulationOutput, plot = FALSE, alternative = "less") Output DHARMa nonparametric dispersion test via sd of residuals fitted vs. simulated data: simulationOutput dispersion = 1.2578, p-value = 0.952 alternative hypothesis: less --- Code testDispersion(simulationOutput, plot = FALSE, alternative = "greater") Output DHARMa nonparametric dispersion test via sd of residuals fitted vs. simulated data: simulationOutput dispersion = 1.2578, p-value = 0.048 alternative hypothesis: greater --- Code testDispersion(simulationOutput, plot = FALSE, alternative = "two.sided") Output DHARMa nonparametric dispersion test via sd of residuals fitted vs. 
simulated data: simulationOutput dispersion = 1.2578, p-value = 0.096 alternative hypothesis: two.sided --- Code testResiduals(simulationOutput, plot = FALSE) Output $uniformity Asymptotic one-sample Kolmogorov-Smirnov test data: simulationOutput$scaledResiduals D = 0.035577, p-value = 0.9619 alternative hypothesis: two-sided $dispersion DHARMa nonparametric dispersion test via sd of residuals fitted vs. simulated data: simulationOutput dispersion = 1.2578, p-value = 0.096 alternative hypothesis: two.sided $outliers DHARMa bootstrapped outlier test data: simulationOutput outliers at both margin(s) = 1, observations = 200, p-value = 0.82 alternative hypothesis: two.sided percent confidence interval: 0.000000 0.012625 sample estimates: outlier frequency (expected: 0.0028 ) 0.005 --- Code testZeroInflation(simulationOutput, plot = FALSE) Output DHARMa zero-inflation test via comparison to expected zeros with simulation under H0 = fitted model data: simulationOutput ratioObsSim = 1.0565, p-value = 0.552 alternative hypothesis: two.sided --- Code testZeroInflation(simulationOutput, plot = FALSE, alternative = "less") Output DHARMa zero-inflation test via comparison to expected zeros with simulation under H0 = fitted model data: simulationOutput ratioObsSim = 1.0565, p-value = 0.784 alternative hypothesis: less --- Code testGeneric(simulationOutput, summary = countOnes, plot = FALSE) Output DHARMa generic simulation test data: simulationOutput ratioObsSim = 0.97457, p-value = 0.808 alternative hypothesis: two.sided --- Code testGeneric(simulationOutput, summary = countOnes, plot = FALSE, alternative = "less") Output DHARMa generic simulation test data: simulationOutput ratioObsSim = 0.97457, p-value = 0.404 alternative hypothesis: less --- Code testGeneric(simulationOutput, summary = means, plot = FALSE) Output DHARMa generic simulation test data: simulationOutput ratioObsSim = 1.0077, p-value = 0.944 alternative hypothesis: two.sided --- Code testGeneric(simulationOutput, summary = spread, plot = FALSE) Output DHARMa generic simulation test data: simulationOutput ratioObsSim = 1.0834, p-value = 0.208 alternative hypothesis: two.sided --- Code testUniformity(simulationOutput, plot = FALSE) Output Exact one-sample Kolmogorov-Smirnov test data: simulationOutput$scaledResiduals D = 0.22547, p-value = 0.613 alternative hypothesis: two-sided --- Code testUniformity(simulationOutput, plot = FALSE, alternative = "less") Output Exact one-sample Kolmogorov-Smirnov test data: simulationOutput$scaledResiduals D^- = 0.16279, p-value = 0.5338 alternative hypothesis: the CDF of x lies below the null hypothesis --- Code testUniformity(simulationOutput, plot = FALSE, alternative = "greater") Output Exact one-sample Kolmogorov-Smirnov test data: simulationOutput$scaledResiduals D^+ = 0.22547, p-value = 0.315 alternative hypothesis: the CDF of x lies above the null hypothesis --- Code testDispersion(simulationOutput, plot = FALSE) Output DHARMa nonparametric dispersion test via sd of residuals fitted vs. simulated data: simulationOutput dispersion = 2.1091, p-value = 0.048 alternative hypothesis: two.sided --- Code testDispersion(simulationOutput, plot = FALSE, alternative = "less") Output DHARMa nonparametric dispersion test via sd of residuals fitted vs. simulated data: simulationOutput dispersion = 2.1091, p-value = 0.976 alternative hypothesis: less --- Code testDispersion(simulationOutput, plot = FALSE, alternative = "greater") Output DHARMa nonparametric dispersion test via sd of residuals fitted vs. 
simulated data: simulationOutput dispersion = 2.1091, p-value = 0.024 alternative hypothesis: greater --- Code testDispersion(simulationOutput, plot = FALSE, alternative = "two.sided") Output DHARMa nonparametric dispersion test via sd of residuals fitted vs. simulated data: simulationOutput dispersion = 2.1091, p-value = 0.048 alternative hypothesis: two.sided --- Code testResiduals(simulationOutput, plot = FALSE) Output $uniformity Exact one-sample Kolmogorov-Smirnov test data: simulationOutput$scaledResiduals D = 0.22547, p-value = 0.613 alternative hypothesis: two-sided $dispersion DHARMa nonparametric dispersion test via sd of residuals fitted vs. simulated data: simulationOutput dispersion = 2.1091, p-value = 0.048 alternative hypothesis: two.sided $outliers DHARMa bootstrapped outlier test data: simulationOutput outliers at both margin(s) = 20, observations = 200, p-value = 0.1 alternative hypothesis: two.sided percent confidence interval: 0.0 0.1 sample estimates: outlier frequency (expected: 0.005 ) 0.1 --- Code testZeroInflation(simulationOutput, plot = FALSE) Output DHARMa zero-inflation test via comparison to expected zeros with simulation under H0 = fitted model data: simulationOutput ratioObsSim = NaN, p-value = 1 alternative hypothesis: two.sided --- Code testZeroInflation(simulationOutput, plot = FALSE, alternative = "less") Output DHARMa zero-inflation test via comparison to expected zeros with simulation under H0 = fitted model data: simulationOutput ratioObsSim = NaN, p-value = 1 alternative hypothesis: less --- Code testGeneric(simulationOutput, summary = countOnes, plot = FALSE) Output DHARMa generic simulation test data: simulationOutput ratioObsSim = NaN, p-value = 1 alternative hypothesis: two.sided --- Code testGeneric(simulationOutput, summary = countOnes, plot = FALSE, alternative = "less") Output DHARMa generic simulation test data: simulationOutput ratioObsSim = NaN, p-value = 1 alternative hypothesis: less --- Code testGeneric(simulationOutput, summary = means, plot = FALSE) Output DHARMa generic simulation test data: simulationOutput ratioObsSim = 1.0077, p-value = 0.944 alternative hypothesis: two.sided --- Code testGeneric(simulationOutput, summary = spread, plot = FALSE) Output DHARMa generic simulation test data: simulationOutput ratioObsSim = 1.4725, p-value = 0.04 alternative hypothesis: two.sided --- Code testDispersion(simulationOutput, plot = FALSE) Output DHARMa nonparametric dispersion test via mean deviance residual fitted vs. 
simulated-refitted data: simulationOutput dispersion = 1.1231, p-value = 0.248 alternative hypothesis: two.sided # correlation tests work Code testSpatialAutocorrelation(simulationOutput, x = testData$x, y = testData$y, plot = FALSE) Output DHARMa Moran's I test for distance-based autocorrelation data: simulationOutput observed = -0.0166319, expected = -0.0050251, sd = 0.0112750, p-value = 0.3033 alternative hypothesis: Distance-based autocorrelation --- Code testSpatialAutocorrelation(simulationOutput, x = testData$x, y = testData$y, plot = FALSE, alternative = "two.sided") Output DHARMa Moran's I test for distance-based autocorrelation data: simulationOutput observed = -0.0166319, expected = -0.0050251, sd = 0.0112750, p-value = 0.3033 alternative hypothesis: Distance-based autocorrelation --- Code testSpatialAutocorrelation(simulationOutput, distMat = dM, plot = FALSE) Output DHARMa Moran's I test for distance-based autocorrelation data: simulationOutput observed = -0.0166319, expected = -0.0050251, sd = 0.0112750, p-value = 0.3033 alternative hypothesis: Distance-based autocorrelation --- Code testSpatialAutocorrelation(simulationOutput, distMat = dM, plot = FALSE, alternative = "two.sided") Output DHARMa Moran's I test for distance-based autocorrelation data: simulationOutput observed = -0.0166319, expected = -0.0050251, sd = 0.0112750, p-value = 0.3033 alternative hypothesis: Distance-based autocorrelation --- Code testSpatialAutocorrelation(simulationOutput, plot = FALSE, x = testData$x[1:10], y = testData$y[1:9]) Condition Warning in `cbind()`: number of rows of result is not a multiple of vector length (arg 2) Error in `testSpatialAutocorrelation()`: ! Dimensions of x / y coordinates don't match the dimension of the residuals. --- Code testSpatialAutocorrelation(simulationOutput[1:10], plot = FALSE, x = testData$x[ 1:10], y = testData$y[1:10]) Condition Error in `ensureDHARMa()`: ! wrong argument to function, simulationOutput must be a DHARMa object or a numeric vector of quantile residuals! --- Code testSpatialAutocorrelation(simulationOutput, distMat = dM, plot = FALSE, x = testData$ x) Message Both coordinates and distMat provided, calculations will be done based on the distance matrix, coordinates will only be used for plotting. Output DHARMa Moran's I test for distance-based autocorrelation data: simulationOutput observed = -0.0166319, expected = -0.0050251, sd = 0.0112750, p-value = 0.3033 alternative hypothesis: Distance-based autocorrelation --- Code testSpatialAutocorrelation(simulationOutput, distMat = dM, plot = FALSE, y = testData$ y) Message Both coordinates and distMat provided, calculations will be done based on the distance matrix, coordinates will only be used for plotting. 
Output DHARMa Moran's I test for distance-based autocorrelation data: simulationOutput observed = -0.0166319, expected = -0.0050251, sd = 0.0112750, p-value = 0.3033 alternative hypothesis: Distance-based autocorrelation --- Code testTemporalAutocorrelation(simulationOutput, plot = FALSE, time = testData$ time) Output Durbin-Watson test data: simulationOutput$scaledResiduals ~ 1 DW = 1.9703, p-value = 0.833 alternative hypothesis: true autocorrelation is not 0 --- Code testTemporalAutocorrelation(simulationOutput, plot = FALSE, time = testData$ time, alternative = "greater") Output Durbin-Watson test data: simulationOutput$scaledResiduals ~ 1 DW = 1.9703, p-value = 0.4165 alternative hypothesis: true autocorrelation is greater than 0 # test phylogenetic autocorrelation Code restest Output DHARMa Moran's I test for phylogenetic autocorrelation data: res observed = 0.851667, expected = -0.016949, sd = 0.088733, p-value < 2.2e-16 alternative hypothesis: Phylogenetic autocorrelation --- Code restest2 Output DHARMa Moran's I test for phylogenetic autocorrelation data: res2 observed = 0.047474, expected = -0.016949, sd = 0.088260, p-value = 0.4654 alternative hypothesis: Phylogenetic autocorrelation # testOutliers Code testOutliers(simulationOutput, plot = F, margin = "lower") Output DHARMa outlier test based on exact binomial test with approximate expectations data: simulationOutput outliers at lower margin(s) = 4, observations = 1000, p-value = 0.8037 alternative hypothesis: true probability of success is not equal to 0.003984064 95 percent confidence interval: 0.001090908 0.010209665 sample estimates: frequency of outliers (expected: 0.00398406374501992 ) 0.004 --- Code testOutliers(simulationOutput, plot = F, alternative = "two.sided", margin = "lower") Output DHARMa outlier test based on exact binomial test with approximate expectations data: simulationOutput outliers at lower margin(s) = 4, observations = 1000, p-value = 0.8037 alternative hypothesis: true probability of success is not equal to 0.003984064 95 percent confidence interval: 0.001090908 0.010209665 sample estimates: frequency of outliers (expected: 0.00398406374501992 ) 0.004 --- Code testOutliers(simulationOutput, plot = F, margin = "upper") Output DHARMa outlier test based on exact binomial test with approximate expectations data: simulationOutput outliers at upper margin(s) = 4, observations = 1000, p-value = 0.8037 alternative hypothesis: true probability of success is not equal to 0.003984064 95 percent confidence interval: 0.001090908 0.010209665 sample estimates: frequency of outliers (expected: 0.00398406374501992 ) 0.004 DHARMa/tests/testthat/testPlots.R0000644000176200001440000000546714703461527016421 0ustar liggesusers doClassFunctions <- function(simulationOutput){ print(simulationOutput) expect_true(class(residuals(simulationOutput)) == "numeric") } doPlots <- function(simulationOutput, testData){ plot(simulationOutput, quantreg = T, rank = F, asFactor = T) plot(simulationOutput, quantreg = F, rank = F) plot(simulationOutput, quantreg = T, rank = T) plot(simulationOutput, quantreg = F, rank = T) # qq plot plotQQunif(simulationOutput = simulationOutput) # residual vs. 
X plots, various options plotResiduals(simulationOutput) plotResiduals(simulationOutput, rank = T, quantreg = F) plotResiduals(simulationOutput, quantiles = 0.5) plotResiduals(simulationOutput, quantiles = c(0.3, 0.6)) plotResiduals(simulationOutput$scaledResiduals, form = simulationOutput$fittedPredictedResponse) # hist hist(simulationOutput) } doTests <- function(simulationOutput, testData){ testUniformity(simulationOutput = simulationOutput) testZeroInflation(simulationOutput = simulationOutput) testTemporalAutocorrelation(simulationOutput = simulationOutput, time = testData$time) testSpatialAutocorrelation(simulationOutput = simulationOutput, x = testData$x, y = testData$y) } # currently not testing the following because of warning #testOverdispersion(simulationOutput) #testOverdispersion(simulationOutput, alternative = "both", plot = T) # simulationOutput2 <- simulateResiduals(fittedModel = fittedModel, refit = T, n = 10) # n=10 is very low, set higher for serious tests # # print(simulationOutput2) # plot(simulationOutput2, quantreg = F) # # testOverdispersion(simulationOutput2) # testOverdispersion(simulationOutput2, alternative = "both", plot = T) # testOverdispersionParametric(fittedModel) test_that("Plots work", { skip_on_cran() testData = createData(sampleSize = 100, overdispersion = 0, randomEffectVariance = 0, family = binomial()) fittedModel <- glm(observedResponse ~ Environment1 , family = "binomial", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) doClassFunctions(simulationOutput) doPlots(simulationOutput, testData) doTests(simulationOutput, testData) testData = createData(sampleSize = 100, overdispersion = 0, randomEffectVariance = 2, numGroups = 4, family = gaussian()) fittedModel <- glm(observedResponse ~ group , data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) doClassFunctions(simulationOutput) doPlots(simulationOutput, testData) plotResiduals(simulationOutput, testData$group) } ) DHARMa/tests/testthat.R0000644000176200001440000000061014703461527014401 0ustar liggesusers# This file is part of the standard setup for testthat. # It is recommended that you do not modify it. # # Where should you do additional test configuration? 
# Learn more about the roles of various files in: # * https://r-pkgs.org/testing-design.html#sec-tests-files-overview # * https://testthat.r-lib.org/articles/special-files.html library(testthat) library(DHARMa) test_check("DHARMa") DHARMa/MD50000644000176200001440000001441114704441036011562 0ustar liggesusersa637660efc14daf2a2bd94f7c56fb58a *DESCRIPTION 10445a5f9bcdc858ce15778d9b3a9a32 *NAMESPACE 5436910eab101544ca852112d96fb71f *NEWS.md cc0d4370eb9061cc1f213b6bd0674835 *R/DHARMa.R 3c42784b405effcc179246a0a10efc89 *R/compatibility.R 70e16931174530072ce80f8bc11691ad *R/createData.R f539e61385454f3d728af452086ad7b6 *R/data.R 90e550b33711ec0dd6f7e71bcbc6a828 *R/helper.R 47d2fb3f55d823c74d931f6f6ba4b812 *R/imports.R 627453c1d3a6953e662fdb8872babefa *R/plots.R 73eabb6140a03269799b956b7319bf70 *R/random.R 673e3f0fa1d100af3c754af17d9087b2 *R/runBenchmarks.R b7f627736fc5eb6bbfa00a34d64c54cd *R/simulateLRT.R 667ed23fca1472cb658075bfe88a1a1e *R/simulateResiduals.R bdecf8098fad9b3bb21ef590bc56f1d0 *R/startup.R f0d3c56b8b1a302f73539fd18eb1d00e *R/tests.R f3cfacb85b16933117a0af9d6a356976 *R/transformQuantiles.R aa2abed84bf74267907007712f12f735 *README.md 5d81d9adefec17698f44ada71eeb564c *build/vignette.rds 2dbef09b816a247903951e91550b9883 *data/hurricanes.rda de7915d835f6bddb69df00d1241a94d4 *inst/doc/DHARMa.R 06b940b3a6162b9d5067bde33f3749e2 *inst/doc/DHARMa.Rmd a1d14e7d3a71804579fd7712f20adb3f *inst/doc/DHARMa.html c6f74025dd0c63558199590aaecff0c8 *inst/doc/DHARMaForBayesians.R 6720e9ba051d92719da97d7a56627a9c *inst/doc/DHARMaForBayesians.Rmd 0407dda327a3617887e36206c321e959 *inst/doc/DHARMaForBayesians.html ee47b2f15b8a66cc4309e1adadc014f3 *inst/examples/benchmarkRuntimeHelp.R 0a2f261f38df56829e675be3027addc0 *inst/examples/checkModelHelp.R 157bf049ccdb1f599ae11ddcf143aae2 *inst/examples/createDataHelp.R a03e04292a6bcd04cd1b2fe5dae931fa *inst/examples/createDharmaHelp.R a8c668c49acfceb950a096e861885c75 *inst/examples/getRandomStateHelp.R 6db08b81bdd69845056d139aad943d60 *inst/examples/hurricanes.R 77dee7ee026b1b8c3493c4262adf97f9 *inst/examples/plotsHelp.R 7b32d639293813eb9365399c79073178 *inst/examples/runBenchmarksHelp.R e0146bbbe75a638986094fb22809c1aa *inst/examples/simulateLRTHelp.R 7f4efa27a1ead9c0cde3223056a305f9 *inst/examples/simulateResidualsHelp.R 8a5f442e550f3d1ef244082f967b7dda *inst/examples/testDispersionHelp.R 42d9f02128ac6bd5c3bd705cc0e295df *inst/examples/testOutliersHelp.R d4703695b9baba6ac06050810b467ee7 *inst/examples/testPhylogeneticAutocorrelationHelp.R 36ef3ce1613d67686879b745c468ddab *inst/examples/testQuantilesHelp.R 2a2ec2f571745e4a4d397324649e4db5 *inst/examples/testSpatialAutocorrelationHelp.R 38986a94c08126c8b2216fa3d5df3bbd *inst/examples/testTemporalAutocorrelationHelp.R d9a505251ef334303d725b8949f481b9 *inst/examples/testsHelp.R a0dc4177dcf3e0da7eff73eb5eb0eed2 *inst/examples/wrappersHelp.R f920cb4bc0f7f884e4e2a9d365374222 *man/DHARMa-package.Rd 1f1a38089f4673fcc9b6b7d62a23212b *man/DHARMa.ecdf.Rd 71945c2bf6c10a56571e5a8f9250f854 *man/benchmarkRuntime.Rd 39b0e25c1077d0cd325a11aeae74a3d3 *man/checkDots.Rd 5b3c612c6a78a43cfa36c156d6f08ac6 *man/checkModel.Rd 79edc930df01cc7a46305352ce829937 *man/checkSimulations.Rd b7fce416065b06c498c3013cbb51f278 *man/createDHARMa.Rd aedd47c72835e224d9f85b41aca06723 *man/createData.Rd 49ff8a0a7fabca0d678d97a0016a7fbe *man/ensureDHARMa.Rd 01961d739b3118246eab4c862521278c *man/ensurePredictor.Rd 5657667bc914349bb62e057c879da818 *man/getFamily.Rd 7acc490f9dd41470a94360d4b117a04d *man/getFitted.Rd f1575fb1a19f70ce628a861a47104be5 
*man/getFixedEffects.Rd 601dcea47484f72d9a61e3aeecc59a1c *man/getObservedResponse.Rd 04cf53120df1874ae70523bd39729141 *man/getPearsonResiduals.Rd 099415363a046427099ba7a8107d2b78 *man/getPossibleModels.Rd b9fdb4bcf1b01ccf0f9df78e6acb64f3 *man/getQuantile.Rd fd0a9d29b272789aaeba0e32999ea32f *man/getRandomState.Rd 4e16a97a0008eda17160a65dd9d8edf8 *man/getRefit.Rd 9ac26f9151c2a218a1b60332bdfbcb01 *man/getResiduals.Rd c717ebfefdbe9f3f523e490df9bcaa99 *man/getSimulations.Rd 1f1464311bf812f436ef12caa999ffac *man/hist.DHARMa.Rd 952b418a9dc1fd528425c85a86a6ea3b *man/hurricanes.Rd c7d4b00984d8ffa57b05b3244a8404d7 *man/outliers.Rd de43a5b57a1238243bd0ef453440ac3a *man/plot.DHARMa.Rd 85dda03164c287370368d28f9b5cebe3 *man/plot.DHARMaBenchmark.Rd 027b801d6da688deb9168812b2f4528d *man/plotConventionalResiduals.Rd fc0973bac7d70f97d620e963e4932ff7 *man/plotQQunif.Rd 6f71c893f545ca3c7a96a1c11d4a5b81 *man/plotResiduals.Rd 39f79ea0c9573fab4e4f07c756ef2f2f *man/plotSimulatedResiduals.Rd 53886ca6e98dec2df8452381b1794156 *man/print.DHARMa.Rd cc8427445223e1d6d3353d16b7240820 *man/recalculateResiduals.Rd 22c5aa6fba66455a907a20ef6c209210 *man/residuals.DHARMa.Rd 70c20faef77af161e44f1b70344f64b5 *man/runBenchmarks.Rd 157c84fcf2cc8291fe166132b2aa5912 *man/simulateLRT.Rd 3161a211ff4a144353032a64edd7b001 *man/simulateResiduals.Rd d61d6446ff1e45cbf86bc199733ecd78 *man/testCategorical.Rd 36df20e6594ef64ef89191364927404b *man/testDispersion.Rd d2f510ca9c2451fbfbe4011c3acd4e5a *man/testGeneric.Rd ce8da5e1227962df85a7d2fbe44b12f2 *man/testOutliers.Rd 63a3426bc32814532d091ae635e402c0 *man/testOverdispersion.Rd 3f318918c1be50ea0bcf00d907f51a0c *man/testOverdispersionParametric.Rd dd1629f4739b37dc8385c5c9c35304e0 *man/testPDistribution.Rd 278043de129a5f286d0b8e9a6dcdcc1d *man/testPhylogeneticAutocorrelation.Rd 2798a82847ed4ee3030e047fe1e9ec01 *man/testQuantiles.Rd 4ab17ef1306a52eef021ed889da836db *man/testResiduals.Rd 1209ddeefefa6b27ca75f79c60543548 *man/testSimulatedResiduals.Rd 1b34b8e920070236969b5d273c7afd65 *man/testSpatialAutocorrelation.Rd 68dc3d6cb6d49d1d60c2e024a9edbd83 *man/testTemporalAutocorrelation.Rd a538bf9ee96c9ca290b1056b4ac71436 *man/testUniformity.Rd 7352bc8e693c29c902c1bae89833f956 *man/testZeroInflation.Rd 2d6af0b33a006ddb5eb123466e08b0a3 *man/transformQuantiles.Rd e51e8d8ddde253d018bcbf0619157953 *tests/manualTests/DHARMa-rhub.R 5b00d36ec9f077c6f8c7217cc5fd54eb *tests/manualTests/readme.txt 6eb3b794dfbc6c5fc0e2769673d5bbaa *tests/testthat.R 32e15f7d136d0b966ad569d122824413 *tests/testthat/_snaps/testTests.md bdda6bc4e8c5881bafeab8be865c61f5 *tests/testthat/testDharmaClass.R 2a6145a052e6bfd056b97782c9944aea *tests/testthat/testHelper.R 02105598809998f035267b088ccec214 *tests/testthat/testModelTypes.R 8b55709c39438eee7d3d4b29299996f7 *tests/testthat/testPlots.R 75008e7ebf5bdab8fbe59683d286bd1f *tests/testthat/testSimulateResiduals.R 789f444a01f6d435959692bc2d0b3009 *tests/testthat/testTests.R 06b940b3a6162b9d5067bde33f3749e2 *vignettes/DHARMa.Rmd 6720e9ba051d92719da97d7a56627a9c *vignettes/DHARMaForBayesians.Rmd 2e9ebf8d3e62710ad6ccb5fcc48a02ce *vignettes/ECDFmotivation.png 7272863a2c73082768e1e36c91a861d3 *vignettes/dispersion.png DHARMa/R/0000755000176200001440000000000014704245735011462 5ustar liggesusersDHARMa/R/transformQuantiles.R0000644000176200001440000000162614677165224015516 0ustar liggesusers#' Transform quantiles to pdf (deprecated) #' #' The purpose of this function was to transform the DHARMa quantile residuals (which have a uniform distribution) to a particular pdf. 
Since DHARMa 0.3.0, this functionality is integrated in the [residuals.DHARMa] function. Please switch to using this function. #' #' @param res an object with simulated residuals created by [simulateResiduals] #' @param quantileFunction optional - a quantile function to transform the uniform 0/1 scaling of DHARMa to another distribution #' @param outlierValue if a quantile function with infinite support (such as dnorm) is used, residuals that are 0/1 are mapped to -Inf / Inf. outlierValues allows to convert -Inf / Inf values to an optional min / max value. #' #' @export #' transformQuantiles <- function(res, quantileFunction = qnorm, outlierValue = 7){ message("This function is deprecated. Please use residuals() instead") } DHARMa/R/imports.R0000644000176200001440000000312014665273541013300 0ustar liggesusers #### Package car #### # this is copied, slightly modified, from car, to avoid dependencies / S3 clashes when importing car together with lme4 leveneTest_default <- function (y, group, center=median, ...) { # original levene.test if (!is.numeric(y)) stop(deparse(substitute(y)), " is not a numeric variable") if (!is.factor(group)) { warning(deparse(substitute(group)), " coerced to factor.") group <- as.factor(group) } valid <- complete.cases(y, group) meds <- tapply(y[valid], group[valid], center, ...) resp <- abs(y - meds[group]) table <- anova(lm(resp ~ group))[, c(1, 4, 5)] rownames(table)[2] <- " " dots <- deparse(substitute(...)) attr(table, "heading") <- paste("Levene's Test for Homogeneity of Variance (center = ", deparse(substitute(center)), if(!(dots == "NULL")) paste(":", dots), ")", sep="") table } leveneTest_formula <- function(y, data, ...) { form <- y mf <- if (missing(data)) model.frame(form) else model.frame(form, data) if (any(sapply(2:dim(mf)[2], function(j) is.numeric(mf[[j]])))) stop("Levene's test is not appropriate with quantitative explanatory variables.") y <- mf[,1] if(dim(mf)[2]==2) group <- mf[,2] else { if (length(grep("\\+ | \\| | \\^ | \\:",form))>0) stop("Model must be completely crossed formula only.") group <- interaction(mf[,2:dim(mf)[2]]) } leveneTest_default(y=y, group=group, ...) } leveneTest_lm <- function(y, ...) { m <- model.frame(y) m$..y <- model.response(m) f <- formula(y) f[2] <- expression(..y) leveneTest_formula(f, data=m, ...) }DHARMa/R/helper.R0000644000176200001440000002063214703461527013065 0ustar liggesusers#' Modified ECDF function. #' #' @details Ensures symmetric ECDF (standard ECDF is <), and that 0 / 1 values are only produced if the data is strictly < > than the observed data. #' #' @keywords internal DHARMa.ecdf <- function (x) { x <- sort(x) n <- length(x) if (n < 1) stop(paste("DHARMa.ecdf - length vector < 1", x)) vals <- unique(x) rval <- approxfun(vals, cumsum(tabulate(match(x, vals)))/ (n +1), method = "linear", yleft = 0, yright = 1, ties = "ordered") class(rval) <- c("ecdf", "stepfun", class(rval)) assign("nobs", n, envir = environment(rval)) attr(rval, "call") <- sys.call() rval } #' Calculate Residual Quantiles #' #' Calculates residual quantiles from a given simulation. #' #' @param simulations A matrix with simulations from a fitted model. Rows = observations, columns = replicate simulations. #' @param observed A vector with the observed data. #' @param integerResponse Is the response integer-valued? Only has an effect for method = "traditional". #' @param method The quantile randomization method used. See details. #' @param rotation Optional rotation of the residuals. 
You can either provide a known or estimated covariance matrix (e.g. when fitting an AR1 model), or use the argument "estimated", in which case the residual covariance will be approximated by simulations. See comments in details. #' #' @details The function calculates residual quantiles from the simulated data. For continuous distributions, this will simply be the value of the ecdf. #' #' **Randomization procedure for discrete data** #' #' For discrete data, there are two options implemented. #' #' The current default (available since DHARMa 0.3.1) is probability integral transform (PIT-) residuals (Smith, 1985; Dunn & Smyth, 1996; see also Warton et al., 2017). #' #' Before DHARMa 0.3.1, a different randomization procedure was used, in which a U(-0.5, 0.5) random variable was added to observations and simulations for discrete distributions. For a completely discrete distribution, the two procedures should deliver equivalent results, but the second method has the disadvantage that (a) one has to know if the distribution is discrete (DHARMa tries to recognize this automatically), and (b) it leads to inefficiencies for some distributions such as the Tweedie, which are partly continuous, partly discrete #' (see e.g. [issue #168](https://github.com/florianhartig/DHARMa/issues/168) on the DHARMa GitHub page). #' #' **Rotation (optional)** #' #' The getQuantile function includes an additional option to rotate residuals prior to calculating the quantile residuals. This option should ONLY be used when the fitted model includes a particular residual covariance structure, such as an AR1 or a spatial or phylogenetic CAR model. #' #' For these models, residuals calculated from unconditional simulations will include the specified covariance structure, which will trigger e.g. temporal autocorrelation tests and can inflate type I errors of other tests. The idea of the rotation is to rotate the residual space according to the covariance structure of the fitted model, such that the rotated residuals are conditionally independent (provided the fitted model is correct). #' #' If the residual covariance of the fitted model at the response scale can be extracted (e.g. when fitting gls type models), it would be best to extract it and provide this covariance matrix to the rotation option. If that is not the case, providing the argument "estimated" to rotation will estimate the covariance from the data simulated by the model. For GLMMs, where the covariance at the response scale is usually not known / provided, there is probably no alternative to this, but note that this approximation will tend to have considerable error and may be slow to compute for high-dimensional data. If you try to estimate the rotation from simulations, you should set n as high as possible! See [testTemporalAutocorrelation] for a practical example. #' #' The rotation of residuals implemented here is similar to the Variogram.lme() and Variogram.gls() functions in the nlme package using the argument resType = "normalized". #' #' @references #' #' Smith, J. Q. "Diagnostic checks of non-standard time series models." Journal of Forecasting 4.3 (1985): 283-291. #' #' Dunn, P.K., & Smyth, G.K. (1996). Randomized quantile residuals. Journal of Computational and Graphical Statistics 5, 236-244. #' #' Warton, David I., LoĆÆc Thibaut, and Yi Alice Wang. "The PIT-trap—A ā€œmodel-freeā€ bootstrap procedure for inference about regression models with discrete, multivariate responses." PloS one 12.7 (2017).
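#'
#' @examples
#' # A minimal, hypothetical sketch of how quantile residuals can be computed by
#' # hand from a matrix of model simulations; the data and model below are
#' # purely illustrative assumptions.
#' \dontrun{
#' testData = createData(sampleSize = 100, family = poisson())
#' fittedModel = glm(observedResponse ~ Environment1, family = poisson(),
#'                   data = testData)
#' sims = as.matrix(simulate(fittedModel, nsim = 250))
#' res = getQuantile(simulations = sims,
#'                   observed = testData$observedResponse,
#'                   integerResponse = TRUE, method = "PIT")
#' hist(res) # should be roughly uniform if the model is adequate
#' }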
#' #' @export getQuantile <- function(simulations, observed, integerResponse, method = c("PIT", "traditional"), rotation = NULL){ method = match.arg(method) n = length(observed) if(nrow(simulations) != n) stop("DHARMa::getquantile: wrong dimension of simulations") nSim = ncol(simulations) if(method == "traditional"){ if(!is.null(rotation)) stop("rotation can only be used with PIT residuals") if(integerResponse == F){ if(any(duplicated(observed))) message("Model family was recognized or set as continuous, but duplicate values were detected in the response. Consider if you are fitting an appropriate model.") values = as.vector(simulations)[duplicated(as.vector(simulations))] if(length(values) > 0){ if(all(values%%1 == 0)){ integerResponse = T message("Model family was recognized or set as continuous, but duplicate values were detected in the simulation - changing to integer residuals (see ?simulateResiduals for details).") } else { message("Duplicate non-integer values found in the simulation. If this is because you are fitting a non-inter valued discrete response model, note that DHARMa does not perform appropriate randomization for such cases.") } } } scaledResiduals = rep(NA, n) for (i in 1:n){ if(integerResponse == T){ scaledResiduals[i] <- DHARMa.ecdf(simulations[i,] + runif(nSim, -0.5, 0.5))(observed[i] + runif(1, -0.5, 0.5)) }else{ scaledResiduals[i] <- DHARMa.ecdf(simulations[i,])(observed[i]) } } } else { # optional rotation before PIT if(!is.null(rotation)){ if(is.character(rotation) && rotation == "estimated"){ covar = Matrix::nearPD(cov(t(simulations)))$mat L = t(as.matrix(Matrix::chol(covar))) } else if(is.matrix(rotation)) L <- t(chol(rotation)) else stop("DHARMa::getQuantile - wrong argument to rotation parameter.") observed <- solve(L, observed) simulations = apply(simulations, 2, function(a) solve(L, a)) } scaledResiduals = rep(NA, n) for (i in 1:n){ minSim <- mean(simulations[i,] < observed[i]) maxSim <- mean(simulations[i,] <= observed[i]) if (minSim == maxSim) scaledResiduals[i] = minSim else scaledResiduals[i] = runif(1, minSim, maxSim) } } return(scaledResiduals) } # # # testData = createData(sampleSize = 200, family = gaussian(), # randomEffectVariance = 0, numGroups = 5) # fittedModel <- glmmTMB(observedResponse ~ Environment1, # data = testData) # simulationOutput <- simulateResiduals(fittedModel = fittedModel) # # sims = simulationOutput$simulatedResponse # sims[1, c(1,6,8)] = 0 # any(apply(sims, 1, anyDuplicated)) # getQuantile(simulations = sims, observed = testData$observedResponse, n = 200, integerResponse = F, nSim = 250) # # # #' Check dot operator #' #' @param name variable name #' @param default variable default #' #' @details modified from https://github.com/lcolladotor/dots #' #' @keywords internal checkDots <- function(name, default, ...) { args <- list(...) if(!name %in% names(args)) { ## Default value return(default) } else { ## If the argument was defined in the ... part, return it return(args[[name]]) } } securityAssertion <- function(context = "Not provided", stop = F){ generalMessage = "Message from DHARMa: During the execution of a DHARMa function, some unexpected conditions occurred. Even if you didn't get an error, your results may not be reliable. Please check with the help if you use the functions as intended. 
If you think that the error is not on your side, I would be grateful if you could report the problem at https://github.com/florianhartig/DHARMa/issues \n\n Context:" if (stop == F) warning(paste(generalMessage, context)) else stop(paste(generalMessage, context)) } DHARMa/R/random.R0000644000176200001440000000322114677165224013066 0ustar liggesusers#' Record and restore a random state #' #' The aim of this function is to record, manipulate and restore a random state. #' #' @param seed seed argument to set.seed(), typically a number. Additional options: NULL = no seed is set, but the return includes a function for restoring the random seed. F = the function does nothing, i.e. neither is the seed changed, nor does the returned function do anything. #' #' @details This function is intended for two (not mutually exclusive) tasks: #' #' a) record the current random state. #' #' b) change the current random state in a way that the previous state can be restored. #' #' @return A list with various information about the random state after function execution, as well as a function to restore the previous state before the function execution. #' @export #' @example inst/examples/getRandomStateHelp.R #' @author Florian Hartig #' getRandomState <- function(seed = NULL){ # better to explicitly access the global RS? # current = get(".Random.seed", .GlobalEnv, ifnotfound = NULL) current = mget(".Random.seed", envir = .GlobalEnv, ifnotfound = list(NULL))[[1]] if(!is.null(seed) && is.logical(seed) && seed == FALSE){ restoreCurrent <- function(){} }else{ restoreCurrent <- function(){ if(is.null(current)) rm(".Random.seed", envir = .GlobalEnv) else assign(".Random.seed", current , envir = .GlobalEnv) } } # setting seed if(is.numeric(seed)) set.seed(seed) # ensuring that RNG has been initialized if (is.null(current))runif(1) randomState = list(seed, state = get(".Random.seed", globalenv()), kind = RNGkind(), restoreCurrent = restoreCurrent) return(randomState) } DHARMa/R/simulateLRT.R0000644000176200001440000001251714677165224014013 0ustar liggesusers#' Simulated likelihood ratio tests for (generalized) linear mixed models #' #' @description This function uses the DHARMa model wrappers to generate simulated likelihood ratio tests (LRTs) for (generalized) linear mixed models based on a parametric bootstrap. The motivation for using a simulated LRT rather than a standard ANOVA or AIC for model selection in mixed models is that df for mixed models are not clearly defined, thus standard ANOVA based on Chi2 statistics or AIC are unreliable, in particular for models with large contributions of REs to the likelihood. #' #' Interpretation of the results as in a normal LRT: the null hypothesis is that m0 is correct, the test checks whether the increase in likelihood of m1 is higher than expected, using data simulated from m0. #' #' @param m0 null Model. #' @param m1 alternative Model. #' @param n number of simulations. #' @param seed random seed. #' @param plot whether null distribution should be plotted. #' @param suppressWarnings whether to suppress warnings that occur during refitting the models to simulated data. See details for explanations. #' @param saveModels Whether to save refitted models. #' @param ... additional parameters to pass on to the simulate function of the model object. See [getSimulations] for details. #' #' @details The function performs a simulated LRT, which works as follows: #' #' 1. H0: M0 is correct. #' 2. Our test statistic is the log likelihood ratio of M1 to M0.
The empirical value will always be >= 0 because, in a nested setting, the more complex model cannot have a lower likelihood. #' 3. To generate an expected distribution of the test statistic under H0, we simulate new response data under M0, refit M0 and M1 on this data, and calculate the LRs. #' 4. Based on this, calculate p-values etc. in the usual way. #' #' About warnings: warnings such as "boundary (singular) fit: see ?isSingular" will likely occur in this function and are not necessarily a sign of a problem. lme4 warns if RE variances are fit to zero. This is desired / likely in this case, however, because we are simulating data with zero RE variances. Therefore, warnings are turned off by default. For diagnostic reasons, you can turn warnings on, and possibly also inspect the fitted models via the parameter saveModels to see if there are any other problems in the re-fitted models. #' #' Data simulations are performed by [getSimulations], which is a wrapper for the respective model functions. The default for all packages, wherever possible, is to generate marginal simulations (meaning that REs are re-simulated as well). I see no sensible reason to change this, but if you want to and if supported by the respective regression package, you could do so by supplying the necessary arguments via ... #' #' @note The logic of an LRT assumes that m0 is nested in m1, which guarantees that L(M1) >= L(M0). The function does not explicitly check if the models are nested and will work as long as data can be simulated from M0 that can be refit with M0 and M1; however, I would strongly advise against using this for non-nested models unless you have a good statistical reason for doing so. #' #' Also, note that LRTs may be unreliable when fit with REML or some other kind of penalized / restricted ML. Therefore, you should fit the models with ML for use in this function. #' #' @author Florian Hartig #' #' @example inst/examples/simulateLRTHelp.R #' @export simulateLRT<-function(m0, m1, n = 250, seed = 123, plot = TRUE, suppressWarnings = TRUE, saveModels = FALSE, ...){ ######## general assertions and startup calculations ########## # identical to simulateResiduals if (n < 2) stop("error in DHARMa::simulateLRT: n > 1 is required to simulate LRT") checkModel(m0) checkModel(m1) randomState <-getRandomState(seed) on.exit({randomState$restoreCurrent()}) ptm <- proc.time() ####### extract model info ############ out = list() out$data.name = paste("m0:", deparse(substitute(m0)), "m1:", deparse(substitute(m1))) out$m0 = m0 out$m1 = m1 out$observedLRT = logLik(m1) - logLik(m0) if(nobs(m0) != nobs(m1)) stop("m0 and m1 seem to have an unequal number of observations. Check for and possibly remove NAs in the data.") out$nObs = nobs(m0) out$nSim = n out$simulatedResponse = getSimulations(m0, nsim = n, type = "refit", ...)
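# Parametric bootstrap: for each response vector simulated under m0, both models
# are refitted below and the resulting log likelihood ratio is stored; these values
# form the null distribution against which the observed LRT is compared.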
out$simulatedLR = rep(NA, n) if(saveModels == TRUE) out$saveModels = list() for (i in 1:n){ simObserved = out$simulatedResponse[[i]] try({ # for testing # if (i==3) stop("x") # Note: also set silent = T for production if(suppressWarnings == TRUE){ invisible(capture.output(suppressWarnings(suppressMessages({ refittedM0 = getRefit(m0, simObserved) refittedM1 = getRefit(m1, simObserved) })))) } else { refittedM0 = getRefit(m0, simObserved) refittedM1 = getRefit(m1, simObserved) } if(saveModels == TRUE) out$saveModels[[i]] = list(refittedM0 = refittedM0, refittedM1 = refittedM1) out$simulatedLR[i] = logLik(refittedM1) - logLik(refittedM0) }, silent = TRUE) } out$statistic = out$observedLRT names(out$statistic) = "LogL(M1/M0)" out$method = "DHARMa simulated LRT" out$alternative = "M1 describes the data better than M0" out$p.value = getP(out$simulatedLR, out$observedLRT, alternative = "greater", plot = plot, xlab="LogL(M1/M0)") class(out) = "htest" return(out) } DHARMa/R/runBenchmarks.R0000644000176200001440000002350314701675644014405 0ustar liggesusers#' Benchmark calculations #' #' This function runs statistical benchmarks, including Power / Type I error simulations for an arbitrary test with a control parameter. #' #' @param controlValues optionally, a vector with a control parameter (e.g. to vary the strength of a problem the test should be specific to). See help for an example. #' @param calculateStatistics the statistics to be benchmarked. Should return one value, or a vector of values. If controlValues are given, must accept a parameter control. #' @param nRep number of replicates per level of the controlValues. #' @param alpha significance level. #' @param parallel whether to use parallel computations. Possible values are F, T (sets the cores automatically to the number of available cores - 1), or an integer number for the number of cores that should be used for the cluster. #' @param exportGlobal whether the global environment should be exported to the parallel nodes. This will use more memory. Set to TRUE only if your calculateStatistics function depends on other functions or global variables. #' @param ... additional parameters to calculateStatistics. #' @note The benchmark functions in DHARMa are intended for development purposes, and for users that want to test / confirm the properties of functions in DHARMa. If you are running an applied data analysis, they are probably of little use. #' @return An object with list structure of class DHARMaBenchmark. It contains an entry simulations with a matrix of simulations, and an entry summaries with a list of summaries (significant (T/F), mean, p-value for KS-test uniformity).
Can be plotted with [plot.DHARMaBenchmark] #' @export #' @author Florian Hartig #' @seealso [plot.DHARMaBenchmark] #' @example inst/examples/runBenchmarksHelp.R runBenchmarks <- function(calculateStatistics, controlValues = NULL, nRep = 10, alpha = 0.05, parallel = FALSE, exportGlobal = FALSE, ...){ start_time <- Sys.time() # Sequential Simulations simulations = list() if(parallel == FALSE){ if(is.null(controlValues)) simulations[[1]] = replicate(nRep, calculateStatistics(), simplify = "array") else for(j in 1:length(controlValues)){ simulations[[j]] = replicate(nRep, calculateStatistics(controlValues[j]), simplify = "array") } # Parallel Simulations }else{ if (parallel == TRUE | parallel == "auto"){ cores <- parallel::detectCores() - 1 message("parallel, set cores automatically to ", cores) } else if (is.numeric(parallel)){ cores <- parallel message("parallel, set number of cores by hand to ", cores) } else stop("wrong argument to parallel") cl <- parallel::makeCluster(cores) # for each # doParallel::registerDoParallel(cl) # # `%dopar%` <- foreach::`%dopar%` # # if(is.null(controlValues)) simulations[[1]] = t(foreach::foreach(i=1:nRep, .packages=c("lme4", "DHARMa"), .combine = rbind) %dopar% calculateStatistics()) # # else for(j in 1:length(controlValues)){ # simulations[[j]] = t(foreach::foreach(i=1:nRep, .packages=c("lme4", "DHARMa"), .combine = rbind) %dopar% calculateStatistics(controlValues[j])) # } # # End for each # doesn't see to work properly loadedPackages = (.packages()) parExectuer = function(x = NULL, control = NULL) calculateStatistics(control) if (exportGlobal == TRUE) parallel::clusterExport(cl = cl, varlist = ls(envir = .GlobalEnv)) parallel::clusterExport(cl = cl, c("parExectuer", "calculateStatistics", "loadedPackages"), envir = environment()) parallel::clusterEvalQ(cl, {for(p in loadedPackages) library(p, character.only=TRUE)}) # parallel::clusterExport(cl = cl, varlist = ls(envir = .GlobalEnv)) # parallel::clusterExport(cl=cl,varlist = c("calculateStatistics"), envir=environment()) # parallel::clusterExport(cl=cl,varlist = c("controlValues", "alpha", envir=environment()) # parallel::clusterExport(cl=cl,varlist = c("calculateStatistics"), envir=environment()) # parallel::clusterExport(cl=cl,varlist = c("controlValues", "alpha", envir=environment()) if(is.null(controlValues)) simulations[[1]] = parallel::parSapply(cl, 1:nRep, parExectuer) else for(j in 1:length(controlValues)){ simulations[[j]] = parallel::parSapply(cl, 1:nRep, parExectuer, control = controlValues[j]) } parallel::stopCluster(cl) } # Calculations of summaries if(is.null(controlValues)) controlValues = c("N") nOutputs = nrow(simulations[[1]]) nControl = length(controlValues) # reducing the list of outputs to a data.frame x = Reduce(rbind, lapply(simulations, t)) x = data.frame(x) x$replicate = rep(1:nRep, length(controlValues)) x$controlValues = rep(controlValues, each = nRep) summary = list() # function for aggregation aggreg <- function(f) { ret <- aggregate(x[,- c(ncol(x) - 1, ncol(x))], by = list(x$controlValues), f) colnames(ret)[1] = "controlValues" return(ret) } sig <- function(x) mean(x < alpha) isUnif <- function(x) ks.test(x, "punif")$p.value summary$propSignificant = aggreg(sig) summary$meanP = aggreg(mean) summary$isUnifP = aggreg(mean) out = list() out$controlValues = controlValues out$simulations = x out$summaries = summary out$time = Sys.time() - start_time out$nSummaries = ncol(x) - 2 out$nReplicates = nRep class(out) = "DHARMaBenchmark" return(out) } #' Plots DHARMa benchmarks #' #' The 
function plots the result of an object of class DHARMaBenchmark, created by [runBenchmarks]. #' #' @param x object of class DHARMaBenchmark, created by [runBenchmarks]. #' @param ... parameters to pass to the plot function. #' #' @details The function will create two types of plots, depending on whether the run contains only a single value (or no value) of the control parameter, or whether a vector of control values is provided: #' #' If a single or no value of the control parameter is provided, the function will create box plots of the estimated p-values, with the number of significant p-values plotted to the left. #' #' If a control parameter is provided, the function will plot the proportion of significant p-values against the control parameter, with 95% CIs based based on the performed replicates displayed as confidence bands. #' #' @seealso [runBenchmarks] #' @export plot.DHARMaBenchmark <- function(x, ...){ if(length(x$controlValues)== 1){ boxplot(x$simulations[,1:x$nSummaries], col = "grey", ylim = c(-0.3,1), horizontal = TRUE, las = 2, xaxt='n', main = "p distribution", ...) abline(v = 0) abline(v = c(0.25, 0.5, 0.75), lty = 2) text(-0.2, 1:x$nSummaries, labels = x$summaries$propSignificant[-1]) # barplot(as.matrix(x$summaries$propSignificant[-1]), horiz = TRUE, add = TRUE, offset = -0.2, names.arg = "test", width = 0.5, space = 1.4) }else{ res = x$summaries$propSignificant plot(NULL, xlim = range(res$controlValues), ylim = c(0,1), ...) for(i in 1:x$nSummaries){ getCI = function(k) as.vector(binom.test(k,x$nReplicates)$conf.int) CIs = sapply(res[,i+1]*x$nReplicates, getCI) polygon(c(res$controlValues, rev(res$controlValues)), c(CIs[1,], rev(CIs[2,])), col = "#00000020", border = FALSE) lines(res$controlValues, res[,i+1], col = i, lty = i, lwd = 2) } legend("bottomright", colnames(res[,-1]), col = 1:x$nSummaries, lty = 1:x$nSummaries, lwd = 2) } } # this used to be an alternative to the boxplot for control = N plotMultipleHist <- function(x){ lin = ncol(x) histList <- lapply(x, hist, breaks = seq(0,1,0.02), plot = FALSE) plot(NULL, xlim = c(0,1), ylim = c(0, lin), yaxt = 'n', ylab = NULL, xlab = "p-value") abline(h= 0) for(i in 1:lin){ maxD = max(histList[[i]]$density) lines(histList[[i]]$mids, i - 1 + histList[[i]]$density/maxD, type = "l") abline(h= i) abline(h= 1/maxD + i - 1, , col = "red", lty = 2) } abline(v = 1, lty = 2) abline(v = c(0.05, 0), lty = 2, col = "red") } #' Plot distribution of p-values. #' @param x vector of p values. #' @param plot should the values be plotted. #' @param main title for the plot. #' @param ... additional arguments to hist. #' @author Florian Hartig testPDistribution <- function(x, plot = TRUE, main = "p distribution \n expected is flat at 1", ...){ out = suppressWarnings(ks.test(x, 'punif')) hist(x, xlim = c(0,1), breaks = 20, freq = FALSE, main = main, ...) 
abline(h=1, col = "red") return(out) } # if(plot == TRUE){ # oldpar <- par(mfrow = c(4,4)) # hist(out, breaks = 50, col = "red", main = paste("mean of", nSim, "simulations")) # for (i in 1:min(nSim, 15)) hist(out[i,], breaks = 50, freq = FALSE, main = i) # par(oldpar) # } generateGenerator <- function(mod){ out <- function(){ simulations = simulate(mod, nsim = 1) newData <-model.frame(mod) if(is.vector(simulations[[1]])){ newData[,1] = simulations[[1]] } else { # Hack to make the binomial n/k case work newData[[1]] = NULL newData = cbind(simulations[[1]], newData) } refittedModel = update(mod, data = newData) list(data = newData, model = refittedModel) } } #' Benchmark runtimes of several functions #' @param createModel a function that creates and returns a fitted model. #' @param evaluationFunctions a list of functions that are to be evaluated on the fitted models. #' @param n number of replicates. #' @details This is a small helper function designed to benchmark runtimes of several operations that are to be performed on a list of fitted models. In the example, this is used to benchmark the runtimes of several DHARMa tests. #' @author Florian Hartig #' @example inst/examples/benchmarkRuntimeHelp.R #' @export benchmarkRuntime<- function(createModel, evaluationFunctions, n){ m = length(evaluationFunctions) models = replicate(n, createModel(), simplify = FALSE) runtimes = rep(NA, m) for (i in 1:m){ runtimes[i] = system.time(lapply(models, evaluationFunctions[[i]]))[3] } return(runtimes) } DHARMa/R/DHARMa.R0000644000176200001440000002337014677165224012611 0ustar liggesusers #' @keywords internal #' @references vignette("DHARMa", package="DHARMa") #' @details #' To get started with the package, look at the vignette and start with [simulateResiduals] #' "_PACKAGE" #' Print simulated residuals #' #' @param x an object with simulated residuals created by [simulateResiduals]. #' @param ... optional arguments for compatibility with the generic function, no function implemented. #' @export print.DHARMa <- function(x, ...){ cat(paste("Object of Class DHARMa with simulated residuals based on", x$nSim, "simulations with refit =", x$refit , ". See ?DHARMa::simulateResiduals for help."), "\n", "\n") if (length(x$scaledResiduals) < 20) cat("Scaled residual values:", x$scaledResiduals) else { cat("Scaled residual values:", x$scaledResiduals[1:20], "...") } } #' Return residuals of a DHARMa simulation #' #' @param object an object with simulated residuals created by [simulateResiduals] #' @param quantileFunction optional - a quantile function to transform the uniform 0/1 scaling of DHARMa to another distribution #' @param outlierValues if a quantile function with infinite support (such as dnorm) is used, residuals that are 0/1 are mapped to -Inf / Inf. outlierValues allows to convert -Inf / Inf values to an optional min / max value. #' @param ... optional arguments for compatibility with the generic function, no function implemented #' @details the function accesses the slot $scaledResiduals in a fitted DHARMa object, and optionally transforms the standard DHARMa quantile residuals (which have a uniform distribution) to a particular pdf. #' #' @note some of the papers on simulated quantile residuals transforming the residuals (which are natively uniform) back to a normal distribution. I presume this is because of the larger familiarity of most users with normal residuals. 
Personally, I never considered this desirable, for the reasons explained in https://github.com/florianhartig/DHARMa/issues/39, but with this function, I wanted to give users the option to plot normal residuals if they so wish. #' #' @export #' @example inst/examples/simulateResidualsHelp.R #' residuals.DHARMa <- function(object, quantileFunction = NULL, outlierValues = NULL, ...){ if(is.null(quantileFunction)){ return(object$scaledResiduals) } else { res = quantileFunction(object$scaledResiduals) if(!is.null(outlierValues)){ res = ifelse(res == -Inf, outlierValues[1], res) res = ifelse(res == Inf, outlierValues[2], res) } return(res) } } #' Return outliers #' #' Returns the outliers of a DHARMa object. #' #' @param object an object with simulated residuals created by [simulateResiduals]. #' @param lowerQuantile lower threshold for outliers. Default is zero = outside simulation envelope. #' @param upperQuantile upper threshold for outliers. Default is 1 = outside simulation envelope. #' @param return whether to return indices of outliers or a logical vector. #' #' @details First of all, note that the standard definition of an outlier in the DHARMa plots and outlier tests is an observation that is outside the simulation envelope. How far outside that is depends a lot on how many simulations you do. If you have 100 data points and do 100 simulations, you would expect to have one "outlier" on average, even with a perfectly fitting model. This is in fact what the outlier test tests. #' #' Thus, keep in mind that for a small number of simulations, outliers are mostly a technical term: these are points that are outside our simulations, but we don't know how far away they are. #' #' If you are seriously interested in HOW FAR outside the expected distribution a data point is, you should increase the number of simulations in [simulateResiduals] to be sure to get the tail of the data distribution correctly. In this case, it may make sense to adjust lowerQuantile and upperQuantile, e.g. to 0.025, 0.975, which would define outliers as values outside the central 95% of the distribution. #' #' Also, note that outliers are particularly concerning if they have a strong influence on the model fit. One could test the influence, for example, by removing them from the data, or by some measures of leverage, e.g. generalisations of Cook's distance as in Pinho, L. G. B., Nobre, J. S., & Singer, J. M. (2015). Cook’s distance for generalized linear mixed models. Computational Statistics & Data Analysis, 82, 126–136. doi:10.1016/j.csda.2014.08.008. At the moment, however, no such function is provided in DHARMa. #' #' @export #' outliers <- function(object, lowerQuantile = 0, upperQuantile = 1, return = c("index", "logical")){ return = match.arg(return) out = residuals(object) >= upperQuantile | residuals(object) <= lowerQuantile if(return == "logical") return(out) else(return(which(out))) } #' Create a DHARMa object from hand-coded simulations or Bayesian posterior predictive simulations. #' #' @param simulatedResponse matrix of observations simulated from the fitted model - row index for observations and column index for simulations. #' @param observedResponse true observations. #' @param fittedPredictedResponse optional fitted predicted response. For Bayesian posterior predictive simulations, using the median posterior prediction as fittedPredictedResponse is recommended. If not provided, the mean simulatedResponse will be used.
#' @param integerResponse if T, noise will be added to the residuals to maintain uniform expectations for integer responses (such as Poisson or Binomial). Unlike in [simulateResiduals], the nature of the data is not automatically detected, so this MUST be set by the user appropriately. #' @param seed the random seed to be used within DHARMa. The default setting, recommended for most users, is to keep the random seed at a fixed value of 123. This means that you will always get the same randomization and thus the same result when running the same code. NULL = no new seed is set, but previous random state will be restored after simulation. FALSE = no seed is set, and random state will not be restored. The latter two options are only recommended for simulation experiments. See vignette for details. #' @param method the quantile randomization method used. The two options implemented at the moment are probability integral transform (PIT-) residuals (current default), and the "traditional" randomization procedure, which was used in DHARMa until version 0.3.0. For details, see [getQuantile]. #' @param rotation optional rotation of the residual space to remove residual autocorrelation. See details in [simulateResiduals], section *residual auto-correlation* for an extended explanation, and [getQuantile] for syntax. #' @details The use of this function is to convert simulated residuals (e.g. from a point estimate, or Bayesian p-values) to a DHARMa object, to make use of the plotting / test functions in DHARMa. #' @note Either scaled residuals or (simulatedResponse AND observed response) have to be provided. #' @example inst/examples/createDharmaHelp.R #' @export createDHARMa <- function(simulatedResponse , observedResponse , fittedPredictedResponse = NULL, integerResponse = FALSE, seed = 123, method = c("PIT", "traditional"), rotation = NULL){ randomState <-getRandomState(seed) on.exit({randomState$restoreCurrent()}) match.arg(method) out = list() out$simulatedResponse = simulatedResponse out$refit = F out$integerResponse = integerResponse out$observedResponse = observedResponse if(!is.matrix(simulatedResponse) & !is.null(observedResponse)) stop("either scaled residuals or simulations and observations have to be provided.") if(ncol(simulatedResponse) < 2) stop("simulatedResponse with less than 2 simulations provided - cannot calculate residuals on that.") if(ncol(simulatedResponse) < 10) warning("simulatedResponse with less than 10 simulations provided. This rarely makes sense.") out$nObs = length(observedResponse) if (out$nObs < 3) stop("warning - number of observations < 3 ... this rarely makes sense.") if(!
(out$nObs == nrow(simulatedResponse))) stop("dimensions of observedResponse and simulatedResponse do not match.") out$nSim = ncol(simulatedResponse) out$scaledResiduals = getQuantile(simulations = simulatedResponse , observed = observedResponse , integerResponse = integerResponse, method = method, rotation = rotation) # makes sure that DHARM plots that rely on this vector won't crash if(is.null(fittedPredictedResponse)){ message("No fitted predicted response provided, using the mean of the simulations") fittedPredictedResponse = apply(simulatedResponse, 1, mean) } out$fittedPredictedResponse = fittedPredictedResponse out$randomState = randomState class(out) = "DHARMa" return(out) } #' Ensures that an object is of class DHARMa #' #' @param simulationOutput a DHARMa simulation output or an object that can be converted into a DHARMa simulation output #' @param convert if TRUE, attempts to convert model + numeric to DHARMa, if "Model", converts only supported models to DHARMa #' @details The #' @return an object of class DHARMa #' @keywords internal ensureDHARMa <- function(simulationOutput, convert = FALSE){ if(inherits(simulationOutput, "DHARMa")){ return(simulationOutput) } else { if(convert == FALSE) stop("wrong argument to function, simulationOutput must be a DHARMa object!") else { if (class(simulationOutput)[1] %in% getPossibleModels()){ if (convert == "Model" | convert == TRUE) return(simulateResiduals(simulationOutput)) } else if(is.vector(simulationOutput, mode = "numeric") & convert == TRUE) { out = list() out$scaledResiduals = simulationOutput out$nObs = length(out$scaledResiduals) class(out) = "DHARMa" return(out) } } } stop("wrong argument to function, simulationOutput must be a DHARMa object or a numeric vector of quantile residuals!") } DHARMa/R/startup.R0000644000176200001440000000056614701671620013307 0ustar liggesusersprintDHARMaStartupInfo <- function() { version <- packageVersion('DHARMa') hello <- paste("This is DHARMa ",version,". For overview type '?DHARMa'. For recent changes, type news(package = 'DHARMa')" ,sep="") packageStartupMessage(hello) } .onLoad <- function(...) { options(DHARMaSignalColor = "red") } .onAttach <- function(...) { printDHARMaStartupInfo() } DHARMa/R/data.R0000644000176200001440000000352014677165224012521 0ustar liggesusers#' Hurricanes #' #' A data set on hurricane strength and fatalities in the US between 1950 and 2012. The data originates from the study by Jung et al., PNAS, 2014, who claim that the masculinity / femininity of a hurricane name has a causal effect on fatalities, presumably through a different perception of danger caused by the names. #' #' @name hurricanes #' @aliases hurricanes #' @docType data #' #' @references Jung, K., Shavitt, S., Viswanathan, M., & Hilbe, J. M. (2014). Female hurricanes are deadlier than male hurricanes. Proceedings of the National Academy of Sciences, 111(24), 8782-8787. #' #' @format A 'data.frame': 92 obs. of 14 variables #' \describe{ #' \item{Year}{Year of the hurricane (1950-2012) } #' \item{Name}{Name of the hurricane } #' \item{MasFem}{Masculinity-femininity rating of the hurricane's name in the range 1 = very masculine, 11 = very feminine.} #' \item{MinPressure_before}{Minimum air pressure (909-1002).} #' \item{Minpressure_Updated_2014}{Updated minimum air pressure (909-1003).} #' \item{Gender_MF}{Binary gender categorization based on MasFem (male = 0, female = 1).} #' \item{Category}{Strength of the hurricane in categories (1:7). 
(1 = not at all, 7 = very intense).} #' \item{alldeaths}{Human deaths occured (1:256).} #' \item{NDAM}{Normalized damage in millions (1:75.000). The raw (dollar) amounts of property damage caused by hurricanes were obtained, and the unadjusted dollar amounts were normalized to 2013 monetary values by adjusting them to inflation, wealth and population density.} #' \item{Elapsed_Yrs}{Elapsed years since the occurrence of hurricanes (1:63).} #' \item{Source}{MWR/wikipedia ()} #' \item{ZMasFem}{Scaled (MasFem)} #' \item{ZMinPressure_A}{Scaled (Minpressure_Updated_2014)} #' \item{ZNDAM}{Scaled (NDAM)} #' ... #' } #' @example inst/examples/hurricanes.R NULL DHARMa/R/createData.R0000644000176200001440000001446214677165224013654 0ustar liggesusers#' Simulate test data #' @description This function creates synthetic dataset with various problems such as overdispersion, zero-inflation, etc. #' @param sampleSize sample size of the dataset. #' @param intercept intercept (linear scale). #' @param fixedEffects vector of fixed effects (linear scale). #' @param quadraticFixedEffects vector of quadratic fixed effects (linear scale). #' @param numGroups number of groups for the random effect. #' @param randomEffectVariance variance of the random effect (intercept). #' @param overdispersion if this is a numeric value, it will be used as the sd of a random normal variate that is added to the linear predictor. Alternatively, a random function can be provided that takes as input the linear predictor. #' @param family family. #' @param scale scale if the distribution has a scale (e.g. sd for the Gaussian) #' @param cor correlation between predictors. #' @param roundPoissonVariance if set, this creates a uniform noise on the possion response. The aim of this is to create heteroscedasticity. #' @param pZeroInflation probability to set any data point to zero. #' @param binomialTrials Number of trials for the binomial. Only active if family == binomial. #' @param temporalAutocorrelation strength of temporalAutocorrelation. #' @param spatialAutocorrelation strength of spatial Autocorrelation. #' @param factorResponse should the response be transformed to a factor (inteded to be used for 0/1 data). #' @param replicates number of datasets to create. #' @param hasNA should an NA be added to the environmental predictor (for test purposes). 
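#'
#' @examples
#' # A small, hypothetical usage sketch; the parameter values below are
#' # illustrative assumptions only.
#' \dontrun{
#' testData = createData(sampleSize = 200, overdispersion = 0.5,
#'                       pZeroInflation = 0.1, family = poisson())
#' head(testData)
#' }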
#' @export #' @example /inst/examples/createDataHelp.R createData <- function(sampleSize = 100, intercept = 0, fixedEffects = 1, quadraticFixedEffects = NULL, numGroups = 10, randomEffectVariance = 1, overdispersion = 0, family = poisson(), scale = 1, cor = 0, roundPoissonVariance = NULL, pZeroInflation = 0, binomialTrials = 1, temporalAutocorrelation = 0, spatialAutocorrelation = 0, factorResponse = FALSE, replicates = 1, hasNA = FALSE){ nPredictors = length(fixedEffects) out = list() time = sample.int(sampleSize) #change to random order because of issue #436 x = runif(sampleSize) y = runif(sampleSize) for (i in 1:replicates){ ######################################################################## # Create predictors predictors = matrix(runif(nPredictors*sampleSize, min = -1), ncol = nPredictors) if (cor != 0){ predTemp <- runif(sampleSize, min = -1) predictors = (1-cor) * predictors + cor * matrix(rep(predTemp, nPredictors), ncol = nPredictors) } colnames(predictors) = paste("Environment", 1:nPredictors, sep = "") ######################################################################## # Create random effects group = rep(1:numGroups, each = sampleSize/numGroups) groupRandom = rnorm(numGroups, sd = sqrt(randomEffectVariance)) ######################################################################## # Creation of linear prediction linearResponse = intercept + predictors %*% fixedEffects + groupRandom[group] if(!is.null(quadraticFixedEffects)){ linearResponse = linearResponse + predictors^2 %*% quadraticFixedEffects } ######################################################################## # Overdispersion on linear predictor if(is.numeric(overdispersion)) linearResponse = linearResponse + rnorm(sampleSize, sd = overdispersion) if(is.function(overdispersion)) linearResponse = linearResponse + overdispersion(linearResponse) ######################################################################## # Autocorrelation if(!(temporalAutocorrelation == 0)){ distMat <- as.matrix(dist(time)) invDistMat <- 1/distMat * 5000 diag(invDistMat) <- 0 invDistMat = sfsmisc::posdefify(invDistMat) temporalError <- MASS::mvrnorm(n = 1, mu = rep(0,sampleSize), Sigma = invDistMat) linearResponse = linearResponse + temporalAutocorrelation * temporalError } if(!(spatialAutocorrelation == 0)) { distMat <- as.matrix(dist(cbind(x, y))) invDistMat <- 1/distMat * 5000 diag(invDistMat) <- 0 invDistMat = sfsmisc::posdefify(invDistMat) spatialError <- MASS::mvrnorm(n = 1, mu = rep(0,sampleSize), Sigma = invDistMat) linearResponse = linearResponse + spatialAutocorrelation * spatialError } ######################################################################## # Link and distribution linkResponse = family$linkinv(linearResponse) if (family$family == "gaussian") observedResponse = rnorm(n = sampleSize, mean = linkResponse, sd = scale) # need checking else if (family$family == "gamma") observedResponse = rgamma(n = sampleSize, shape = linkResponse / scale, scale = scale) else if (family$family == "binomial"){ observedResponse = rbinom(n = sampleSize, binomialTrials, prob = linkResponse) if (binomialTrials > 1) observedResponse = cbind(observedResponse1 = observedResponse, observedResponse0 = binomialTrials - observedResponse) } else if (family$family == "poisson") { if(is.null(roundPoissonVariance)) observedResponse = rpois(n = sampleSize, lambda = linkResponse) else observedResponse = round(rnorm(n = length(linkResponse), mean = linkResponse, sd = roundPoissonVariance)) } else if (grepl("Negative Binomial",family$family)) { 
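# family$family of a MASS negative binomial family is a string such as
# "Negative Binomial(1.5)"; the next line extracts the theta value from
# between the parentheses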
theta = as.numeric(gsub("[\\(\\)]", "", regmatches(family$family, gregexpr("\\(.*?\\)", family$family))[[1]])) observedResponse = MASS::rnegbin(linkResponse, theta = theta) } else stop("wrong link argument supplied") ######################################################################## # Zero-inflation if(pZeroInflation != 0){ artificialZeros = rbinom(n = length(observedResponse), size = 1, prob = 1-pZeroInflation) observedResponse = observedResponse * artificialZeros } if(factorResponse) observedResponse = factor(observedResponse) # add spatialError? out[[i]] <- data.frame(ID = 1:sampleSize, observedResponse, predictors, group = as.factor(group), time, x, y) } if(length(out) == 1) out = out[[1]] if(hasNA) out[1,3] = NA return(out) } #createData() DHARMa/R/simulateResiduals.R0000644000176200001440000005116314704245735015312 0ustar liggesusers#' Create simulated residuals #' #' The function creates scaled residuals by simulating from the fitted model. Residuals can be extracted with [residuals.DHARMa]. See [testResiduals] for an overview of residual tests, [plot.DHARMa] for an overview of available plots. #' #' @param fittedModel A fitted model of a class supported by DHARMa. #' @param n Number of simulations. The smaller the number, the higher the stochastic error on the residuals. Also, for very small n, discretization artefacts can influence the tests. Default is 250, which is a relatively safe value. You can consider increasing to 1000 to stabilize the simulated values. #' @param refit If FALSE, new data will be simulated and scaled residuals will be created by comparing observed data with new data. If TRUE, the model will be refitted on the simulated data (parametric bootstrap), and scaled residuals will be created by comparing observed with refitted residuals. #' @param integerResponse If TRUE, noise will be added at to the residuals to maintain a uniform expectations for integer responses (such as Poisson or Binomial). Usually, the model will automatically detect the appropriate setting, so there is no need to adjust this setting. #' @param plot If TRUE, [plotResiduals] will be directly run after the residuals have been calculated. #' @param ... Further parameters to pass on to the simulate function of the model object. An important use of this is to specify whether simulations should be conditional on the current random effect estimates, e.g. via re.form. Note that not all models support syntax to specify conditional or unconditional simulations. See details and [getSimulations]. #' @param seed The random seed to be used within DHARMa. The default setting, recommended for most users, is keep the random seed on a fixed value 123. This means that you will always get the same randomization and thus the same result when running the same code. If NULL, no new seed is set, but previous random state will be restored after simulation. If FALSE, no seed is set, and random state will not be restored. The latter two options are only recommended for simulation experiments. See vignette for details. #' @param method For refit = FALSE, the quantile randomization method is used. The two options implemented at the moment are probability integral transform (PIT-) residuals (current default), and the "traditional" randomization procedure, that was used in DHARMa until version 0.3.0. refit = T will always use "traditional", respectively of the value of method. For details, see [getQuantile]. #' @param rotation Optional rotation of the residual space prior to calculating the quantile residuals. 
The main purpose of this is to account for residual covariance as created by temporal, spatial or phylogenetic autocorrelation. See details below, section *residual autocorrelation* as well as the help of [getQuantile] and, for a practical example, [testTemporalAutocorrelation]. #' #' @details There are a number of important considerations when simulating from a more complex (hierarchical) model: #' #' \strong{Re-simulating random effects / hierarchical structure}: in a hierarchical model, we have several stochastic processes aligned on top of each other. Specifically, in a GLMM, we have a lower level stochastic process (random effect), whose result enters into a higher level (e.g. Poisson distribution). For other hierarchical models such as state-space models, similar considerations apply. #' #' In such a situation, we have to decide if we want to re-simulate all stochastic levels, or only a subset of those. For example, in a GLMM, it is common to only simulate the last stochastic level (e.g. Poisson) conditional on the fitted random effects. This is often referred to as a conditional simulation. For controlling how many levels should be re-simulated, the simulateResiduals function allows to pass on parameters to the simulate function of the fitted model object. Please refer to the help of the different simulate functions (e.g. ?simulate.merMod) for details. For merMod (lme4) model objects, the relevant parameters are use.u and re.form. For glmmTMB model objects, the package version 1.1.10 has a temporary solution to simulate conditional on all random effects (see [glmmTMB::set_simcodes] val = "fix", and issue [#888](https://github.com/glmmTMB/glmmTMB/issues/888) in the glmmTMB GitHub repository). #' #' If the model is correctly specified, the simulated residuals should be flat regardless of how many hierarchical levels we re-simulate. The most thorough procedure would therefore be to test all possible options. If testing only one option, I would recommend to re-simulate all levels, because this essentially tests the model structure as a whole. This is the default setting in the DHARMa package. A potential drawback is that re-simulating the lower-level random effects creates more variability, which may reduce power for detecting problems in the upper-level stochastic processes. In particular, dispersion tests may produce different results when switching from conditional to unconditional simulations, and often the conditional simulation is more sensitive. #' #' \strong{Refitting or not}: another issue is how residuals are calculated. simulateResiduals has two options that are controlled by the refit parameter: #' #' 1. if refit = FALSE (default), new data is simulated from the fitted model, and residuals are calculated by comparing the observed data to the new data. #' #' 2. if refit = TRUE, a parametric bootstrap is performed, meaning that the model is refit on the new data, and residuals are created by comparing observed residuals against refitted residuals. I advise against using this method per default (see more comments in the vignette), unless you are really sure that you need it. #' #' \strong{Residuals per group}: In many situations, it can be useful to look at residuals per group, e.g. to see how much the model over / underpredicts per plot, year or subject. To do this, use [recalculateResiduals], together with a grouping variable (see also help).
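#'
#' For example, a minimal illustrative sketch (assuming the model was fitted to a data frame `testData` that contains a grouping column `group`): `simulationOutputGrouped <- recalculateResiduals(simulationOutput, group = testData$group)` returns one aggregated residual per level of `group`.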
#' #' \strong{Transformation to other distributions}: DHARMa calculates residuals for which the theoretical expectation (assuming a correctly specified model) is uniform. To transform this residuals to another distribution (e.g. so that a correctly specified model will have normal residuals) see [residuals.DHARMa]. #' #' \strong{Integer responses}: this is only relevant if method = "traditional", in which case it activates the randomization of the residuals. Usually, this does not need to be changed, as DHARMa will try to automatically check if the fitted model has an integer or discrete distribution via the family argument. However, in some cases the family does not allow to uniquely identify the distribution type. For example, a tweedie distribution can be interger or continuous. Therefore, DHARMa will additionally check the simulation results for repeated values, and will change the distribution type if repeated values are found (a message is displayed in this case). #' #' \strong{Residual autocorrelation}: a common problem is residual autocorrelation. Spatial, temporal and phylogenetic autocorrelation can be tested with [testSpatialAutocorrelation], [testTemporalAutocorrelation] and [testPhylogeneticAutocorrelation]. If simulations are unconditional, residual correlations will be maintained, even if the autocorrelation is addressed by an appropriate CAR structure. This may be a problem, because autocorrelation may create apparently systematic patterns in plots or tests such as [testUniformity]. To reduce this problem, either simulate conditional on fitted correlated REs, or rotate residuals via the rotation parameter (the latter will likely only work in approximately linear models). See [getQuantile] for details on the rotation. #' #' @return An S3 class of type "DHARMa". Implemented S3 functions include [plot.DHARMa], [print.DHARMa] and [residuals.DHARMa]. For other functions that can be used on a DHARMa object, see section "See Also" below. #' #' @seealso [testResiduals], [plotResiduals], [recalculateResiduals], [outliers] #' #' @example inst/examples/simulateResidualsHelp.R #' @import stats #' @export simulateResiduals <- function(fittedModel, n = 250, refit = FALSE, integerResponse = NULL, plot = FALSE, seed = 123, method = c("PIT", "traditional"), rotation = NULL, ...){ ######## general assertions and startup calculations ########## if (n < 2) stop("error in DHARMa::simulateResiduals: n > 1 is required to calculate scaled residuals") checkModel(fittedModel) match.arg(method) randomState <- getRandomState(seed) on.exit({randomState$restoreCurrent()}) ptm <- proc.time() ####### extract model info ############ out = list() family = getFamily(fittedModel) out$fittedModel = fittedModel out$modelClass = class(fittedModel)[1] out$additionalParameters = list(...) out$nObs = nobs(fittedModel) out$nSim = n out$refit = refit out$observedResponse = getObservedResponse(fittedModel) # this is to check for problem #325 if (out$nObs < length(out$observedResponse)) stop("DHARMA::simulateResiduals: nobs(model) < nrow(model.frame). A possible reason is that you have observation with zero prior weights (in binomial with n=0) in your data. Calculating residuals in this case wouldn't be sensible. Please remove zero-weight observations from your data and re-fit your model! 
If you believe that this is not the reason, please file an issue under https://github.com/florianhartig/DHARMa/issues") if(is.null(integerResponse)){ if (family$family %in% c("integer-valued", "binomial", "poisson", "quasibinomial", "quasipoisson", "Negative Binom", "nbinom2", "nbinom1", "genpois", "compois", "truncated_poisson", "truncated_nbinom2", "truncated_nbinom1", "betabinomial", "Poisson", "Tpoisson", "COMPoisson", "negbin", "Tnegbin") | grepl("Negative Binomial",family$family) ) integerResponse = TRUE else integerResponse = FALSE } out$integerResponse = integerResponse out$problems = list() out$fittedPredictedResponse = getFitted(fittedModel) out$fittedFixedEffects = getFixedEffects(fittedModel) out$fittedResiduals = getResiduals(fittedModel) ######## refit = F ################## if (refit == FALSE){ out$method = method out$simulatedResponse <- getSimulations(fittedModel, nsim = n, type = "normal", ...) checkSimulations(out$simulatedResponse, out$nObs, out$nSim) out$scaledResiduals <- getQuantile(simulations = out$simulatedResponse , observed = out$observedResponse , integerResponse = integerResponse, method = method, rotation = rotation) ######## refit = T ################## } else { out$method = "traditional" # Adding new outputs out$refittedPredictedResponse <- matrix(nrow = out$nObs, ncol = n ) out$refittedFixedEffects <- matrix(nrow = length(out$fittedFixedEffects), ncol = n ) #out$refittedRandomEffects <- matrix(nrow = length(out$fittedRandomEffects), ncol = n ) out$refittedResiduals <- matrix(nrow = out$nObs, ncol = n) out$refittedPearsonResiduals <- matrix(nrow = out$nObs, ncol = n) out$simulatedResponse <- getSimulations(fittedModel, nsim = n, type = "refit", ...) for (i in 1:n){ simObserved <- out$simulatedResponse[[i]] try({ # for testing # if (i==3) stop("x") # Note: also set silent = T for production refittedModel <- getRefit(fittedModel, simObserved, ...) out$refittedPredictedResponse[,i] <- getFitted(refittedModel) out$refittedFixedEffects[,i] <- getFixedEffects(refittedModel) out$refittedResiduals[,i] <- getResiduals(refittedModel) out$refittedPearsonResiduals[,i] <- residuals(refittedModel, type = "pearson") #out$refittedRandomEffects[,i] <- ranef(refittedModel) }, silent = TRUE) } ######### residual checks ########### if(anyNA(out$refittedResiduals)) warning("DHARMa::simulateResiduals warning: on refit = TRUE, at least one of the refitted models produced an error. Inspect the refitted model values. Results may not be reliable.") ## check for convergence problems dup = sum(duplicated(out$refittedFixedEffects, MARGIN = 2)) if (dup > 0){ if (dup < n/3){ warning(paste("There were", dup, "of", n ,"duplicate parameter estimates in the refitted models. This may hint towards a problem with optimizer convergence in the fitted models. Results may not be reliable. The suggested action is to not use the refitting procedure, and diagnose with tools available for the normal (not refitted) simulated residuals. If you absolutely require the refitting procedure, try changing tolerance / iterations in the optimizer settings.")) } else { warning(paste("There were", dup, "of", n ,"duplicate parameter estimates in the refitted models. This may hint towards a problem with optimizer convergence in the fitted models. Results are likely not reliable. The suggested action is to not use the refitting procedure, and diagnose with tools available for the normal (not refitted) simulated residuals. 
If you absolutely require the refitting procedure, try changing tolerance / iterations in the optimizer settings.")) out$problems[[length(out$problems)+ 1]] = "error in refit" } } ######### residual calculations ########### out$scaledResiduals = getQuantile(simulations = out$refittedResiduals, observed = out$fittedResiduals, integerResponse = integerResponse, method = "traditional", rotation = rotation) } ########### Wrapup ############ out$time = proc.time() - ptm out$randomState = randomState class(out) = "DHARMa" if(plot == TRUE) plot(out) return(out) } #' Check simulated data #' #' The function checks if the simulated data seems fine. #' #' @param simulatedResponse the simulated response #' @param nObs number of observations #' @param nSim number of simulations #' #' @keywords internal checkSimulations <- function(simulatedResponse, nObs, nSim){ if(!inherits(simulatedResponse, "matrix")) securityAssertion("Simulation from the model produced wrong class.", stop = TRUE) if(any(dim(simulatedResponse) != c(nObs, nSim) )) securityAssertion("Simulations from the model do not have the same dimension as nObs.", stop = FALSE) if(any(!is.finite(simulatedResponse))) message("Simulations from your fitted model produce infinite values. Consider if this is sensible.") if(any(is.nan(simulatedResponse))) securityAssertion("Simulations from your fitted model produce NaN values. DHARMa cannot calculate residuals for this. This is nearly certainly an error of the regression package you are using.", stop = TRUE) if(any(is.na(simulatedResponse))) securityAssertion("Simulations from your fitted model produce NA values. DHARMa cannot calculate residuals for this. This is nearly certainly an error of the regression package you are using.", stop = TRUE) } #' Recalculate residuals with grouping #' #' The purpose of this function is to recalculate scaled residuals per group, based on the simulations done by [simulateResiduals]. #' #' @param simulationOutput an object with simulated residuals created by [simulateResiduals]. #' @param group group of each data point. #' @param aggregateBy function for the aggregation. Default is sum. This should only be changed if you know what you are doing. Note in particular that the expected residual distribution might not be flat any more if you choose general functions, such as sd etc. #' @param sel an optional vector for selecting the data to be aggregated. #' @param seed the random seed to be used within DHARMa. The default setting, recommended for most users, is to keep the random seed at a fixed value of 123. This means that you will always get the same randomization and thus the same result when running the same code. NULL = no new seed is set, but previous random state will be restored after simulation. FALSE = no seed is set, and random state will not be restored. The latter two options are only recommended for simulation experiments. See vignette for details. #' @param method the quantile randomization method used. The two options implemented at the moment are probability integral transform (PIT-) residuals (current default), and the "traditional" randomization procedure, which was used in DHARMa until version 0.3.0. For details, see [getQuantile]. #' @param rotation optional rotation of the residual space to remove residual autocorrelation. See details in [simulateResiduals], section *residual auto-correlation* for an extended explanation, and [getQuantile] for syntax. 
#' @return an object of class DHARMa, similar to what is returned by [simulateResiduals], but with additional outputs for the new grouped calculations. Note that the relevant outputs are present twice in the object: first the grouped calculations (which are returned by $name access), and then, under identical names, the original (ungrouped) outputs. Moreover, there is a function 'aggregateByGroup', which can be used to aggregate predictor variables in the same way as the variables calculated here. #' #' @details The function aggregates the observed and simulated data per group according to the function provided by the aggregateBy option. DHARMa residuals are then calculated exactly as for a single data point (see [getQuantile] for details). #' #' @example inst/examples/simulateResidualsHelp.R #' @export recalculateResiduals <- function(simulationOutput, group = NULL, aggregateBy = sum, sel = NULL, seed = 123, method = c("PIT", "traditional"), rotation = NULL){ randomState <- getRandomState(seed) on.exit({randomState$restoreCurrent()}) match.arg(method) # ensures that the base simulation is always used for recalculate if(!is.null(simulationOutput$original)) simulationOutput = simulationOutput$original out = list() out$original = simulationOutput out$group = group out$method = method out$aggregateBy = aggregateBy if(is.null(group) & is.null(sel)) return(simulationOutput) else { if(is.null(group)) group = 1:simulationOutput$nObs group = as.factor(group) out$nGroups = nlevels(group) if(is.null(sel)) sel = 1:simulationOutput$nObs out$sel = sel aggregateByGroup <- function(x) aggregate(x[sel], by = list(group[sel]), FUN=aggregateBy)[,2] out$observedResponse = aggregateByGroup(simulationOutput$observedResponse) out$fittedPredictedResponse = aggregateByGroup(simulationOutput$fittedPredictedResponse) if (simulationOutput$refit == FALSE){ out$simulatedResponse = apply(simulationOutput$simulatedResponse, 2, aggregateByGroup) out$scaledResiduals = getQuantile(simulations = out$simulatedResponse , observed = out$observedResponse, integerResponse = simulationOutput$integerResponse, method = method, rotation = rotation) ######## refit = T ################## } else { out$refittedPredictedResponse <- apply(simulationOutput$refittedPredictedResponse, 2, aggregateByGroup) out$fittedResiduals = aggregateByGroup(simulationOutput$fittedResiduals) out$refittedResiduals = apply(simulationOutput$refittedResiduals, 2, aggregateByGroup) out$refittedPearsonResiduals = apply(simulationOutput$refittedPearsonResiduals, 2, aggregateByGroup) out$scaledResiduals = getQuantile(simulations = out$refittedResiduals , observed = out$fittedResiduals , integerResponse = simulationOutput$integerResponse, method = method, rotation = rotation) } # hack - the c here will result in both old and new outputs being present in the resulting output, but a named access should refer to the new, grouped calculations # question to myself - what's the use of that, why not erase the old outputs? 
they are anyway saved in the old object out$aggregateByGroup = aggregateByGroup out = c(out, simulationOutput) out$randomState = randomState class(out) = "DHARMa" return(out) } } DHARMa/R/plots.R0000644000176200001440000004613614703461527012756 0ustar liggesusers#' DHARMa standard residual plots #' #' This S3 function creates standard plots for the simulated residuals contained in an object of class DHARMa, using [plotQQunif] (left panel) and [plotResiduals] (right panel) #' #' @param x An object of class DHARMa with simulated residuals created by [simulateResiduals]. #' @param ... Further options for [plotResiduals]. Consider in particular parameters quantreg, rank and asFactor. xlab, ylab and main cannot be changed when using plot.DHARMa, but can be changed when using [plotResiduals]. #' @param title The title for both panels (plotted via mtext, outer = TRUE). #' #' @details The function creates a plot with two panels. The left panel is a uniform qq plot (calling [plotQQunif]), and the right panel shows residuals against predicted values (calling [plotResiduals]), with outliers highlighted in red (default color but see Note). #' #' Very briefly, we would expect that a correctly specified model shows: #' #' a) a straight 1-1 line, as well as non-significance of the displayed tests in the qq-plot (left) -> evidence for the correct overall residual distribution (for more details on the interpretation of this plot, see [plotQQunif]) #' #' b) visual homogeneity of residuals in both vertical and horizontal direction, as well as n.s. of quantile tests in the res ~ predictor plot (for more details on the interpretation of this plot, see [plotResiduals]) #' #' Deviations from these expectations can be interpreted similarly to a linear regression. See the vignette for detailed examples. #' #' Note that, unlike [plotResiduals], the plot.DHARMa command uses the default rank = T. #' #' @note The color for highlighting outliers and significant tests can be changed by setting \code{options(DHARMaSignalColor = "red")} to a different color. See \code{getOption("DHARMaSignalColor")} for the current setting. This is convenient for a color-blind friendly display, since red and black are difficult for some people to separate. #' #' @seealso [plotResiduals], [plotQQunif] #' @example inst/examples/plotsHelp.R #' @import graphics #' @import utils #' @export plot.DHARMa <- function(x, title = "DHARMa residual", ...){ oldpar <- par(mfrow = c(1,2), oma = c(0,1,2,1)) on.exit(par(oldpar)) plotQQunif(x) plotResiduals(x, ...) mtext(title, outer = TRUE) } #' Histogram of DHARMa residuals #' #' The function produces a histogram from a DHARMa output. Outliers are marked red. #' #' @param x A DHARMa simulation output (class DHARMa) #' @param breaks Breaks for hist() function. #' @param col Color for histogram bars. #' @param main Plot title. #' @param xlab Plot x-axis label. #' @param cex.main Plot cex.main. #' @param ... Other arguments to be passed on to hist(). #' @details The function calls hist() to create a histogram of the scaled residuals. Outliers are marked red by default, but this can be changed by setting \code{options(DHARMaSignalColor = "red")} to a different color. See \code{getOption("DHARMaSignalColor")} for the current setting. 
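#'
#' A small sketch of changing the highlight color (res is a hypothetical DHARMa object):
#'
#' ```
#' # switch the DHARMa highlight color, e.g. to a color-blind friendly alternative
#' options(DHARMaSignalColor = "blue")
#' hist(res)
#' ```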
#' @seealso [plotSimulatedResiduals], [plotResiduals] #' @example inst/examples/plotsHelp.R #' @export hist.DHARMa <- function(x, breaks = seq(-0.02, 1.02, len = 53), col = c(.Options$DHARMaSignalColor,rep("lightgrey",50), .Options$DHARMaSignalColor), main = "Hist of DHARMa residuals", xlab = "Residuals (outliers are marked red)", cex.main = 1, ...){ x = ensureDHARMa(x, convert = TRUE) val = x$scaledResiduals val[val == 0] = -0.01 val[val == 1] = 1.01 hist(val, breaks = breaks, col = col, main = main, xlab = xlab, cex.main = cex.main, ...) } #' DHARMa standard residual plots #' #' DEPRECATED, use plot() instead #' #' @param simulationOutput an object with simulated residuals created by [simulateResiduals] #' @param ... further options for [plotResiduals]. Consider in particular parameters quantreg, rank and asFactor. xlab, ylab and main cannot be changed when using plotSimulatedResiduals, but can be changed when using plotResiduals. #' @note This function is deprecated. Use [plot.DHARMa] #' #' @seealso [plotResiduals], [plotQQunif] #' @export plotSimulatedResiduals <- function(simulationOutput, ...){ message("plotSimulatedResiduals is deprecated, please switch your code to simply using the plot() function") plot(simulationOutput, ...) } #' Quantile-quantile plot for a uniform distribution #' #' The function produces a uniform quantile-quantile plot from a DHARMa output. Optionally, tests for uniformity, outliers and dispersion can be added. #' #' @param simulationOutput A DHARMa simulation output (class DHARMa). #' @param testUniformity If T, the function [testUniformity] will be called and the result will be added to the plot. #' @param testOutliers If T, the function [testOutliers] will be called and the result will be added to the plot. #' @param testDispersion If T, the function [testDispersion] will be called and the result will be added to the plot. #' @param ... Arguments to be passed on to [gap::qqunif]. #' #' @details The function calls qqunif() from the R package gap to create a quantile-quantile plot for a uniform distribution, and overlays tests for particular distributional problems as specified. #' When tests are displayed, significant p-values are highlighted in the color red by default. This can be changed by setting \code{options(DHARMaSignalColor = "red")} to a different color. See \code{getOption("DHARMaSignalColor")} for the current setting. #' @seealso [plotSimulatedResiduals], [plotResiduals] #' @example inst/examples/plotsHelp.R #' @export plotQQunif <- function(simulationOutput, testUniformity = TRUE, testOutliers = TRUE, testDispersion = TRUE, ...){ a <- list(...) a$pch = checkDots("pch", 2, ...) a$bty = checkDots("bty", "n", ...) a$logscale = checkDots("logscale", F, ...) a$col = checkDots("col", "black", ...) a$main = checkDots("main", "QQ plot residuals", ...) a$cex.main = checkDots("cex.main", 1, ...) a$xlim = checkDots("xlim", c(0,1), ...) a$ylim = checkDots("ylim", c(0,1), ...) 
simulationOutput = ensureDHARMa(simulationOutput, convert = "Model") do.call(gap::qqunif, append(list(simulationOutput$scaledResiduals), a)) if(testUniformity == TRUE){ temp = testUniformity(simulationOutput, plot = FALSE) legend("topleft", c(paste("KS test: p=", round(temp$p.value, digits = 5)), paste("Deviation ", ifelse(temp$p.value < 0.05, "significant", "n.s."))), text.col = ifelse(temp$p.value < 0.05, .Options$DHARMaSignalColor, "black" ), bty="n") } if(testOutliers == TRUE){ temp = testOutliers(simulationOutput, plot = FALSE) legend("bottomright", c(paste("Outlier test: p=", round(temp$p.value, digits = 5)), paste("Deviation ", ifelse(temp$p.value < 0.05, "significant", "n.s."))), text.col = ifelse(temp$p.value < 0.05, .Options$DHARMaSignalColor, "black" ), bty="n") } if(testDispersion == TRUE){ temp = testDispersion(simulationOutput, plot = FALSE) legend("center", c(paste("Dispersion test: p=", round(temp$p.value, digits = 5)), paste("Deviation ", ifelse(temp$p.value < 0.05, "significant", "n.s."))), text.col = ifelse(temp$p.value < 0.05, .Options$DHARMaSignalColor, "black" ), bty="n") } } #' Generic res ~ pred scatter plot with spline or quantile regression on top #' #' The function creates a generic residual plot with either spline or quantile regression to highlight patterns in the residuals. Outliers are highlighted in red by default (but see Details). #' #' @param simulationOutput An object, usually a DHARMa object, from which residual values can be extracted. Alternatively, a vector with residuals or a fitted model can be provided, which will then be transformed into a DHARMa object. #' @param form Optional predictor against which the residuals should be plotted. Default is to use predicted(simulationOutput). #' @param quantreg Whether to perform a quantile regression based on [testQuantiles] or a smooth spline around the mean. Default NULL chooses T for nObs < 2000, and F otherwise. #' @param rank If T, the values provided in form will be rank transformed. This will usually make patterns easier to spot visually, especially if the distribution of the predictor is skewed. If form is a factor, this has no effect. #' @param asFactor Should a numeric predictor provided in form be treated as a factor. Default is to choose this for < 10 unique values, as long as enough predictions are available to draw a boxplot. #' @param smoothScatter if T, a smooth scatter plot will be plotted instead of a normal scatter plot. This makes sense when the number of residuals is very large. Default NULL chooses T for nObs > 10000, and F otherwise. #' @param quantiles For a quantile regression, which quantiles should be plotted. Default is 0.25, 0.5, 0.75. #' @param absoluteDeviation If T, switch from displaying normal quantile residuals to absolute deviation from the mean expectation of 0.5 (calculated as 2 * abs(res - 0.5)). The purpose of this is to test explicitly for heteroskedasticity, see details. #' @param ... Additional arguments to plot / boxplot. #' @details The function plots residuals against a predictor (by default against the fitted value, extracted from the DHARMa object, or any other predictor). #' #' Outliers are highlighted in red by default (for information on definition and interpretation of outliers, see [testOutliers]). This can be changed by setting \code{options(DHARMaSignalColor = "red")} to a different color. See \code{getOption("DHARMaSignalColor")} for the current setting. 
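#'
#' As a brief sketch (res is a hypothetical DHARMa object and dat a hypothetical data frame with a predictor column x):
#'
#' ```
#' # residuals against an arbitrary predictor instead of the model predictions
#' plotResiduals(res, form = dat$x)
#' # absolute deviation from 0.5, useful to check for heteroskedasticity
#' plotResiduals(res, absoluteDeviation = TRUE)
#' ```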
#' #' To provide a visual aid for detecting deviations from uniformity in the y-direction, the plot function calculates an (optional) quantile regression of the residuals, by default for the 0.25, 0.5 and 0.75 quantiles. Since the residuals should be uniformly distributed for a correctly specified model, the theoretical expectations for these regressions are straight lines at 0.25, 0.5 and 0.75, shown as dashed black lines on the plot. However, even for a perfect model, some deviation from these expectations is to be expected by chance, especially if the sample size is small. The function therefore tests whether the deviation of the fitted quantile regression from the expectation is significant, using [testQuantiles]. If so, the significant quantile regression is highlighted in red (by default) and a warning is displayed in the plot. #' #' Overdispersion typically manifests itself as Q1 (0.25) deviating towards 0 and Q3 (0.75) deviating towards 1. Heteroskedasticity manifests itself as non-parallel quantile lines. To diagnose heteroskedasticity and overdispersion, it can be helpful to additionally plot the absolute deviation of the residuals from the mean expectation of 0.5, using the option absoluteDeviation = T. In this case, we would again expect Q1-Q3 quantile lines at 0.25, 0.5, 0.75, but greater dispersion (also locally in the case of heteroskedasticity) always manifests itself in deviations towards 1. #' #' The quantile regression can take some time to calculate, especially for larger data sets. For this reason, quantreg = F can be set to generate a smooth spline instead. This is the default for n > 2000. #' #' If form is a factor, a boxplot will be plotted instead of a scatter plot. The residuals within each factor level should be uniformly distributed, so the box should go from 0.25 to 0.75, with the median line at 0.5 (within-group). To test if deviations from those expectations are significant, KS-tests per group and a Levene test for homogeneity of variances are performed. See [testCategorical] for details. #' #' @note If nObs > 10000, the scatter plot is replaced by graphics::smoothScatter #' #' #' @note The color for highlighting outliers and quantile lines/splines with significant tests can be changed by setting \code{options(DHARMaSignalColor = "red")} to a different color. See \code{getOption("DHARMaSignalColor")} for the current setting. This is convenient for a color-blind friendly display, since red and black are difficult for some people to separate. #' #' @return If quantile tests are performed, the function returns them invisibly. #' #' @seealso [plotQQunif], [testQuantiles], [testOutliers] #' @example inst/examples/plotsHelp.R #' @export plotResiduals <- function(simulationOutput, form = NULL, quantreg = NULL, rank = TRUE, asFactor = NULL, smoothScatter = NULL, quantiles = c(0.25, 0.5, 0.75), absoluteDeviation = FALSE, ...){ ##### Checks ##### a <- list(...) yAxis = ifelse(absoluteDeviation == TRUE, "Residual spread [2*abs(res - 0.5)]", "DHARMa residual") a$ylab = checkDots("ylab", yAxis , ...) a$xlab = checkDots("xlab", ifelse(is.null(form), "Model predictions", gsub(".*[$]","",deparse(substitute(form)))), ...) if(rank == TRUE) a$xlab = paste(a$xlab, "(rank transformed)") simulationOutput = ensureDHARMa(simulationOutput, convert = TRUE) res = simulationOutput$scaledResiduals if(absoluteDeviation == TRUE){ res = 2 * abs(res - 0.5) } if(inherits(form, "DHARMa"))stop("DHARMa::plotResiduals > argument form cannot be of class DHARMa. 
Note that the syntax of plotResiduals has changed since DHARMa 0.3.0. See ?plotResiduals.") pred = ensurePredictor(simulationOutput, form) ##### Rank transform and factor conversion##### if(!is.factor(pred)){ if (rank == TRUE){ pred = rank(pred, ties.method = "average") pred = pred / max(pred) a$xlim = checkDots("xlim", c(0,1), ...) } nuniq = length(unique(pred)) ndata = length(pred) if(is.null(asFactor)) asFactor = (nuniq == 1) | (nuniq < 10 & ndata / nuniq > 10) if (asFactor) pred = factor(pred) } ##### Residual scatter plots ##### if(is.null(quantreg)) if (length(res) > 2000) quantreg = FALSE else quantreg = TRUE switchScatter = 10000 if(is.null(smoothScatter)) if (length(res) > switchScatter) smoothScatter = TRUE else smoothScatter = FALSE blackcol = rgb(0,0,0, alpha = max(0.1, 1 - 3 * length(res) / switchScatter)) # Note to self: wrapped in do.call because of the check dots, needs to be consolidate, e.g. for testCategorical # categorical plot if(is.factor(pred)){ testCategorical(simulationOutput = simulationOutput, catPred = pred, quantiles = quantiles) } # smooth scatter else if (smoothScatter == TRUE) { defaultCol = ifelse(res == 0 | res == 1, 2,blackcol) do.call(graphics::smoothScatter, append(list(x = pred, y = res , ylim = c(0,1), axes = FALSE, colramp = colorRampPalette(c("white", "darkgrey"))),a)) points(pred[defaultCol == 2], res[defaultCol == 2], col = .Options$DHARMaSignalColor, cex = 0.5) axis(1) axis(2, at=c(0, quantiles, 1)) } # normal plot else{ defaultCol = ifelse(res == 0 | res == 1, 2,blackcol) defaultPch = ifelse(res == 0 | res == 1, 8,1) a$col = checkDots("col", defaultCol, ...) a$pch = checkDots("pch", defaultPch, ...) do.call(plot, append(list(res ~ pred, ylim = c(0,1), axes = FALSE), a)) axis(1) axis(2, at=c(0, quantiles, 1)) } ##### Quantile regressions ##### main = checkDots("main", ifelse(is.null(form), paste(yAxis, "vs. predicted"), paste(yAxis, "Residual vs. predictor")), ...) 
out = NULL if(is.numeric(pred)){ if(quantreg == FALSE){ title(main = main, cex.main = 1) abline(h = quantiles, col = "black", lwd = 0.5, lty = 2) try({ lines(smooth.spline(pred, res, df = 10), lty = 2, lwd = 2, col = .Options$DHARMaSignalColor) abline(h = 0.5, col = .Options$DHARMaSignalColor, lwd = 2) }, silent = TRUE) }else{ out = testQuantiles(res, pred, quantiles = quantiles, plot = FALSE) if(is.na(out$p.value)){ main = paste(main, "Some quantile regressions failed", sep = "\n") maincol = .Options$DHARMaSignalColor } else{ if(any(out$pvals < 0.05, na.rm = TRUE)){ main = paste(main, "Quantile deviations detected (red curves)", sep ="\n") if(out$p.value <= 0.05){ main = paste(main, "Combined adjusted quantile test significant", sep ="\n") } else { main = paste(main, "Combined adjusted quantile test n.s.", sep ="\n") } maincol = .Options$DHARMaSignalColor } else { main = paste(main, "No significant problems detected", sep ="\n") maincol = "black" } } title(main = main, cex.main = 0.8, col.main = maincol) for(i in 1:length(quantiles)){ lineCol = ifelse(out$pvals[i] <= 0.05 & !(is.na(out$pvals[i])), .Options$DHARMaSignalColor, "black") filCol = ifelse(out$pvals[i] <= 0.05 & !(is.na(out$pvals[i])), "#FF000040", "#00000020") abline(h = quantiles[i], col = lineCol, lwd = 0.5, lty = 2) polygon(c(out$predictions$pred, rev(out$predictions$pred)), c(out$predictions[,2*i] - out$predictions[,2*i+1], rev(out$predictions[,2*i] + out$predictions[,2*i+1])), col = "#00000020", border = FALSE) lines(out$predictions$pred, out$predictions[,2*i], col = lineCol, lwd = 2) } } } invisible(out) } #' Ensures the existence of a valid predictor to plot residuals against #' #' @param simulationOutput A DHARMa simulation output or an object that can be converted into a DHARMa simulation output. #' @param predictor An optional predictor. If no predictor is provided, will try to extract the fitted value. #' @keywords internal ensurePredictor <- function(simulationOutput, predictor = NULL){ if(!is.null(predictor)){ if(length(predictor) != length(simulationOutput$scaledResiduals)) stop("DHARMa: residuals and predictor do not have the same length. The issue is possibly that you have NAs in your predictor that were removed during the model fit. Remove the NA values from your predictor.") if(is.character(predictor)) { predictor = factor(predictor) warning("DHARMa:::ensurePredictor: character string was provided as predictor. DHARMa has converted to factor automatically. 
To remove this warning, please convert to factor before attempting to plot with DHARMa.") } } else { predictor = simulationOutput$fittedPredictedResponse if(is.null(predictor)) stop("DHARMa: can't extract predictor from simulationOutput, and no predictor provided.") } return(predictor) } #plotConventionalResiduals(fittedModel) #' Conventional residual plot #' #' Convenience function to draw conventional residual plots #' #' @param fittedModel a fitted model object #' @export plotConventionalResiduals <- function(fittedModel){ opar <- par(mfrow = c(1,3), oma = c(0,1,2,1)) on.exit(par(opar)) plot(predict(fittedModel), resid(fittedModel, type = "deviance"), main = "Deviance" , ylab = "Residual", xlab = "Predicted") plot(predict(fittedModel), resid(fittedModel, type = "pearson") , main = "Pearson", ylab = "Residual", xlab = "Predicted") plot(predict(fittedModel), resid(fittedModel, type = "response") , main = "Raw residuals" , ylab = "Residual", xlab = "Predicted") mtext("Conventional residual plots", outer = TRUE) } DHARMa/R/tests.R0000644000176200001440000015010114703461527012743 0ustar liggesusers#' DHARMa general residual test #' #' Calls uniformity, dispersion and outliers tests. #' #' This function is a wrapper for the various test functions implemented in DHARMa. Currently, this function calls the functions [testUniformity], [testDispersion], and [testOutliers]. All other tests (see list below) have to be called by hand. #' #' @param simulationOutput an object of class DHARMa, either created via [simulateResiduals] for supported models or by [createDHARMa] for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case. #' @param plot if TRUE, plots functions of the tests are called. #' @author Florian Hartig #' @seealso [testResiduals], [testUniformity], [testOutliers], [testDispersion], [testZeroInflation], [testGeneric], [testTemporalAutocorrelation], [testSpatialAutocorrelation], [testQuantiles], [testCategorical] #' @example inst/examples/testsHelp.R #' @export testResiduals <- function(simulationOutput, plot = TRUE){ opar = par(mfrow = c(1,3)) on.exit(par(opar)) out = list() out$uniformity = testUniformity(simulationOutput, plot = plot) out$dispersion = testDispersion(simulationOutput, plot = plot) out$outliers = testOutliers(simulationOutput, plot = plot) #print(out) # do we need it? return(out) } #' Residual tests #' #' @details Deprecated, switch your code to using the [testResiduals] function #' #' @param simulationOutput an object of class DHARMa, either created via [simulateResiduals] for supported models or by [createDHARMa] for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case. #' @author Florian Hartig #' @export testSimulatedResiduals <- function(simulationOutput){ message("testSimulatedResiduals is deprecated, switch your code to using the testResiduals function") testResiduals(simulationOutput) } #' Test for overall uniformity #' #' This function tests the overall uniformity of the simulated residuals in a DHARMa object #' #' @param simulationOutput an object of class DHARMa, either created via [simulateResiduals] for supported models or by [createDHARMa] for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case. 
#' @param alternative a character string specifying whether the test should test if observations are "greater", "less" or "two.sided" compared to the simulated null hypothesis. See [stats::ks.test] for details #' @param plot if TRUE, additionally calls [plotQQunif] #' @details The function applies a [stats::ks.test] for uniformity on the simulated residuals. #' @author Florian Hartig #' @seealso [testResiduals], [testUniformity], [testOutliers], [testDispersion], [testZeroInflation], [testGeneric], [testTemporalAutocorrelation], [testSpatialAutocorrelation], [testQuantiles], [testCategorical] #' @example inst/examples/testsHelp.R #' @export testUniformity <- function(simulationOutput, alternative = c("two.sided", "less", "greater"), plot = TRUE){ simulationOutput = ensureDHARMa(simulationOutput, convert = T) out <- suppressWarnings(ks.test(simulationOutput$scaledResiduals, 'punif', alternative = alternative)) if(plot == T) plotQQunif(simulationOutput = simulationOutput) return(out) } # Experimental testBivariateUniformity <- function(simulationOutput, alternative = c("two.sided", "less", "greater"), plot = TRUE){ simulationOutput = ensureDHARMa(simulationOutput, convert = T) #out <- suppressWarnings(ks.test(simulationOutput$scaledResiduals, 'punif', alternative = alternative)) #if(plot == T) plotQQunif(simulationOutput = simulationOutput) out = NULL return(out) } #' Test for quantiles #' #' This function tests whether the residual quantiles deviate from their expected (uniform) values. #' #' @param simulationOutput an object of class DHARMa, either created via [simulateResiduals] for supported models or by [createDHARMa] for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case. #' @param predictor an optional predictor variable to be used, instead of the predicted response (default) #' @param quantiles the quantiles to be tested #' @param plot if TRUE, the function will create an additional plot #' @details The function fits quantile regressions (via package qgam) on the residuals, and compares their location to the expected location (because of the uniform distribution, the expected location is 0.5 for the 0.5 quantile). #' #' A significant p-value for the splines means the fitted spline deviates from a flat line at the expected location (p-values of intercept and spline are combined via Benjamini & Hochberg adjustment to control the FDR). #' #' The p-values of the splines are combined into a total p-value via Benjamini & Hochberg adjustment to control the FDR. 
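#'
#' A minimal usage sketch (res is a hypothetical DHARMa object, dat$x a hypothetical predictor):
#'
#' ```
#' # quantile regressions at the default quantiles 0.25, 0.5, 0.75,
#' # by default against the model predictions
#' testQuantiles(res)
#' # or against an arbitrary predictor
#' testQuantiles(res, predictor = dat$x)
#' ```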
#' #' @author Florian Hartig #' @example inst/examples/testQuantilesHelp.R #' @seealso [testResiduals], [testUniformity], [testOutliers], [testDispersion], [testZeroInflation], [testGeneric], [testTemporalAutocorrelation], [testSpatialAutocorrelation], [testQuantiles], [testCategorical] #' @export testQuantiles <- function(simulationOutput, predictor = NULL, quantiles = c(0.25,0.5,0.75), plot = TRUE){ if(plot == F){ out = list() out$data.name = deparse(substitute(simulationOutput)) simulationOutput = ensureDHARMa(simulationOutput, convert = T) res = simulationOutput$scaledResiduals pred = ensurePredictor(simulationOutput, predictor) dat = data.frame(res = simulationOutput$scaledResiduals, pred = pred) quantileFits <- list() pval = rep(NA, length(quantiles)) predictions = data.frame(pred = sort(dat$pred)) predictions = cbind(predictions, matrix(ncol = 2 * length(quantiles), nrow = nrow(dat))) for(i in 1:length(quantiles)){ datTemp = dat datTemp$res = datTemp$res - quantiles[i] # settings for k = the dimension of the basis used to represent the smooth term. # see https://github.com/mfasiolo/qgam/issues/37 dimSmooth = min(length(unique(datTemp$pred)), 10) quantResult = try(capture.output(quantileFits[[i]] <- qgam::qgam(res ~ s(pred, k = dimSmooth), data = datTemp, qu = quantiles[i])), silent = T) if(inherits(quantResult, "try-error")){ message("\n DHARMa: qgam was unable to calculate quantile regression for quantile ", quantiles[i], ". Possibly too few (unique) data points / predictions. The quantile will be omitted in plots and significance calculations. \n") } else { x = summary(quantileFits[[i]]) pval[i] = min(p.adjust(c(x$p.table[1,4], x$s.table[1,4]), method = "BH")) # correction for test on slope and intercept quantPre = predict(quantileFits[[i]], newdata = predictions, se = T) predictions[, 2*i] = quantPre$fit + quantiles[i] predictions[, 2*i + 1] = quantPre$se.fit } } out$method = "Test for location of quantiles via qgam" out$alternative = "both" out$pvals = pval out$p.value = min(p.adjust(pval, method = "BH")) # correction for multiple quantile tests out$predictions = predictions out$qgamFits = quantileFits class(out) = "htest" } else if(plot == T) { out <- plotResiduals(simulationOutput = simulationOutput, form = predictor, quantiles = quantiles, quantreg = TRUE) } return(out) } #unif.2017YMi(X, type = c("Q1", "Q2", "Q3"), lower = rep(0, ncol(X)),upper = rep(1, ncol(X))) #' Test for outliers #' #' This function tests if the number of observations outside the simulation envelope is larger or smaller than expected #' #' @param simulationOutput an object of class DHARMa, either created via [simulateResiduals] for supported models or by [createDHARMa] for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case. #' @param alternative a character string specifying whether the test should test if observations are "greater", "less" or "two.sided" (default) compared to the simulated null hypothesis #' @param margin whether to test for outliers only at the lower, only at the upper, or both sides (default) of the simulated data distribution #' @param type either default, bootstrap or binomial. See details #' @param nBoot number of bootstrap replicates. 
Only used if type = "bootstrap". #' @param plot if TRUE, the function will create an additional plot #' @param plotBoostrap whether a plot of the outlier frequencies calculated under the bootstrap should be produced #' @details DHARMa residuals are created by simulating from the fitted model, and comparing the simulated values to the observed data. It can occur that all simulated values are higher or smaller than the observed data, in which case they get the residual value of 0 or 1, respectively. I refer to these values as simulation outliers, or simply outliers. #' #' Because no data was simulated in the range of the observed value, we don't know "how strongly" these values deviate from the model expectation, so the term "outlier" should be used with a grain of salt. It is not a judgment about the magnitude of the residual deviation, but simply a dichotomous sign that we are outside the simulated range. Moreover, the number of outliers will decrease as we increase the number of simulations. #' #' To test if the outliers are a concern, testOutliers implements 2 options (bootstrap, binomial), which can be chosen via the parameter "type". The third option (default) chooses bootstrap for integer-valued distributions with nObs < 500, and binomial otherwise. #' #' The binomial test considers that, under the null hypothesis that the model is correct, and for continuous distributions (i.e. data and the model distribution are identical and continuous), the probability that a given observation is higher than all simulations is 1/(nSim + 1), and binomially distributed. The testOutlier function can test this null hypothesis via type = "binomial". In principle, it would be nice if we could extend this idea to integer-valued distributions; however, for these distributions, which are randomized via the PIT procedure (see [simulateResiduals]), the rate of "true" outliers is more difficult to calculate, and in general not 1/(nSim + 1). The testOutlier function implements a small tweak that calculates the rate of residuals that are closer than 1/(nSim+1) to the 0/1 border, which roughly occur at a rate of nData /(nSim +1). This approximation, however, is generally not exact, and may be particularly off for non-bounded integer-valued distributions (such as Poisson or Negative Binomial). #' #' For this reason, the testOutlier function implements an alternative procedure that uses the bootstrap to generate a simulation-based expectation for the outliers. It is recommended to use the bootstrap for integer-valued distributions (and integer-valued only, because it has no advantage for continuous distributions), ideally with reasonably high values of nSim and nBoot (I recommend at least 1000 for both). Because of the high runtime, however, this option is switched off for type = default when nObs > 500. #' #' Both the binomial and the bootstrap procedure generate a null expectation, and then test for an excess or lack of outliers. Per default, testOutliers() looks for both, so if you get a significant p-value, you have to check whether you have too many or too few outliers. An excess of outliers is to be interpreted as too many values outside the simulation envelope. This could be caused by overdispersion, or by what we classically call outliers. A lack of outliers would be caused, for example, by underdispersion. 
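#'
#' A rough usage sketch (res is a hypothetical DHARMa object for an integer-valued response):
#'
#' ```
#' # default: bootstrap for integer-valued responses with nObs < 500, binomial otherwise
#' testOutliers(res)
#' # force the bootstrap with more replicates (slower, but more exact for
#' # integer-valued distributions)
#' testOutliers(res, type = "bootstrap", nBoot = 1000)
#' ```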
#' #' #' @author Florian Hartig #' @seealso [testResiduals], [testUniformity], [testOutliers], [testDispersion], [testZeroInflation], [testGeneric], [testTemporalAutocorrelation], [testSpatialAutocorrelation], [testQuantiles], [testCategorical] #' @export testOutliers <- function(simulationOutput, alternative = c("two.sided", "greater", "less"), margin = c("both", "upper", "lower"), type = c("default","bootstrap", "binomial"), nBoot = 100, plot = TRUE, plotBoostrap = FALSE){ # check inputs alternative = match.arg(alternative) margin = match.arg(margin) type = match.arg(type) data.name = deparse(substitute(simulationOutput)) # remember: needs to be called before ensureDHARMa simulationOutput = ensureDHARMa(simulationOutput, convert = "Model") if(type == "default"){ if(simulationOutput$integerResponse == FALSE) type = "binomial" else{ if(simulationOutput$nObs > 500) type = "binomial" else type = "bootstrap" } } # using the binomial test, not exact if(type == "binomial"){ # calculation of outliers if(margin == "both") outliers = sum(simulationOutput$scaledResiduals < (1/(simulationOutput$nSim+1))) + sum(simulationOutput$scaledResiduals > (1-1/(simulationOutput$nSim+1))) if(margin == "upper") outliers = sum(simulationOutput$scaledResiduals > (1-1/(simulationOutput$nSim+1))) if(margin == "lower") outliers = sum(simulationOutput$scaledResiduals < (1/(simulationOutput$nSim+1))) # calculations of trials and H0 outFreqH0 = 1/(simulationOutput$nSim +1) * ifelse(margin == "both", 2, 1) trials = simulationOutput$nObs out = binom.test(outliers, trials, p = outFreqH0, alternative = alternative) # overwrite information in binom.test out$method = "DHARMa outlier test based on exact binomial test with approximate expectations" out$data.name = data.name out$margin = margin names(out$statistic) = paste("outliers at", margin, "margin(s)") names(out$parameter) = "observations" names(out$estimate) = paste("frequency of outliers (expected:", out$null.value,")") if (simulationOutput$integerResponse == T & out$p.value < 0.05) message("DHARMa:testOutliers with type = binomial may have inflated Type I error rates for integer-valued distributions. To get a more exact result, it is recommended to re-run testOutliers with type = 'bootstrap'. See ?testOutliers for details") if(plot == T) { hist(simulationOutput, main = "") main = ifelse(out$p.value <= 0.05, "Outlier test significant", "Outlier test n.s.") title(main = main, cex.main = 1, col.main = ifelse(out$p.value <= 0.05, "red", "black")) } } else { if(margin == "both") outliers = mean(simulationOutput$scaledResiduals == 0) + mean(simulationOutput$scaledResiduals == 1) if(margin == "upper") outliers = mean(simulationOutput$scaledResiduals == 1) if(margin == "lower") outliers = mean(simulationOutput$scaledResiduals == 0) # Bootstrapping to compare to expected simIndices = 1:simulationOutput$nSim nSim = simulationOutput$nSim if(simulationOutput$refit == T){ simResp = simulationOutput$refittedResiduals } else { simResp = simulationOutput$simulatedResponse } resMethod = simulationOutput$method resInteger = simulationOutput$integerResponse if (nBoot > nSim){ message("DHARMa::testOutliers: nBoot > nSim does not make much sense, thus changed to nBoot = nSim. 
If you want to increase nBoot, increase nSim in DHARMa::simulateResiduals as well.") nBoot = nSim } frequBoot <- rep(NA, nBoot) for (i in 1:nBoot){ #sel = -i sel = sample(simIndices[-i], size = nSim, replace = T) residuals <- getQuantile(simulations = simResp[,sel], observed = simResp[,i], integerResponse = resInteger, method = resMethod) if(margin == "both") frequBoot[i] = mean(residuals == 1) + mean(residuals == 0) else if(margin == "upper") frequBoot[i] = mean(residuals == 1) else if(margin == "lower") frequBoot[i] = mean(residuals == 0) } out = list() class(out) = "htest" out$alternative = alternative out$p.value = getP(frequBoot, outliers, alternative = alternative) out$conf.int = quantile(frequBoot, c(0.025, 0.975)) out$data.name = data.name out$margin = margin out$method = "DHARMa bootstrapped outlier test" out$statistic = outliers * simulationOutput$nObs names(out$statistic) = paste("outliers at", margin, "margin(s)") out$parameter = simulationOutput$nObs names(out$parameter) = "observations" out$estimate = outliers names(out$estimate) = paste("outlier frequency (expected:", mean(frequBoot),")") if(plotBoostrap == T){ hist(frequBoot, xlim = range(frequBoot, outliers), col = "lightgrey", main = "Bootstrapped outlier frequency") abline(v = mean(frequBoot), col = 1, lwd = 2) abline(v = outliers, col = "red", lwd = 2) # legend("center", c(paste("p=", round(out$p.value, digits = 5)), paste("Deviation ", ifelse(out$p.value < 0.05, "significant", "n.s."))), text.col = ifelse(out$p.value < 0.05, "red", "black" )) } if(plot == T) { hist(simulationOutput, main = "") main = ifelse(out$p.value <= 0.05, "Outlier test significant", "Outlier test n.s.") title(main = main, cex.main = 1, col.main = ifelse(out$p.value <= 0.05, "red", "black")) } } return(out) } #' Test for categorical dependencies #' #' This function tests if there are problems in a res ~ group structure. It performs two tests: a test for within-group uniformity, and a test for between-group homogeneity of variances #' #' @param simulationOutput an object of class DHARMa, either created via [simulateResiduals] for supported models or by [createDHARMa] for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case. #' @param catPred a categorical predictor with the same dimensions as the residuals in simulationOutput #' @param quantiles the quantiles at which horizontal reference lines are drawn in the plot. #' @param plot if TRUE, the function will create an additional plot #' @details The function tests for two common problems: are residuals within each group distributed according to model assumptions, and is the variance between groups heterogeneous. #' #' The test for within-group uniformity is performed via multiple KS-tests, with adjustment of p-values for multiple testing. If the plot is drawn, problematic groups are highlighted in red, and a corresponding message is displayed in the plot. #' #' The test for homogeneity of variances is done with a Levene test. A significant p-value means that group variances are not constant. In this case, you should consider modelling variances, e.g. via ~dispformula in glmmTMB. 
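#'
#' A short sketch (res is a hypothetical DHARMa object, testData$group a hypothetical grouping factor):
#'
#' ```
#' # within-group KS tests for uniformity plus a Levene test for
#' # homogeneity of variances between the groups
#' testCategorical(res, catPred = testData$group)
#' ```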
#' #' @author Florian Hartig #' @seealso [testResiduals], [testUniformity], [testOutliers], [testDispersion], [testZeroInflation], [testGeneric], [testTemporalAutocorrelation], [testSpatialAutocorrelation], [testQuantiles], [testCategorical] #' @example inst/examples/testsHelp.R #' @export testCategorical <- function(simulationOutput, catPred, quantiles = c(0.25, 0.5, 0.75), plot = TRUE){ simulationOutput = ensureDHARMa(simulationOutput, convert = T) catPred = as.factor(catPred) out = list() out$uniformity$details = suppressWarnings(by(simulationOutput$scaledResiduals, catPred, ks.test, 'punif', simplify = TRUE)) out$uniformity$p.value = rep(NA, nlevels(catPred)) for(i in 1:nlevels(catPred)) out$uniformity$p.value[i] = out$uniformity$details[[i]]$p.value out$uniformity$p.value.cor = p.adjust(out$uniformity$p.value) if(nlevels(catPred) > 1) out$homogeneity = leveneTest_formula(simulationOutput$scaledResiduals ~ catPred) if(plot == T){ boxplot(simulationOutput$scaledResiduals ~ catPred, ylim = c(0,1), axes = FALSE, col = ifelse(out$uniformity$p.value.cor < 0.05, "red", "lightgrey")) axis(1, at = 1:nlevels(catPred), levels(catPred)) axis(2, at=c(0, quantiles, 1)) abline(h = quantiles, lty = 2) } mtext(ifelse(any(out$uniformity$p.value.cor < 0.05), "Within-group deviations from uniformity significant (red)", "Within-group deviation from uniformity n.s."), col = ifelse(any(out$uniformity$p.value.cor < 0.05), "red", "black"), line = 1) if(length(out) > 1) { mtext(ifelse(out$homogeneity$`Pr(>F)`[1] < 0.05, "Levene Test for homogeneity of variance significant", "Levene Test for homogeneity of variance n.s."), col = ifelse(out$homogeneity$`Pr(>F)`[1] < 0.05, "red", "black")) } return(out) } #' DHARMa dispersion tests #' #' This function performs simulation-based tests for over/underdispersion. If type = "DHARMa" (default and recommended), simulation-based dispersion tests are performed. Their behavior differs depending on whether simulations are done with refit = F, or refit = T, and whether data is simulated conditional (e.g. re.form ~0 in lme4) (see below). If type = "PearsonChisq", a chi2 test on Pearson residuals is performed. #' #' @param simulationOutput an object of class DHARMa, either created via [simulateResiduals] for supported models or by [createDHARMa] for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case. #' @param alternative a character string specifying whether the test should test if observations are "greater", "less" or "two.sided" compared to the simulated null hypothesis. Greater corresponds to testing only for overdispersion. It is recommended to keep the default setting (testing for both over and underdispersion) #' @param plot whether to provide a plot for the results #' @param type which test to run. Default is DHARMa, other options are PearsonChisq (see details) #' @param ... arguments to pass on to [testGeneric] #' #' @details Over / underdispersion means that the observed data is more / less dispersed than expected under the fitted model. There is no unique way to test for dispersion problems, and there are a number of different dispersion tests implemented in various R packages. #' #' The testDispersion function implements several dispersion tests: #' #' **Simulation-based dispersion tests (type == "DHARMa")** #' #' If type = "DHARMa" (default and recommended), simulation-based dispersion tests are performed. 
Their behavior differs depending on whether simulations are done with refit = F, or refit = T. #' #' #' **Important:** for either refit = T or F, the results of the type = "DHARMa" dispersion test will differ depending on whether simulations are done conditional (= conditional on fitted random effects) or unconditional (= REs are re-simulated). How to change between conditional or unconditional simulations is discussed in [simulateResiduals]. The general default in DHARMa is to use unconditional simulations, because this has advantages in other situations, but dispersion tests for models with strong REs specifically may increase substantially in power / sensitivity when switching to conditional simulations. I therefore recommend checking dispersion with conditional simulations if supported by the used regression package. #' #' If refit = F, the function uses [testGeneric] to compare the variance of the observed raw residuals (i.e. var(observed - predicted), displayed as a red line) against the variance of the simulated residuals (i.e. var(simulated - predicted), histogram). The variances are scaled to the mean simulated variance. A significant ratio > 1 indicates overdispersion, a significant ratio < 1 underdispersion. #' #' If refit = T, the function compares the approximate deviance (via squared Pearson residuals) with the same quantity from the models refitted with simulated data. Applying this is much slower than the previous alternative. Given the computational cost, I would suggest that most users will be satisfied with the standard dispersion test. #' #' **Analytical dispersion tests (type == "PearsonChisq")** #' #' This is the test described in https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#overdispersion, identical to performance::check_overdispersion. Works only if the fitted model provides df.residual and Pearson residuals. #' #' The test statistic is biased to lower values under quite general conditions, and will therefore tend to test significant for underdispersion. It is recommended to use this test only for overdispersion, i.e. use alternative == "greater". Also, obviously, it requires that Pearson residuals are available for the chosen model, which will not be the case for all models / packages. #' #' @note For particular model classes / situations, there may be tests that are more powerful than, and thus preferable to, the DHARMa test. The advantage of the DHARMa test is that it directly targets the spread of the data (unlike other tests such as dispersion/df, which essentially measure fit and may thus be triggered by problems other than dispersion as well), and it makes practically no assumptions about the fitted model, other than the availability of simulations. 
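#'
#' A compact sketch (fittedModel is a hypothetical supported model object):
#'
#' ```
#' # default simulation-based dispersion test on the DHARMa residuals
#' res <- simulateResiduals(fittedModel)
#' testDispersion(res)
#' # analytical Pearson chi2 test; recommended only for testing overdispersion
#' testDispersion(fittedModel, type = "PearsonChisq", alternative = "greater")
#' ```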
#' #' @author Florian Hartig #' @seealso [testResiduals], [testUniformity], [testOutliers], [testDispersion], [testZeroInflation], [testGeneric], [testTemporalAutocorrelation], [testSpatialAutocorrelation], [testQuantiles], [testCategorical] #' @example inst/examples/testDispersionHelp.R #' @export testDispersion <- function(simulationOutput, alternative = c("two.sided", "greater", "less"), plot = T, type = c("DHARMa", "PearsonChisq"), ...){ alternative <- match.arg(alternative) type <- match.arg(type) out = list() out$data.name = deparse(substitute(simulationOutput)) if(type == "DHARMa"){ simulationOutput = ensureDHARMa(simulationOutput, convert = "Model") # if(class(simulationOutput$fittedModel) %in% c("glmerMod", "lmerMod"){ # if(!"re.form" %in% names(simulationOutput$additionalParameters) & is.null(simulationOutput$additionalParameters$re.form)) message("recommended to run conditional simulations for dispersion test, see help") #} if(simulationOutput$refit == F){ expectedVar = sd(simulationOutput$simulatedResponse)^2 spread <- function(x) var(x - simulationOutput$fittedPredictedResponse) / expectedVar out = testGeneric(simulationOutput, summary = spread, alternative = alternative, methodName = "DHARMa nonparametric dispersion test via sd of residuals fitted vs. simulated", plot = plot, ...) names(out$statistic) = "dispersion" } else { observed = tryCatch(sum(residuals(simulationOutput$fittedModel, type = "pearson")^2), error = function(e) { message(paste("DHARMa: the requested tests requires pearson residuals, but your model does not implement these calculations. Test will return NA. Error message:", e)) return(NA) }) if(is.na(observed)) return(NA) expected = apply(simulationOutput$refittedPearsonResiduals^2 , 2, sum) out$statistic = c(dispersion = observed / mean(expected)) names(out$statistic) = "dispersion" out$method = "DHARMa nonparametric dispersion test via mean deviance residual fitted vs. simulated-refitted" p = getP(simulated = expected, observed = observed, alternative = alternative) out$alternative = alternative out$p.value = p class(out) = "htest" if(plot == T) { #plotTitle = gsub('(.{1,50})(\\s|$)', '\\1\n', out$method) xLabel = paste("Simulated values, red line = fitted model. p-value (",out$alternative, ") = ", out$p.value, sep ="") hist(expected, xlim = range(expected, observed, na.rm=T ), col = "lightgrey", main = "", xlab = xLabel, breaks = 20, cex.main = 1) abline(v = observed, lwd= 2, col = "red") main = ifelse(out$p.value <= 0.05, "Dispersion test significant", "Dispersion test n.s.") title(main = main, cex.main = 1, col.main = ifelse(out$p.value <= 0.05, "red", "black")) } } } else if(type == "PearsonChisq"){ if("DHARMa" %in% class(simulationOutput)){ model = simulationOutput$fittedModel } else model = simulationOutput if(! alternative == "greater") message("Note that the chi2 test on Pearson residuals is biased for MIXED models towards underdispersion. Tests with alternative = two.sided or less are therefore not reliable. If you have random effects in your model, I recommend to test only with alternative = 'greater', i.e. test for overdispersion, or else use the DHARMa default tests which are unbiased. 
See help for details.") rdf <- df.residual(model) rp <- getPearsonResiduals(model) Pearson.chisq <- sum(rp^2) prat <- Pearson.chisq/rdf if(alternative == "greater") pval <- pchisq(Pearson.chisq, df=rdf, lower.tail=FALSE) else if (alternative == "less") pval <- pchisq(Pearson.chisq, df=rdf, lower.tail=TRUE) else if (alternative == "two.sided") pval <- min(min(pchisq(Pearson.chisq, df=rdf, lower.tail=TRUE), pchisq(Pearson.chisq, df=rdf, lower.tail=FALSE)) * 2,1) out$statistic = prat names(out$statistic) = "dispersion" out$parameter = rdf names(out$parameter) = "df" out$method = "Parametric dispersion test via mean Pearson-chisq statistic" out$alternative = alternative out$p.value = pval class(out) = "htest" # c(chisq=Pearson.chisq,ratio=prat,rdf=rdf,p=pval) return(out) } return(out) } #' Simulated overdispersion tests #' #' @details Deprecated, switch your code to using the [testDispersion] function #' #' @param simulationOutput an object of class DHARMa with simulated quantile residuals, either created via [simulateResiduals] or by [createDHARMa] for simulations created outside DHARMa #' @param ... additional arguments to [testDispersion] #' @export testOverdispersion <- function(simulationOutput, ...){ message("testOverdispersion is deprecated, switch your code to using the testDispersion function") testDispersion(simulationOutput, ...) } #' Parametric overdispersion tests #' #' @details Deprecated, switch your code to using the [testDispersion] function. #' #' @param ... arguments will be ignored, the parametric test is no longer recommended #' @export testOverdispersionParametric <- function(...){ message("testOverdispersionParametric is deprecated - switch your code to using the testDispersion function") return(0) } #' Tests for zero-inflation #' #' This function compares the observed number of zeros with the zeros expected from simulations. #' #' @param simulationOutput an object of class DHARMa, either created via [simulateResiduals] for supported models or by [createDHARMa] for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case. #' @param ... further arguments to [testGeneric] #' @details Zero-inflation means that the observed data contain more zeros than would be expected under the fitted model. Zero-inflation must always be assessed with respect to a particular model, so the mere fact that there are many zeros in the observed data is not an indication of zero-inflation, see Warton, D. I. (2005). Many zeros does not mean zero inflation: comparing the goodness-of-fit of parametric models to multivariate abundance data. Environmetrics 16(3), 275-289. #' #' The testZeroInflation function simulates new datasets from the fitted model and compares this null distribution (gray histogram in the plot) with the observed values (red line in the plot). Technically, it is a wrapper for [testGeneric], with the summary argument set to function(x) sum(x == 0). The test statistic is the ratio of observed to simulated zeros. A value < 1 means that the observed data have fewer zeros than expected, a value > 1 means that they have more zeros than expected (aka zero inflation). By default, the function tests both sides, so it would also test for fewer zeros than expected. #' #' @note Zero-inflation can occur for a number of reasons other than an underlying data generating process corresponding to a ZIP model. 
Vice versa, it is very well possible that no zero-inflation will be observed when fitting models to data derived from a ZIP process. The latter is due to the fact that excess zeros can often be explained by other model parameters, such as the theta parameter in the negative binomial. #' #' For this reason, results of the zero-inflation test should be interpreted as a residual pattern that can have many reasons, not as a decision criterion for whether or not to fit a ZIP model. To decide whether to add a ZIP term, I would advise relying on appropriate model selection techniques such as AIC, BIC, WAIC, Bayes factor, or LRT. Note that these tests are often not reliable in GLMMs because it is difficult to determine the df spent by the different models. The [simulateLRT] function in DHARMa provides a nonparametric alternative to obtain p-values for LRT is nested models with unknown df. #' #' @author Florian Hartig #' @example inst/examples/testsHelp.R #' @seealso [testResiduals], [testUniformity], [testOutliers], [testDispersion], [testZeroInflation], [testGeneric], [testTemporalAutocorrelation], [testSpatialAutocorrelation], [testQuantiles], [testCategorical] #' @export testZeroInflation <- function(simulationOutput, ...){ countZeros <- function(x) sum( x == 0) testGeneric(simulationOutput = simulationOutput, summary = countZeros, methodName = "DHARMa zero-inflation test via comparison to expected zeros with simulation under H0 = fitted model", ... ) } #' Test for a generic summary statistic based on simulated data #' #' This function tests if a user-defined summary differs when applied to simulated / observed data. #' #' @param simulationOutput an object of class DHARMa, either created via [simulateResiduals] for supported models or by [createDHARMa] for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case. #' @param summary a function that can be applied to simulated / observed data. See examples below #' @param alternative a character string specifying whether the test should test if observations are "greater", "less" or "two.sided" compared to the simulated null hypothesis #' @param plot whether to plot the simulated summary #' @param methodName name of the test (will be used in plot) #' #' @details This function applies a user-defined summary to the simulated/observed data of a DHARMa object and then performs a hypothesis test using the ratio Obs / Sim as the test statistic. #' #' The summary is applied directly to the data and not to the residuals, but it can easily be remodeled to apply summaries to the residuals by simply defining something like f = function(x) summary (x - predictions), as done in [testDispersion] #' #' @note The summary function you specify will be applied to the data as it appears in your fitted model, which may not always be what you want. #' #' As an example, consider the case where we want to test for n-inflation in k/n data. If you provide your data via cbind (k, n-k), you have to test for n-inflation, but if you provide your data via k/n and weights = n, you should test for 1-inflation. When in doubt, check how the data is represented internally in model.frame(model) or via simulate(model). 
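# Illustrative sketch (not part of the package source): how testGeneric() is used with a
# user-defined summary, mirroring how testZeroInflation() and testDispersion() above are
# implemented as thin wrappers around it. Assumes the DHARMa package is available; the
# object names (testData, fit, res, countOnes) are placeholders invented for this example.
library(DHARMa)
testData = createData(sampleSize = 200, family = poisson())
fit = glm(observedResponse ~ Environment1, family = poisson, data = testData)
res = simulateResiduals(fittedModel = fit, n = 250)
countOnes = function(x) sum(x == 1)   # e.g. a simple check for 1-inflation
testGeneric(res, summary = countOnes, alternative = "greater",
            methodName = "DHARMa 1-inflation test (illustration)")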
#' #' @export #' @author Florian Hartig #' @example inst/examples/testsHelp.R #' @seealso [testResiduals], [testUniformity], [testOutliers], [testDispersion], [testZeroInflation], [testGeneric], [testTemporalAutocorrelation], [testSpatialAutocorrelation], [testQuantiles], [testCategorical] testGeneric <- function(simulationOutput, summary, alternative = c("two.sided", "greater", "less"), plot = T, methodName = "DHARMa generic simulation test"){ out = list() out$data.name = deparse(substitute(simulationOutput)) simulationOutput = ensureDHARMa(simulationOutput, convert = "Model") alternative <- match.arg(alternative) observed = summary(simulationOutput$observedResponse) simulated = apply(simulationOutput$simulatedResponse, 2, summary) p = getP(simulated = simulated, observed = observed, alternative = alternative) out$statistic = c(ratioObsSim = observed / mean(simulated)) out$method = methodName out$alternative = alternative out$p.value = p class(out) = "htest" if(plot == T) { plotTitle = gsub('(.{1,50})(\\s|$)', '\\1\n', methodName) xLabel = paste("Simulated values, red line = fitted model. p-value (",out$alternative, ") = ", out$p.value, sep ="") hist(simulated, xlim = range(simulated, observed, na.rm=T ), col = "lightgrey", main = plotTitle, xlab = xLabel, breaks = max(round(simulationOutput$nSim / 5), 20), cex.main = 0.8) abline(v = observed, lwd= 2, col = "red") } return(out) } #' Test for temporal autocorrelation #' #' This function performs a standard test for temporal autocorrelation on the simulated residuals. #' #' @param simulationOutput an object of class DHARMa, either created via [simulateResiduals] for supported models or by [createDHARMa] for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case. #' @param time the time, in the same order as the data points. #' @param alternative a character string specifying whether the test should test if observations are "greater", "less" or "two.sided" compared to the simulated null hypothesis #' @param plot whether to plot output #' @details The function performs a Durbin-Watson test on the uniformly scaled residuals, and plots the residuals against time. The DW test was originally designed for normal residuals. In simulations, I didn't see a problem with this setting though. The alternative is to transform the uniform residuals to normal residuals and perform the DW test on those. #' #' Testing for temporal autocorrelation requires unique time values - if you have several observations per time value, either use the [recalculateResiduals] function to aggregate residuals per time step, or extract the residuals from the fitted object, and plot / test each of them independently for temporally repeated subgroups (typical choices would be location / subject etc.). Note that the latter must be done by hand, outside testTemporalAutocorrelation. #' #' @note Standard DHARMa simulations from models with (temporal / spatial / phylogenetic) conditional autoregressive terms will still have the respective temporal / spatial / phylogenetic correlation in the DHARMa residuals, unless the package you are using is modelling the autoregressive terms as explicit REs and is able to simulate conditional on the fitted REs. This has two consequences: #' #' 1. If you check the residuals for such a model, they will still show significant autocorrelation, even if the model fully accounts for this structure. #' #' 2.
Because the DHARMa residuals for such a model are not statistically independent any more, other tests (e.g. dispersion, uniformity) may have inflated type I error, i.e. you will have a higher likelihood of spurious residual problems. #' #' There are three (non-exclusive) routes to address these issues when working with spatial / temporal / other autoregressive models: #' #' 1. Simulate conditional on the fitted CAR structures (see conditional simulations in the help of [simulateResiduals]) #' #' 2. Rotate simulations prior to residual calculations (see parameter rotation in [simulateResiduals]) #' #' 3. Use custom tests / plots that explicitly compare the correlation structure in the simulated data to the correlation structure in the observed data. #' #' @author Florian Hartig #' @seealso [testResiduals], [testUniformity], [testOutliers], [testDispersion], [testZeroInflation], [testGeneric], [testTemporalAutocorrelation], [testSpatialAutocorrelation], [testQuantiles], [testCategorical] #' @example inst/examples/testTemporalAutocorrelationHelp.R #' @export testTemporalAutocorrelation <- function(simulationOutput, time, alternative = c("two.sided", "greater", "less"), plot = TRUE){ simulationOutput = ensureDHARMa(simulationOutput, convert = T) # actually not sure if this is neccessary for dwtest, but seems better to aggregate if(any(duplicated(time))) stop("testing for temporal autocorrelation requires unique time values - if you have several observations per time value, either use the recalculateResiduals function to aggregate residuals per time step, or extract the residuals from the fitted object, and plot / test each of them independently for temporally repeated subgroups (typical choices would be location / subject etc.). Note that the latter must be done by hand, outside testTemporalAutocorrelation.") alternative <- match.arg(alternative) if(is.null(time)){ time = sample.int(simulationOutput$nObs, simulationOutput$nObs) message("DHARMa::testTemporalAutocorrelation - no time argument provided, using random times for each data point") } # To avoid Issue #190 if (length(time) != length(residuals(simulationOutput))) stop("Dimensions of time don't match the dimension of the residuals") out = lmtest::dwtest(simulationOutput$scaledResiduals ~ 1, order.by = time, alternative = alternative) if(plot == T) { oldpar <- par(mfrow = c(1,2)) on.exit(par(oldpar)) plot(simulationOutput$scaledResiduals[order(time)] ~ time[order(time)], type = "l", ylab = "Scaled residuals", xlab = "Time", main = "Residuals vs. time", ylim = c(0,1)) abline(h=c(0.5)) abline(h=c(0,0.25,0.75,1), lty = 2 ) acf(simulationOutput$scaledResiduals[order(time)], main = "Autocorrelation", ylim = c(-1,1)) legend("topright", c(paste(out$method, " p=", round(out$p.value, digits = 5)), paste("Deviation ", ifelse(out$p.value < 0.05, "significant", "n.s."))), text.col = ifelse(out$p.value < 0.05, "red", "black" ), bty="n") } return(out) } #' Test for distance-based spatial (or similar type) autocorrelation #' #' This function performs a Moran's I test for distance-based spatial (or similar type) autocorrelation on the calculated quantile residuals. #' #' @param simulationOutput An object of class DHARMa, either created via [simulateResiduals] for supported models or via [createDHARMa] for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case. #' @param x The x coordinate, in the same order as the data points. 
Must be specified unless distMat is provided. #' @param y The y coordinate, in the same order as the data points. Must be specified unless distMat is provided. #' @param distMat Optional distance matrix. If not provided, Euclidean distances based on x and y will be calculated. See details for explanation. #' @param alternative A character string specifying whether the test should test if observations are "greater", "less" or "two.sided" compared to the simulated null hypothesis. #' @param plot If T, and if x and y are provided, plot the output (see Details). #' #' @details The function performs the Moran.I test from the package ape on the DHARMa residuals. If a distance matrix (distMat) is provided, calculations will be based on this distance matrix, and x,y coordinates will only be used for the plotting (if provided). If distMat is not provided, the function will calculate the Euclidean distances between x,y coordinates, and test Moran.I based on these distances. #' #' If plot = T, a plot will be produced showing each residual at its x,y position, colored according to the residual value. Residuals of 0.5 are colored white, everything below 0.5 is colored increasingly red, everything above 0.5 is colored increasingly blue. #' #' Testing for spatial autocorrelation requires unique x,y values - if you have several observations per location, either use the [recalculateResiduals] function to aggregate residuals per location, or extract the residuals from the fitted object, and plot / test each of them independently for spatially repeated subgroups (a typical scenario would be repeated spatial observations, in which case one could plot / test each time step separately for temporal autocorrelation). Note that the latter must be done by hand, outside [testSpatialAutocorrelation]. #' #' @note Standard DHARMa simulations from models with (temporal / spatial / phylogenetic) conditional autoregressive terms will still have the respective temporal / spatial / phylogenetic correlation in the DHARMa residuals, unless the package you are using is modelling the autoregressive terms as explicit REs and is able to simulate conditional on the fitted REs. This has two consequences: #' #' 1. If you check the residuals for such a model, they will still show significant autocorrelation, even if the model fully accounts for this structure. #' #' 2. Because the DHARMa residuals for such a model are not statistically independent any more, other tests (e.g. dispersion, uniformity) may have inflated type I error, i.e. you will have a higher likelihood of spurious residual problems. #' #' There are three (non-exclusive) routes to address these issues when working with spatial / temporal / phylogenetic / other autoregressive models: #' #' 1. Simulate conditional on the fitted CAR structures (see conditional simulations in the help of [simulateResiduals]). #' #' 2. Rotate simulations prior to residual calculations (see parameter rotation in [simulateResiduals]). #' #' 3. Use custom tests / plots that explicitly compare the correlation structure in the simulated data to the correlation structure in the observed data.
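# Minimal usage sketch for the spatial test documented above (not package source; assumes
# DHARMa is installed, object names are placeholders). createData() returns x/y coordinates,
# so the test can be run directly; with repeated coordinates the residuals would first be
# aggregated per location via recalculateResiduals().
library(DHARMa)
testData = createData(sampleSize = 100, family = poisson())
fit = glm(observedResponse ~ Environment1, family = poisson, data = testData)
res = simulateResiduals(fit)
testSpatialAutocorrelation(res, x = testData$x, y = testData$y)
# equivalent call via a user-supplied distance matrix:
D = as.matrix(dist(cbind(testData$x, testData$y)))
testSpatialAutocorrelation(res, distMat = D, plot = FALSE)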
#' #' @author Florian Hartig #' @seealso [testResiduals], [testUniformity], [testOutliers], [testDispersion], [testZeroInflation], [testGeneric], [testTemporalAutocorrelation], [testSpatialAutocorrelation], [testQuantiles], [testCategorical] #' @import grDevices #' @example inst/examples/testSpatialAutocorrelationHelp.R #' @export testSpatialAutocorrelation <- function(simulationOutput, x = NULL, y = NULL, distMat = NULL, alternative = c("two.sided", "greater", "less"), plot = TRUE){ alternative <- match.arg(alternative) data.name = deparse(substitute(simulationOutput)) # needs to be before ensureDHARMa simulationOutput = ensureDHARMa(simulationOutput, convert = T) # Assertions if(any(duplicated(cbind(x,y)))) stop("Testing for spatial autocorrelation requires unique x,y values - if you have several observations per location, either use the recalculateResiduals function to aggregate residuals per location, or extract the residuals from the fitted object, and plot / test each of them independently for spatially repeated subgroups (a typical scenario would repeated spatial observation, in which case one could plot / test each time step separately for temporal autocorrelation). Note that the latter must be done by hand, outside testSpatialAutocorrelation.") if( (!is.null(x) | !is.null(y)) & !is.null(distMat) ) message("Both coordinates and distMat provided, calculations will be done based on the distance matrix, coordinates will only be used for plotting.") if( (is.null(x) | is.null(y)) & is.null(distMat) ) stop("You need to provide either x,y, coordinates or a distMatrix.") if(is.null(distMat) & (length(x) != length(residuals(simulationOutput)) | length(y) != length(residuals(simulationOutput)))) # To avoid Issue #190 if (!is.null(x) & length(x) != length(residuals(simulationOutput)) | !is.null(y) & length(y) != length(residuals(simulationOutput))) stop("Dimensions of x / y coordinates don't match the dimension of the residuals.") # if not provided, create distance matrix based on x and y if(is.null(distMat)) distMat <- as.matrix(dist(cbind(x, y))) invDistMat <- 1/distMat diag(invDistMat) <- 0 MI = ape::Moran.I(simulationOutput$scaledResiduals, weight = invDistMat, alternative = alternative) out = list() out$statistic = c(observed = MI$observed, expected = MI$expected, sd = MI$sd) out$method = "DHARMa Moran's I test for distance-based autocorrelation" out$alternative = "Distance-based autocorrelation" out$p.value = MI$p.value out$data.name = data.name class(out) = "htest" if(plot == T & !is.null(x) & !is.null(y)) { opar <- par(mfrow = c(1,1)) on.exit(par(opar)) col = colorRamp(c("red", "white", "blue"))(simulationOutput$scaledResiduals) plot(x,y, col = rgb(col, maxColorValue = 255), main = out$method, cex.main = 0.8 ) # TODO implement correlogram } return(out) } #' Test for phylogenetic autocorrelation #' #' This function performs a Moran's I test for phylogenetic autocorrelation on the calculated quantile residuals. #' #' @param simulationOutput an object of class DHARMa, either created via [simulateResiduals] for supported models or via [createDHARMa] for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case. #' @param tree A phylogenetic tree object. #' @param alternative A character string specifying whether the test should test if observations are "greater", "less" or "two.sided" compared to the simulated null hypothesis of no phylogenetic correlation. 
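# Sketch of the Moran's I machinery shared by testSpatialAutocorrelation() above and
# testPhylogeneticAutocorrelation() below (not package source; assumes the ape package is
# installed). Both tests weight observation pairs by inverse distance and hand the scaled
# residuals to ape::Moran.I; resid and coords below are placeholder objects.
library(ape)
coords = cbind(runif(50), runif(50))
resid = runif(50)                      # stands in for simulationOutput$scaledResiduals
invDist = 1 / as.matrix(dist(coords))  # inverse-distance weights, as in the function above
diag(invDist) = 0
Moran.I(resid, weight = invDist, alternative = "two.sided")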
#' #' @details The function performs Moran.I test from the package ape on the DHARMa residuals, based on the phylogenetic distance matrix internally created from the provided tree. For custom distance matrices, you can use [testSpatialAutocorrelation]. #' #' @note Standard DHARMa simulations from models with (temporal / spatial / phylogenetic) conditional autoregressive terms will still have the respective temporal / spatial / phylogenetic correlation in the DHARMa residuals, unless the package you are using is modelling the autoregressive terms as explicit REs and is able to simulate conditional on the fitted REs. This has two consequences: #' #' 1. If you check the residuals for such a model, they will still show significant autocorrelation, even if the model fully accounts for this structure. #' #' 2. Because the DHARMa residuals for such a model are not statistically independent any more, other tests (e.g. dispersion, uniformity) may have inflated type I error, i.e. you will have a higher likelihood of spurious residual problems. #' #' There are three (non-exclusive) routes to address these issues when working with spatial / temporal / phylogenetic autoregressive models: #' #' 1. Simulate conditional on the fitted CAR structures (see conditional simulations in the help of [simulateResiduals]). #' #' 2. Rotate simulations prior to residual calculations (see parameter rotation in [simulateResiduals]). #' #' 3. Use custom tests / plots that explicitly compare the correlation structure in the simulated data to the correlation structure in the observed data. #' #' @author Florian Hartig #' @seealso [testResiduals], [testUniformity], [testOutliers], [testDispersion], [testZeroInflation], [testGeneric], [testTemporalAutocorrelation], [testSpatialAutocorrelation], [testQuantiles], [testCategorical] #' @example inst/examples/testPhylogeneticAutocorrelationHelp.R #' @export testPhylogeneticAutocorrelation <- function(simulationOutput, tree, alternative = c("two.sided", "greater", "less")){ alternative <- match.arg(alternative) data.name = deparse(substitute(simulationOutput)) # needs to be before ensureDHARMa simulationOutput = ensureDHARMa(simulationOutput, convert = T) # calculate distance matrix distMat <- cophenetic(tree) invDistMat <- 1/distMat diag(invDistMat) <- 0 MI = ape::Moran.I(simulationOutput$scaledResiduals, weight = invDistMat, alternative = alternative) out = list() out$statistic = c(observed = MI$observed, expected = MI$expected, sd = MI$sd) out$method = "DHARMa Moran's I test for phylogenetic autocorrelation" out$alternative = "Phylogenetic autocorrelation" out$p.value = MI$p.value out$data.name = data.name class(out) = "htest" return(out) } getP <- function(simulated, observed, alternative, plot = FALSE, ...){ if(alternative == "greater") p = mean(simulated >= observed) if(alternative == "less") p = mean(simulated <= observed) if(alternative == "two.sided") p = min(min(mean(simulated <= observed), mean(simulated >= observed) ) * 2,1) if(plot == T){ hist(simulated, xlim = range(simulated, observed), col = "lightgrey", main = "Distribution of test statistic \n grey = simulated, red = observed", ...) abline(v = mean(simulated), col = 1, lwd = 2) abline(v = observed, col = "red", lwd = 2) } return(p) } DHARMa/R/compatibility.R0000644000176200001440000006230014704245735014457 0ustar liggesusers# General notes ----------------------------------------------------------- # This file contains the wrappers for the models supported by DHARMa. 
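# Worked illustration of the simulation-based p-value computed by the getP() helper defined
# just above (not package source; the numbers are arbitrary). The two-sided p-value is
# min(2 * min(P(sim <= obs), P(sim >= obs)), 1), estimated from the simulated statistics.
simulated = rnorm(100)
observed  = quantile(simulated, 0.975)
p_greater = mean(simulated >= observed)                     # alternative = "greater"
p_two     = min(min(mean(simulated <= observed),
                    mean(simulated >= observed)) * 2, 1)    # alternative = "two.sided", as in getP()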
# The design philosophy is that DHARMa interacts with packages ONLY via the wrappers, # so that the internal package functions can rely on a standardized interface. # # The currently supported models can be returned via getPossibleModels(). # # Below, you find under the section "Generic S3 Wrappers" the 6 Wrapper # functions that need to be implemented to add a model to DHARMa. # # Each of these generics has a default, which may already work with a given # model class. The general approach for integrating a package in DHARMa is # # i) test if the default DHARMa wrappers work as intended. # ii) if not, define a new class-specific S3 wrapper (e.g. getSimulations.gam). # See comments in the help of the generic S3 functions for guidance about how to implement each function. # Checks ----------------------------------------------------------- #' Check if the fitted model is supported by DHARMa #' #' The function checks if the fitted model is supported by DHARMa, and if there are other issues that could create problems. #' #' @param fittedModel a fitted model. #' @param stop whether to throw an error if the model is not supported by DHARMa. #' #' @details The main purpose of this function is to check if the fitted model class is supported by DHARMa. The function additionally checks for properties of the fitted model that could create problems for calculating residuals or working with the results in DHARMa. #' #' #' @keywords internal checkModel <- function(fittedModel, stop = F){ out = T if(!(class(fittedModel)[1] %in% getPossibleModels())){ if(stop == FALSE) warning("DHARMa: fittedModel not in class of supported models. Absolutely no guarantee that this will work!") else stop("DHARMa: fittedModel not in class of supported models.") } # if(hasNA(fittedModel)) message("It seems there were NA values in the data used for fitting the model. This can create problems if you supply additional data to DHARMa functions. See ?checkModel for details") # TODO: check as implemented does not work reliably, check if there is any other option to check for NA # #' @example inst/examples/checkModelHelp.R # NA values in the data: checkModel will detect if there were NA values in the data frame. For NA values, most regression models will remove the entire observation from the data. This is not a problem for DHARMa - residuals are then only calculated for non-NA rows in the data. However, if you provide additional predictors to DHARMa, for example to plot residuals against a predictor, you will have to remove all NA rows that were also removed in the model. For most models, you can get the rows of the data that were actually used in the fit via rownames(model.frame(fittedModel)) #if (class(fittedModel)[1] == "gam" ) if (class(fittedModel$family)[1] == "extended.family") stop("It seems you are trying to fit a model from mgcv that was fit with an extended.family. Simulation functions for these families are not yet implemented in DHARMa. See issue https://github.com/florianhartig/DHARMa/issues/11 for updates about this") } #' get possible models #' #' returns a list of supported model classes #' #' @keywords internal getPossibleModels<-function()c("lm", "glm", "negbin", "lmerMod", "lmerModLmerTest", "glmerMod", "gam", "bam", "glmmTMB", "HLfit", "MixMod", "phylolm", "phyloglm") weightsWarning = "Model was fit with prior weights. These will be ignored in the simulation. See ?getSimulations for details." ######### Generic S3 Wrappers ############# #' Get model response #' #' Extract the response of a fitted model.
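# Hedged sketch of how a hypothetical model class ("myFit") could be hooked into DHARMa
# following the recipe above: define class-specific S3 methods only for the wrappers whose
# defaults do not work. All names below (myFit, simulateOnce, fittedValues) are invented
# purely for illustration and are not part of any real package.
getObservedResponse.myFit <- function(object, ...) object$y
getSimulations.myFit <- function(object, nsim = 1, type = c("normal", "refit"), ...){
  type <- match.arg(type)
  out = replicate(nsim, object$simulateOnce())   # assumed user-supplied simulator
  if(type == "normal") data.matrix(out) else as.data.frame(out)
}
getFitted.myFit <- function(object, ...) object$fittedValues
# getRefit, getFixedEffects and getFamily would be added analogously if the defaults
# (based on update / coef / family) are not sufficient for the class.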
#' #' The purpose of this function is to safely extract the observed response (dependent variable) of the fitted model classes. #' #' #' @param object a fitted model. #' @param ... additional parameters. #' #' @example inst/examples/wrappersHelp.R #' #' @seealso [getRefit], [getSimulations], [getFixedEffects], [getFitted] #' @author Florian Hartig #' @export getObservedResponse <- function (object, ...) { UseMethod("getObservedResponse", object) } #' Get model simulations #' #' @description Wrapper to simulate from a fitted model. #' #' @param object a fitted model. #' @param nsim number of simulations. #' @param type if simulations should be prepared for getQuantile or for refit. #' @param ... additional parameters to be passed on, usually to the simulate function of the respective model class. #' #' @return A matrix with simulations. #' @example inst/examples/wrappersHelp.R #' #' @seealso [getObservedResponse], [getRefit], [getFixedEffects], [getFitted] #' #' @details The purpose of this function is to wrap or implement the simulate function of different model classes and thus return simulations from fitted models in a standardized way. #' #' Note: GLMM and other regression packages often differ in how simulations are produced, and which parameters can be used to modify this behavior. #' #' One important difference is how to modify which hierarchical levels are held constant, and which are re-simulated. In lme4, this is controlled by the re.form argument (see [lme4::simulate.merMod]). In glmmTMB, the package version 1.1.10 has a temporary solution to simulate conditional on all random effects (see [glmmTMB::set_simcodes] val = "fix", and issue [#888](https://github.com/glmmTMB/glmmTMB/issues/888) in the glmmTMB GitHub repository). For other packages, please consult the help. #' #' If the model was fit with weights and the respective model class does not include the weights in the simulations, getSimulations will throw a warning. The background is that if weights are used on the likelihood directly, then what is fitted is effectively a pseudo likelihood, and there is no way to directly simulate from the specified likelihood. Whether or not residuals can be used in this case depends very much on what is tested and how weights are used. I'm sorry to say that it is hard to give a general recommendation; you have to consult someone who understands how weights are processed in the respective model class. #' #' @author Florian Hartig #' @export getSimulations <- function (object, nsim = 1 , type = c("normal", "refit"), ...) { UseMethod("getSimulations", object) } #' Extract fixed effects of a supported model #' #' A wrapper to extract fixed effects of a supported model. #' #' @param object a fitted model. #' @param ... additional parameters. #' @example inst/examples/wrappersHelp.R #' @seealso [getObservedResponse], [getSimulations], [getRefit], [getFitted] #' @export getFixedEffects <- function(object, ...){ UseMethod("getFixedEffects", object) } #' Get model refit #' #' Wrapper to refit a fitted model. #' #' @param object a fitted model. #' @param newresp the new response that should be used to refit the model. #' @param ... additional parameters to be passed on to the refit or update function that is used to refit the model. #' #' @details The purpose of this wrapper is to standardize the refit of a model. The behavior of this function depends on the supplied model. When available, it uses the refit method, otherwise it will use update.
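# Illustration of the lme4 re.form mechanism discussed above (not package source; assumes
# lme4 and DHARMa are installed, object names are placeholders). Extra arguments given to
# simulateResiduals() are passed on to getSimulations() and hence to simulate(), so re.form
# can be supplied there to switch between unconditional and conditional simulations.
library(lme4)
library(DHARMa)
testData = createData(sampleSize = 200, randomEffectVariance = 1, family = poisson())
fit = glmer(observedResponse ~ Environment1 + (1 | group), family = poisson, data = testData)
resUncond = simulateResiduals(fit)                  # default: new random effects are drawn
resCond   = simulateResiduals(fit, re.form = NULL)  # condition on the fitted random effects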
For glmmTMB: since version 1.0, glmmTMB has a refit function, but this didn't work, so I switched back to this implementation, which is a hack based on the update function. #' #' @example inst/examples/wrappersHelp.R #' #' @seealso [getObservedResponse], [getSimulations], [getFixedEffects] #' @author Florian Hartig #' @export getRefit <- function (object, newresp, ...) { UseMethod("getRefit", object) } #' Get fitted/predicted values #' #' Wrapper to get the fitted/predicted response of a model at the response scale. #' #' The purpose of this wrapper is to standardize the extraction of the fitted values, which is implemented via predict(model, type = "response") for most model classes. #' #' If you implement this function for a new model class, you should include an option to modify which random effects (REs) are included in the predictions. If this option is not available, it is essential that predictions are provided marginally/unconditionally, i.e. without the RE estimates (because of https://github.com/florianhartig/DHARMa/issues/43), which corresponds to re.form = ~0 in lme4. #' #' @param object A fitted model. #' @param ... Additional parameters to be passed on, usually to the predict function of the respective model class. #' #' @example inst/examples/wrappersHelp.R #' #' @seealso [getObservedResponse], [getSimulations], [getRefit], [getFixedEffects] #' #' @author Florian Hartig #' @export getFitted <- function (object, ...) { UseMethod("getFitted", object) } # NOTE - a bit unclear if fitted or predict should be used #' Get model residuals #' #' Wrapper to get the residuals of a fitted model. #' #' The purpose of this wrapper is to standardize the extraction of model residuals. Similar to some other functions, a key question is whether to calculate those conditional or unconditional on the fitted Random Effects. #' #' @param object a fitted model. #' @param ... additional parameters to be passed on, usually to the residual function of the respective model class. #' #' @example inst/examples/wrappersHelp.R #' #' @seealso [getObservedResponse], [getSimulations], [getRefit], [getFixedEffects], [getFitted] #' #' @author Florian Hartig #' @export getResiduals <- function (object, ...) { UseMethod("getResiduals", object) } # NOTE - a bit unclear if fitted or predict should be used #' Get Pearson residuals #' #' Wrapper to get the Pearson residuals of a fitted model. #' #' The purpose of this wrapper is to extract the Pearson residuals of a fitted model. #' #' @param object a fitted model. #' @param ... additional parameters to be passed on, usually to the residual function of the respective model class. #' #' @example inst/examples/wrappersHelp.R #' #' @seealso [getObservedResponse], [getSimulations], [getRefit], [getFixedEffects], [getFitted] #' #' @author Florian Hartig #' @export getPearsonResiduals <- function (object, ...) { UseMethod("getPearsonResiduals", object) } #' Get model family #' #' Wrapper to get the family of a fitted model. #' #' @param object a fitted model. #' @param ... additional parameters to be passed on. #' #' @seealso [getObservedResponse], [getSimulations], [getRefit], [getFixedEffects], [getFitted] #' #' @author Florian Hartig #' @export getFamily <- function (object, ...)
{ UseMethod("getFamily", object) } # nObs # default ----------------------------------------------------------------- #' @rdname getObservedResponse #' @export getObservedResponse.default <- function (object, ...){ out = model.frame(object)[,1] # observation is factor - unlike lme4 and older, glmmTMB simulates nevertheless as numeric if(is.factor(out)) out = as.numeric(out) - 1 # check for weights in k/n case if(family(object)$family %in% c("binomial", "betabinomial") & "(weights)" %in% colnames(model.frame(object))){ x = model.frame(object) out = out * x$`(weights)` } if(is.matrix(out)){ # case scaled variables or something like that if(ncol(out) == 1){ out = as.vector(out) } else if(ncol(out) == 2) { # case k/n binomial if(!(family(object)$family %in% c("binomial", "betabinomial"))) securityAssertion("nKcase - wrong family") out = out[,1] } else securityAssertion("Response in the model is a matrix with > 2 dim") } return(out) } #' @rdname getRefit #' @importFrom lme4 refit #' @export getRefit.default <- function (object, newresp, ...){ refit(object, newresp, ...) } #' @rdname getSimulations #' @export getSimulations.default <- function (object, nsim = 1, type = c("normal", "refit"), ...){ type <- match.arg(type) out = simulate(object, nsim = nsim , ...) if (type == "normal"){ if(family(object)$family %in% c("binomial", "betabinomial")){ if(is.factor(out[[1]])){ if(nlevels(out[[1]]) != 2){ warning("The fitted model has a factorial response with number of levels not equal to 2 - there is currently no sensible application in DHARMa that would lead to this situation. Likely, you are trying something that doesn't work.") } else{ out = data.matrix(out) - 1 } } if("(weights)" %in% colnames(model.frame(object))){ x = model.frame(object) out = out * x$`(weights)` } else if (is.matrix(out[[1]])){ # this is for the k/n binomial case out = as.matrix(out)[,seq(1, (2*nsim), by = 2)] } } if(!is.matrix(out)) out = data.matrix(out) } else{ if(family(object)$family %in% c("binomial", "betabinomial")){ if (!is.matrix(out[[1]]) & !is.numeric(out)) data.frame(data.matrix(out)-1) } } return(out) } #' @rdname getFixedEffects #' @importFrom lme4 fixef #' @export getFixedEffects.default <- function(object, ...){ if(class(object)[1] %in% c("glm", "lm", "gam", "bam", "negbin") ){ out = coef(object) } else if(class(object)[1] %in% c("glmerMod", "lmerMod", "HLfit", "lmerTest")){ out = fixef(object) } else if(class(object)[1] %in% c("glmmTMB")){ out = glmmTMB::fixef(object) out = out$cond } else { out = coef(object) if(is.null(out)) out = fixef(object) } return(out) } #' @rdname getFitted #' @export getFitted.default <- function (object,...){ out = predict(object, type = "response", re.form = ~0) out = as.vector(out) # introduced because of phyr error } #' @rdname getResiduals #' @export getResiduals.default <- function (object, ...){ residuals(object, type = "response", ...) } #' @rdname getPearsonResiduals #' @export getPearsonResiduals.default <- function (object, ...){ residuals(object, type = "pearson", ...) } # #' has NA # #' # #' checks if the fitted model excluded NA values. # #' # #' @param object a fitted model. # #' # #' @details Checks if the fitted model excluded NA values. 
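# Hedged illustration of the k/n binomial case handled by the default wrappers above (not
# package source; assumes DHARMa is installed, object names are placeholders). For a
# cbind(k, n - k) fit the response in model.frame() is a two-column matrix, and the defaults
# reduce it to the number of successes k, matching what simulate() returns for such models.
library(DHARMa)
n = rep(10, 50)
k = rbinom(50, size = n, prob = 0.4)
pred = runif(50)
fitKN = glm(cbind(k, n - k) ~ pred, family = binomial)
head(model.frame(fitKN)[[1]])        # two-column matrix: successes and failures
head(getObservedResponse(fitKN))     # default wrapper returns the first column (k)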
# #' # #' @export # hasNA <- function(object){ # x = rownames(model.frame(object)) # if(length(x) < as.numeric(x[length(x) ])) return(TRUE) # else return(FALSE) # } # #' @rdname getFamily #' @export getFamily.default <- function (object,...){ family(object) } ######### LM ############# #' @rdname getRefit #' @export getRefit.lm <- function(object, newresp, ...){ newData <-model.frame(object) if(is.vector(newresp)){ newData[,1] = newresp } else if (is.factor(newresp)){ # Hack to make the factor binomial case work newData[,1] = as.numeric(newresp) - 1 } else { # Hack to make the binomial n/k case work newData[[1]] = NULL newData = cbind(newresp, newData) } refittedModel = update(object, data = newData) return(refittedModel) } hasWeigths.lm <- function(object, ...){ if(length(unique(object$prior.weights)) != 1) return(TRUE) else return(FALSE) } ######### GLM ############# #' @rdname getSimulations #' @export getSimulations.negbin<- function (object, nsim = 1, type = c("normal", "refit"), ...){ type <- match.arg(type) if("(weights)" %in% colnames(model.frame(object))) warning(weightsWarning) getSimulations.default(object = object, nsim = nsim, type = type, ...) } ######## MGCV ############ # This function overwrites the standard fitted function for GAM #' @rdname getFitted #' @export getFitted.gam <- function(object, ...){ class(object) = "glm" out = stats::fitted(object, ...) names(out) = as.character(1:length(out)) out } # Check that this works # plot(fitted(fittedModelGAM), predict(fittedModelGAM, type = "response")) #' @rdname getPearsonResiduals #' @export #' @details This needed to be adopted because for some reason, mgcv uses the argument "scaled.pearson" for what most packags define as "pearson". See comments in ?residuals.gam. #' getPearsonResiduals.gam <- function (object, ...){ residuals(object, type = "scaled.pearson", ...) } # Get Simulations of gam object #' @rdname getSimulations #' @param mgcViz whether simulations should be created with mgcViz (if mgcViz is available) #' @export getSimulations.gam <- function(object, nsim = 1, type = c("normal", "refit"), mgcViz = TRUE, ...){ type <- match.arg(type) if(length(find.package("mgcViz")) > 0 & mgcViz == T){ if("(weights)" %in% colnames(model.frame(object)) & ! family(object)$family %in% c("binomial", "betabinomial")) warning(weightsWarning) # use mgcViz if available out = mgcViz::simulate.gam(object, nsim = nsim , ...) out = as.data.frame(out) # from here to end identical to default if (type == "normal"){ if(family(object)$family %in% c("binomial", "betabinomial")){ if("(weights)" %in% colnames(model.frame(object))){ x = model.frame(object) out = out * x$`(weights)` } else if (is.matrix(model.frame(object)[[1]])){ # this is for the k/n binomial case out = out * rowSums(model.frame(object)[[1]]) } } if(!is.matrix(out)) out = data.matrix(out) } else{ if(family(object)$family %in% c("binomial", "betabinomial")){ if (is.matrix(model.frame(object)[[1]])){ # need to convert to k,n-k format for refit. Syntax here is from # https://stackoverflow.com/questions/6143697/data-frame-with-a-column-containing-a-matrix-in-r trials = rowSums(model.frame(object)[[1]]) oldnames = names(out) oldrownames = row.names(out) out = out * trials out = as.list(out) for(i in 1:length(out)){ out[[i]] = cbind(out[[i]], trials - out[[i]]) colnames(out[[i]]) = colnames(model.frame(object)[[1]]) } class(out) = "data.frame" names(out) = oldnames row.names(out) = oldrownames } } } } else { message("It seems you don't have mgcViz installed on this computer. 
When using DHARMa with mgcv objects, it is highly recommended to also install mgcViz, which will extend the ability of DHARMa to simulate from various gam objects. Withoug mgcViz, simulations will fail in various situations. See vignette for details!") out = getSimulations.default(object, nsim, type, ...) } return(out) } ######## lme4 ############ #' @rdname getSimulations #' @export getSimulations.lmerMod <- function (object, nsim = 1, type = c("normal", "refit"), ...){ if("(weights)" %in% colnames(model.frame(object))) warning(weightsWarning) getSimulations.default(object = object, nsim = nsim, type = type, ...) } ######## glmmTMB ###### #' @rdname getRefit #' @export getRefit.glmmTMB <- function(object, newresp, ...){ newData <-model.frame(object) # hack to make update work - for some reason, glmmTMB wants the matrix embedded in the df for update to work ... should be solved ideally, see https://github.com/glmmTMB/glmmTMB/issues/549 if(is.matrix(newresp)){ tmp = colnames(newData[[1]]) newData[[1]] = NULL newData = cbind(newresp, newData) colnames(newData)[1:2] = tmp } else { newData[[1]] = newresp } refittedModel = update(object, data = newData) return(refittedModel) } # glmmTMB simulates normal counts (and not proportions in any case, so the check for the other models is not needed), see #158 # note that if observation is factor - unlike lme4 and older, glmmTMB simulates nevertheless as numeric #' @rdname getSimulations #' @export getSimulations.glmmTMB <- function (object, nsim = 1, type = c("normal", "refit"), ...){ type <- match.arg(type) if("(weights)" %in% colnames(model.frame(object)) & ! family(object)$family %in% c("binomial", "betabinomial")) warning(weightsWarning) type <- match.arg(type) out = simulate(object, nsim = nsim, ...) if (type == "normal"){ if (is.matrix(out[[1]])){ # this is for the k/n binomial case out = as.matrix(out)[,seq(1, (2*nsim), by = 2)] } if(!is.matrix(out)) out = data.matrix(out) }else{ # check for weights in k/n case if(family(object)$family %in% c("binomial", "betabinomial")){ if("(weights)" %in% colnames(model.frame(object))){ w = model.frame(object) w = w$`(weights)` tmp <- function(x)x/w out = apply(out, 2, tmp) out = as.data.frame(out) } else if(is.matrix(out[[1]]) & !is.matrix(model.frame(object)[[1]])){ out = as.data.frame(as.matrix(out)[,seq(1, (2*nsim), by = 2)]) } } # matrixResp = # # if(matrixResp & !is.null(ncol(newresp))){ # # Hack to make the factor binomial case work # tmp = colnames(newData[[1]]) # newData[[1]] = NULL # newData = cbind(newresp, newData) # colnames(newData)[1:2] = tmp # } else if(!is.null(ncol(newresp))){ # newData[[1]] = newresp[,1] # } else { # newData[[1]] = newresp # } # if (out$modelClass == "glmmTMB" & ncol(simulations) == 2*n) simObserved = simulations[,(1+(2*(i-1))):(2+(2*(i-1)))] } # else securityAssertion("Simulation results produced unsupported data structure", stop = TRUE) return(out) } ####### spaMM ######### #' @rdname getObservedResponse #' @export getObservedResponse.HLfit <- function(object, ...){ out = spaMM::response(object, ...) nKcase = is.matrix(out) if(nKcase){ if(! (family(object) %in% c("binomial", "betabinomial"))) securityAssertion("nKcase - wrong family") if(! 
(ncol(out)==2)) securityAssertion("nKcase - wrong dimensions of response") out = out[,1] } if(!is.numeric(out)) out = as.numeric(out) return(out) } #' @rdname getSimulations #' @export getSimulations.HLfit <- function(object, nsim = 1, type = c("normal", "refit"), ...){ type <- match.arg(type) capture.output({out = simulate(object, nsim = nsim, ...)}) if(type == "normal"){ if(!is.matrix(out)) out = data.matrix(out) }else{ out = as.data.frame(out) } return(out) } #' @rdname getRefit #' @export getRefit.HLfit <- function(object, newresp, ...) { spaMM::update_resp(object, newresp, evaluate = TRUE) } #' @rdname getFitted #' @export getFitted.HLfit <- function (object,...){ predict(object, type = "response", re.form = ~0)[,1L] } ####### GLMMadaptive ######### ### getObservedResponse - seems getObservedResponse.default is working #' @rdname getSimulations #' @export getSimulations.MixMod <- function(object, nsim = 1, type = c("normal", "refit"), ...){ if ("weights" %in% names(object)) warning(weightsWarning) type <- match.arg(type) out = simulate(object, nsim = nsim , ...) if(type == "normal"){ if(!is.matrix(out)) out = data.matrix(out) }else{ out = as.data.frame(out) } return(out) } #' @rdname getFixedEffects #' @export getFixedEffects.MixMod <- function(object, ...){ out <- fixef(object, sub_model = "main") return(out) } # TODO: this could go wrong if the DF has no column names, although I guess then one couldn't use formula # TODO: goes wrong for k/n binomial with c(s,f) ~ pred syntax #' @rdname getRefit #' @export getRefit.MixMod <- function(object, newresp, ...) { responsename = colnames(model.frame(object))[1] newDat = object$data newDat[, match(responsename,names(newDat))] = newresp update(object, data = newDat) } #' @rdname getFitted #' @export getFitted.MixMod <- function (object,...){ predict(object, type = "mean_subject") } #' @rdname getResiduals #' @export getResiduals.MixMod <- function (object,...){ residuals(object, type = "subject_specific") } ####### phylolm / phyloglm ######### #' @rdname getObservedResponse #' @export getObservedResponse.phylolm <- function (object, ...){ out = object$y return(out) } #' @rdname getSimulations #' @export getSimulations.phylolm <- function(object, nsim = 1, type = c("normal", "refit"), ...){ type <- match.arg(type) fitBoot = update(object, boot = nsim, save = T, ...) out = fitBoot$bootdata if(type == "normal"){ if(!is.matrix(out)) out = data.matrix(out) }else{ out = as.data.frame(out) } return(out) } #' @rdname getRefit #' @export getRefit.phylolm <- function(object, newresp, ...){ newData <- model.frame(object) newData[,1] = newresp refittedModel = update(object, data = newData, ...) } #' @rdname getFitted #' @export getFitted.phylolm <- function (object,...){ return(object$fitted.values) } #' @rdname getFamily #' @export getFamily.phylolm <- function (object,...){ out = list() out$family = "gaussian" return(out) } #' @rdname getObservedResponse #' @export getObservedResponse.phyloglm <- function (object, ...){ out = object$y return(out) } #' @rdname getSimulations #' @export getSimulations.phyloglm <- function(object, nsim = 1, type = c("normal", "refit"), ...){ type <- match.arg(type) fitBoot = update(object, boot = nsim, save = T, ...) 
out = fitBoot$bootdata if(type == "normal"){ if(!is.matrix(out)) out = data.matrix(out) }else{ out = as.data.frame(out) } return(out) } #' @rdname getRefit #' @export getRefit.phyloglm <- function(object, newresp, ...){ #object phyloglm doesn't have a model.frame object terms <- as.character(formula(object))[-1] newData <- model.frame(object$y ~ object$X[,-1]) names(newData) <- terms newData[,1] = newresp refittedModel = update(object, data = newData, ...) return(refittedModel) } #' @rdname getFitted #' @export getFitted.phyloglm <- function (object,...){ return(object$fitted.values) } #' @rdname getFamily #' @export getFamily.phyloglm <- function (object,...){ out = list() # out$family = object$method out$family = "integer-valued" # all families of phyloglm are integer-valued return(out) }
# DHARMa/vignettes/ECDFmotivation.png -- binary PNG vignette figure (raw image data not reproduced here).
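# Usage sketch for the phylolm / phyloglm wrappers above (not package source; assumes the
# ape, phylolm and DHARMa packages are installed). Simulations for these classes come from
# parametric bootstrap refits (boot = nsim in the wrapper), so residual calculation can be
# slow; all object names below are placeholders.
library(ape)
library(phylolm)
library(DHARMa)
tree = rtree(60)
dat  = data.frame(x = rnorm(60), row.names = tree$tip.label)
dat$y = 0.5 * dat$x + rTrait(n = 1, phy = tree)       # trait with Brownian-motion signal
fitPhy = phylolm(y ~ x, data = dat, phy = tree)
resPhy = simulateResiduals(fitPhy)
testPhylogeneticAutocorrelation(resPhy, tree = tree)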
Ć“ć ,ŖNŅG³‚Üˆž]ķ—ńńqo°Ņ’ÖNRć¼ł¾,×lgńMļ›wĖ\¶YmŁV•wƖž)!@YwÜØ—œ’rµU‰žžž^K§ķ Bfć¼yė{?©'œ9½Tū‘¤.mU°L'%!}]v$’VćŅ1HżjÖ\źh×Åæź}Ģ1Ռą#TŪFČ~JÓ,ķūyęWD*ģŠyHźbu\搋GFF8S;Ā•)uņöķJ <ĻѤ`L0jΤ“ ”ź5߯…ž<€HK@rē'õ¼“““©²”88¦ŠiČ㢀2I‰@³1:éöó6&ntJ@@ešŚķš`PB$øą£-7'  CɃvķ·6YĒ®CrŅ‘”ł666® ĆÓŌŅˆ8/T½µäƑĒ3šŽc«XtBjø£ŽõBęĖļcžmŽÕ–”žŚ6„—oX/ßsˆf";s½Bęįś¶ib r”€@é•—"BrŚčé®Õß2gQ©©}łuŌß¾õ“åź±­“/m·z^-Ö\Q “^#£:*ĖgH€0’_vް49‚l¹‹lÄ6” R±ēA#”õ$Ÿ „Ÿö”~īŻ»7ø×$På&ĖÓļšš0qš¤qešåXs”Ń„Vć¹Žv”&ģ;37Y–õ‘LŽĪŖ*OJņ±„¢¹é:©I‘6^ŌŠ.E—‚ŖjŖō­äé¢Ż†^€• 铯§ČŽqĆÄRzpż“AŪŚ~bmh69IM­Ž±zINe,]__OµUfX}›D€¤O¾:Q¤M„N ŗ£[~ōĪ;ƒ„qęO[ŠöÓ>ēēē½¹¹¹ŽŹŹŹµNI™Om=ÕĢtŽfﵐģŖŅĶ6½ü_24ė 2]ė guu58橞V>'ŪDR~Ēv,ßxž…æµDźĖĖĖ”I^kā¤0Ÿ¼ūnļ /½tķuÉVjŪ†²mmźĮćǽ;ƾšjŁ®ed©naķŒ?·BNp[4ēBŗo4>@»ŻJ <œą€2XŪ€K LØ»ō£×Ķ·e½iKŠĪĪŽó*²× ‡z~@~w꼚K”Vr‘*ra Nt>ŗcØ‚kČqJ @f šXś‘N2mÄy8^µįė„ĆI„¢JŚcl{{›t)% ƒĶ6Ŗ‚Œ AxvM)-qst<éGe/”óB€¢8Ē‚Swš’½pooo·CF„‡ź*żŲR·ń{P’<% “JšĖ(CRjc @fo#ÉY!Éź€Ŗ±€Žr>¤Ŗ $ļ^} Éź€ŖLOO_“"@¶ž  ’ŌM•ŗ%Uqp FB@“LLL°€˜J@Ō‰Qׁī»É&@­ÆÆs#Ä€Č6 Ø%9³¢MĶ›l«PĒCK?eŸG@ŪXŪ€fff*»øč'£<čŚÅ  0•]1ƒpä5snaa½ꬂ“ ¤W”Um`–v¤ōu›œ0µ«Ŗś5ōøI Hž»8;;¤`P½‘ŌE©č ƒ 0©9wppŠė÷ūģ™š‘,@-% >hōg1Tą™šš*äīZ‚œĢsggējžź.›(椄 ␋äR²ŅŪTĄ9>>&ć%:cČ€ŠLĒĄÉ kpŠõ’“Že } ˆĻĶŠФC®¢ŌŚŪŹÖK‹üŅm«®Œ2  C% WŖBč2]Ó„®³õh?zü˜ žzį¦ķEū]$%1żć<“*89!ōŸĮóÓ ƒžw‘'"UhP¾śĘŽ’€Ž 3@HŠŃ© ”÷d>½ŲŲ‚Aāś9 éééÜ%"×ēUļ»ŃŃQŅA@¬HF'0K;ņŠĆ’«Id,8ŽqPŻÆõź·ÓŻŻkÓ½ööŪ½’śĶoŲ`@א*‰ŲŖĆŹ„Z‚ “č"Žk`˜÷9 ł‘Éüü|aĪĻĻé„p EŅ2ģZŖD²’ö[0“!~Źčq‡ęRÕoß»s‡DčV 'Ų@ÜyõU6@ €J@@L  „aō€tķNĶõSd‡@¼nŁŖśż¾sČ2— @jü·$ś˜m!daż@Ä(m0!ų=Ī g'yU€Q „ IĆ ®®®½NB^Ŗś ¬čģģlP]°¶¶v­ś@FÉ ”ä"ł€ŽŁj€jTĖÉ ”@?łōS62óé’æ{÷.[ ™|÷ƒæcķ~MŚ °dvŻŚŚ¢) 0·\wjäęŌRRUKKKl%äĘč‚J@zÉG‚ŠĪĪε×(<ł‚AؙdšXW‘ķŽt™µ¾.£v“ē2Ńc§¦§§[½„Ŗß~öŽ{™ēńūÓÓŽń»ļ²£® 3čpē†"üķä$@r ˆ (ÓM‚Ź“łÉ'lžžéŌ×īCpB÷üćĮoŗ_Wmg9’>߅NBe}‡²·MŽ}} HšTżßö Z£££C7ˆź‚G”fPYøFę@šéņņŅzƒhŽy£ėėėl„"ė –×õA€£äē 0+++Ö×õlʧ§§ÖyŚJPŪŪŪĪPlŪÖ͜oҬ”“Źūņ¬›NžN³,×:ŗ^ómŪž‘’ėĻŖéėcīŪöŒŃ-ßĘō7TĆU­³¹¹Ł»’žŠkr””‘KTBÉÉÉÉŽüü|oww÷ź¼VóÓ/Ŗ¶*>5s™_ČśŚęi›_hiNžuÓ?/ėóRļÉ÷ĪŪę²=Ōū擾ļ¦O#U«ĀĢ<m (¤@ö@Röy¤_$åB<>>Ž{ōč‘uZ¹Ćw:×:Źė*˜™AcaaaØtbĪgddäjzuń YfŅś˜ļ«›d ŠEž4ūĢllŸWU«1ßŌ§ź†-Eū¼;qxšųńą÷ß|ö³lŒKK®*8½:Kż_Æ2²•Flw’: t’5Y/čó“ ®~ńĶB]Ģą©‚š¾īz L#t{ų„”jŌ¶ŠYāhŲśdDB¼óąĮą÷~ō#'ŪÅ6O€’¶# ę…:k‡>Ÿ9^_‘µ(ż~æwļŽ½Į’———{'''C×+³³FVy·‡m;d&Ź$;óššpØx+ éQjÉÅÖužŚJŗ‹‹ k)ĀVe•ÅĢĢĢąĘŌ\nĢNŽ™o}ŠØćf=g hjjjčoņÕs5ŠŪŚ„Yµ566¼¬,½±TéG•>Ģõv]ĢÕūz•~čņCÆE¶ą«Æ³ėż<ŪĆé.Ÿ2ŁH%I¾óžūƒß_~łe6†qŹŅž`>|ź >ź5×tf•ŗt š-#ļ¹.ó”gdōłéՉēēēƒ*5õ^H F¦±õF³mŸććcļ¼$8»–w{ųöžź–ļd1»OR¬DˆŸ>o\żč­·Ų—„B¦Iź1–ee¼'ÕvIBē™ö»…Ģ#4`dŁŽ#O ČV”&Š*)eķ@ęÕ?<ļ&»ņµÆ±1„•€BŲŹ* ua(yPsŗĀÖ;OJn®×ĖDp)Žes>ˆj‚G ōŅĻ’(“ä J<‘ (µ䢆ü ²AĖNŽt#!ČWŹ IŽ…n£ż@iˆ»3@Łn² µ × ”ää€ņ;Ś€|©äµ~æOBļ«o¼1ųMūOzE§¶:€dŅ••ē„GGGƒßkkkl5@qHlnn&~`}}­(6moo³Eč3/¼ĄFP\Z]]ķ-//'~€śküśćŁŠ @ŖmG:˜%!yų”€R^ŗ‘’źz-?ggg½ÅÅEJ?  ļżķŚw,#!dšÆŽdj÷õÆ~łå—ŁŠ-ŽŽ²5ąōłĖą÷Go½ÅĘPl’QCŖ¤}ˆj@^CUpRż6;;›\ئ‹Ėßó›l啀”żżż”!Cööö9€F$^’ūĒ?² Ą9xJ@6R" ō ūę÷æ?ųĶč@ØŌüņ—ƒßŒ~€č„[lø|ēż÷ŁŅ{—Ņ Įć§{{ƒß$ŸsēĮ"?@Jµ45å @! 
¬īŠöe;Ÿ÷|ÓAŗ>ڧéĄfƒĒ]Ÿ{:ųßef 5„āŖ4 åę=—×ŅVÕ nfœÜš„/u×ö½ŃuVż]Óķ^¶Æ7Ł?Cź+C †tĢCųS÷8mŽcł½ŗŅl_æ~]8Ū®ļć£'üĘkŽģ’Ū2Ö×äu}¹ŹzŽ ž”“OÓAķ®£uWĮ·P­›UŃ%pč²õÆM JwyīŖĒźŅW«gŽąv]HŗŠvė ©Æ )üŅ1gčįOݬ”wk[0[ݚ\Ėō”u“%ƒ#ü 8’5˜4ėźźŖQ£*uƒs«˜ń³Ź¶Yå`tÕߝõ6“`Žž«›ÉѦōŌ¬yWćo*üYT6oV_Aܼ¾_÷Xuåū†ž ­Æ”ž¬ā˜3äš§.ģźŚ‡ęmS“ch_Ē Špć$ń?“0FØ[“šų¬ ųf¬Ėą\ŻUŁC]/aŃ¢ā}«k‡.ĮOdõ¶—/_Öž~]i¤EknŌŅL®Č=mp«‚Ÿ "w±hm ±)½ÆlĖ1§¤>)ĖV„®Ōč*Ž”GųŒVJ %Jy·¦rÅv›P¢nš;„ƆjŃ:}«jĻ6Į\Õ~½­.€‹ŗEӗųĢ‚ķCŃGÓ5ˆ«SųÕĶĪJˆTz_Ł–cĪ&żųń£÷>“ŠõxJ nhOųŒ^ÖõItrrŅč÷$4Ø«[Ū¤é‚Ž›°īŲŖĮŠ&k}“mó*u3¹– ēŚ®Ū1U3QŖfR­āyęķ»Ģ`X¦OōA_)ē˜SZŠĢŌ©ū|€.„?@1R,!PnYgžĢhÕĶŲ¤\ѝ+ÅSĘ®źöäɓ­ŁēuĀ²į\×õ”śÖ¶œVU{Ģ+™×DÕ{iŽ€tŻL˜WÆ^Mśē¦Ź%–ŽWsĘŪ‡ę=v—Ņ‚ŪT†€zĀ H`^%ŲéRk™’f]epq:КY_¾|L;W©$nzkjUWÅÆ¢ S׿ŽĘååå_÷½}ūv©}Qõ˜U÷M-ZūdņøĖ†Rm•ŽW¶å˜³I«œ…S"vyĪ!ĻL`}īdptz~łĻķ@“„É`ōƇk’}ę8ų÷A²"ˆČāÜėšĶóō5č:ļuĪ{½óž6W«gFĒ:TmCŪķmõYńŲ}oǜuæĻ6µæśxü>ŽćĘI¢™?@ń26oš¬ķģŸeön*ƒēŌśÕöuk`Š_ߣ¼Gņžk²ŽP֌J_/uĶXŒå˜C¹Ē ś%üŁ)’©3°–¹ŗŅ>,Čļäw-0L‰r5t•óóóV³źš'„Ō͚899ł³¾QÕ V©IØ 2S®‰„뜹„c”LŁ·eaꬄ«©ūA¾”vRCž"z-Ėźl¢ģ[Ż6¶)µ®2T ‹³¦L—ēŚÄž\ö±×]ŽŖķó ¹Tśī£G&km-²Šraʾē˜£ģŪrżZ°µ”}+Õtqę,ĢÜWšØ{ńāÅä±3ć”ĖŚ 0Æ_æ^ś1V93®nVŃéé©÷ „ß$€ŹL  LĻż]%ąÖĒ1§¹„aėvpąŗ,ŗžąĆ‡“`ęćĒ+®A>\˚'° } *g&Ü*ßc·ešoØ3ļ6=HæŖŃ’J_ńµä=”Øn0}ėĪč+esJ=Ž„ōŽ±Œ—šgäRB'³rÖ-ƒE œRbʤņWMŹVu‘jU†<ø¼éÉ”ø\E›u@æj-žŖ²|C‘™%{{{•’– ō•Õć1g“ź±>.ØŪuż±ęψ%ų¹¼¼lżwó®~īrµuJĆøB˜±Č€rU`Z·FBĀŖ÷Å*ś}‚©Ŗ’m×oČĄķ“'O:=N_kNõ>xßęłŪ¬UR%³«B¾”Æł“AčWÆ^­u»WõŚóY5/„čŅ^Cė+uĒ—u­ł3ŌcĪP×üéćļū~\kž0{š8ūƒ™?#•¾EĮOÖTø¾¾ž ĢŽ2HSw»ż»¹-Z#%éuåJ`hź®®®S`¬bĘ]_ku­+t™Ŗ*ݵŽ2”«lĖUĶīZµŗYmūż6Łd_©›9×å=Üeõ˜3DYĻŖļöŪÄC”Ć̟±īøš+D])½¬ŗ+œĆ•¦Œõ½“RYóīÖõ~ėėŠņe§ĖßfÖDÖ»ķéÓ§k) ™XUaS×u³!V}œėė žUĶLX÷ūr3†ÖWźŽ;}¾ļē=ÖP9Cžł“cڳgĻz}ÕmS.ąY“Ę¢™?Ģž&Īž`ęĻÕ]ŁA‚U×éĻyŻL W–³J}„ul‹Ž7)ńV%åšś,żVµ^K[›Xƒ§n„óóóµĢØ{Ž Ź·}žü~_³!6åāā¢U’ŖUōå”õ•ykµ™Q»( (혳I‡‡‡µ’Öåó nßeßtŻÆŸz’žż_÷}żśumƒ y²žĒmC^Tœń˕ּīĶųĪśYóĢŠĖ,‚EßT]°Ūtp5ļĶ.ėvõįźźŖņž”Ē[G0œYFuĻßt1ölē¢r~«ŲūžR7ØæŽØķ šu/t?“¾RWŚkæQ)±y3rK>ęlR¾sÕ}“ €ņŁQ·ļ–)#3k»|žĶ(d·ĶŽvww±’īNŠæÓÓÓæśŚŽŽŽļ‹‹‹¹w}}=ł½Ŗ¾:½åw–éó³·ćććĘÆ„ī}[÷ŲGGGs÷žżū ·o™×ŲÄĮĮĮÜē_“æf}żśuņšŚß½ž«««ŚēZō·]^Ʀ©ó^GöU'''¶ēģģģĻs¤]yżśuē>·L{ ©ÆäŌåø’ū«~·m» ń˜³īļ.]žsŽńnŃq+żk^»åsĀw1ŗœöĪŽ¬ł32¹ō޽{7ī;;;›[†dURņęö•Ļ)7“m%`X½e®lŸ'åÜŚ\„2L¹橽½½ĖĖ˹įūmó։̺›nsf“™q÷{EkžtŁWYĻeöX‘×R7ƒ ésW»JĶWooÓ*q}ÆŻŃ“æfęI^Ó“Yž.mYµ?NNNęĪȩےĒĒĒ>§²ÆēķļČL²y%іmÆ”õ•Eļū6ļń¶ķ2ÄcĪļÆłÓöx—öĖ Ÿō¹¼·RsŃļ·)AhĶfOoü ü— ¼F3Š9/ōź«Ŗ“ čŖLCĮ“՘ß¾ūk¤œÜ¼rT}¼W5)æ¶l{ ­Æōń¾ļŚ.C;ęŒ%üéūx×ö‚„M“ƒvć$њ?Ąąe0ø/¼ĪĄX×ü\%ŸæÆ[§£ŗ5AŗÜM_[dÖŃmėZC"ƒź™‘Ų—6WĮg 631ŗŹZó_Ū®g³iÓžZ·P[óf9ō±R曮»³¬”õ•®ļū‹–ģū1g“r¼Kæ]VśbŪąęžƒ—Įą 0.(d€4„¤śŒĢĄnėŗ†@™%7/€ŹėMi“¦r…żģkĖĄömŸ>}ZŪ>K¬¼†¼Ī®2°œ}Žv@4„˜Ś>wŚ:3ŪnUūglįĻTśFśė2!PŹQĶ Ņ^]ßÓö_÷ą÷ŠśJ7ķÜę}ßWŸū1g“ŅoŪī»é1.ŸKłŪōEčÕĢ@Ÿ­‡4|Y@zēÖ¾Yˆ{Ŗ©ĪāŻ°.éūu‹nēžōĒōÓuČBšYl½jōé¶äwŗČāß{{{73Ļ3¶÷[^GŻžŹėyśōéļ³³³ŽŸ7‹«gńśŖżŅuŸŒŻ¼žŚGŸ~Fä1Ŗöyž7ϟ}3$Cź+y/Ün»ŻŻŻI»­ć½ZĀ1g“ŅV·Ū0·ēņ]V`6ļ±ęĻ „¾{ÕMéze͟­k„©”z¹ü„ą6Kų3BU5å_½zÕjQōe$ų¹wļŽ_÷÷µ 8Š²ocŻq„ß" mÆrAņ7oŽģ¼}ū¶ņ߲з‹`ķ”}+Įėׯ+ļ’ńćĒ$Ź­Æ™@™ésxx8yĢŗągooOš`ęψ=zōhēņņ²ńļģ<~üxņwuAMf åöłóēŹ5}źĢō#`½nĢüžŒ\Ūh?°Qʾ•äŪ·o;gggyī§OŸ ~``„?Čz< aŽŽŽÖņ|»»»;×××;Ÿ>}Ņų00Ÿ‚|ųšae&Pš¾%\Źć’üł³vĶ `³¬ł³޽{7™„óåĖ—Ę“ščłóē“YE?ֈ0\7ÖüžŒŪšGŁ7€‚ "ü(ˆš  Ā€‚üKŠÅēϟ'’żłóēĪ·oß&’Ÿ’ęēųņå‹F*Ąļßæ5ĀČčäĪ;Ż÷śõė7oŽh˜iHóżū÷Émö¾Ņ@Y„?'W¼|łr„Ļńæ’ūæ;’ó?’3y®ū÷ļktz!¤ś ü”8ūūūk{®˜ņ°eźJœ i€”žPœ£££?j€%˜0^ĀŠóįƇÉm•ŖÖ<Ų„Ł@¦*°Ņlįüu_J ¬Ź“¼ÖlŲQ‚ŁĄęĒv4Kž°Ō—€eĢ:¹ @`{ŻæßęŠį½Jˆ“@'ĮNž?’żõė—†Bģķķķܽ{wņ’?žsŻ’ė'ü`!ŒĻ¢&’öčŃ# žl) ĆģśŚU!MJ”)‡“!ü(PÖĪI óéÓ§óós =Ø ifgĻi€”žŒTĀéķĖ—/„„)Pį°µīܹSy’ļßæ50Óp'3x.//5»»»B˜üw^`ĄöžlPÖÜI°“[BkīT»’ž¤œVŅB ! 
}ž@r„ßō*г³³ĆĆC0 ʳU›–›:ÖK€~ܙ)oōłŸŪ&oœ™raJ„•±g٧”Š—w޽{§qÖHy¶čĄFÜ ž@—w‘š§øż8Ė>eL2Ćēüü\_^“”d¹wļžö€-ž.°­åŁ:0xĀXś]$ü)n?ĪĖ>͉ēĖ—/w>~üØźĒ£īĖcéŪģ˜7Ąć}åIČóęĶ›bgńL¾ć¤„pü7?£tcČš?c;ŠĻģgūģīīsÅéPūöģzJ@پ’> {rJ īLgźälįŒXŹģļļ’uB!–3ƤPެ“ųźÕ«Ńnæ€Ø"ü€ˬ”“““÷ļߟ\µJ7ß¾}« Ō€2Œ­”[.źÉE) vņ_eŁ€&¬ł3B¹Ź{ˆ¶i±Gkž”·g }ŸVm·~H©ļÉMõķŗćC®¬Ķē°÷!ŒĒJ¹™½ōąĘ`…šgŒ{p k"¼~żzņ…zŪöĮ¾ņŽKĀŠ·ooK®øżłó§÷!ŒÄK¹ x€»1X”ģĄ-{{{JĮ\__+Æ7„Rn¹XdZšMy6`ӄ?·dš(ė)mK9S›M•rĖŚŠĻŸ?7{<įPōÉ_J+¼|łrråŻŖe\q˜Ū?žŚŽœ¶-ŲtÆEæ×öÄ4e2ņ:¾|łņ׿=}śtҦC?ŁM‰®¼Ž>ÜŲŁ9aĻm]ې¶¼}źXŚqVśo_ķ¹©¾ŻŌ:Ž@;›*åv||yņ¤Ó{l}{Ÿ>`uē¼›(å–ļ4 {2 `D¬łSŠ\•œ¦\m]7ÕŻ@„JXqļŽ½V“Įų Ų~żśué¹6”S• ¦ß^@~Sķ˜Ąįׯ_ž>'āiÓ¦aÖŹ?įZ yŻéG™…“Į…>ŽĖēēē­’.ŪńšįĆAō‰Yé·g±­ė=lŸM•rĖw°„=ė˜°.’” Ę/W¹ēŹ$Ų¤oüĢŚßߟœX.c™ąg*ž}Ķ~X¦-»?³fõžĢ3/LH`²L[&°YvöOžæKšSÕ'Ö5›jŃėiüÜ~ į5ƗŠ%ß;rĖÅ0ė ~RŹ-Kę–ļ’‚ 4ʾ•“3• ŁH[kćõŹ€r–™¢mJoµŁ§u3<¦kūLgĮLךWŖb^é“u”ĘŖ{ŽœĻ.xŸ×ńžżū„Ū®«ŗņcyĪyķ”Ey›†łŻ.Įą¢ż”}œ>“~ѤOD›Ł3}—}Ūdæ[ÅūÕg. G¾ eŻC„Üzwc°BųSŅž5µ‘¶ÖʛļēSó֝j³nI“}:[vīäädr»Č¼Rq××׍gžōż^Ÿ>^‚“S“ugźöĆ*Ė–µŁ‡§§§µWo.*Ł×vż²y}rŽvÄ¢õ‰šī×>߄~·C¾£££É¶¶Żž¶aژ>Ē|ę@óļpłĪ ”ĄŹ JU5 i jEļ"įĻĘŪż¶¦ū!Wś-ŗŹ°écåd²Ičsūä·*Ąh3HŽ÷ sf”dVJ_ūcUļ‰&įO›šiÕż©M;,»-}†?]'Įe]łĄeŪAų摐'·.³ļū‹Xņ`Kݬ°ęOAŚBƘdąæN›הWK@°©÷\Ź~U=×µUśŠ%ų‰ĢVұ(Z›YGóśĶ“l߂ŸEæ_5ćf]š¾Žyķī³ ŹünžĻų\2]Æ'·Ģ^gš“Rn)•;]»GššĢü)m‡ŽŗŁUČ«ogm¼™¾=uuuÕØLYÓĒ[Ē>Ķ Ÿ,fŪõy‡4ć`ݳśŽo³åūŚ>^Żl—®}23°ž={Öi[V1ó§m{Öõė±öķ’¶–±éŁ<³”r˜ĖĢ  9łė2Č) ±)uŪ<Ę+³Ģ\\\tś»8™1T%a̶ž\¼”ļXC(lm€ņ)ūŒŅ¦ČėŹĶ±üYÕ섃ƒƒŃ?}”lĖBŹCxŒĖ‚ĪĄbŸ>}šĢ¢I9·Ģø^wš“Ł<Æ_掹ŗŗŗQ¶-Ū%ų(›™?Ą(mj€üĒ•'Õc“ķUĘ8ė$Ū¼ģPUūuŒ_.ɬšŗ2Į«b6³Ģü/ėĻ Ł»wļFצ}ĶRBX“lū×­±sttTū7\©²ģ:IuÆåųų؁ž}0³{r{ńāÅJƒ³yhāĪL©¢Ļ’Ü4 4xćüsR75Ęr_c“ŁgĻžżuJ”u rbÜeķžŁ}æLØzœ¦Æ§źo/..ZģW=F—×’q?~ģ„MֹݳRŠ„jÖμĒLČóšįĆæīßŻŻ­ “VłśŗöĖU“ē2ļ‘>śö&>|l§Ģ²y’žżŹŸēäädņ\°ĄĮ 3€Į;<<¬¼?įMׁö>Ėʵ  źJ®-£uoŗØ ~6”kŁ·ōŸ.åŚUÉU¾]÷E]æH “Ķ6Õ· J.|™ĪšYešszzśgFą€.„?Ą(dVL•{÷īµ~¬ŗŁ]µ=!ūömļķÓ¶ōYĀfcdńä.„ģźśOf›,’A™*Y̹­„PużbŁRrc7ƲŠ”#ŸŃ¹ių¬źĀ—|7;;;ūų$d€e€Q˜7»¦M˜Ówš śxžeŹ…µ½RX5 t2K„ĻŁS}H}ż6м}Ѥę Ź“ég wźBØŌōæ{÷īVæ÷»ĢĢ€eä³9³ŠóyžĻčóóó•$JX2o_äµĻk«ėėėĘĻ·h}ØE³VŅÖUkMm[ɳ>ś6t‘ĻÜ|ĒÉgN>›Ū\ÜÓö³īźźjņb2Ą*ܙøśüĻķ@“@ƒ7ĪĢ@¤¾×+'Č}œŒ§“F‚.‹¶ĻˆĪć&„ŹĢ‘ "|śō©r†Ģėׯ+”&ż)óƒŪR2ÆjęT8ęĶŖ8::śåļŪ>«z_Tµ}B¼gĻžĶ}żŁ×MśMʽµ-g—ż¼ØŌ[BĆ“e;m™P(ėõՆ]śń¼¾¼Ģžėŗ-}öķuų<§|OĖēó¢Ļäe=}śt2;yŪgó°r7+„?Šå]$üŁØœ8/{’>Żo]pī,³Ąo֊©š‰Ō¦?µ Q÷øĖĪØČćV=F—„ėėĪ6ō N÷KmC‹®ū«m»tķG› śģŪėüšy0 a2SxÕrM—õ` 7+”}F'³8rBŻEJm,;@›Y]Ÿ?”H ųö5ČÜõq2Ćiś·U„ŗ–YæØ‹Ģ¾éŗO"„Ž–Ł/™E•öH»,#3¶=@ Š·|wKhŸŪ*ƒŸćććÉēXn‚6MųŒRNØ3`ßf°=õÕūZC%ϟz›JyŖ ō9¦°aś8پ¦`͆;UµźSFe }b¶ōU†%ķ’~Övæ$øŹvŌ­Å“mśźŪlÆĢŌž>Æ^½ZŁódMŹią³h­?€±˜?goŒš­ Ųg°='Ū9ń¾=%ė¬L÷sĖģŒŪ¦’6{k*”G~?CÕ¬“* Jņ;Uė’,óÜSÓן+M«ž: ±H¶/æ—Ņg‹Śńv€•ŠåöėŲŌÕ®‹śD…¼–ģ³U…-égÓmČžO’Ŗ}3ۦ˶W×¾TõwĖμéėńśźŪ«Ņw»Šßēp)—)Ń;O¾KäūŅō؟ Ę$ßū7µf"°^Öü.okžĄĘß{ރĄTBŸ?~¬ä±s!LfõjhF£Ė:„¹p*Géė8ļś·ŁuārQß&*\“=„ßųAųĖ}2š NB€ĶXUč“Aš śU•·…”źųŌIčłżūwź¼kkĻ»‚žŸŸ;’FwŸżAŁ7FeZŽ­Ļą'Wugķ¾ ģeHĮcńüłóŽĖ®j&ŒEUšÖiĘDųĄ(ōśdŻĘéś=)ēSµF$ UBʼ>~üŲūcW­»c‘™›ÓĻ‹¾Y3 “i†¬ĻņnŌĪ>0f™ķÓ4ōŁŻŻ”°ŹūčńćĒīĻ vn_¾|łėo¼G›>Ė”BųĄ õśœœœL”)Ixyy9÷wš„œ³APdŸ„D‹†(3įÖ!³DĘBųĄ ōśX°ž- ~NOO'³‚ŗ¾ļÖ5€C—r wļŽŻłõėןū...&÷Œ…š€Aś@½,4_ü¤“Ūϟ?5ōČ{ »’ŠlŅtaīe‚Ÿ„>¹R[šC‰2żöķŪŹŪŪŪ3H üÅĢ6ĀLhęŽ½{µż_©6 Šš€µś@s>|Øż7ļ Ž²o¬…ņnŠŽ‹/*ļæŗŗŅ8 evŌćĒ'ǟéķŃ£Gsƒµ:ļŽ½ūė±ņó§OŸ4t‹6›Žw>žÜźńRę03{«+}xū÷foŪ4k.Æ7m}»żó>ČžŹt'_š§Ēnš¼qžł€œšyĢōn2`ūäɓæīßŻŻŻŲ:?³ēĆ}œw}¼&÷ņåĖ÷ļß/|¬§OŸ. oīŽ½»óėׯ…õśõė7oŽōŅĖ“ķ2·ģ>NßLŲsyyŁj›³~U“@¦ī}ŃÖĮĮAmšŌ¦żź>ć¾~ż: WŗJ[ģļļ’u’ŃŃQćą²ķēoBåü 0Z7^ĀXņK”šŖ } æsĻY××ד0b(Ū4Äš§®ķę©zŽ ²×;ŖÓ%œ+!üÉkĪqæIH6ĻÅÅÅ$<Ŗ3“š'ŸQ>ģżó«ī3“ɾČ,ŸóóóNĻŪ4„†łÕaöeßč•ņn°Z› ~Ę¢KšSõwϟ?oüDĀmÜG™=µlš vĘTFÆn¦Ģ²?tżūō½®ĮOdʖc ”Aų@/„>Пŗ+ļS¢Œz]ƒŸ©Ģ˜ˆ„?~ģü8 ARvn›$,«s||¼szz:™Õ“[~žēٳg£zķ)ÅV„kˆU÷žOŪ-ź’u\Žłū|Ęꖿ‘Yjużw™’uĄ@>•}ƒå¾L*ūĄ¶SŽ ś—ōŖšaŁuDś<ī㼸ϲo nÆńSÕ^‹Ź†åło?~])¬EeᚶM)kžL’6Ēō“s“õcź¶µMł¼”‰ūņåĖFŪÆźoŗ®ĻU·ĘŌ¼ēO?ÆZg©ÉšVuÆwQ >`p¬ł}~ŁŻ–š'_hs2ŽeįJŹ$ōõœwĪŚō9čĆŸYMÖ-i:KØIąV÷X™’óéuļļM…? 
¦³§ÖŁß‡žt݆ŖĒšāŌš™Ż3oFÖŽ9@»ĆĒģʾ æ@å @®düŹ»›vpPżrf[4Y°¾É öŁŁY£™VWWW•÷/S:nŒŗ?q}}]y’˜J焯TižĶŖ xęĶŽ© ~<6 ~ę½Śn?0fž@—7Nį3ņ…āv}]S}0Ó6sŽ9Ė̟«›éŃv{źJeEŪ’]}϶Y¦m75ógS}~3śŚmK¾Õ•ģņŚŽŸŸīø4?|Ķž`ęšG®É•ŪĮO¾š ~¶—™>ĄXd¶C)—U§ķńŖnęǼē ū¾¢”\VUš“ņmuŖ‚Ÿŗ¾øČ¢µ€qžĄ–ĖTų\U’“łŗéč]'`ü„>ĄŲ“-Q•óį.’V„®ä™š§™&åõ†®n_7ķ—uæ×¦|Ū¼¾ŲU“2ŠĄš`K„nnNä÷÷÷+Æ*™šWG€2 }ś#üi¦„š§.0¬šÓō÷ęĶ&Ŗ ‹R½„oÖżqžĄɉ÷t–Ļū÷ļ}Ią”Ąæ¹²))–’ņłœ©ŗ=yņ¤ˆ×y||ÜėćĶ—y÷īŻ_÷åbßeT…Mʆ`œ„?°޼y3ł"õšįù³|n;99Ńx…K‰_”«0][xŗ¾š—/_ŠĶUĢ¼ū§źf×Ģ+?xyył×}oß¾­ ؚܪ³ź>`ųž„  Ü“ųL™^ę~Ł«E®ŌļO ąe%ōų0+3|¶!čićÕ«WsĒYŖJ¾¹(X†™?P˜\)’+5īŻ»·Tšsuu„1 ž¾ølšc¦¬V]Ł1å—²\ˆšĻ˜m~ĪĪĪzyåĖž@!¦us›."øčD>„?(Gśų¾(ōõ8<<¬}/ĆeFi.D'3YņRwŪÖ÷oUIø|ŽÅīī®#$üĖb‰Óš¬}^UćD Ӌ„ŽææŌć}`½ź‰­½ĮPÕĶ(~~ä¶Mį厎Ž_÷Õ}§$Üm]gł„’˼€­Ė-3ŗ€ńžĄå ’éb‰};::ŅĄ#—AšĢäīć"!” ĻŠJæ)EĒŻ»w+ļ?==ŻŚĻeß]+²ų¼¦„?0¢/ ÓY>ēēē+{žŌ€`œ2ąÓĒś!ōĶ«»8ļɓ'E¼>³ ŹńėׯæīĖŗUϟ?ߌ6© Än·IÕlØćććĪĻ+Œ¦„?0płRųu|¹æøøŠą#” xņńįƇK?Vė„>0œ÷v—[·®ƒĶY#†ń{óęMÆż¢$U!ĪĒoü\U ®j  *Ukń¼}ūV§&„?0@9ŃĪ"9æż„`•R€ńȀ[¾3¾xńbéĒŹUBƒu0,u‹¾÷ń¾ļK× Ŗé÷6Hš>V}‘%~žtéćUĪ¼ļu„?0Ą“÷\±Y5ez•®ÆÆķ€‘ČE;łŽŲĒÕ½Y!”AX¦y³šr‚®e&WYŅ|ŒĒõ*]‚•uĖ–]굎AU˜3-żVUÆMVU2n}&įlŲģb¼›šš›+ŒźjŃ0¼ļ_¾|Yśń¾~ż: }¶y=‹yė %j[*²nŠz[Õ‹»ĢškŁÆuV>Y§OŸ>Õ¾ÖŖ×üčŃ£„ŸSé7 „?°!¹²²ÆÅx—„“Ąpe@µÆļ)•ß }ś\Öwž8ÆTŽUĢ«PDµYs,kżT­s²ĶīĻkƦ6qag›2eu–-C?ä2öm¶ķččØõć×­ß<”`Ųį¬Q®ÖĢIv>€_½z5ˆmzżśµ0@)3- ¼¬ĢōNą3][ēłä<Ļž=›3ŚĪĄ¹mQ¹Øy%"›„Ź9¶ķļļĻżm½@±n}§“×¢ż9¾Æ»„|ŌĶājz¤Ļ-;£µī¹† U­éT5Ū«K黼ƺNŪMųkP°UĻš8;;›œĻÖÉ@sˆeeķ±·g}49/ĶæWĶérŒ®:g¦F—ą|ŁĒJŅGių“CÕ¶4iŸ.·h»s1Āōs)mŃ&šiŗOŪ~–Ī{ÜŗĒZę;Ą¢ķ[ö³;įį¢Yu‘fy’LK ęļņž®z?œœXŸ ĘåʁĘĢčY¾ÄLƘ[šĄze°eŽ:meĄ4S‚(’Ų‘AŁUY4Ų›šiŁēĻ@·µĒž¶lپXÅE Ėnwf¹}Śūc79.åł»–ˌŸé@wÕqk^ł³mŠ5¼ÉŒŽM?Ėnw>Ǧ»··÷׿7Yļ(r1Ef Õ¼ „źuw‘6H[֕flkkHżžĄ’rÅSNÜū¬Õ @™Ró?ß³&DzRF(<ĖwR`{+9d&Ͳ2`œĒi:ˆŸuAŚĢ¶˜·Ķųi:Ų_²“S› #3?ū˜5ŌĒvg?·ŁīŁż]Õ7ŖÖĖ©“5—›ĪJŪÄzxu”LßŪ’6Ķ…!Ė„@Óuń²ętyć¼č5l§ķ“¶ÓvŚNŪi;ūßĪ\ķŪĒ‚Öėl;`¼2š›`&Źuåœ2@Ÿsæé­™ ōžżūæž'÷Ļ[Cˆł~dĪīĖ źgöTīŖ1پĖĖĖ?÷evR¶{åÄźŚ-}1}r›Źf6]öGڤjfßōx¶QŽFķĘɏš–DČ{hÕ'óė$ü±¶ÓvŚNŪi;m§ķģo;3ĄµŽāĀŲz7N~”}ƒ$üÉ w¦»÷UW€ńJĶ}„€Mž@Ļ'ł™^ß¶6/ć—µ¦ėA6Y,`U„?°"©„š(eŌ(WBŸ>>Üłõė—6Κ?Šåsk͟¦²pŽłłł ^Ė’÷ļüēžēŸŸ³°_®X`±\šóāŋ„ēæžėævžß’ū7īĖbŌ³‹dĻcĶŲz7Öüž@—wQĒš§ļA‚¾,hƏļt'''“§ŹćĒw¾|łāūŠÄšGŁ7Ų€ēϟONŠsŪŪŪŪųödą€vߟ– ~R8ßė‚€e`þ}ū69ń?==ŻŲ6 iĄ¾æułž“»»»suu5łī—Y=«"ü˜Īŗ¾¾Ž¹’žŚŸßĄb ~ö÷÷[żMfzē{ŽĻŸ?wķ\^^Žø’éÓ§“Ėbėėu_ņ|·K€MŚeŸyŻyģŪĻypp°sxx8YgdUƵßėšB}¼ž’ÖSŚDßor\š›ŅÖ9>å=Q%k]}ūöķÆūŚīÓ¼Īuošņڳo¶±O°wfŹ äŻr I Įg¦Ę¼R"e×W½ęO¾ō[„ŚĖÉūķÄ:„?Ö5źd ōĮƒń‹‹‹ƒŪUĒ—eŽ#ä}ńāEćßĻ€cž¦ \¾}ūöĘ}Æ_æžk 5Ƴéš/·Ū„«ģ‹?~4žżŖķnśPµŸŚ|^4ż,IPõžżūæ~goo﯁ł>śŹńńqē°²×ߦ]»¾Wŗ>ސūž¬:?~lżw·÷}Ī•ž|ŲxIÕéÓ«śĀģļﯵ]ŚĪvŗmU%ąś”]UĢb_’w”!õżYĖ„>S™M6F™e³ģ10^½zÕśūėūĆ"ü)W’6ä<;;üEkŗīYJŻææņßņ÷}¬7‘AĘg’IŪ*'''“œ)£”RB)oT'ƒ”uėy4‘šh™fŚ.łĢi*Ū;ļ9ózÓ)Ɣ’Vu!ŹŠÕsukµ,RWā*}„qōżŁ÷@›r{9.µķC’Žz»ģZÕq Ē€ 棯Še“}‚a²ętyćXó Ųćŗ5`8'ÕMÆ$===-ņ$՚?ĄŌ¼ņ— R>­ną°éšM#ō­»Ś}њkUėT4}ž6kjŌ­]±Ø-š¶AŻńyњ˜MŲÕäskŁõfśZÓ¤Ļuoś^CgÕĻ3”5†Ō÷›|GÉ1)ۼ袜éŗRMž»nūūÜMkެæļ‹ŹYĪ ņ›®©5Ä>ĮpN!f0ó”Ō>ßöą`*‰uĮOf•$Œ™ød0e1ūRüd pѕė ź¶„«’s•}¶£.€YŌĖĢ m²Xz?Ū7¦Ļ­“iŻguęĶa˜}^š“ēČ1©Éć„1`Ø ~ņ“É:fiŸŗ™@9Ö7-…9äć!Ćaęty昳PģqŻĢŲ¬6k(äÄyŒ„Bŗ›ń¶ć{Ź2ĒĄy3všGźŽŃmA]ÆāŸw\ls1@Ö+Ŗ+[·hźžvh³WśšłÓǶōŁwÖµ½}>N7„¾óffÆņ»ČPfžŌĶŚIŠŅ¶¤fŹ'žŸŸ¾O0¼Æ ³?˜ł ‚ąēļī¦7 LuW‘§¬RŪc`-­;±HÕ1ś¬Ć…3uĀ4¹j¾J.®i3›fޚ5‹¶”Æ«ņǤn ©eūĖ[gߏM?C‘½*ųÉńøĖZj ]ŗüŪŠśĆ&ü6®ĶµŪüD]Ł®rĖ;ėBy‡ódĄō¶¬ŅE—YÕ¹2¾ļ6*U]ūvŻ÷S'''wEūf}æ.PØ+ čx¼XŹEV©›‰ćxH[Ā`£¦ `7‘…pˆŪ¢nQšEkėĢÓuGÕ@ÖźŖj͘ŗH«x=uŁ‹N}Zy—ąe}‚až;‘üT« – MźfqŒU])¦UŖ .rdö?Æź•»Ī\0£`\}ænÖõ¶ĢĘ^ÕńxŽcwiŪM6į°v ~īŻ»×čw³.„ąŲ6«ļ2X_w Nб̭›ģ\“ĘG.lČė+­¬YUčUWšpŖn`»“r†ž8/Æī½Šå9…?Ü&üÖ®MšSWj dC*·c–Ęß®ÆÆžĪū÷ļ'!P)AG]?˜ T•|Ū–RaŽG^’2źÖ‚6„?ĄZ5½*Qš0 ŽÅĖ"ļæ’ž|V-’A܄@}/ī¾nu!֋/Z=ζĻ"Xį°6Ÿ>}ڹ¼¼lō»ŗ|V]\\4śŻĢč«Üݦ¼~żŗńļ֕|`=„?ĄŚ<{ö¬Ńļåjj†-Ēź¾oc”=Łö«««F3Ę½yó¦ņžŖ…ļ«J¾y欉šX‹¦ƒ]‚Ę(eŃ2(ŸcĒĒĒswĢ%ąīßæ’×}?~lō·‡‡‡:Ź5 
5KVÕēWķąą@ĒciĀ`åšö4Y@`¬jąļóēĻ­’fģkÕlBJž%Ŗ4ŽÄņ«īC³åZ_¾|ł×æļķķ酏+Õ*ß¾}ū’Ų»ŸŠXŚō0ō­“³|`-&_“Ll“ńd'ɐąEŒ$°³’4w瑾«0–d²ˆįHø»I0pcˆ9Ī*KŚefs$pœ„$ˆ7ž…¤„1xtż“Żgś“źowu««ś÷ƒęūNWuU÷[Õ„ź÷yŸēuŻeh€”е¹ėÅ<16Éē”ŸąĶ Æ× ŁæūūūŌ Ēééi-?Sd8%é.żv||\ł¹ĖŪI*ė—uš”Ļ_żVi„'„m®©®’ q–OE¾8]%K”%hÖużnHuČē’až“Åė‡/“üKåDŗG |żŒL’(ō+‚ģĻĻĻÆžĻūm›ō^677GĄHzƒü&Oūū4ŒßłIūŹŗ/ō½UŻV½¢Ó;iNŸĪ>†½’Q×z&ehõū9y_ćpīņūņĒ¢Ÿk`ŸæßķÖłzČp/Ż’ł MŃL€bz}RৈAęya„Tˆ˜™™é{›imWõw•É%ųT.FA&Õūļ›ƒtF4YŚØšČž);Ś>Ö$k(ķZsŗMśČ’~z.„*ę'é–ŌAŁ`½?¢“keݟ”éœoŹ|†iĮ°P¤Äo¾+I²ŻYmSö¼ˆļ‹90©ŠąP¹¢£ ud[[[K|>FŒķĢ¬‹¼ŃłEJ‚„ejʶ›\ÖŖWŁ9—ĘeB÷AKö*ŚA]ē,†“ņ[KKK…Ź}„ͱUW———©Ė"4Č|dYŅ‚?£ }žü9ńłz– -..¦žJ!S%Į RE3yL /iž”޽½½Ģ×čtåU•œŠėūņņrā²ŲGģ«Ģ¼2ńžb›uåĒc~~¾żY‹5"š“ÖžĆüÜ ‰ĻW”ńńćĒŹī ĘQÖ1ށ.i¼x>Śŗ»ƒgg§ö×£¶¤[!¾q^ē"Ą™4EĻĒ“ ĻõõõHĻ“I»F(ļ;ķŸłöö6qyZi=č×T×®ų뼬I Ą§ė“N €f]×ļRj9j~c£żßų±Påd»0nbztFꉑ£ćPĀ ¢£ty%ŗ=>>¶;]£Ó“ßėrŃl†øļéŽfÜõī·Ģoė¤ŽāA~“§u/“É樓hĖ2Į‡Č¦ėē{U'ƒ^ā»%‹Š q‘y¦Ā(Å÷¹ģyѹīÅēŽ×ÖiĪ3źGšHüø.2jÓ$¶Õˆ Ī£Ó0:“:#Š™>±NRhēõŻA&MļŽ^\ėÓ&DŽŽµµµv°§³~gā÷"zßó  ’¶—·ĶĪdö±^‚¢Ó;éóĘgņn޲Ńż¼·4ŃĘńŚx?IēJt¦Ņ–i@ŽŖS»Ź¶ėĮ‡ŲVœĆ½Ē=:ō£;ūK ²öū¾ĘįÜÆāĻĒņĪŗń]ʚū*Éīīnę1ˆkb\’²ŹXVŻžó¢‹Rv½āŚĮĪuÆģē®Ū9Įx0ēōóÅ1ē@cÆėęüĮ¾Ci¢¬é£_ߢ]Ż“š–Ņęū‹ × Ū”’ Ńż™?@’æ. (~ yŅęūųx{‚?@_Š–éd‚r`<„• ,3‡ Ć#ų”“8_\\ä®uĻMd Ķ»8<<|õ|̹"ė`<ž„---å®@ƒL ŒŸ˜ē'ķ> –0€RŠfņč€f¹ŗŗJē'²}ļ5PŌīīnėłł9w½ĻŸ?k,h•••Öõõuā2پćGęPHdņē®·¹¹ŁZ\\Ō`ŠŌ™ššJ ütī/2€±–5Ā­“2/½NOO54@}ņ¼¼¼h(€1$ók?0ęęę ­§š#ʹł»PO2€Śų’žš‡²Żóżļk\ČpttŌzxxČ]ļņņRc@Ćī¶··_=æ¼¼ÜŗŗŗŅ@cLš؍ž«æŖ`Ä¢†’ŽŽ^īzŃ e€ęŲŚŚzüy||l}ųšAćŒ9e߀TEēł1śškmm­ō‰2o?õ óH“øøXh½č šÉ¼>õ$óx%2ynoos×;991`ĢžÆ¬®®ę®3;;Ūž €ń"ų|ejjŖŠz÷÷÷ ` ™óųb}}½Šzęł>s,ż’łĆ+ķŗķ1ā³óXYYižžj€»¹¹i]\\ä®·ææož€1&ó§brŽŽŚńōōԚ™™I]~}}Ż~loo·¦§§Ū%>üąh–„„„Üuā^0 0¾dž4DL“ŪÉ҉Éy;Īssss©ÆļYŸ^ĻĻĻķõ‹–EöÄ !ʛąOĶŏļīœe®÷ššŠ^Æ;+(D)·ĆĆþö™@²ź/ş<Ÿ?ÖX5 ųScQ“½LĘNˆl x]ˆĄQ”qDtŌWÜ “¶¶ÖZ\\Ō`5 ųScEj²g½./p“¼¼üå‘%@»»»@ Lt~~®±jB𧦢„Ū ŅFm.,,“^^^Ś(×ytž;99I|Żńń±ƒP3YóBv‹ū@źCš§†īļļS—mnn~ Ōtń\ÆŪŪŪWĻE`§S.ĶÖÖVėńń1uõs?Ƽy.//5@ĶžŌŠŹŹJāó”‰ńI?ģÓ6‘ńS4xsü$uœ985QdīĒøGL»÷`| žŌPŅĶīDP&M,ūüłsźņ¼ŒŸ^:ź«h į²÷ˆŒĮŸ˜žžĪ üt¤Ķó3;;Ū×~?~üų깘€ńUtO^ę8ćKš§f’‚+GGG…_óś ņśn»»»…Žćs/y}}»^ ņ)2ø€ń$ųÓEēźI[w}}]#L€ÕÕÕÜu"+Š®—÷šŖ¾WqĢā|«ėWģßihŗ©®‰^ć*±¬I&[\ąćĘįįį!qyd¹Ų~=²ÖdÉ£»ęŽ iž’ł®9ō†½˜ń?li®8ŸVWWs׋ræuź ¢x6—{R˜ĢkAļ÷>:kÆÆÆ m3*€ Ņ‘ā±æ"óĢu‹Ņ£UIęęęRū-’ģļﷃżˆv*RVµŖ¶ķmēų¬ĻĻĻ…ÖĮŻB’Ī·ų{QęÜ 1eAœ'ńŁĖŠć|xx˜{ü˼§Žv}Ėæ÷żÜ T} ‰į÷÷÷•¶AŁļōŻŻ]_ēLĀmC÷?džš•ˆ®Ē<ž$=~ĘK‘ĄOüHųh¦4Dēn™Īõ³³³ökŹb"Ł&333„?!^ūķw Lē³–é$ (۟Ÿ5öU&šÓݶƒvŽGG|“sŃĄOēž 2$ĘE'ƒ£Ģ¹ā3ĻĻĻ„ŚLœóeßÓøµkÕb x™6‰ļ_?׏¬s¤ģw:Ī&؊ąŌTё¢UĪ`Ctč ޤ½>o.–蹏Īéč<.Ņiļ%-ć'>YYIsĶ”mŪčÄN+§mĒ1ķ=ÆeŲ/²Ī׼㐗ŻU佝 1ėüÉkĖQüĶ­r¾¦¢×“łŖņŽKŁ9€ŅĪóśäeö{-€Iŗm辇Ģؙ"Ÿč$ų˜ Ÿ?Īģ|Mtgeć<żÓ§OķmÅžŠ¾&­ÓzrE:{£“9ö]4[#-šŪČ+/ĮŸ8IД֋öL üÄqėĢ·”&‚iū•¬ Œ"Ē!–g8Ź ŠČŁF±Ÿ“ó'Æ-›˜iķ‘vžvŽKZöM™łz¢m“Īó )™v~ ųĮ¤ü€)ŚŃV¶Ģ õ)E;£cP@t|')ځ“÷ū>“äĶœÖ!\õ(’“Žļ2Łq"Ė»W‘rci;"£č`ŽŲ’[•{ūŽ“ąUٌ•¬õóiyēEДċ¶ü”R ķsÖUŃć“uoYtN­¤ MœćeŹé„½ß2ó—Į¤ü€šˆŻE:ēŽzä/£Óe'<Žļ“üżvŠH{ŸĒĒĒ™Æt®—¢’4Ÿś(œVŽ7+3+mY§²ŚŃĪYó KŚą”»»»¾Ļķ¢Ē©ØĢ• 2d}Ź”9ges‘q×Ļ÷8¤Ėżm¢D\•ē4•ą}‰TŪŽĒ[N&0 ŅŹŃōž6ń-Ądč7X“6‚’āābØļ÷-‚E¤˜śmߤąZVfDŚßķ~ o‘‘TĀ/Ś”hĘr™¶ļ·]ś™w32…Ę„Ēįū8HĘ]UÖP¤D ųCÅą¤ąT•€Q+ŚaįĒ0Ądč·Ó“#-ūg˜śœ<Øņļ]R !k~¤ų}LY;‚?Ķuww'³ †LęŒ”łłłÜuFŁŃĄdHšć7¤erœœœ¼Łß£č8Žą@jvvv2×­jŠ©jŗÓ4—ūX>Į3§§§¹ė,,,˜p€Ź„I’²k"P“µµ5ļūččØŠ,¤$isč½uP(-Ų6Ø·š¹—)¢Č܇ԓĄ ŸąŒ™ķķķÜubŽčWZēüīīī«ē×ĒĪŪČ&ˆE?ó[’ڻж}ėcóššąKĘIsńj2Į#EFPļļļk(’6Š )«“Ŗ,ŽQ$ŅH”ł£ŹhZ__Źž#jTÖÖÖŸ“¬WŚgČ+ńĒųI Ć%ųÓ`ńG2&>Œ‰ ūyDķÜQŽ,Šjł @®ad¬$ĪUeqTDŖZR©ø"‡iŠ6O*Ó7,i}GŃ'5ˆ½½½Rūkŗ:—IKĖpsO Ć%ųÓ0q1ķoāäķķķ@7±ĪöŅF£P|“ēņņRCŠž åé驯צĶ7ģĪå·ؤIūÜ£źŁÜÜģū¾ Ź×U}’Ø~ĖԦҔ“ ‰%•~ƒįüiˆHŽĶńńńŠöqqqń%Ōļ &ÉāžŖČ(ß·ž›€ń133Sś51h4-+$-S£Š÷qųū•öŅ‚Ń2Šl„“}Ä}AŁżĒśo1ßĪÉÉIāóKKK}Ż„-%WguĻxJĄżŒĄpž4 “ .’£=7˜:Ŗ½æŹóųųØ”ųJ™ŽÓ8š6hōīī.õui%›Šö DGd*õ«l‡ZÖHŚēČśü17Ņ(JS„Ķ›ū/Śéļ3m.§ŽaeweĶQTęcvO“°°šµ6 Ž"ØW„ø^¤’€`8j,?żŒņ©JÜø¹8 
®HGLĢG0ÉüĀņņņW’ŽßęYčPĻ8źY„ĀŅ‚Ń/Õéb»ƒ”v:??oĶĻĻ·ß‘²Wń^Óö—õw4Žgo»vÄöb’e'ń^c›E’vĒēLÓ)ɟ‹}Åņ·.”õņņ’ŗ,Ž_^+Ī„8ÖYm:)ā;™ÖŽu–UI(>[فęqN雄tS]ęų ¶¬Ijtš ^ÜbōHŌ© h‘ŗÆq!Žšøń(šQ”õ¾ém?iŸą-ƹwŸ> eó#ŪGt*Ōy²VŽī¾Ī=€æŻņžvÄ "™1æHi÷čOČŚ^Ģ[Ó ŽN™@DÖßøž$•©ŪŁŁł2'Oģ/YŁE‘ŁS¤O$m½āž®»£:k’E’†W9Č7²…£}zßS”Žf%—Šä•z‹s3Ī•xŃnсŸ×ęeīƒŅ¾3Uß§ óŽ,}Y°¤ó1ķ7Ę0Ž’ m\äéœ'Ż}™ńŗųŽ$}Ļ>~ü˜‡Iŗmųź‚?õ”u3ĒøąW9Į_üщ nŚ^ŒH˜¤‚?osĶü”‰²F(wÄ`ž¬ĮLƽPļļŠAG¼ üd½—2Ņ‚VY‰*FõG ØĢ|)1÷ŃķķmeǰLæAюń,1’N'ø2źąO(øØ²żņ¾3U}Żę0¾oiļg܂?Ć8GŹ^Ė É· Ż’Pö­†ā™ų‰ ]\hcy•ŸŪ‹ķ¦Õ™›"Y€ņŠŒŒų É ·‘©Söw|æūėōW„¤rVi3)ų €łTဠÓÓÓĀėFąi9ž>žœYŠÆģüIżˆ>¤8Ži󻵿æ?Ńn›üŁ«:G:ŠdėĮ$ü©”¤qó4ŠąKd„żń©:ą0‰÷u½¢Ó ŅÄoōč$/*Ŗ…ÄkŹ$z÷—67N’Č4éąFÆ“Ańw2!ńžĖŠ÷ļ5oN¢4Q2­lŪöŠŽķ"•}¾—2ūī|ŽīöMŗĻEš§#Ž{”Ū+ŪĮÉų,isMMŚ÷»ŖÉ8Šs$¾ćƒ|ĘĪł¼¦ģ[ZÅé«uo} üŻ5WŁ7&į¾nRļÆ\tģG‡yļü½q’óatęČ©J"«¦»DZg>—atÜGGqģ/i~Ųo|¾ųœĆœŸ7IsŽÄž#šŸ½źvĪ:®ŃIµqŌ™ßēāāāÕēˆ@•`O¶8·_µ]ē¼o‚Čŗ‹ļX„“¦žčœ+ń™ėrŽĆØb~õĮŸśwD„»ß;ƒHŖ[tÅ&2£»ę žŠ$E&”ž”{+fΟ:Kź,z«‘IiŁo„€:‰Šę#FĢ1ŽŅępģå/~臱O¼U§@ŒVĘOSŹ>4Łüü|ī:£˜Ļ€fzÆ Fgę’ćÖ’ūo’ķP¶żo¾’} \E2„ĖL¤ ½ J»˜ä jróõžQ럒źÆjˆ ¶½½»Žy”„²o5³²²ņź¹­­­7y/I£V“ŽÅļŪö÷÷5meęśč&ųÓ···o²ß¤Q«‚?éĪĪĪr×988ŠP Dš§!F=ŚO™9€ręęęr×¹¼¼ŌP Lš§†Ņ:"ōōō4ōżGą')ŪH‰€dqöšš»ž,jŖ ųSCY333ķQ„ĆE ’0„•™S¢ ż-Ļć㣆 ‚?5õņņ’ŗ,F•FCj"Kēōō“Æ}Üßß·vwwæL"{xx˜ŗīēϟ€WWW¹ėĢĪζ>|ų ±ØÄ{MP_1:4oidéloo·Ćrrrb €«««¹ėÄ ؊ąOÅčŠ" aŠŒ€dEŹānnnj(*„ģ[ĶE(JĄķģģŒtæQš$ö+š.«lnGæ%z ąOCµƒ1ūūūCŻO}"ŪHi€lEÉDł\ؚąOĆDi‘UZ^^n—w‹mFŠĒdÄŁžžžŚs/꣌ŚŅXTΜ?  Ž:óWWW­›››v‡D’”:77§œĄā~*ĻŻŻ†`(&ĢŹŹJūĄpÄ@›ēēēĢu¦§§ ˆ ʾ@…–––r×IĖĀ€*ž@EŽŽŽr׉ł`˜ "{{{¹ėÄŒ0L‚?P"ó*īļļk(†Nš*p}}»ĪĮĮ†`č`@>|Č]ēņņRC0‚?0€ūūūÖóósīzEŹĀ@Žkčßüü|ī:ŠŅ^^^4Š™?ЧóóóÜufgg •…€Ŗž@Ÿ666r׉²p0J‚?Ї­­­Üu6775#'ų}8;;Ė]ēōōTC0rļ5”377—»ĪÉɉ†ś255•ųüĖˋƠ™?PĀÓÓSėįį!w½"eį` „™™™Üuīīī4µ„J#Ū$ėė넱±øøųåÜSÕĒ#éœĶ³¹¹Łwš„Čū/óžŖŗĻÖµxs_]tdž@¼ĄOų`”¢ÓµLgmxxx(x©JwĒtŹģ?ÖMėdĻ_$šžŸŸŪŪŠĢŽIAŸh«ųüe?!^‹¦ $Šv‰ĻU6šÓ¹/Œ×ćŽ/2nʼ§ėėėö{$#§.×`p‚?Šó#<ϧ!eˆA·¤Ņ©“ŗWV–GŃ6«:xŃļö9&hė'Ky}Ž[ūe}ž“““ö¹RDÖ9õłóēÜ2pEž–ł~üØ” EęĒyk1G^Y;;;‰Ļ•ŽV‘2r$˛æŅ}e‚ˆżÜ/–Ńoą„Źs»×`0‚?L“ėėėÜu”Šą-¤QéÄ…~6ióŌōSšŖß9oØš'mnœ*ę·I+|uu5²Ļ×o©¶ŗ^K€Įž0±ŠüąM«ÆƖ6NĢg2555¶svōSžŖŹNčŖ³4š$ŹŲFūÄł“ōX]]­õēK b0ĆŅŅR»óÖ$ī‚? Ÿč؟oŲJ›cȵxk‚?Lœ"?f———5c!Fāüų±Šŗ1‰{tÜF†+“)ÆÄ9Šä \K€|‚?Lœų1›g”%< Ļīīnėåå%uāö^‘įzpp į&LtīĻĢĢd®’q.„=p-q-fü`¢™Šv_C0–b~tÜīģģä®{xxØtӄ‰’]I¢£æ܉Īp-ęü`¢©{ot#ćīččØŻq{wwךžžN]ÆH¶+Łź’ üįƇÄēONN&¦tWŃl–*Õ½T°k 4—ą£ČŠ——— €Zżm‹9^>žœŗĪŹŹŠ†zq\FéłłłÕs˜(’õÜĆ<×£¤^“æ_®%Š<‚?L„ų1ūšš»žµŌŃāābėńń1qŁõõµ@æ™?iĮ‚aHĖZž“9 Ó]‘Ż2¬6nZ=×hŽŹ‚?Jc0Ίdż¤żŠ€:ˆ²_kkk¢bżĪuREĄaŲļ±W݃EiƒxöööŽöÅÅEź÷εG•āĆŌŌTūaā8ĘMR)”nQ#¾‰?Ž˜,śdŖW$s8IZ°`œŽcÆŖ‚HŻĘ% 4H¾“vy‹9†\K€¢†RöķųųųK h4 Œ"µī'e"dš­H¦+啽OØkĒłŁŁYåŪeł»šéÓ§ÄēgffśŽęöööX|6× Œ”ĻłĒÅ(Ź&__y›››‰±óņņRųŠ1ŹRcM“³³“ųüüü|įmD’W Œ„éé遷1¬yG}N®ÆÆ§.+2 ØWZfx“y“³Ę]K žŽjGQfcii©ЉĆIµ€¢Š”‘­Ą8ź§óuŌA‡IjóčĻ*rOż_ƒŽ›”•–iT4 sy___„M«*IWĘēϟŸAe@qĢÓJשoÓµ&Ó»·Ųéķķm;Õ2AżDÜ€|ńóS†uŲa„*¬®®jj)&©/Ówė&¹¼¼Ō˜%¤eŃDV“qR øx.²@ŅŹƒ [o’D@'ļü‰ņ^‡‡‡•¼………Rēę°DŠfyy9qY€ņ2vāxĘ{Žcžäääĵ{•bŌĀŻŻ]źE>M\p;Gi؀ņFłĆsX£aüˆ`Üu÷Ä ›č?‰~˜xDĘGgY’dØSNŽü>Q®w0T<כ!Ł'iˆa˜Ķ<¢ćæsŽÄ9ŌyļUf期Ÿ§.õ ²ųœi¼8V÷ķ™1Ń&Q2®s<ÓD¹ąŗfw-É2ÕUśźļ•żEŠŌĒø^\\ōõśˆ Ė blæ8]ÕXźp­Śłķߏ>Ž’äO†~=ģžw)“·j~ccdūųÖ7ß“~śćuŃÉ0Œr"u?ąóīGż-`EŸHޜuEł;—ÖŁŪļkūżŪ:Nۊ`@dKō«ÓƝ彄bąI^'zæĒdŠ ›Ų~Ņ6мē~ßCÖgŖāœˆ, “ ž~ė ēį ÷ŸeŪd”׹Ķ}ux?¬½Dśd'ŚßO (F+Ē#F-DēE¤ żŁżßŹv»ƒ?0Žņī?cä&Œ£A;kc”¾ł–øŽŻmgõ3ļIdütęJ žÜÜÜ -ƒ"-xSę|‰Ŗ>½Į’2ēRæļaX¢½£$ö •sĖŹ–ćd$sžtAqń åŚŚZį×Fźi'V&Ee•鈎GчņńćǾ^Y:kŁ?1ÅAQ‘}Ē­ųI3ģcļ”L¹¹Žó%éż¹Æź}i%׎Bō)Ę{*;š'>CœńŚ:~\K`²½õ uׄ,ūG€É²ŃUĪ/ķĒ<Œ³Č>‰ž“x|śō)µ%:ś£UgݲY%×õ>ś}mæĘq[Q‰¦óśÄ¶cĖ“ŹīƜ*½ļ£Čœ×ƒ“ļ%֏ó¢w~³ææŸz¾Äą˜Žżö3`&‚Y퉼ĻTå9ŃūŁ¢ ’ę.ļY=bų ƒT#ō8VÕ&£ŗ–ćehsžōó”Ÿ9‚āb%ÄČæ8ęüjv­Å\9ęü)¶sž¼Ķw IŒāTZ€¦ü īžĒ»qyWżfE°Ø“ ¤“€Pd”¢ĄMõnßT恠ÕÕÕv(j²0¹z'Vīe> ©Žó›‹2p333„KĮķķķµƒ@EjØŠ,EjŅ»O ÉĘ.ųŸN·³³³¶uxxŲŽNĢ'ĄdŲŽŽĪ\“@“Eš§ß€ĻĀĀBėää$w½Č łŠ ś1O$M÷fĮŸ~>ÓÓÓ­ĖĖĖö|@777ķķtęŠ`Pš˜G€f[YYÉ]gnnNCŠh# 
žÄ(Ģõõõ¾KŗE†Oyb;I?ź#øĮ XgssóÕņēēgG įnoo3—üųQ#ŠxC žt|¢ōŚÅÅE©×ļģ쓃9ńˆ Ÿ¢b’ßżżżÄ÷@3害»»«”h¼Źƒ?ƒ|ÖÖÖŚ%Ü"ąSä|šƒƒƒWĻEfĶ“··—¹|vvV#0ŽWµ”Ey||Ü×kć‡ųłłykqqŃ “"Ž0)*ĖüéēĒtgŸūū{śVä^2ęˆ€IšnŌ;Œ¹xś™ĒgTŌĻĆĆCęņtu„“‹>ŗ½ÅN677[§§§#ż`‘MtyyłÕsF{4OŅ\½F=čŽŅŠ‚? ķy|ęęęŽäƒÅ~ßjߌĪįįaęņååeĄD©“ģŪōōtėÓ§Oķ’n1ą ĆŁŽy®®®4„²Ģ?Ŗ5s:Ąkļ4uõüüœ¹<²Ņ`ŅTüYYYiMMM}y #(&óķŽ‡‘ž“kkk+wõõu ĄÄ©uęĻķķ­#0”ĪĪĪ2—Æ­­i$&’²oŌĪĶĶMī:ēēē €‰T«ąO‘ł4ßŅŅ’F€µ žDąēāāĀ ×åå„F`b½/ŗāŌŌT© Æ®®j]*·µµ•»ĪŹŹŠ†`b™ó€Z9;;Ė\¾¹¹©‘˜hµž(ē0YĪĻĻs×9==ÕPL“ŚbD§r“ecc#słōō“F`⽯ŪŽ Ńœ$¹¹¹ŃL¼Ā™?///™åååÆÖ’lyÆéē!š0™Šd}ĻĶĶi(&Ž;M@\__g.ߣŁŃHŠü Šdi(h žPŪŪŪ™Ėggg5üƒ÷UmčźźJkP¹§§'÷¢PĀ{MĄ8[YYÉ]gnnNCŃ8///苲oŒµŪŪŪĢåūūū ŗž0¶NOOs×988ŠPŠå]Žé©©©W$Qg=iŻa>üŠh¶ķķķĢå³³³ zČü`,===å®sss£” ‡ąciqq1w>h(č!ųŠ0×××C-½;777’Ļńšš¹üääÄĮ€‚?”A™aϽXdū[[[$xŸ÷£»čū•••ÖĖĖ‹``‡‡‡™Ė———5¤xÆ šå[ß|Óśé<”mĻol żżßßßē®suuå@@ eß+‹‹‹ ųĄXy~~Ī\~rr¢‘ ƒąccww7w­­- ĘrΟ£££ÖÓÓSū’?|ųPØ€ś;>>Ī\¾¶¶¦‘ ĒŠƒ?1aļééiėęę¦u~~žŗ^{fff—ķķķµ’ūéÓ§Öśśŗ£Š@qæ˜'ė~ų{C ž“ ’Š_ZZŹ]occ£5==ż%+€ęXYYŃP”¦¦¦J­_$šÓG)8 €f‰ū¼,——— xWõĖ~ś™°7:Œ hŽ"÷„ī’ ˜J3ęęęJæęģģ,ńł˜Ģwqq±į“4ńļõõµ£Ši÷„››› Ŗ,ųAš‡‡‡Äe'''‰£9Ó&õ}||l—vė8::j‚nooæZ/ž+210ćėźź*wÓÓS UüI*Ʊ°°œI ÅØĪīĄOGl§·¤\o0€śY]]Ķ\>==­‘ „ŹęüI Ääeå$½&kTēžž¾#0aŠdAÅą·¢€nį(õVµƒƒ ²¾¾ž»N”śŠ«$ų“”į“TŅ­[ĢćÓkmm­ō¾źėāā"słĪĪŽF€’* ž<==•~MRš')³€fŹ*÷›uĻd{÷V;~xxxõœ’“c{{;słōō“F€>¼ÓŒ£¤ŅĀ@¾7 ž$•ļŲÜÜt4&ÄŹŹJī:sss śPIš§Č÷n{{{ƞėw¾Ÿ²ūąķ]__g.ßßß×HЧ”ežlmm%>’ōō”ų|‘‘ėėėÆžüؗÓÓÓÜuśTü靐÷ģģ,±NūĢĢLīkÓ\\\8b5·½½¹|vvV#Ą* ž$z–––Z‹‹‹ķ‘›‘ 455•ųŚóóóĢmG¶PŅku ŌKZxŽ}%PÜūŖ6eŪ"ƒēłłł«ēoooŪ,I„Ū®®®ŚĆĆC ƒņ|ųšACĄŽW¹±“ ,Ÿ>}J|>²…²&^XXŠ1P3™ĖONN4 č]Õ|||,¼npÖ××Kļ#2ŒdżŌK īÉ„‚€ĮTü‰lœ———ÜłxÖÖÖś ąÄėŠŌŠ`¼d•ó 10Üūamųžž¾żßÓÓÓö£#ę÷)2ź³W“"X¤Ō@żtī ³Ä|ĄąŽ{Qŗ£ŸņńšÅēęę)€‹ūŗ<ł@5ŽėSļ 92—Ÿœœh$ØČ;MĄ0ķīīę®cąTGš€”:>>Ī\¾¼¼¬‘ B‚? Ķżż}ī:WWW *ō^0,‹‹‹śōņņ¢€¾ 5óēōō“577ךššŹćąąĄcĻĻĻ™Ė?}ś¤‘ bCÉüYYYi]__k]€ ¶µµ•»Īśśŗ†€ŠUü‰Œ8;;Ė\¾¹¹©‘`*-ūöįĆ- @ėźź*w( TƲąĻĶĶMnMw&ĆźźjęņééiCRYŁ·˜ē'ĶĒ[»»»Z€¶"™A@* ž$eż,,,“3‚˜ėėė¹ė,..j(’wĆÜøĄĄä¹øøČ\¾¹¹©‘`ˆ†ü‰RoL–óóóÜuNOO5 ŃŠ‚?ęų˜<™Ė§§§5 Ł;MĄØ( Ć'ų@%VVVrי››ÓP0dC ž\]]i]€ r}}¹|_#ĄTü™żźßęü˜§§§¹ėh(Ź‚?½õŪoooµ.Ą„ŲŽŽĪ\Ž;PžŹ‚?>|xõ£>ž Łžžžr×é( O„sžÜßßõļēēēÖāā¢Vh°"÷{Ąč¼«zƒ_ż;ŹæMMMii€†zxxČ\žńćG#ō¾ź ĘØĪ———WŸĪæ777[ėėė_F®¬¬8 5utt”»Īī€Ŗ,ó'‚8ąé<Ҝµ666Z«««ķG÷kŹ>ĮQS’źźŖŌk¶¶¶rŪ[Ē Škoo/słĀĀ‚F‚>•łmŠķ&h†ÓÓÓ/?ü–––ځµ""+^A¹<ĒĒĒķuÕģBļ|IŹH'ųSsOOOķ€Ģööv©×EvP¼īłł¹ō>ć52ƀ"å{ €Ńü©±Čö™™™)żŗEvŠ [sssL؇‡‡Ģå''' ŽĄūŖ6“øø8ņ7?Ɂ‡óóóŅŁ>żŒŅD§O‡"e_€ę(RĪ-ęFƲąĻŃёÖ”Ģå³³³‰Ļē•g‰×ű\__’ņ\tīD‰·ėėėÄ×D(–+“#ķ¾ cyyY#ĄQö­†²‚,———­———ŌLœ“ŽšĶĶĶ/Æėü„E(–ļļļ'¾>JĄtÉ †Cš§†Ņ-œÉŹģI+½ņųųŲž?؈<Åśe¶ŒŽąOCDą'ĻŁŁŁ«ē>}śŌśšįC©}ÅśI ¤ķ“'ī/€·#ųÓ }æ¶·Ä[QJ›W˜lżŽ_Õü©™¤śłGGG}mkmmm ÷r~~^čż“#ęŽÖȂ?÷÷÷ķ9a¦¦¦1—L·ŻŻŻ¾ƒ“&kžŸ,ŃʃX\\|õœą4×ū‹æČ]§č<‚Ąš =ų‰īĢĻĻ—šęęꦵ··×~ķÜܜ#c.m€Ē ĘĒæłž÷5ŌĄŠ‚?1ź3:l®ÆÆŽÖĆĆCbvćēééI#Ą„ŗ¼¼Ō0†ü‰ņnŪŪŪ•o÷ššpā³€’J­õ‹R|Uūšįƒo4Šļżą¹ėō[ŠØÖūŖ7ˆ2åŻŹŠ, €DYøI”\‰ X? 
˜S)użJŖéÆÓ`rŻ}ś4”ķĪolhÜ1š§9Y=››› ĘDå™?ˆH³¼¼Ü.ņņņŅŚßßĻÜĪÕÕU{ż$···ķĄæP¤=ÖÖÖ^µć ’²» yžĖO~’»NŅ ąmTüI+łõńćĒvĄ':e‚±~¼.ÉŽŽŽÄ“¤ĄY“G^6Ōłłł«ēÖ××ūzql€ÉWņmzzZ#Ą©,ųóōōŌz~~~õüććckwww mG(©SaŠķÖUZ‰·„„„Ü  ŽlŖ‹‹‹Ņ%ōāXÆ®®¾z>‚|@óüļæł›Ģå“ZŽĘUeĮŸ¤¹cīīīR³ŹŠ€CÆććć‰=pi–ČšššjŻßß'.OŹŲ‰ QRVP’<ĶĢĢ$.›Ō`4Ł_žŻż\ž¹¹9 c¤²ąOdt‹LŖ;vvv±–………Ōåóóóķ PR@&©”ŽĘĘF{ż“Ģ”ŲN,O›ÓéóēĻ 4P^É·“9€·ónX.šIRFR`b’ēž‰+Y ŁQ“é<"+ę]Jš7(t2‡zYYV›››­ÅÅEß&h ’‘’MÜqzzŖ‘`HbŠVŃ@·”"Ą0 “ü J ä$‰y™®ÆÆS3xʊą“N˜\J¾Ąųy§ ź/ęį‰Ń~Qjo”"čd‚gh®?ųį3—śŽ(ę}Ż?Ą}N)’IņōōŌ~DÖÕķķķŠö=± ŁNžóĪ\.ūĘSķ3”łZĢéŁ8‘ ōłóēÖŚŚZeŪŽŁŁioWąėėėĘŠŠ2ŽŽŽZ»»»C’‹‹‹ŽbFۜŸŸõ\Ģ‘Į”ądĶ—ŁC0yžÓŸ’¹F€šZšgooÆņąO”zqZŽ€PDŽ|?ßžö·5Œ©ŹŹ¾mnn¾z®źąO”¾’ż7“¹ü7~ć74Œ©Ź‚?IžW6šŌŌŌ«ē’N ę/ļī4ŌŲ»*7¶¼¼üź¹ķķķŹŒÅ¼4IŸPU` €_ų½ü@#@Uü‰@M’ėėėv'ę繿æĻŻĪÓÓSkkk«żšÕÕÕÄuö÷÷=€!ųī׀ńõ¾ź ¾¼¼¤fź\\\“IŪ"fgg[Ž@÷ĆŲčććckfff(oxzzŗPöƕ|‹ņ~ƒ”ųŽž’oPC ž|ųš”’}~~®l»1§PZi9F+-KKšźķO//5ŌÜ»an<ęØįīīNą‡B"„‡łTd±ŗV0 ƒčˆ, (™;e¬­­µ_Ɵ››s“(äśśZ#cokkK#0vžÓŸ’¹F€x?ŖE øŽĢ¤LeĆT ××ׇŗ& Jßž•_Źvņß’»Ę„”’ē?ü‡Ģå;æżŪ­ć?ł cīż[ī\ ‡aH 4VM9% J’ńžh(ŪßŲŠøTj÷w~Gšją½& QŠhŽæ¼»ÓŠļ4æ÷ƒd.’æök jBš€Ö’øæĻ\žūßż®F€šPö `ĀżÆŸ’±Ķ§§§‰?€‹‹‹­ŪŪŪ‘ļ7‚@qL>}śŌZ__÷M€śæ’ŻæĖ\ž/~ķ×4ŌPeĮŸÄō–‹¬Ŗ2Dz@|øŗŗščŅoż~–——S—Å|?eD‰¹Čģ2ŌĻ_żģg™Ė’ų{ßÓHPC•’‚0Uf"ČsĻt¬®®¶ƒB“ččč(7š³³³ÓžiŠ\l#ęZJĒ$Qńźįżüē¹ėüŅ7ßh(Ø”Źęüé DD ¦j²K~aoo/ńłČꉀX<"@TEęU:ۜžžN\giiÉA€łƒž0sł/ė[ jźŻ°6<Ŗ@M”~›4»»»‰ĻĒüJĆn(ļ·ææŸø,‚D@=üéåeęņß’īw5ŌŌ»ŗ€I žæzīóēϕͯ”'‚<›››ÆžĻ* ŌĖæüö·5ŌŌ;MPQŠmŌóķœžžjxØ©żŁŸih°÷ĆŚpĢ7“VžŒžEٵ^oˆ‰y¶··æz.2±VVV(cų£e.’žw¾“»ūūū”f`ĒĄ–Qe5@Ó -ų³··Wyš'©ƒannn¢ŲĶĶĶ«ēÖ××ßä½Ä¼N‚?Š<ßłĶßĢ]ēģģ¬ż¦˜ĻPŹjŁ·ŖGƒ®®®¾z.óßžā/jó^#“(Æ²ĢŸ?¶³}ŗE°¦Ŗ›²I÷{?ųAęņßJl“ä×åWZ?ü£?Ź{œßŲp `•ež¤•x›™™8(J»]__æz>ęœįķFÅ&Ķ?Œ·æśŁĻ2—’žw櫑 ę*-ū¶ææŸų|dEöOL \FĢe355ÕzxxH\>‰%ߒ2 z3®F%i®!eų`|żÆŸ’~üŲZ^^.µķģģ“.//æl§3gP’óÆ’:w_śę  ņ^4ßīīnūLž¼’o’tnN#@ĆČüh°’śÓŸf.’ćļ}O#@ĆžL°6?Æ aźč?žGHš ”Ž’äO2—’ūßż] $ų0”žõÆ’ŗF€üh ’ņ“Ÿh˜P‚? ō?üaęņßZ]ÕHŠP‚? ōW?ūYęņ?žŽ÷44”ą@ĆüŸ—ģ½&h–Ÿ’ķßf.’§ss j`jjŖšŗ/‚¾@™? ó·’ē’d.Wņ šMš`Āü³ły &ųŠ ‚?äß’īļjh8Į€ ņÆż×54Üū·Ųéżż}ū‘deeÅQčÓPƒ?777­ƒƒƒÖÅÅÅ@ŪŁŁŁiķīī¶ęęę1€>m’«„`T^öķōō“555Õ~,-- ų ĒĒĒ­łłłö6?|ųŠ:??wäJśżļ~W#ĄØ$ų%Ü"+'‚3ŪŪŪC}ĆĻĻĻ­ö¾”ˆų{’óÆ’Z#m®®®ŚA˜ČŹyxxų’Ł»ŸŲ8ī;AōM®vžżJū’"‰HŗŲs#) 9Ł…ˆX€¤ģ-d $å,Ž&)Ļķ©› $ ū4Lņ6öE$'G’@‚ģ©2vī|ū„5T³ŖŗŖ»Ŗ’~>@ĆVWwuõ·Ŗ›]æo}ææ®oüīīnżõµƒFŻĻ?ś(sł÷ü^€‘Ńvņ'ŚÆĶĪĪöśˆÄ“J `”}žå—™Ėżžū‚#¢pņgnn®žh‰ökyLOO×VVVjĻž=«žžŗÅsīßæ__GJ ååe{ąŒļOM Œˆ y-ŽņTśÜ½{·¶ŗŗZŹĘ]½zµ~k¶““T{ųšaźóĶ¢*)’@±ķ—/_¶§€”ō×o¾É\n¾=²FādŚÓĄŠłÓW_ pĪø ¦’ö’¹ü·Ž$A„`0ż±EuóÆß_`€ K' ūTž ©ļ¾ł¦ Ą’ü@®­e.’ÖkÆ ŒØ®%k µ±±±ÄŪņņņ+_ZZŖ­®®ŚC Ö?ż4słwīŒØŹ“?333õäĪŌŌTķńćǹŸ···W»wļ^ż¹“““ö@?¹~]`DU–üŁŲŲØ'nvww;^×ŃŃQbuĄ(śĶgŸ Ŗ’äO“w[\\,}½>®---u“īHMLLœ»æÓõ š? 
©“äϹūR«ŠŠäR³‡ڃg”–üyśōé+’ŽJÉÉÉR7öīŻ»ö0²“|ņÆjÅ[[[„ÆsuuõÜ}1/Ą(ųķö¶ -U–ü™™™éʐüųćBŠ’~óŁg‚ä2šÉŸĆĆC{z’żŸž)słŻŸžT€ŗOžLNNŚ‹ĄČ[ŗuK€ŗŹ’?«««]yWÆ^µ€”ö‡ƒAr«,łsļŽ½Ņי”Pš››³€”öó>Ź\žĆ·ß$ą„Ņ’?óóóēī[ZZ*uc«H(ō»?¶˜ćšƒ;w x©“äĻĘĘƹū>|˜x;ĘĘĘĪŻ—”p&yń¢åc¾ūę›C(΁ņŽĪ*µķŪōōō¹ūk333mÆsgg'õd¦¬Ä@æśpm-słwŽxC€W”šü‰DM’ŻŻŻz'ęē9lѶ$œœœŌźĻ™M|Ģżū÷ķ=`čżv{;s¹–o@³ eÆšōō4µRēéÓ§õ[’Ōoy\¾|¹¶¼¼lļ#ļGļ¼#Ą+Ę«Xéńńqe<11‘«z`Š=śäA «$łsńāÅzP$jŹs EK8€Qš«G2—’ņömAĪÆr周Łnѧ>ƃƒƒŌ9…FŃķ÷ŽąœńŖ_`ff¦^­ą¢r§ˆ7nԟϟœœ“·€‘ń‡ƒAŚr”[/­ąš+w’*y"Y0ź~žŃG™ĖųöŪ‚$ŗŠĖ—čHöĒĆĆĢåæ~’}A•ÖömyyY4Jš—/Z>ęŪÆæ.P@¢Ņ’?ŃĀmll¬~[]]Y€6}ø¶–¹ü;o¼!H@Ŗń*VzļŽ½z(곣ŪŪe€~»½¹üƒ;w H5^åŹŸ?^»vķZ=477'Ś%ųŃ;ļj¼[/ōōéSmįZxōÉ'‚td¼/Ŗ-@²_=z”¹ü—·o ©“äĻĪĪNķōō“¶¾¾žū9ĶmįNNNģ€ ·ß{O€L„Wž,,,Ō“@q[YYÉż¼h wéŅ„z"hyyŁžFĪæüž÷‚t¬Ņ¶oKKK/AwļŽĶż¼¼l E£ąē}”¹üdz³‚“Ōµ9VWW_&‚ęēēs='ŚĀĶĪĪÖA333ŚĀCķĻ_¹üƒ;w hi¼/ŗ±±ń2tćʍ\ĻŁŻŻÕZyń¢åc¾żśė#¤qĪ”ēpÖxÆ7`kk«~²r||\›žžĪõœF[8I `X“jłö7Ž$ —ń~ِĘü>‘:88Ø]¹rÅŽFĘļ¾ų"słÆß_€\.ōćFMNNÖöööź’’››«Ł[tĶĀĀBķšš°²õļļļ 2…üż~0rļ9ZĒÅAU‰ß˜KKK.†Ī…~Žø8į_\\“—čŖÕÕÕŚćĒ‚®yōÉ'‚ .ž©ņ ˜S2@qį “ń~Ū HųÄ|>q“ų NNN®śÕ£G™Ėyū¶ U¤Ź ?蕾Øü‰+¬ļŻ»Wųy®Ņ j››•¬÷ŹżÆµæ¼x!Ąärū½÷Fņ}_ė­ŚŚ’üŸ•¬{źęMC«gÉŸčÆžšįĆĀĻ»|łr½÷»Ä0 žłóĻ(UWŪ¾E§ŃŅ­hāg}}½vzzZoĶ!ń ‹×Ö2—’xvV€B*ÆüYXXh{Ņģ»wļÖ[Ā «?żuęņīÜ$ Ņ“?1IvTų“›š™žž®mmmÕ.^¼hļC-ϼOß~żu )-łÓī>abb¢>ĻÕ«Wķ`düü£2—O«[  „Ķł³··Wų9+++õy|¢ZHā5æūā‹Ģåæ~’}A »ŠķœŸŸÆmllˆ<@ ߟš °®$®\¹RŸĒgR뀺Õ'OØD„ɟĶĶĶŚÜܜ(4yųńĒ™Ėyū¶ m/{…÷ļ߯Ļć7‰€öÜ~ļ=AŚRZåĻĪĪŽhäšĻŸ.@eʅ »>\[Ė\žćŁYAŚ&łŠežśėĢåæ~’}AŚ&łŠE’vz*ЇNNNjKKKµ±±±Wn333µ­­-Ź!čž’śÆ™Ėæ79)H@] 6åu*±Ü•Ų‹3> > UĒWl{#ę2ŸĶh½¼»»[æŁGĄ ɬüY^^>—éNūĆ_’I­ņŪ0Hžõßž-s¹–oéā¼3®Ź€²\½z53ńsÖåĖ— ھō‘ļOM 0ņ666j/^¶,“&D†ČO®_`¤D•N$kŹ-|"™t||\?W€³ā¢ƒ$óóóõ֣à 3ł},óö²ŒĢwL¾ ­ģķķÕ®]»Öņq1ē„ŗx^œ{Ę Ė—/'.{šąĄHPłŠcߟš`čM¦T9޽{·ķVoiS—©ž`HžTØUĖ·o½öš #įłóē‰÷§M¼Ż©ū÷ļ'Ž_EõOt¾˜››;ש"īŪŪŪ«äżÅGĶ8ā÷•Ó“÷wõźÕŹö_Ś{Ž×än ½Œe$E£5bŅ1±åüwSŅg¬ńŁ6oĢą|Ä6œ½å}\žē“ū=Ż‹ ¢2V7%-•üQ¢ šõO?Ķ\žĮ;‚ ½pNU?UÉXĖŖ *¢1Ø7;;[{śōé¹åqßµkןI‹AQQAė»yófb½øļ޽{/÷:`l¼VŚūŪßßåµŚ‰kR{÷Wƍ÷ÆŁj{›o&ąāłIė-²O»ĖV1žššŖ=~ü8ń˜‰Ų6„»-)¶e L]o“¦<›\Œż‘ÖŖ2öaģĖF£ŹŽYŁėėå1ß,¶įģ-ļćņ<§YćūŖÕ÷tŒq¶ó]ŅĪ~k싇śA#ęB?œ“~<Ēµ²®‚ˆõ˜Œ F?¹~]€”—4ąŖ¾ź{zz:ń\2®~ī¤Ņ!Īw Ē nĻž=kk°ø× 1Ąmõā=ēWé' Ö¶Ʌ+W®tœl9;¾ƒ¢iĒOŅŲĮTB+Õx? ,ĒóÓöI?Ē2’—.]*ōœų¼Ä q»Ēi拘&Å)ĒÓŖ[‰$FÄl{{»'ɳ~Óėļ^iēóÖü÷aee„Šwu*|`“åNž­ŗÉūć `Xżę³Ļ ‡"Į“4×ĪĄdC‘dD’ØZ__/t„w»‰Ÿ†"ƒ‰‘(jw<Ä@x¬#BóˆĮņ“ń†H‰uڼRGGGGķ>æŪ±l>ö£ź “ćōąą +ŸÓų<$ßqܶ› N;ę“.RŽXu²Ÿ¢:dss35Y8 zyĢ÷R\0_F·¢Ø„Šļū²`?Ą@ĻłsćĘ {č[«Ožd._|÷]A†^Z…M•-ßb±ģ÷’•ŒHŖ*Hüž_\Ģ=øƒ Y‰ŸłłłśÕā1ĒQÄtbb¢ķ÷…i·qž• §§§õ[ Ų§½V¬£ŒŖ‘-*āQä8l%m?E,ś5–±Ķy?Q·4IUTUHK„vŅ¢*ķ¹Iß Y‰ŲųLÅž‰ż·Vß[$Ü]æ}tK?­?ń½ßŃń]öÕŠH€u*-Œ– ƒ¼ń&$śŁŸæž:s¹ł~€Qvå~/&»īTóĄīåĖ—SŪ‰ÅżYƒēQYƒ ­¤µJk1Ոw$UFy¤ ¶Ę mҹw x6Ķ“®.Ģزh§ VÄ.mš3lŹŠ¤ķŠ«ę“t±­ķT¤mÖūźu,c槉č“Ļ^¼F‘¹MŹŸ„N+“Z‰÷<ŪqH8}ŠųŒ„UU båJ§śéū#IówmھĖóœēū¦Õ߇FĖČ“J©F¬Ż  8žŅ>O‘|Šć{PŪėÅ dåO|żB ūŅęo-»*'MZLjNēØ9Q²ę‘‰Įµ8oĶŖh÷Źģ\[Ė\®åЊnå(ū¢ÄH,½(1®²Žą¤ĮōtNK&t£-OŚ `;ƒ…QÓ<€WĘw2xƒėE„ 䯖¢ŪŻėX¦UJ~żŒīödńQ%—4WOlK§wZé—&m»ā}ŒBņ§ßæ?ŖĒdŅ÷yĢcŌNKÓųöy‹eķ&'#é)ń£m\Ź÷×o¾É\ž“ė×   ŹžōŗŻAø“9@’®ö¶E‘ąjGŁm„b0øŻA’2Z—% īfĶ”Ō±ŒĮčv?Y- «–,)2ųŸöł,ū{ଓ¶]£ Ÿæ?Ŗ”vQAžVoiŅ:.%U1ęÕÉöĆAņ€”Ufėšv5b ¾ˆ“ėŖIŪMp„i·‚©“÷™Vé‘÷*ų“Ē­BčV,Óę³é¤z¬_*WŠ$H“[u«ģźĀA×/ßUJkŁŁÉߛ²»'MOO;ņ’?ńĆŖ1yXÜ“|FÕ£O>É\ž7Ž$€.)są°ÓAĶ“„Bъ‡ø¼Œł¤ķ)š źä}W)m6m‚ö<ĖŖ&źu,“Ś„…*+^Ŗ–`ĶSĮV]WukT“?ĆüżŃĪq–U˜WT;&i§mį URÕøŹŸčyöŠoVŸ<É\¾tė– tIŚ@]™Ay„%²éŅ&Ŗæwļ^}Žˆ²+;Ņ*HņJJ“3pYF+­²+>²ŽG?Ę2ķŲégiŸ‘<‰×¤diɈų|ĘĪńyKŗ„͵4ōæwūųū£Ūļ;ķų+kŻķ$Ązń7č?—üyšąĮ+7€~c¾€ž‘6pŲOWėļīī¦.kUńpķŚµśt;ƒ®ūūū‰ēÜiƒÜynIėLŗÆ•*R[U‚¤ “f ¦öc,ˈa/$%ņ¼ļ¤–oķV?Dģū!*ķ²>££ŖŸæ?zń7„ i•zż–‡9ZiƒæƒŌŖüųųøåc¢ķW žZ›Æ4U¾ØšŹ’Ōņmeeeąb8ØķųÓŗ³ŖÜŅ’¤EcŠØšyüų±/Oõ"˜6Ē@+“ü‰?äŚ¼ż®UĖ· 㮽FKŚ¤ÓŻŗ’yĶ¢ā$ęÖĶÓĀ*Žo ^ś<¼e%Ņę)ŖÓvV;Ž“dµ~KZ–6J’hŸ> “ŹFā‡Z'„žĶ·©©)mŽ€¾÷šć3—æöw'HĄHI°uq_q1@½½½ė±1ˆēŅż ŠIą;=žŅ’9I-įśiīœ"±ģ§Ö†E%Ķה•ČMj –·å[T]ŗt)ó1Qł Ų“Ćłż0čJOžÄĶų©$ą¼’ė?ż'AFJŚ 
{7®°O›Ū„HE@扊žl>88Č5(Z4ėĶänēI«^JšC&Śä%Ij ×n•ZÆc™Õ&­ß„}v“öE«y±Z‰y³’DŅÆw•_£ūż0ČJMžÄrŚĶ*$] Ą`čtж•“¹]Ņ–{%­5^–č¶ēą18ŚźÜøH øŖ÷I/tŚb°Żt½Žå0NŸTɕt|ÆÆÆēZ_Z‹¹xž ~z½Ļ‡ńū£Y/*ŪłJMž“*“-Süøķ·ģĄhūpmM¤UŚTŁ–*ėjń²ę“)K§óóĹq$Ņ%‹TY cĀ m€aaį•'UwtrŃiÆc9čū2i¾¦ēϟŸ»/©óLó¾M““¾hĻūüaQVuĶ0~”ż}%­ZoŠēpz§“äOÖŠ(±Ž?ži™źĘ²ø%•cŸÕ(!•ųśĶś§Ÿ @‚“¹7bąµŖ–>i žłłłžÅacc#ńž²ZJÅU÷iēŌIƝŌ6nXēŚMJā<~üų•'µ‚Ė;öŠ±ģFkÅ*„Ķ×töū$)ŁŠj\©!mޱAN`“»ķķ“„ļ³ŅƒeŒS¦“Śķ*-ł“”…Ž?‘؉?"ń(nIįÅīĘņxģŁŽžĶL¦¦¦ģ5€“6?M$āÜ2銾–€é†ÅÅÅÄūÓ*SŚ‘6ˆ›ō¾Ó‡Q;³E&šļu,{ъŖ’.">;ųž” Ź›)ė» Ÿ’Eķ¾§v>£ōżqVZNZ›Ń"ž>}Złß`“”–üŁßß?w_Ś\Ļž=ĖõÅŁXGó•YE'¬ØŚ_^¼€ YsA¤]įß®ŁŁŁÄū;­ś©¢õN‘CŁŅ®&ÖAݤX7 IWóŌļu,Óļ;9fū”ÕTRÕąŁÄnR’7ļ@yR»øvō2”\Ö{JK:ųž(¦“JÖ“ćhX»@wŒWµā¬¾øĶ}“GĶ_€Ķ_v£üĒč?«Ož@†Mke  f],ŲékD­v÷Ҥū±ÅŌ°¶nJJ$4Zæ5·€ eĢIÕ­X¦%P;9fū”m\Śē&*ܒŖÜVVVŗ¾IĒN/e%Ś“”ŻRlZæ%ĶG:©dM« m§%@Ceɟ²ēäižć5 L€Įa¾€Ö²1šÕé dVāēųųø”÷ŠĪą^¼Æ“6te$ņH«āŲŽŽ.ĖAU¤’„*±^Ē2-¹ŚĪ1ŪOū’žżū‰Ÿ©¤j­"ß!eTŻõ²:*ķ¢ė"Ó%Db!i®«A9ę{%«Z5mN ,iIĪ8Fµ|:QJņ§Œ+•ņ\™Šf[§Yķ­ŗ)ƌfgg3ÓΘQŚg6ļćv6yŸ³~›‚ Ymżņ~‡Ę1ĒN»•L£ņż‘ōw!¾S’*w2„8Žāļ^¼×xĻ1Ļ]–².ZčYņ'~@œż1Ņī˜ų¢ķeU€ß¦ō;oXŗuKņ(¬Č•ϧ§§÷žbš/¶»Ŗ+¼£½\Łs%“ŚŽ¼‰Ÿ¼­|ŅZĄDQžAń< ¹Ę{Ģ[)•ż8xsČd āw:čŚėXĘąrŚ€tżŌrŖÕøOŚü7­4Ŗ/Ņ4’@ķ|/tc¼*ösT{eU?¶ÉĪNē„ļ¤ĻI¼’“„kŽćčģžųŹRJŪ·vžH”õ°Œł†ŖōżÆ\f1pZf{›ŲuV5Iv'‰¶øš;ļ€zēŗ׬yAš5ryZlåŃiņ”*iķ¾Bó\Ćķźu,ć8‹ć­ƒ~L(ßøq#uY‘㼬ĻtģŪĘs“Ž›n%Ļ"AŅnņ+*v‰†¤qŗ<Š£öż‘$āTĘß±øhAā(S)ɟ¤Õ­ęšQ­ óżt.»:MÅscYūe‰×‰$S^Ńź(žķøņŠsę,:‰}ˆy(āõŚDŒAėxķNq#ńŠĻiiļ­ģ L{Ė8ފ¾vTošļ¦“q¦2’ ß“ł[’looæ’ÜIŗ(:ĻÜÖe‰äWžöŽĶß­.ęn'5 ßYNJ&]#N±ļŖ¼h]ćU­ųęĶ›„ż!čW®­e.’q‹žōü‡ĘąYÜbp5ђ’q•}\éW­7_ęÕŅuž½5‹$S܃vIƒ}1øŗ¹¹YLҤėyÄ@`<7Öƒ©‘pJ ŽE<ˆe$0āµc7ÖqŽõ§%¢b›bŪĪī"‰®<ń.[ć½5ßŖ|ķf,³^;ڃ5WØÄv4Ŗåā–Ō"°ū§Č±RV…M|nIčVqj¾ 9öQóvåŁoeʶQy·¤J ³ßIIßļ“³·v“é½<ęŪ=ŽŹ:®Ļ ń¾’ŖĀ¢Š-žĘ5ŽßNę8Æņ½ƒÆ“9āĒns’Źø‚ Č#‘0Źś‚Jśņ—z©Õ\>1ßÅÅąź tŒˆA»8W­z°2Ī}»QՔ$Īķ£² “ÖZō>–ŃlęPéµH$zė­~ś¼Žņ÷G|g÷ź{ ”Vł“ōC7&;‹IļŅ~'eż³ž,..&žQčWß}óMAŗŖŌ¶oi#FŅ&’@Ķ’ŹLļŻ»—X>›ōü ņč•?č­Š-Ź\Y“xKKŅ$Iėi¹»»›k=I=LŗeõɓĢåęūza¼ģĘ$”EtpŬ~Ć@/µšļēƒ;w čŗŅ“?цķōō“6??ŸėńQżÓNO¼@?ūöėÆ ŠućU­8ęé‰MÜÖ××k÷ļ߯œœ$>6*xā1yIü½ö/æ’½ }i¼/²°°P[^^®We=&«bhbb¢öģŁ3‰ /“šļgńŻw č‰ń~Ū ³CgoQ5tõźU{ č Ÿłeęņ„[· č‰q!(Ÿł~€^‘ü(čŸ?’\€¾%łPP«ł~īžō§‚ōŒä@A<<Ģ\n¾ —$†Hfņgyy¹666Ö··Ų>€n2ߊļ.I"¹vÖéé© Ąæūpm-sł/oß$ §“}(ąĻ_¹üö{ļ ŠS*śź{ ]*rśĶgŸe.’ÖkÆ Šs™•?ĖĖĖõµŚź“'™Ė—nŻ$ ēTžäd¾`Hž ˜ØÄ«üÖ,ĻsT‰@ļIžäšč“O2—ē7 č ’?9˜ļ’?9üõ›o2—’äśuAś‚äĻ€YZZ Õ!,/^¬žžÖ®^½ZŪßßO|Ģńńqżq{åßńš0ŖĢ÷ ’Ņ*fffź ƒnŽ–——GvĒķķķÕ¶··—]ŗtI…”čWe.’ąĪAś†¶o,nQ‘311qnŁĆ‡ĻUļÕųŃ;ļĀ€š­ōb„N«± ’?Cąää¤v÷īŻÄe1č°µµ%H=šüłsA ė$†Äźźj}®Ÿ$7oެMNN “ó7öɓĢåßó7 g’*ī€Zķ‚ h+mą®^½ZŪßßeŁŃŃQżDčąą@" xųńǙ˗nݤö’޾]ū/ļ½Wśz’å÷æÆż—_žR€č‰Ņ*vvvꉇNo‘œø’~źė¬ÆÆæ|ģņņ²=˜`ooƶ¹¹™øljjŖ677'HP󿝦ŲŲxY½ęœ(Cßµ}‹Ŗ”Hź4<7nÜxełāābmaaĮžk!~,Fü’<}śT;Čį//^Płł{Œu8gŹŌ÷sžlmm›ĖęńćĒõūi-@óóó‰ĖāĒdĢ$k5ßĻ;o½%H@G"Ł“D·“jD\“ę “p`،ĀFĘ\6Ļž={徛7oŚ{9Ełx“ÓKrļŽ½z|€óÖ?ż4s¹ł~čō\-ŗ_t&I“×wl—»M±=ķßņ‹]QtłņåsĖž?^’słżż~ ²““ō²Ź Z} Žķ E­±M0JĘic›[½Eū7Š9<<¬­ÆÆ'.»vķZmffF f¾Ŗį¢;ņ“v÷żvlĒųG$}fggLŒ¤JžHL”#*¦¢ (Éīī®–šļ>\[Ė\žĆ·ß$ RisR˜§†KœŸ7w:ŁŽŽÖ¢½ĆļĻ©©)`¤ Įh’Ą¼qć†@@‚ßnog.7ßP–“““ś9zćęā×Ī$µ¼oˆÄŚōō“ 0ō$F\”‘ō}W“ō„ååås÷E |‰5FÉ@%āJŹ799™Xf£źO_}%*ŚŻ7Üææ>ęqö>•ü™››³Ē*ɵ••`ä­>y’¹Ü|?ż-Z»EŅ'© FĮĄ$öööj»»»öXŖ––^é3ÜøĄ(i5ßĻwī@É666źķ˜b¢öĘ-&¼s”nŲŁŁ©æVtF8» q‹ķjg9.°‹õž½„u“h~ÜŁ[œ±ŗŗz.–[\TėѦ<é}DĒįįaåÆū*öūÕ«W:Žg±“xķ“Ņ«c»čū€Qva62~T=}śŌŽśĀwß|SJƒÅ‘lyžüyāņø’įƇõ[ˆ‰ŚĖœÆ#N‹‹‹-"ĘķĮƒ…¶#¶gggsmKÖ㢂!k ;āŪ³ææßņuāÜŗq~}åŹ•JߛE¢”YŅņqL„®ēńćĒõ[ˆ–^eWvD,[]|z6ŽŃB=āŪ]V\ŅbsV¼ļĘ1ِVļēl<ūķŲŚWZåOŚUEeÜŅ?1Y@™žpp ]琗.]JMü$‰AäØĀčT ŽĒ¹fžÄOŚvDURÆEŅ'¶#ā˜'ńÓ,žqč—Į÷ؼŠķÉJü4‹äG<§Œ9‚ćĀÓXWŃ®#q OMMõÅ1qV$hоŸ²>c@ļņʛ¬([«ł~~œó*WŅÅ y»m½#aŃé {sµD;bĄ?­b£[¢-Z‘äYššuõR$^ķˆX'ULYž=&ŗUM•T”ÓHäEUŪµk×zözo`“?攪š»/¾Č\¾tė– t •ó$,¢ŌåĖ——Åó;iõućʍŌ׌¶YŃś*nń’Ń-K/«$².ˆ¼{÷n½[Fć½Äæ³Ü¼y³gļ£UĖŗVū !’ķTµJāÅė7Ž‹<ĒDlG7Ūé5‹×n·ŖķģgĢ·0Ų. 
āFKü½b¾€öE¢$-ńó¦¦V4Ļ[ŅIõNTED„HXYY©·K{ĶF’)i~•I‹Fūµ¤ē7Ÿæ¦ĶæŅéyn$Ź¢ź#mŽ™xŻÕÕÕś’§%;ā=”Ń>­ˆŲęęxÄ{‰c!IÜ-ÖŅÄ~-ˬÄO$Ϛ g‰¬9£"Ō‹±‹ˆeRÅOŚ\>YóÅ<@ńӎ§nŪ@{¦ņ'®¬988š£”OďŻę›I€41hŸVŻU)i ”†8ßxöģY)ŪÆIŸ8æLKü$Åz’^V’lnnÖßGÄ7-ńÓ,ķ¼ŗŒrEĶ6µS}œ–ų ńcū³*™’’E÷[¼F«Ź—Xž5FŃė¶€!*•bÓbŅźsež\„Užų-iWŁåż‘ ƒhńŻw MiÕóóó/+RZ‰čććć—U;Č›ōi>߉ „ę$ÉŃŃQĻā:77×ÖóŅāqÉ»?ŹŪ”w®™Ę6&Ķ”g>©H6¦%#‹^tOKōDåLUķÓZ'U.„}®"‰˜Ōś/«Šßʅ€a' ńûʣÉ|?ķI«äˆDJZ[©¬ßū‘0ź•^ĪåRöyS’¤dJ7D%VŽÄOC$€āJŅ*é‘V%GŚɓ$Ī½Ó®˜æŖHŅ)+‰˜U‰ō/ɆN/Z0¾żśė‚І“ÖQķ*M•)-iŠ«j™Nō2‰Ö¬J¬6GQĢWSō¼1Iy[ē5ė·äI;ŻY¢RØß>o@ū.Ć&Jī«nCŲܗH—vVŃJ³._¾ÜÓvkĶā¤ŻFÆDR®U’¤Ņ’y%µāĖ’¶Ÿ:­źŠ÷‘T鯷µµÕµxĘg£Q)”“żŚüĄ`’üa(™{€²™ļ =i•ų///÷¬„V‘÷ŁĻŅ*²ŗ­Ó9q¢2%i¾š“łvŅZŪµ[õsö}$“OŸ>ķj<ć³Q¦MZ’&­-`^÷īŻ+ōzUŖ,ł3He²ųqšv•S•āD+NŃbՇkk™Ė?øsGJ-Į“¤Uf“Ņīó’­~¦¹g»qaOŸ>mū¹i&fķדć'Īs÷ööŚŚŽ“ć"+ P„q!\‘ųŁßß/ü¼˜4ķVT“˜ÓŽ€Aõ×o¾É\ž“ė×  iē QĮRō|"_f›ė“9‰ŅÄ©(£Ņ،„BæU<ÅŅEEEMZX«*žõõõÄūÆ]»Vx;b®ć“ćb”ڦ›ļśKeÉóĀT+~ä¶JüDė½ćććŚééé+·ø*ķÖüŲøŻæ?óu¢ņØŻ«£€Ń6ĻJœOäm‹Õ­:­ä)rA]V‚"y¤ 8OKXå]GÄqww·ļŽ‘"  xim֞={ÖņłYsNŁŽūøtéRā²+W®ŒTĖō2Žm <•%†©¾„żČźFŅ&N Źų”ū²±Ī“+ĢŚ¹: zéŃ'Ÿd.’Īo@‰²ZFĒłM ø§]D›ÅņN«n._¾œx¬;+y „V ¼I¤“šHĘä=N{\¬#+©¢åY§q¬RÄ9+±Ÿā<7ķ=DĀ%ļÜ=qŽ›µ­’’ė˜7ĶØ]$Yʱ ”§“äOsuH™eų¼*ķ*ÆØņ)z•[QńC;­ȏ9Éź“'ŁooŻ$€’Å9K–HA÷ę[ŅÅf±®¢­«³:TDõF$ā¼&Ī«bą?³ćõggg3ĻĖ ¤÷Ÿ$-‘óųÄs"1Ńčš况®A8W?‡Ųż·ŲOi­ŽBфKV•P#) ³Ęq’ć$īϚ3)+±4ŠŠŪ@9JKž$ üRys7=|ų0ńk·āū:iĀÖ~¾z š™ļ ūāœ%OK®Vb¾–vĻ¢=všH*ÄyM${bą?©5Z¼v§¾tƒV­Ö#1ļ#niēji ŠŖ/*Ģ»!öAžuŃ„¢„KT µŚ‘0kń߬äS«÷3ģŹ8¶€r”Śöm{{ūÜē¼åÖ“/~äv;ĪE'eqīŅŖ(K$²Z›µJŠDEOŅÅlyĻy[µUĖ#*IŚŻ†³ŚM2œM”D›“fyē/*S' “˜OŖ“mŽż‘Õę<ÆØõŠŸ²Žm s„&¢»ł|ō=ŽR^ɂr$ż ķUl㊷fݾB ŚŃŖåŪ÷&'  BQµƒäEڧ5ę7={į[Ņ#­’?sØ"H×.sāśŲ†•••\Ķ:ϊķ*Ņž.XgĻ+“.$̚Ÿ©JE‰h}‰Ä²¶7ā•+E“@1Ū®z¹Ē6Š™ń²W䓒‹‹‹‰ż];¹ā«¤žÅsss=Ł–¤+Žüp`<üųćĢåęūčŽ8§‹AópOŖˆ¤F$iā1IēēŸ½å=OŒ¤G<>’iƉ’“×n~Żv*>b>×xŽęęę¹N$ b»āż·J:ÅöÅzā\¼¹’'$Š”¤VœĆ7æ^^¼yö˜ˆŠž¤żńŠĒD¢ÆģöēQ¹I Xģ’“m8ÓNć•t,å=žŹ8ĖŚ–*ŽķŖ4>3eĘśĶ…*VŚ««„Źš£wŽ€.Š÷@ļEŅ!’½z톸 ÆŒ‹śā½2ŚŅõĖ1Ńė±…HLō*91,Ź:¶€āJOžDE½QjŪ7‰ŸŽˆ K{”“€@§>\[Ė\žĪ[o 0ŠJKžō²D~”$•œß»wÆ'Ū’Tŗ=,-^ėŸ~š¹Ü|?Ą +-ł³øø˜ŗlee„>‰fŚÄķŽņNä9 ŗ]żU?»»»ēī¾Ģ0Čžž? /Dg…¼7€³Ę«\łśśz=I³““TŸD“rĢĻϟ»/ŖŗU}‰ŸK—.»bbĀĪ ÆżåÅ A†^eÉŸĖ—/kV‘“$OT_U]yÕVI‰ŸpxxhēŠ×VŸ<`蕒ü‰Jf[[[¢[”ū÷ļ'Žttō²õCY•@±c~ŸXēƒsåŹÕ]ō½Vóż ƒ e¬$iŠ’źÕ«¢[”ØĄ‰Ūžž~źc¢čģ\LÓÓÓµ™™™ś¾IKŌDõNÜvvvēōI³··g§@ø ƒ+.‘ČÉJɜ" ¼b^'čwśź+AFBesžDåՋŠęęfO^ūʍ? 
óż£¢“äO“;+ꈔ;"֑„™ŸŸļŹėMLLŌŽĶėĄ@łķö¶ #”“äOs„ĻóēĻk'''"ÜEõ$PTE‚¦l‘\ŠõĒ~M›3’ÄłdŅ €ņ•ŚöķžżūÆüūŅ„K"ÜQ  šĘ镕•s•Y­DņčīŻ»µķķķ—ė‰ä "óż£äB™+[^^®·ŪßßyßŲؘ+yzlii©~€QõįŚš #c¼ģīķķ›{&@“““õeŻö»/¾`d”–ü‰Ź’™™™śķšššÜœ3GGGµk×®ÕAeݢҀ’PZŪ·ØźŁŻŻQ Æüįą@€‘2.Ą0[}ņ$słgg *’?ĄPk5ßĻwī0T$€‘öķ×_`ؔ6ēĻĪĪŽh}å_~’{A ēĘĘĘž=Üøq£¶µµeg •?ĄŠj5ßĻā»ļ •8ś_’kØŽĻÓ§OķT€"ł ­Ļæü2słŅ­[‚D%^üļ’-ōĢ!F•ł~膃ĶĶJÖūč“OjæzōØŅ×ˆź¹‡l' É`(żóēŸ 0ŠNOOh‹¶oĄPj5ßĻŻŸžT€”$ł „?f.7ß0¬$†Hiɟ™™™ŚŲŲXWoĖĖĖö pŽł~€Q¦ņ:®­e.’åķŪ‚ -É`čüłėÆ3—ß~ļ=A†–䥑ü†Źo>ū,sł·^{M€”VZņggg§vzzŚńķąą v’žżŌ×Y__łŲååe{xÅź“'™Ė—nŻ$`Øõ]åĻääd=©ÓHšÜøqć•å‹‹‹µ……{Hd¾`Ōõ}Ū·­­­Śńńń+÷=~üø~?ƈ9.^¼X{öģŁ+÷ݼyÓŽ^ńč“O2—ē7 zヲ”WÆ^­MLL¼rŸöoĄYęū äOhnõķßžśĶ7™Ėrżŗ Co ’?333ö@†q!†ł~ž’?ĄPųÕ£G™Ė?øsG€‘0Pɟ““{ hĖŽyG€‘0Pɟ¹¹9{ ĆĄ$öööj»»»öpĪź“'™Ėæ79)HĄČˆäOTü\»vĶŽ=üųćĢåK·n 0pĘĘĘrßĪŗP֊fffŗ^™³¾¾n-™ļ%ーń ö Œøæ¼x!g lņēōōŌŽZĪ÷óĪ[o 0R2ł#ń4¬śięróż£f`’?W®\©Hü…üż~ ĄH¹P֊vvvDč*óżœ7.Ą śpm-słß~[€‘#ł ¬ßnog.7ß0Š$€”õż©)AFŽä0žōÕW‚@ņH«Ožd.7ß0ŖJKžœœœŌöööŚzīŲŲXmnn®¶³³c¹“šļēƒ;w I%¶¶¶ź‰›ø]ŗt©¶““Ōöŗž>}Z›}¹¾X7@»¾ūꛂŒ¤¶’?Qį š›7oV¶a±īx Õ@@³?@ŠĀɟ™™™Śµk×ŗ¶Q tńāE{ x©Õ|??ž÷ߣźB‘Gęłóē]ßČxĶØ:>>–jæūā‹ĢåK·n 0²rWžT™ų™˜˜Čõø˜WčääÄ^2™ļe¹’?sss™‰Ÿ+W®Ō«rŚŸ':§§§õuLOOg>6@Ąčś—ß’^2“LžDbęéÓ§‰Ė¢b'’6{{{„“c‹uD)֙•Ņś FW«ł~ß}W€‘Ö2ł399™x’üü|„-Ų" •@I¢ ikkĖŽ€ōł—_f.7ß0,⢸¼7€³Z&’ڽEUĪĘĘFå>i'27oŽ“÷€s¾żśė‚Œ“ĢäĻĀĀBāżķĪķÓ®“  h7Œóż“–™üyüųń¹ū677»¾‘Q”4ŠÜܜ=#Ä|?­}BÆ.IÕFGGGö ŒVóż|pēŽ #o\†Gjņ'©Ņ&©õZ7ݽ{7×vŒŖB•?333=ŻŲ¤–s’?@øūӟ @MŪ7`H,Żŗ%5É€”"ł ¼o½öš üMjņēźÕ«ēīŪŲŲčéƚßH¢åĄHMž\¼xńÜ}GGG=ŻŲÕÕÕs÷ĶĢĢŲ‹0ānæ÷ž üMį¶o'''=ŪŲēϟŸ»Oņą?d&&&&ĪŻ—Ō®zŻrčOęūxUfņ')į­ß»¾”‹‹‹ēī»råŠ=#īƒ;wąŒĢäĻÜÜ\āżSSS]ŻČ“Ön[[[ö ŒøŸ\æ.g“œóg~~>ńž±±±®ląĀĀBmww7qŁää¤=pFĖäOÖ\;U'€b~”Ē'.;88°÷`Č=śä“Ģåßyć Ah2žēAĻž=K]  „„„R7jgg§¾ŽżżżÄå1׏Ŗ~«Ožd._ŗuKšäJžDNZū·ššįĆz²&ęę999i{cVWWėė™Ķ|ÜŽŽž=#ąÆß|“¹Ü|?ē]ČūĄh’I—“jœsó\ŗtéåæ§§§ė ”ø%‰ ŸXļŃŃQī >==µ×€”W¤Ķ¶ó$ଠEɟ………ŌyxšE2(n<(ecŠĄčhÕņķ{ZĄ$/ś„ØŌɚØ 1ĒÄ 1ļ[\ ]Õ­¬‹čĢƏ?Ī\n¾€dćķ<)ęŠdL“u«ŚĮĮ9~x)ę‡+Ņ.”įõ£wŽ€ć<9ęģ‰$Šüü|©ułņåzŅ'Ö=©„ gœœœdøPĘJ¢\Üb@nii)÷œ@gŻøq£¶¼¼\Æ*€<67+Yļ­_ü¢öł—_ pżü£2—æóÖ[‚āB™+»xńāĖD@»~»½¹Ü|?éʅ4’ƒ@ É Æ“jł@6É Æ“jłö?ū™ d™äĻįᔽ Cą'ׯ @†ĢäĻŲŲXmyyy ß`$}ā}LMMŁŪŠē“|č\ĖŹŸŌ“' õĘ$}`šhłйÜmß?~<I ½½=IRZ¾“–™ü™žž>w_# 499Y;99é›7²±±Qß®k×®%.?>>¶· }ø¶&%ČLžģģģ¤&MŽŽŽj—.]Ŗ'\"ńŅ ‘|š™™©oĆāābācīŽ½[;==­]¼xŃŽ€>¶žé§‚P‚–mß"iɓū÷ļ§>&/‘€‰ĒVжn„O$Ÿvww711QßīÕÕU{¹ēüY^^®'S._¾œś˜ēϟæL5n1GŠÖÖV[•GńŗŃb®±¾hė––šiŲÜÜģ«–t@6-ߏs”čė‰•ØŗÉ#ęŠ[7ÄE‘0‹–oåoēIVpŪŪŪ}ń&¢)¶Gāuć<9ęŽéeh~~¾žśQ ¦Õ'O Dće¬¤‘Š[$dŖtåŹ•ŚĮĮAżµ666ģAp?žX4αņÜĪ/{…‘iœ€lnnÖēįéD${VVV^®sooÆ699iĻ$øPåŹēęęź·f'''õ$N’Ø"FĆ£O>€’]čŋ^¼xQ’ØżźŃ£Ģå‹ļ¾[[’ōS(`\€~õĮ;‚PäŠZ¾TCņč‰<-ß(NņčKZ¾“Gņčŗß|ö™ TDņčŗ’žO’”¹üdz³‚Š&É ļüśż÷ M’?@WiłP-É «“|ؖäŠW“|čŒäŠ5Z¾TOņčš×Ö2—kłŠ9É kžśĶ7™Ėµ|čœäŠ’üłē‚Š’?@Wüü£2—’šķ·  ’?@WhłŠ„ØZž–oß~żu8cll,÷cOOO xIåP9-ßŗGņ؜–oŻ#łTź-£å@y$€Jż·ų‡ĢåZ¾”KņØŌŸæž:słwī@‰.P•<-ß¾ūę›#nooƶµµUłė,// 60$€Ź“jłö½ÉIA‚IŸ›7ov嵓«ä8pŲ½žŗ"¤€š'ĶššpŽÆkooWPŽŠņ ;„?³¼¼|d]”9€JŁÜÜ,ū4·R-ßHįOśūūżµ‹‹‹GÖ (*d”š'cÖÖÖŖz¼óēĻē]?99©ŲäüéOŖ"üɘžžžŖsww÷Čŗ©©)Å ēō#EČįON:GO[[[ŅŃŃqdżIŚÉ!üɘ|Ģčč艏»¹¹ydŻŹŹJ²··§čMLĖ7€ģžD`}}½*ǹzõź‘uķķķ ŠÄ“|ČįO$:;;O|Œ™™™¼ėk1ĻPŸ š››;²nkk«*!Ķźźź‘uad‘ łhłMŸ Ī»>„4---Éüüü±Bž3gĪä=v[[›ā4‘·nŻ*ŗżėАBŸŒŹ7BēĄåĖ—s!ŠĮŅßß_Q “¶¶–wż£GrĒ+>—?.ŗżo¼”H)$üÉØ0Bghhج}WVV’ĶĶ͊Žæææ_pŪ;wr!ńś?’¹"d”š'ĆĀhžr ć(·ļ½óNŃķ/Ÿ=«H)%üÉømllŌģų!jmmUh€&£å@ć…÷āå.‡ "ŠŁŁ™ūƒ/„@U?žŽŽ^M&Ņ„œ–o_<}Z”Rź”Ä#„@‡ēöY^^Ī-A˜#č¤Ē>x¢pxx87ļqŅņ Ū„?ėļļĻ-ÕZĶ…åĄAČtŅ€ €tŠņ Ū„?œX­B&źļÆĖhó©å@ŗ™óxā;ß’~Ńķ½ŻŻŠrĀą‰‡;;E·kł~Ā §œ–oæó„/)@Ź €œR-ß¾ŅŁ©H ürJµ|{ļśuEȀSJĄqLNNY×ßߟ[Č-ßā!üįX¦¦¦ņ®OKų³¼¼ģ"Tą{ļ¼St»–o@,jż>ŃĆP@ˆNKK‹"Tčo67‹n×ņ Ȳ½½'’=00Póóķļļ+:ŠPęü!:­­­ŠPŪŪ%÷Ńņ Ȳ’ļPųŠ Œü!:{uųćŽč" &ßłž÷‹n’ņóĻ+ÅŚ÷Ņųxņó_žR€T0ņšœ–oq1ņ‡cŃĒ å“|ūjW—Bdˆ‘?ŠÄ“|ˆšš˜–ońž@“śõĒ—ÜGĖ7€ģ1ē4©ļ½óNŃķZ¾4VKKKŁūš“8ĢČhR’ń/’²čö¼ń†"dššP9-ß^zį…Č mߚĄņņņ“emm-yōčQŽż:::’ĪĪĪ¤ææ’É@œJµ|ūĀsĻ)@F 
"“““”ŒŽŽ&[[[}]Ų?,+++ÉŌŌŌ“õ­­­ÉĢĢL2<<¬ø‘Šņ ^ھE$„3aRŲ .TüF ]¾|9wģ¶¶¶dssS±2¬œ–oÆōö*@F "0??Ÿ fīܹSós… Ø««+MZ¾ÄMų“q===¹Q9õB 8…sd‹–oq3ēO†…ąg}}½āÆėėė+ø-Ģ÷S‰ŠbnnnĪ|@Ńņ Ū„?533S2ų¹zõj299yāmįSSS·‡‘G!ˆ éVŖå٧ķ[Få]Fõģļļē–Ucnžž³µµ5ļ>/¾ų¢‹v’~Ńķ7FF ć„?4::šwżīīn²¼¼\Ósļķķ%y·…€l»88Ø'üÉ ŁŁŁ#ėVWW«2ʧ!ä:²¾Xk8OĖ7€ę ü‰@hÅVļłvęēē c“|hŸŒ m×žÕØ fnnīČŗZ· v“|ˆƒš'cÖÖ֎¬;ž|C¾—įįį#ė„?éōÖ­[ŠŠ$„?ŠęīŻ+ŗ]Ė7€x-ß""ü‰ĄĢĢLCĪ›ož! ±„?ÓßßdŻŲŲXC¾—|s 囀t{óŹEˆˆš'õżFż¬¬¬YߣŁébdĢ•×^S€ˆ2hhhčČŗ0śg~~¾.ēĮO{{ū‘õ­­­.TÉžž~Ł ĄaŸ *ņ\¾|¹ę#o&''ó?Įęꦋ1Z¾ÄGų“Qy×omm%---¹„Z#ĀHŸ0æO8ęŌŌTŽ}Μ9““µµ¹0£å@|N)A6…8KKKÉśśzĮ}ĀH °čėėKśūū“žžž‚AM½–ååå¼sś²¶¶ę¢@ 2,.!Č)œJré1M—æö5Eˆ¶o ÅÅņœūܹs‚€ ū£×_W€ "ęć !ĢŠŠP]Ī×ŚŚšģīīęŚĪN·?üPš”š'"óóó¹(Œ Mµ…p)ooÆąœA¤ĆŪ·oŻ®å@¼„? #B@‚š°Ü¼y3éėė«č!<ŗzõjr’žż'Ē įqŠņ ^§” ~£££¹€ę å@s3ņ"£å@sž@“Ńņ nĀˆČ[·n)@“ž@DęīŻ+ŗżėŠ9įDāĮövÉ}~šĘ 9įDāÕ±1E@ų±ųčńć¢Ūß½vM‘š€š"šÖ­[%÷y„·W”š€š"0wļ^Ńķ/Ÿ=«HMBų÷`{»ä>ļ]æ®PMBų÷źŲ˜"šÄ)%€lūčńcEˆPKKKŁūīļļ+š„‘?aoŻŗ„{V‘š”š2ęĮövÉ}Ž»~]”š”š2ęÕ±1E  įdĢGŻžīµkŠŠÄ„?!oŻŗUrŸWz{  ‰  CęīŻ+ŗżå³g  É  #lo—Üē½ė×  É  #^SJž@F|ōųqŃķļ^»¦H Žŗu«ä>Æōö*ĀȂ¹{÷ŠnłģYE Gų)÷`{»ä>ļ]æ®Pä å^SŹvJ Ż>züøčöwÆ]S$€ķļļ+p,Fž@нuėVÉ}^éķU(žž@ŠĶŻ»WtūĖgĻ*Ož@J=ŲŽ.¹Ļ{ׯ+Ož@J½:6¦TLų)õŃćĒE·æ{ķš"p„šRč­[·JīóJoÆBp„šRhīŽ½¢Ū_>{V‘ČKų)ó`{»ä>ļ]æ®Pä%ü€”¹4>®›šRęįĪNŃķļ^»¦H$ü€™YX(¹Ļ+½½ @AĀH‘Ł»w‹nłģYE (į¤Äƒķķ’ū¼wżŗBP”šRāŅųø"pbĀH‰‡;;E·æ{ķš"P’šR`fa”ä>Æōö*%  fļŽ-ŗżå³g €² Įlo—Üē½ė× €² Į.+UsJ ئŃŃŃdmm­fĒßÜ܌®fwvŠnæ12ā €ŗŲŪŪKΟ?_—s---%mmmŠ5 ü jBč3;;«UvqpPšPKKKŁūīļļWåœķķķuū’ėéé‰ņ”Hmߨšš“0ÕÕŪŻ­DikkK FŒü &6krÜ0?ĪĻłĖ¦©ćĀō“&¢ŗ—w]ø øPcFžDDų)ucdDؘšą7’÷%÷¹88ØPTLų š›O?-ŗ½·»[‘8įŌŁƒķķ’ū,LO+Ē"ü€:»4>®ԌšźģįĪNŃķ7FF €cž@Ķ,,”Üēāą BplĀØ£Ł»w‹nļķīV$NDųuņ`{»ä> ÓÓ Ą‰ N.+5'ü€:yø³Stū‘EąÄ„?P3 %÷¹88ØPœ˜šź`öīŻ¢Ū{»» €Ŗž@=ŲŽ.¹ĻĀō“BPĀرKććŠ@Ż ĘīģŻ~cdD‘ØįŌŠĢĀBÉ}.*UsJ  vfļŽUŽe_€cž4‘––EØ£ŪۊŠ$~óé§ī·MśŽdww7ikks1HmߚÄņņ²"ŌŁ„ńqEh’żįCEhRēϟWRĒČ€&ŌŪŻ]“ćžźüdėWæRąßzø³£ī³Uóó_ž²®ēˆ”V§>÷¹äłŹW¢ØTBųŠ„¦§krÜŪ~˜¼}ū¶ffaAÜg«ŖėĀ…ŗž#†Zµ’ÓM­ ھ@ ĢŽ½[tūē?’yE &„?Pe¶·KīsśŸü… &„?Pe—ĘĒ€†ž@•=ÜŁ)ŗżĘȈ"P3ĀØ¢™……’ū\T(jFųU4{÷nŃķ½ŻŻŠ@M  Jlo—ÜgazZ”Ø)įTÉ„ńqE į„?P%wvŠnæ12¢HԜšŖ`fa”ä> €šž@ĢŽ½[t{ow·"PĀ8”ŪŪ%÷Y˜žV(źBų'ti|\H įœŠĆ¢ŪoŒŒ(u#ü€˜YX(¹ĻÅĮA… n„?p³wļŻŽŪŻ­HŌÕ)%€ćy°½]rŸ…éi…ąXŗ.\({ŪĘ⢂OłĒti|\HįÓƝ¢ŪoŒŒ(u§ķĆĢĀBÉ}.*ŠōZZZźv®żż}HŒü€c™½{·čöŽīnEسååeEHŒü€Š=ŲŽ.¹ĻĀō“B2~åJŅŻÕU“cóĶ7ąįTčŅųø"T(?/½š‚Bԁ¶oP”‡;;E·ßQ$Fų˜YX(¹ĻÅĮA… a„?PŁ»w‹nļķīV$Jųez°½]rŸ…éi… ”„?P¦KććŠ@ź  LwvŠnæ12¢H4œšŹ0³°PrŸ‹ƒƒ @Ć   ³wļŻŽŪŻ­H¤‚šJx°½]rŸ…éi… „?PĀ„ńqE 3„?PĀƝ¢ŪoŒŒ(©!ü€"fJīsqpP”H į1{÷nŃķ½ŻŻŠ@ŖœR8¾…éiE &6Ÿśwׅ ·fä@D„?pL7FF€ŌžĄ1]TRGųĒŠŪŻ­¤Ņ)%€§żĆ?üCÉ}¦§ ȼµµµdttT!"#ü€gü?’ķæ)Š^|ńÅŗėļžīļ’ä… “}€güęÓO‹næ12¢Hśä“O NŒü€CnųaÉ}.*•«ßųF2zéRMŽŻuį‚Ō™‘?pČŪ·oŻŽŪŻ­H¤šš~ė×\rŸ…éi… Մ?š[’Ūw¾£džš>ó»ßśVņŃćĒE÷¹12¢P¤Ž)% Ł•; ńÅĮAÅ õŒü ©•ü|„³S±(ĖĻ~ń‹L~ß¶·]<ˆ„‘?4­rƒŸą'7o*ełę›o&_xī¹ä’zļ½ä‹§Ogā{óŽ=ÜŁqń FžŠ”* ~Ģõ@„Ā,›½{·ģż?ŌRUShŽžb*¹—­ć#ˆFhu#ų MŅ>ŖęOŽ~ŪE€ ˆB~*iu#ų€$yé…"$ü óĀäŲå?_xī9Įuõīµk©ü¾¾>0ąāTĮŹŹJŅŅŅRӅņ |ös]ėėQ넳³Ó…NLų@¦…ą'LŽ]ŽüüՏ¬hŌÕ+½½©ü¾~šĘ.NFLNN*BŁŚŚRąÄ„?dV%ĮĻW:;?4ĢĖgϦźū Dń:„dU%ĮĻOnŽT0ę½ėד® Róż,LO»(UņüéÓÉ_¾’~MŽ¦Ÿ™¬ų?®\I¾łŚkU?īķ?L޾};÷ßµj!ü³_ü"łę›oŗˆ@Uł@Ō?pŌW»ŗ"&ü SžļæżŪ²÷ Y ~H‹?yūķT|—æö5"§ķ™QIė“ü˜Č€4yé…Rń}üŃ믻9#ȄJ‚ŸšD³ą€4 '4Ņ—ŸŽE€& ü õ* ~nŒŒx¢€ŌjōĆ ÓÓ.4į©VišsqpPŃHµ/<÷\ĆĪż;_ś’ M@ų@jUüüł(ų ~rófCĪūę•+ŠMBų±™™™¤§§'iii9ÖŅŁŁ™;@½żść+~¾ŚÕ„pdB£Fß\yķ5Å€&!ü‰Ģčč蓹fll,Y__?ö±¶¶¶rĒ88Žłóēعüœłö·ĖŽ_š@]żĘ7źz¾/?’¼¢@žDbxx8ŠĢĪĪÖģ|šĮ“ hooOсŖūėŠ‚Ÿõ÷ßüI£—.Õõ|j54†š'ćB˜;wīŌõ¼ķķķIæ TM~žķüAE_óÅÓ§€ĢŖēh÷Lh.Ÿ ĮOaeee%<œŌq‚ČŗzĘ¹12¢ŲŠd„?Vnšsīܹdnn.ŁŲŲHö÷÷K.»»»Éāāb244TÖń@ĄIüéOZQšÓŪŻ­hD”^£q.*64įOFµµµÜÖŃŃńTг““”›س³³ģcŸ?>™ŸŸrŒp¼pÜBzzz\ b!ųłĆżØģż7 €Ø¼yåJMļ” hNŸ ŚÜÜL=ztd}kkk.Ø ŪĖ zŹŽŽFå³¾¾žkCP®Ū~(ų é]yķµšazZ‘  2Øææ’Čŗ3gĪŌ%| £‚BĄ”Oµ' ^ß{ēäķŪ·ĖŽ_š@̾ā}4PeŸ ŚŚŚ:²nmm­®ßC¾(ßh$€g…ąēĻīß/{Į±«ÕčœwÆ]S\hRŸ 5ä¼}}}GօÖp…¼:6Vvšó…ēžüоxśtMŽūJoÆā@“ždĢņņņ‘u“““ 
ł^–––ެ›ŸŸw‘€¼Bšó7eÄ!ųł«’XŃh7FFŖz¼Æ (*41įO5×N˜’؞–––š. üؒą'Ģ{ ų Ł\¬źń~šĘŠ‘©©©š½G<šé§Ÿ*tżmÓBqaŽčz]‹F=œģµķõä'ü ¦~÷[ßŖ(ųłÉĶ›Š@SzłģYE ažöæüEh°æŽŲPŖnee„nē A5Ķ%_‡" =N)Aö­­­%=== ‘ær%éīźŖśqņŸ’sņļ’ā/źś’‚Ÿ?.k_ĮĶī½ėד® N|œ?’į32æ÷Łū¤?|żõšū›o¾©Ą)ō'oæ]“ć¾uė֓³juŽ×’ų“ĒŸ|R·’’så÷HåB{Öjś}özüļ’ęß$’ś_ż+ך˜š'cĀpŻg ē zĖ7æO¾ļØL~^zį…Ŗ·ŽOVü„7¾ŚÓ@u|µ‘ŠXm§O×äż!éU«ėżÅĻ~–j}ŽSŸ’|]’?hüĻ•ū/}©ę×ć_üóīšC“Óö-ėėė 9ļåĖ—¬ž?P¹“>•żõĪń¤‡š'õždM›9 ?š““>”ė¾ Ÿ ŗ’~Žõ!ŚŪŪ«łłCš“o“ŃÄĄ‹Mģל¼}ūvŁūßńäqÜŃ;_xī9År„?T¬µZ{{{ŅŁŁY“hrr20j3¶Ķåö‡ę&¦Ė™o»ģÆ ĮO­&·€¬;īĆ?¹ySń€œSJMūūū[½mmmåB ąĢ™3Éččh2<<\ń9677“™™™dvv¶ä¾«««. 4ŸżāÉ[·n%óŁļ‡ć s˜tŠ £xŹ?ļ@˜< ždŲīī§0Jēņå˹„VęęęĢ‘ ­ÜBŲógŚMVźĻųĆä«]] %„Q<æ’Żļ–½’Õo|Cр'„?ÖÖÖVVTKaďąāZ¹U2wO¹?P¾JGńŒ^ŗ¤hĄŸŒ PhZ»•Óž­Z:::rmį€ģ«F+·RÖß?łāéӊ øüµÆ%s÷ī•ÜļĖĻ?ÆXĄS>§qsó„hbb¢¦ē ”Om$ųlūŽ;ļ$].ä–o¾ł¦ąRč^½¬żB‹8€Ć„?‘™œœĢ…@Õ ‚śśśrķŻĀ1CčFŁZ¹żŸļæ’äß՚ç˜0QõĘā¢ąN œQ=īµĄ³“}‹X‚ĀrŲņņr²¶¶–ģķķåżš0Ogg§y| ćźŃŹ-ŸŽöĻ’­_ż*÷ßõć»pB ÓÓÉļ÷»·ßQ$ąįO“éļļĻ-@\~żńǹ°§#z £{BKš‹ƒƒ¹‡Foß¾ķ‚@•üĪ—¾TtūĮ=ą0į@F… efa!łčńćŗž7L>=zé’3P'o^¹’÷įŠŽīnÅņždD£Z¹…–ĀčžÆvu¹ŠW^{-oųZĀä#üH©““rļ+uČ.į@ʼžĒœ<žä“ŗžóė¹ĄG+7H§0ŹēĢ·æżäßļ^»¦(@AĀhRmmmÉ£G"…źü„§‡CŲóŅ /(8dĄ³h¼ŅŪ«(@AŸSŽ£„„åČ299©0ŗ~‚ŸęZ¹žč'7o ~ RFž½½½dffF! ˜ŸŸW„&F÷ōżŽļ%’ųż£ÜæW’ė}²mfa”&ē¬Ē9loGqŽßüżß×ü‡9GzĪńų³kĆ9žūgƓĪńŸVW“’÷ć3łūŠļÜtżĪõ{Żļ\æs÷;×ļõęū{ų’£–¢.//'żżż5ż™­Ē9źł{½–×ćg?ūYņŅK/ÕģųŸ|ņI²¶¶VÓs<łłÅ/¢ų ¤[Ėžžž“ūĶgKŸ’PÖNKĖ‘u©ż“ļ{«„CÆ”Lo*WVVüpl÷ļ߯[x ”å©ʵ}#:gΜ©Ū¹zzz2WŸĪĪN?$œˆąŅMŪ7¢†čRXh‰wēĪ…ČØ”””Ü5lkkS /įĒŅ×w“C %ŁBƒ €²!Œd a§i€r™óšŌęęf2<²nrr²!ßĖŅŅŅ‘uóóó.4š'šk'Ģ’¤‹š "Ÿ¬­­)#üɘžžž#놇‡ņ½ä›ß'ß÷ŌOĖžžžĮ/¶ō)I.ZKĖ‘u‡®cÓ}ŠäžśĄŽČŸX®jž ¦–zzzRHų“A÷ļßĻ»>@{{{5?~Ö×׏¬Ÿ˜˜pq Į“}Ėź…+2ҧ££#Y[[KŚŚŚŖzĪÉÉÉdjjŖąv-ß !“}‹A± ekk+iooĻDa”Īüüü±Ī±¹¹™ŒŽŽęŽ–bĮĻźźŖ‹)`äO†…o!äi“¹¹¹dxxŲ€ĘxjäĻ)õČ®ŠÖmww·”PńFé ķ[ʅ(ŒŽŗzõj]Ļę ēü@ŗ"133“ c&&&jzžś„ŃFa> }„?‘™œœĢ…@Õ ‚śśśrķŻĀ1CčFéŌ>Š’­åĻ–>%‰Ūņņr²¶¶–ģķķåŻŚøuvvjēŁŃņŌ?„?™öTų£ķ@D„?žDDųį@D„?9„мZZZjh_Ø;#"bättt$ĆĆĆ ‘™ššzņß ‘ŁÜÜLīܹć5Mp/ļėėKśūū¼_2dyy9YYYQ¦åА³åšžRI ‰~ü¶ķ[ųc2ܐ€8_ć!ęē“^ćŠ÷ņš”šä䤂€÷ė@†„{÷AČė5N½n-‡’”ķ@D„?žDDųį@D„?žDDųį@D„?žDDųį@D„?žDDųį@D„?žDDųį@D„?žI[[›"@„śśśÜ怔jmmUˆXgg§"ŠPĀhb»»»ÉĶ›7“„„%ŀ-//'Éžž¾b@„zzz’ÅÅÅduuU1 ā÷ėsssÉää¤b@„ööö¼_‡ˆ ēīćŠAC“ŗĮ,¶xD [Z’ĆČ€ˆ""üˆˆš "Ā€ˆ""üˆˆš "Ā€ˆ""üˆˆš "Ā€ˆ""üˆˆš "Ā€ˆ""üˆˆš "Ā€ˆ""üˆˆššĢüü|Ņßߟ“““<µ„uĖĖĖ )¶··—LNN&===GīåaŻččh²¹¹©PQKKKyß«¼_Ū -ūūū’½üŁŅ§$§¶¶¶äŃ£GeķŪŃŃįƒ#ˆéfßŅņŌæŻūŒNīܹSŃ×ōõõy°2 „ŗeæW?077—ūŻÄõū ½½Żż2ś·öIy½SĖĆ’0ņ"ž7£Jž˜ÜŚŚŹ}MųZ ŪĀ( »Āįž\i𬬬ä¾6ŒśŅéüłó¹z+ ~‚Ė—/ē^ćįĆb Ļ?pĀˆXxpjjźŲ_¾Öӄm³³³ŠB›'|@,†ō ­?ųąƒ'|X,‚8~'@5  R333EŸž˜˜HīßæŸ[BĖˆŠź-Ÿp #€ › ‡læ~ChSHh ±øøXÖ½<AšŚŚšĀBJ„@v}}½ąö”””'Æļ°„÷īÅ-ŁæļūĒaĪˆõÅ] ļčĘĘF®§x„_·»»››7Čžļsž@v_æįąRe„'‡ }€äõé~Æ®®}ś?ß| ĢŪ ńżNĢé^—ĻŽŅ÷^›4ÜR’ĆČˆP”&܄Š?ÅnTž&„l½ ­öē@ż„9@ņ  —37Œš £ņéļļW`h°BļĒĆūšRmŸĀūüBļ×ü@öxߣż»ŅNų‘ Oę›06“‹(W哏' !½–––rź†?«1GŠ8łę9wī\Es„ł‚Z[[¬_YYQ`h°|!Mw+Q( Æ} ;̱ Ł%ü! 
„?™B Uņ¤o”‘Cž†ōO’‡ ē`¹pį‚u!…ęå o„ =“qœcÕQ(œ9ĪDļł^įdGøO›§H·g’ž-!m„?™|O† ”+•ļk“’€Ś “Ą?ėĢ™3Ē:V”‡9fff$ßė/ĢåU­ßģčźźŖŚļ ń<0M  "…ž.4w@1…¾¦Š9€“Ė÷Įm9óüœō@}¬ÆÆWķ5īC&Č®|h˜(²Ķ}™4ž@DŖżįP£ĪüOĒyˆć€ö/óqB6…€ĻĪÓ;77§0qĀŅHųÉ7AōÕ«W}¼|mfņØæƧ‹-÷ļßW(ˆT¾ł»ŽŪ&؏½½½dllģ©uÉšš°ā@†xƒ¬ž@äNņ“šI¾h<-Ž ]ņ=¬q\łŹņ2¤[{{ū‘u>D†ģY^^V2Aų‘;ɰSCV ¾ŖłĮp!F@ö…¶QłŒŽŽ*¤TOOĻ‘u ”oōķ³÷鹙ZKKĖSKXWźk”šN)Ä”O  » ĶÓēĆaȶš“ń³m£s|Aŗ_·ėėėO­;wī\ŅŁŁ©8AĻŽ¾ ÷ąš¹\WWWŃÆ £ņFę·¶¶ę¾¦­­MA©# †Š‡MMMå]Æ-d÷ż~ų€h`` ļv-h ½ņ½n=żńN©ąēY=ʵ‚Ģ7*ŖEų‘Øē{kkk )ŚJäcddć}ż³mbĀ>T =+<9\‹‘@uä{Ŗßk8Fzļ'%ü*¶··§Ņ{t±? €øĢĶĶyo)Z­>چ×-·šĄUx­ßæ?·,..–œwSD- >Ģ€ęęų ­# ŁŻŻU$ˆĢåĖ—Ķ)Z5ĪĪĪ>µ®££CūUˆŲÄÄDnd_ųŒ.¼ÖĆ\Śa9ž|®‹NŲö)Ä=jž@†Œö)4ĒO°ŗŗźIˆTU~hĶ é’ožsõBözų:;įa¬RĀ>…Ź ÷ōłłyE¦j„?QįIĀb£}‚šĒ„‰d![ÆėšRxķ“‹ O ‡¹}ŠyńÅ@GÖmll( D`iiéČŗJēń e €ĀØ^ØįDōGbŒēŽ O†'żWVV īśŠ‡?Dųl ÆŻƒv1į)į0Ź/¼¦‹Ķ  ±fff’­­­§Ö;w.o dó5īLJ—ćŽē‡††ņn3ŸÕ"ü€Œ8hńVź‰ĄŠęĶˆS˜C ŲM哜jwŸ;²>ßH€B-ŽFGG‡Ŗž@$ŒĘ€ø…Öm„Z¼…§Ć‡ĀڼAü @Åę’j+ß}śø£€ęuēĪE *„?‰Z !7%4^ø‡Ń>ėėė÷éččČ}ød‚Xh.”•ł&æyó¦ĀEõõõ)5#üHŌ"üÉ÷ę؟ętuuŻ'Lė hNaŽ ½B øšG¹K¾‘{a~æ|ūś{ā`~NjIų‘;ÉĀ|}‰ĆäŃ@ķ…y=ŠĶķ³øø˜ķćFȆ|Žž” ä(Dų‘;Iū—>ųąČŗšAPūūw”^ß-ŽĪŸ?ÆPqžÜ€ę¶··§Ԍš"&y~Vµ'‰UhرB#~ęęę“xƒˆ ¹…ÖŽĻŹ÷ł‡š"RĶžßž<€Ę(ŌĘmuuÕ\Ȱ0jÆŚļ߅Įżżż¹‘¹'Yīßæäøa2ų|ū†óõ•Æ…k->?óą5Õ"üˆś°hrr²ācõōōYēÉØ­šĒć£GŽ¬#~ņŻ›ģČ÷AN¾×{%¾ūūIļćŽ÷S-ĀˆL¾€fjjŖāćlmmYw’łƒ€Ņņż”F ų€²ÆŠ‡C'iż–ÆUĢÕ«Wj ŚÓ-äūŚ3gĪ(4U#üČ h*™: [}V¾6@uå{ųB['ˆGkkė‘uĒ:V¾÷ģA5[A’S”Ļ܎ӆ±P÷žµµ5…¦j„?”|O"|šĮeÜ)tóńĮŌ–?ō ~…ŽS r )“æQ?P[łä#q+©Fūēkżęų‚jž@„ …<—/_N:;;ón[ZZŹż™ļę311”ØPc…ęčĖ7±ģI 1ƒV…ڹ„×f©yĀhžbÆa£~ ¶Āł„nį]ģĮė°-ģ³¾¾žwūIZĮBŽ÷—ūūūO~¾>[ċÓ ¼ ī„?N=‰ ŁŽ8ęk%sčŽDvß.—ßßėŻėā|’Fų@Ņ%|Nöā‹/ŗ“Ź·™‡’aäDlww÷D_/ų€ź«ę<”żŒŒ ~BŪ¶ÕÕÕT¾/€Ć„?±ŠV"Ü@ņõ#-åęĶ›‚ؑš>=ß\•X\\,Ų~؝…{łIęé ļ?Ō’¶oŠ$Ā…żżżūŠ”O©^ć@õ„¹zĘĘĘŹŚ7ŒĪs„€tŸ„ĶĪĪ–µļÜÜ\2<<¬hŌĀSm߄?ŁfĪ€X ""üˆˆš "Ā€ˆ""üˆˆš "Ā€ˆ""üˆˆš "Ā€ˆ""üˆˆš "Ā€ˆ""üˆˆš "Ā€ˆ""üˆˆš "Ā€ˆ""üˆˆš "Ā€ˆ""üˆˆš "Ā€ˆ""üˆˆš "Ā€ˆ""üˆˆš "Ā€ˆ""üų’Ł»’#©“ĄKÕżĻ&‚="XĮš8"ĄĮŚģv˜Ąģ9‚…šE°÷½T‰oü¦ßXZģ $ąr‰™ĖĖĖ/Ā’Q›±æD¼öüü<łū˜Æ`}?~TĄŖ>|ų €Æ$ą"1s,’8æżöŪčϊåćr³€R{P¤f}’ż÷ @Ef_²RĻÜwĻž=S1€Æ$`e¹½!"‰3Uß Ÿ?’üS”4*fü¤öī7ÉXYjoˆWÆ^ĶžÜŪŪŪäĻśé'…А˜-3·#écʐ"łP#AóŻwß%n߀ż’­‰žīxžüłŁļæ’®`€,ÉŲ€Z{CľAĒRū Š.ÉŲ€?Vłœ~ųaš5‡£F»#֊ļ{M,+r(öŠ™F©ĻźŽÜŽF‡×Ś÷ž8†f-„Ž3E·V~ß=Oeõücš^ćˆ2e\Jļ·ä5C³Ź¢ī =Ė8~üńĒYåĻrčQNµåĪ5·łĢ’ūßEĻæ¤ŻĢ©ÓSļ{Īē¬QæJźV‘d?īėjÕ«¹ååTŅODY–~_tĖAõ’x&A;Æä~jĶLŻr?{Šö?¶Dy ]ŪÜŹ©ŗ6÷õ¾.ö6Š GåuówļŽ žėåĖ—»ŻĄ8“qķQVüńGŃ{¢Lc—5‚Cń\āŻ}æ Ü?’?°LžüZSi@ļPå:8)Iś¤Ī»'“Šķ[*ŠĻĆē2Fģ/Ļ#7 śPŒx€Ł”öPėŽs3 ¦&\sķ4īõøĪF wŖ6––óÖ¬Qæ¢ĶŁą<Śē©÷C›SNŻ=L /Õ§NłŽč¾'ĘÖõ-÷³{h’Q’§ŌæH–”ĪҜS¹$5D’?„ītĻå”}Fj€Ļįńžż{T_IžĄŹ?~œüyaj-I“„9n$󜿇j-E““HĢ śŠņŖ½ZLkŒö¤N_š<~7fÄū÷~(fšL-æ%ŪJŖœ÷dśU«Å¬“S%×j•S”ĆŲv²dāg­ŗ¾å~vķ?žÖ(1— ¾”2«Uń·QĶP“½©ż’Žžü€•õ%xŗ%‰j-WKŒĘ?v{{ū͈ÓųYĪqāēāābŌØÕŌ5lQ.įquuÕ;Z7Ź#åp}Ér~ĒÆ9\ÖØ/`śęĶ›Ń×ĮĆTP.~– ,>|ųšģóēĻÉó'F§Ģ H¹¾¾®ö|S3׎¾}ūõæsÉ”øļT›éŽĆĻ8{”ģĮZõ+ÕĘ¢īōµÆŌ²|ajp}J[ģ^I­\9õõń»\Qš¼XŖ.õ?wOQ'Zėg÷ŅžgĶõµŸÜŅ™Qf¹eór‰Ÿ¾ž?Ź'~Ÿ2g†Ü±ć¶×wļ———ƒĢż^XŌĮ!ī€U<~ü8"EĒłłłŻŪ·o«;uŽ÷ļß¾¦;^¼x1é‡Ēēϟ'}Ęõõõąk¦ˆū?žœ‹‹‹æ¼&ž=Tnq}s®)Ź%õžWÆ^—ŃŲĻ~ųšaŃµŻŽŽfŸgéóóęĶąynnnzėĪńs™Sļså:¶ †ĪsyyY|Žhļ%eŽ_d-1ødŸŒÜhŁ£~KöXHŌßśhŁŌhō1ėļG¹¤F÷Ļ]¢&¤FNĒ(ėŅåt¢ŽåfvĪfĖĶl‹Qž%K¦Å ‚›››źĻ&5¢|l¹žšĆßüģüü|š}c6Hßć?kÖÆŌ Ą1åżOI»]J®ļ³¬Uīµcg1ŻUšŪ;å.3 bνo½ŸŻ[ūY‘%żr”YŖķ„Ć%ŠāY¦fl–ž§ėgs3”jīÓß3%3ęr3±ĶŽö@ņNä.±ĢU‰OŸ>} ~uÉ 8Ö B”^rĮ’øßŅ͵S ®9{ŹŲ½–x–¹dćŲ kčSĖ&.!t¼œP÷ÜĒlL¹’¤Ź5öÓJ5?7Õ>¦,97„_8„5ė×\„żĻRR}Xßr™9¹@|iņ 413õŁē®o©ėŁB?»Ēö?f‰Łh;©%ą²¹%źĘ.e  TĀ~Ī>E‡bpɘ}žRKīńļąž‘ü€ŠĄYß:÷„bćš.T#ȝ’Ū3#%ˆ[źŚč— råö2厙éÖ©=>č+ Ję®ē0™ŗĻ)ūwķeŸ­ŌƱrū¾,-×/NyŽQļRß%I9{ķ”<ū1IžCµ¾§ų>ßSū3[Ŗ“ҵŪZÉͩ߄ĘĪž3x`K$ąÄ"Š#äcäséc”|$ĘŽŗRc©¹½µĒŗĖlš~j©€\ķ‘ļ‘™č\S*č_ŗŌSjÉ·ć„h”iē_»ķ¶RæjõMØ>E;M-Y—[ŽŖD*_2”f ;u¾© ¼ŅåѶŚĻī­ż-ÕI%ćŽs‰”©ßū¹āܾeļIF€1$`C"xŽbٜ1³mŽÅ{Ż?ä>[3h>U*‰ŪT"eź 0§ž§ŒŁ{ćX,·xl©½·RĖĖm՚õ+'–ĮÜzā1'•T\ņ½5g=å’SƒżkŲņ~-[o’©²‹ÄfŖ 
·}‹{A^"ń€…–ĄĀfęš­­­Ī’|JŻßߟ”©Ą"ܗæ{÷n˜č“{b€E—ЇÆ%Šh$XXHO kž9<‰HME oß¾9pōNŖä¾üׯ_`šæw—©x’Ń,’Ÿ?VWWĒ *\__V ŹzR‘ē5ł‘>ߙ`Ęq« }žüyøœœœ >}śä`œJ"_‰ĒĒGĄBČlö„–——‡ŸĻ’Dž,¹7ÜĻś—E½?śOā #s’8ÓĘŹŹŹąųųųĶ?¶æ””d ¬7KŪ$¤ģCö%ÉD0ĻrO[z?,XyŸxÆ!€æż” X­’~vvv†³‡ŽŻŻu2Ćff&Ļ6%(ߓļk#ՃRqØDÖ_²ŒŖ^“Ö&™]ŅóŹ»J„’įøUĆ€ž‘ų@ļŸŸ‹>»¼¼<ø½½VŌ™¦|_<Ö×׋’&3v‘”µŁÜÜŌ@o%Įyiiipss£1€‘ų@ļ•Ī ž¤ŸTą)­¤Ó…?~ ö÷÷‹?qq1Ll€>S苼Ģ$@¹æI²O–ÓÓS ¼č?š€>Ėčæ~ż*śl- 4Ć $õ”8>>Vłfäīīnø$©'ļõšf)}7 š‰?ōŚ÷ļߋ?[Ó,āIXz’ž}ŃgÆ®®h˜‘¼Ė;<<Ō@'$žŠk™is½{÷n°³³38==-ś|œjJ\%³¦ĀR¶;ĒčžžžŁĻ-//žüóĻį’ż›Te£ĢŗšļĪR2ūźęęę`uuuø YņļŌ§9®9žłgžūęęfäßåųFslõēZl®‹ü{éuŃ\Ÿłg­ū2*Aņ龤ŸqėœśÆy8›ū¤Zūāy?Ÿš{ÄQēÓÓūÕę8ĢK’ūńęžü¹ūń\+m&R0ĘĖ“Ł|·cśƒ¦ß{ķŚZYYł»Ķr]Õų°}ų$Ē‚ęśyķżĢļǾéW˜’ĒĒĒē–ļŠ›››yü-YNNNŖŚöĖĖĖāmĻg_2‰u”“c>󒿿żĒåååām)Y²Ī6m¹²²2ŃļoöłśśŗÓc[ŪłöŚqnsͽv¾•ø½½}ÜŁŁ™ų1}nɹ“óķįįaāżĪ4ƃi¶_Ś`ÜcÜFśļ.®ń¬ó„±a×Ė8r>¶Ł—ŻŻŻ™Œem¶3ćDś€®śĒ>‹ŲuÕoͲ/~ķ>pZćbŪ{Œiō%Y’¤ŽĆø÷ämnjńīgßr?;ć’¬ßu¼Ö¬ÆÆwŅēM«Ķę­ŸÖū×®©³³³Īī?ŅV9&}x^ž—{žYģ#Š©ĮkĖRŸč³6Up>ž<œå“¦mõ`ß,5VūłöķŪ°rŃŅŅŅąšššÕYvß"ėĢŗ3Ėlf'ž]Žeóż[[[ÆĪZūV™ycccųŸ>}rĮuģųųųļcŗ¶¶V\k\9wr¾½’žļs®¦¾¢TfržVūåŚČu—ļŹņåĖ—gÆÓIõ1éæ»øĘ³Ī¬»Ęćž}?;;+žüׯ_g²żmśĘQU%ō‹«/żWĪįf½]õ[/ķKŚ/ķČtĻ§ČśŸ‡Œ_³č‹»¼'īņ9¢oc¼±L›ĶR¶?ÕZšž ¤2Ņ[ś¼¦ĶŅ÷Lś`ūšIö©?~ģlH[å˜ä{ę±&įąą ų=ŽkĖåå„6ą_$žŠkm“1’ÄńR" eąŃéM:Łē9 VJ`Qs#ćłžĖi|#A.ł^ĮąŻ\ĒiŪ½½½©Ó×ιœ_ žš‡ćŻ“_‚¼fÕ~I:Éu:‰Ąó&p~Z}LĶĒżĆ‡ƒŻŻŻVŸŸöxpqqQōŁ£££įų« ż×ÓsxVŅ~iĒEĘ­į|Ź÷>M6鶙£Łēi%JŒ«Ļc¼±L›ÕšŒžķŸfņ_“4‰žnūšI]CÓīS#ߙkåīīĪ -Ą„üGŠg™Ķv8So©&‘$¶··‡ %5VŌ©M‚ŸTRb}}}€ž\»f=IŽ) o$07K©•••į6$`'ēÉS ąÉ6$Ą·m`T‚©NNNTš€ åų¼%H©9Ērl?¾OsĪ·ęœkū=ł|ĶĒ;ŪōÖ@ßō}iū—ś¾“×[Ś­ <_^^žķK‰/õ1iļ·ŚÜÜīĻsū”s­¹ęGķOsÜk‘ él{ɌķéĻ2¦åo¦qż&ąÆō|k’7õō©’zKæ•õgŪ_Ū‡fŻĶ~“½Wɵ™~`Q×Ę9Ÿš±#Ē#ĢĻõSĶqh[9£yęH•ænżćܗ×4V-ņo,Óf³–¾„d›ń§éóžk·“×8ż]®ÕQõį“y>(}VrßńÖw49ļR(Õk¼S˜€Ź€ŁŻŻ}Ģcš$–åååĒĒ“““LJ‡‡ź÷½tæ.//_\ĒęęęXm¶¾¾žx}}ż¦ķO[Oźø½¹ŪnĒķķķ‹ėJ[—®gZJ·)ēĀkJĻ•×Ī·Hūµ=Ē^kó¶m±²²Ņźūۜ[]ū6ēW³ģļļŻniƒ\gm¾w{{»hŻŁ¾¶ū”kvœ>ś-ߣöz™”¶×Ė[ūć6rM–öĶ‹ŅŒŪ?öyģčc’•u¶Y×$ī)³Ž¬«Ķ½Q ÷²„żmŪc¾®ķqĶ3Ćøm’æ˳GÉž{Ož\ߙmĶłšė/ˤśŅ.ĒŪyćŻĻ¶śÜf“6‰ž`Ü14żlé=_Ū{Ń>öį“x73‰¾’ččh¬6Ź»°¶ßŁę:­ķ~wŚÜOā^ØĘąµEā ć-Į¤ćŠ%bځsϾ˜aāO‚ņ&”mŲļĒcRA­YOišŃkĮF'ž”®'Įy]-ē¼+=Žm<ŗ<öm‚Ō²o]$€äx“ œŌ9Õ,é{'é-A‡ÓNü‰6x¹nŗ”`Āq?ūŲHüYœž«ō¾„ĖěŅ ®ūƒ]$ž“é‡JīK¦5†LzLģ*ÉlRś>Ę»ŸtvNĢc›Ķśśé:a©MāéØmčk>ĖğI¾yĖ9X:įC×ĻĖ}|)ńzåÕğ?Ō<`Qüõ×_Çᓓ“Næēęęfšõė×Įēϟkkkƒ„„„,ļŽ½nĖĮĮĮąū÷ļ½lėåååĮĆĆĆąŪ·oYßźźźąččØõßåX’ųńcŲꓐõœŸŸ}öāābšóēOŽäŗøŗŗł¹õõõĮŻŻŻÄŽļsē]Ο%ŪŪµOŸ> ūžgggĆóóĻ?’œųväx¤Żņ„m÷įƇūķŅ¶ŻŁŁöńi‡IJ„­ŽŅMūų§ JÜßß¾|łŅÉväšÜŪŪ+;õ‹§oż×ńńqŃßē>©«ó7÷+»»»EżĮ¤ī×j‘{ģŅ~hsss8vd¼éB3†”‹ęœźb[2†e?s^tuĪ½åŁ¬Ļc¼±L›ÕØéó^ŗ÷wܹ¼¼,žüĒŸmg}ųäåøLņ½ČÓkv{{»ųżH®[ŽNā 'c IbJiĒ$żśõkrxx8ŲŚŚśWbP=ÆAIśIp̤ƒJŚś%€oŅĮŃ&§Æ‰]]+m·i*ēzL@Ł<ōk§§§E×gś¾.‚ż~—ļ( žK ŲļņéKƒ)“@ŅõłķiĢ8 iƒ•••¢Ļ&É¢4صķq/‘ ī—śh}ĄāŻ—õ­’*½¶ŗNĄČv„ŻF)Mjžér]ŚMė^-Ēbæč³ū&u›1!×MWÉžćŒ©}ćeڬ6Óčóņ¬|{{ūęgk}ųd5÷N]&#•&;÷ķ~`$ž°°l™ …¦,n"X–„g)3Æ' ūiµ jĢKIWĮm‚cs » p, ė"Ø~”œė9]T{˜G + šoĪÉiĪōŸ¾«4H.³{?ķGJ«t•ä÷ŅžŌ^ł§ĶX1éŠō»©z7JŃ_ ŚÕčæę¹’ŠŅ„†iō]IĘnīu_Zśˆ›¤łTÜ,‘ Ó®t”ó¤“2[®‹qļ#Ó×vYeœg…Ećeڬ&£ī½&)IS„•ó2)J3†źĆ'o÷Nm’äTDÄų’‘Ą²G< †LBPWRhVIA mŖ%Š©Öj@ ųčr6ŁŅ€’iT ; ś¤<­Dø'•pĻJi’[‚Y˜5m ’+©8‘Ąæ&ų¼tŸŗNņ{©½g śšć$±–H¢é¤Ś/cgi ÷ØėW°8śŲµ¹_É5˜{»i//śł”c:«d§6Ēś÷„²·ōĖó|œę}Œ7–i³šL{œÉóx’sJ4~ōį“•÷WÓŗw*}¤"2Ąx$žĄIJąJŖżžŌ,———Ć Ō·”ˆŽ#ÕR (ŪU[P—I?óøĢ·&½’¾źmL攀óÓž}Ėw'˜.| †/Ż’š÷gV’ÄZ:#y’u&1–”&\&)i }ķęYūÆ·öM©p ÜŪ%PøÖĶ...Ŗ; Ę.Qŗ?/}Gm•~Āo,ÓfÓ×õ䓸–2†źĆ'Ėd(żóMćK M–—‚[2cpŻD”„4ąķ5 –MPOfüņ“ v¹“Ä÷Ü?“X7OJg».­>ŃeߊĢę=źŲ“™I{V}NkØ_³“céx“ĄĄq*Cdģ+łž$#„ķōōµ’j¤:c’ĀS­±ķ½]¾ė÷ļKRysƙ%ėēß}^›>|–Ņē¶©ö–±®Ö€oc¼±L›Ķ¦Ļ™Õų¹²²RtŲęš[„>ž#ń¦ 3n'@å¹ •¾¶™įöw :IĄR‚J`Ń4ē~ž™ŁžÆ®®j’K÷7ŸK5‰y؟Ҫi³NX__Æ>H4×FDGÉ~$ō-U2†•?&š³MĄ·>@’5ż×S ō}xxžsܾ" A¹O|ķ^1}b‚‘51Øō>8Õ9g­Ķńé[ŠxéqZ“1ŽX¦Ķjés&-U'1é‰>¼= 
DHARMa/vignettes/DHARMaForBayesians.Rmd0000644000176200001440000002276414665273541017242 0ustar liggesusers---
title: "DHARMa for Bayesians"
author: "Florian Hartig, Theoretical Ecology, University of Regensburg [website](https://www.uni-regensburg.de/biologie-vorklinische-medizin/theoretische-oekologie/mitarbeiter/hartig/)"
date: "`r Sys.Date()`"
output:
  rmarkdown::html_vignette:
    toc: true
vignette: >
  %\VignetteIndexEntry{DHARMa for Bayesians}
  \usepackage[utf8]{inputenc}
  %\VignetteEngine{knitr::rmarkdown}
abstract: "The 'DHARMa' package uses a simulation-based approach to create readily interpretable scaled (quantile) residuals for fitted (generalized) linear mixed models. This vignette describes how to use DHARMa for checking Bayesian models.
It is recommended to read this AFTER the general DHARMa vignette, as all comments made there (in particular regarding the interpretation of the residuals) also apply to Bayesian models. \n \n \n"
editor_options:
  chunk_output_type: console
---

```{r global_options, include=FALSE}
knitr::opts_chunk$set(fig.width=8.5, fig.height=5.5, fig.align='center', warning=FALSE, message=FALSE)
```

```{r, echo = F, message = F}
library(DHARMa)
set.seed(123)
```

# Basic workflow

In principle, DHARMa residuals can be calculated and interpreted for Bayesian models in very much the same way as for frequentist models. Therefore, all comments regarding tests, residual interpretation etc. from the main vignette are equally valid for Bayesian model checks. There are some minor differences regarding the expected null distribution of the residuals, in particular in the low data limit, but I believe that for most people, these are of less concern.

The main difference for a Bayesian user is that, unlike for users of directly supported regression packages such as lme4 or glmmTMB, most Bayesian users will have to create the simulations for the fitted model themselves and then feed them into DHARMa by hand. The basic workflow for Bayesians that work with BUGS, JAGS, STAN or similar is:

1. Create posterior predictive simulations for your model
2. Read these in with the createDHARMa function
3. Interpret those as described in the main vignette

This is easier than it sounds. For the major Bayesian samplers (e.g. BUGS, JAGS, STAN), it amounts to adding a block with data simulations to the model, and observing those during the MCMC sampling. Then, feed the simulations into DHARMa via createDHARMa, and all else will work pretty much the same as in the main vignette.

## Example in Jags

Here is an example, with JAGS

```{r, eval = F}
library(rjags)
library(BayesianTools)

set.seed(123)

dat <- DHARMa::createData(200, overdispersion = 0.2)

Data = as.list(dat)
Data$nobs = nrow(dat)
Data$nGroups = length(unique(dat$group))

modelCode = "model{

  for(i in 1:nobs){
    observedResponse[i] ~ dpois(lambda[i])      # poisson error distribution
    lambda[i] <- exp(eta[i])                    # inverse link function
    eta[i] <- intercept + env*Environment1[i]   # linear predictor
  }

  intercept ~ dnorm(0,0.0001)
  env ~ dnorm(0,0.0001)

  # Posterior predictive simulations
  for (i in 1:nobs) {
    observedResponseSim[i] ~ dpois(lambda[i])
  }

}"

jagsModel <- jags.model(file = textConnection(modelCode), data = Data, n.chains = 3)
para.names <- c("intercept","env", "lambda", "observedResponseSim")
Samples <- coda.samples(jagsModel, variable.names = para.names, n.iter = 5000)

x = BayesianTools::getSample(Samples)

colnames(x) # problem: all the variables are in one array - this is better in STAN, where this is a list - have to extract the right columns by hand

posteriorPredDistr = x[,3:202]  # this is the uncertainty of the mean prediction (lambda)
posteriorPredSim = x[,203:402]  # these are the simulations

sim = createDHARMa(simulatedResponse = t(posteriorPredSim), observedResponse = dat$observedResponse, fittedPredictedResponse = apply(posteriorPredDistr, 2, median), integerResponse = T)
plot(sim)
```

In the created plots, you will see overdispersion, which is completely expected, as the simulated data have overdispersion and a RE, neither of which is accounted for by the JAGS model.

## Exercise

As an exercise, you could now:

* Add a RE
* Account for overdispersion, e.g. via an OLRE or a negative binomial

And check how the residuals improve. A sketch of one possible extension is shown below.
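
The following sketch shows one way the model block could be extended to address both exercise points, adding a group-level RE and an observation-level random effect (OLRE) for the overdispersion. This is an illustration added here, not part of the original example: the names `modelCode2`, `OLRE` and `tauOLRE` as well as the gamma priors are arbitrary choices, and the posterior predictive block is kept conditional on the fitted random effects (see the next section for why this distinction matters).

```{r, eval = F}
# Possible extension of the model above (illustrative; names and priors are arbitrary choices).
# Note: the grouping variable must be passed to JAGS as an integer index,
# e.g. Data$group = as.numeric(dat$group)
modelCode2 = "model{

  for(i in 1:nobs){
    observedResponse[i] ~ dpois(lambda[i])
    lambda[i] <- exp(eta[i])
    eta[i] <- intercept + env*Environment1[i] + RE[group[i]] + OLRE[i]
    OLRE[i] ~ dnorm(0, tauOLRE)    # observation-level RE absorbs overdispersion
  }

  for(j in 1:nGroups){
    RE[j] ~ dnorm(0, tauRE)        # group-level RE
  }

  intercept ~ dnorm(0,0.0001)
  env ~ dnorm(0,0.0001)
  tauRE ~ dgamma(0.001,0.001)
  tauOLRE ~ dgamma(0.001,0.001)

  # Posterior predictive simulations, here conditional on the fitted REs
  for (i in 1:nobs) {
    observedResponseSim[i] ~ dpois(lambda[i])
  }

}"
```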
# Conditional vs. unconditional simulations in hierarchical models

The most important consideration in using DHARMa with Bayesian models is how to create the simulations. You can see in my JAGS code that the block

```{r, eval=F}
  # Posterior predictive simulations
  for (i in 1:nobs) {
    observedResponseSim[i] ~ dpois(lambda[i])
  }
```

performs the posterior predictive simulations. Here, we just take the predicted lambda (mean predictions) during the MCMC simulations and sample from the assumed distribution. This will work for any non-hierarchical model.

When we move to hierarchical or multi-level models, including GLMMs, the issue of simulation becomes a bit more complicated. In a hierarchical model, there are several random processes that sit on top of each other. In the same way as explained in the main vignette at the point conditional / unconditional simulations, we will have to decide which of these random processes should be included in the posterior predictive simulations.

As an example, imagine we add a RE in the likelihood of the previous model, to account for the group structure in the data.

```{r, eval = F}
  for(i in 1:nobs){
    observedResponse[i] ~ dpois(lambda[i])      # poisson error distribution
    lambda[i] <- exp(eta[i])                    # inverse link function
    eta[i] <- intercept + env*Environment1[i] + RE[group[i]]   # linear predictor
  }

  for(j in 1:nGroups){
    RE[j] ~ dnorm(0,tauRE)
  }
```

The predictions lambda[i] now depend on a lower-level stochastic effect, which is described by RE[j] ~ dnorm(0,tauRE). We can now decide to create posterior predictive simulations conditional on the posterior estimates of RE[j] (conditional simulations), in which case we would have to change nothing in the block for the posterior predictive simulations. Alternatively, we can decide that we want to re-simulate the RE (unconditional simulations), in which case we have to copy the entire structure of the likelihood in the predictions

```{r, eval=F}
  for(j in 1:nGroups){
    RESim[j] ~ dnorm(0,tauRE)
  }

  for (i in 1:nobs) {
    observedResponseSim[i] ~ dpois(lambdaSim[i])
    lambdaSim[i] <- exp(etaSim[i])
    etaSim[i] <- intercept + env*Environment1[i] + RESim[group[i]]
  }
```

Essentially, you can remember that if you want full (unconditional) simulations, you basically have to copy the entire likelihood of the hierarchical model, minus the priors, and sample along the hierarchical model structure. If you want to condition on a part of this structure, just cut the DAG at the point on which you want to condition.

# Statistical differences between Bayesian vs. MLE quantile residuals

A common question is whether there are differences between Bayesian and MLE quantile residuals. First of all, note that MLE and Bayesian quantile residuals are not exactly identical. The main difference is in how the simulations of the data under the fitted model are performed:

* For models fitted by MLE, simulations in DHARMa are performed with the MLE (point estimate).
* For models fitted with Bayes, simulations are practically always performed while also drawing from the posterior parameter uncertainty (as a point estimate is not available).

Thus, Bayesian posterior predictive simulations include the parametric uncertainty of the model, in addition to the sampling uncertainty. From this we can directly conclude that Bayesian and MLE quantile residuals are asymptotically identical (and via the usual arguments uniformly distributed), but become more different the smaller n becomes.

To examine what those differences are, let's imagine that we start with a situation of infinite data.
In this case, we have a "sharp" posterior that can be viewed as identical to the MLE. If we reduce the number of data, there are two things happening 1. The posterior gets wider, with the likelihood component being normally distributed, at least initially 2. The influence of the prior increases, the faster the stronger the prior is. Thus, if we reduce the data, for weak / uninformative priors, we will simulate data while sampling parameters from a normal distribution around the MLE, while for strong priors, we will effectively sample data while drawing parameters of the model from the prior. In particular in the latter case (prior dominates, which can be checked via prior sensitivity analysis), you may see residual patterns that are caused by the prior, even though the model structure is correct. In some sense, you could say that the residuals check if the combination of prior + structure is compatible with the data. It's a philosophical debate how to react on such a deviation, as the prior is not really negotiable in a Bayesian analysis. Of course, also the MLE distribution might get problems in low data situations, but I would argue that MLE is usually only used anyway if the MLE is reasonably sharp. In practice, I have self experienced problems with MLE estimates. It's a bit different in the Bayesian case, where it is possible and often done to fit very complex models with limited data. In this case, many of the general issues in defining null distributions for Bayesian p-values (as, e.g., reviewed in [Conn et al., 2018](https://esajournals.onlinelibrary.wiley.com/doi/10.1002/ecm.1314)) apply. I would add though that while I find it important that users are aware of those differences, I have found that in practice these issues are small, and usually overruled by the much stronger effects of model error. DHARMa/vignettes/DHARMa.Rmd0000644000176200001440000017254014704245735014742 0ustar liggesusers--- title: "DHARMa: residual diagnostics for hierarchical (multi-level/mixed) regression models" author: "Florian Hartig, Theoretical Ecology, University of Regensburg [website](https://www.uni-regensburg.de/biologie-vorklinische-medizin/theoretische-oekologie/mitarbeiter/hartig/)" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true pdf_document: toc: true vignette: > %\VignetteIndexEntry{The import package} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} abstract: "The 'DHARMa' package uses a simulation-based approach to create readily interpretable scaled (quantile) residuals for fitted generalized linear (mixed) models. Currently supported are linear and generalized linear (mixed) models from 'lme4' (classes 'lmerMod', 'glmerMod'), 'glmmTMB', 'GLMMadaptive' and 'spaMM'; phylogenetic linear models from 'phylolm' (classes 'phylolm' and 'phyloglm'); generalized additive models ('gam' from 'mgcv'); 'glm' (including 'negbin' from 'MASS', but excluding quasi-distributions) and 'lm' model classes. Moreover, externally created simulations, e.g. posterior predictive simulations from Bayesian software such as 'JAGS', 'STAN', or 'BUGS' can be processed as well. The resulting residuals are standardized to values between 0 and 1 and can be interpreted as intuitively as residuals from a linear regression. The package also provides a number of plot and test functions for typical model misspecification problems, such as over/underdispersion, zero-inflation, and residual spatial, temporal and phylogenetic autocorrelation. 
\n \n \n" editor_options: chunk_output_type: console --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=6.5, fig.height=4.5, fig.align='center', warning=FALSE, message=FALSE, cache = T) ``` ```{r, echo = F, message = F} library(DHARMa) set.seed(123) ``` # Motivation The interpretation of conventional residuals for generalized linear (mixed) and other hierarchical statistical models is often problematic. As an example, here the result of conventional Deviance, Pearson and raw residuals for two Poisson GLMMs, one that is lacking a quadratic effect, and one that fits the data perfectly. Could you tell which is the correct model? ```{r, echo = F, fig.width=6, fig.height=3} library(lme4) overdispersedData = createData(sampleSize = 250, overdispersion = 0, quadraticFixedEffects = -2, family = poisson()) fittedModelOverdispersed <- glmer(observedResponse ~ Environment1 + (1|group) , family = "poisson", data = overdispersedData) plotConventionalResiduals(fittedModelOverdispersed) testData = createData(sampleSize = 250, intercept = 0, overdispersion = 0, family = poisson(), randomEffectVariance = 0) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) , family = "poisson", data = testData) plotConventionalResiduals(fittedModel) ``` Just for completeness - it was the second one. But don't get too excited if you got it right. Probably you were just lucky - I can't really tell a difference. But even so, would you have added a quadratic effect, instead of adding an overdispersion correction? The point here is that misspecifications in GL(M)Ms cannot reliably be diagnosed with standard residual plots, and thus GLMMs are often not as thoroughly checked as they should. One reason why GL(M)Ms residuals are harder to interpret is that the expected distribution of the data (aka predictive distribution) changes with the fitted values. Reweighting with the expected dispersion, as done in Pearson residuals, or using deviance residuals, helps to some extent, but it does not lead to visually homogenous residuals, even if the model is correctly specified. As a result, standard residual plots, when interpreted in the same way as for linear models, seem to show all kind of problems, such as non-normality, heteroscedasticity, even if the model is correctly specified. Questions on the R mailing lists and forums show that practitioners are regularly confused about whether such patterns in GL(M)M residuals are a problem or not. But even experienced statistical analysts currently have few options to diagnose misspecification problems in GLMMs. In my experience, the current standard practice is to eyeball the residual plots for major misspecifications, potentially have a look at the random effect distribution, and then run a test for overdispersion, which is usually positive, after which the model is modified towards an overdispersed / zero-inflated distribution. This approach, however, has a number of drawbacks, notably: - Overdispersion is often the result of missing predictors or a misspecified model structure. Standard residual plots make it difficult to identify these problems by examining residual correlations or patterns of residuals against predictors. - Not all overdispersion is the same. For count data, the negative binomial creates a different distribution than adding observation-level random effects to the Poisson. 
Once overdispersion is corrected for, such violations of distributional assumptions are not detectable with standard overdispersion tests (because the tests only looks at total dispersion), and nearly impossible to see visually from standard residual plots. - Dispersion frequently varies with predictors (heteroscedasticity). This can have a significant effect on the inference. While it is standard to tests for heteroscedasticity in linear regressions, heteroscedasticity is currently hardly ever tested for in GLMMs, although it is likely as frequent and influential. - Moreover, if residuals are checked, they are usually checked conditional on the fitted random effect estimates. Thus, standard checks only check the final level of the random structure in a GLMM. One can perform extra checks on the random effects, but it is somewhat unsatisfactory that there is no check on the entire model structure. DHARMa aims at solving these problems by creating readily interpretable residuals for generalized linear (mixed) models that are standardized to values between 0 and 1, and that can be interpreted as intuitively as residuals for the linear model. This is achieved by a simulation-based approach, similar to the Bayesian p-value or the parametric bootstrap, that transforms the residuals to a standardized scale. The basic steps are: 1. Simulate new response data from the fitted model for each observation. 2. For each observation, calculate the empirical cumulative density function for the simulated observations, which describes the possible values (and their probability) at the predictor combination of the observed value, assuming the fitted model is correct. 3. The residual is then defined as the value of the empirical density function at the value of the observed data, so a residual of 0 means that all simulated values are larger than the observed value, and a residual of 0.5 means half of the simulated values are larger than the observed value. These steps are visualized in the following figure The key advantage of this definition is that the so-defined residuals always have the same, known distribution, independent of the model that is fit, if the model is correctly specified. To see this, note that, if the observed data was created from the same data-generating process that we simulate from, all values of the cumulative distribution should appear with equal probability. That means we expect the distribution of the residuals to be flat, regardless of the model structure (Poisson, binomial, random effects and so on). I currently prepare a more exact statistical justification for the approach in an accompanying paper, but if you must provide a reference in the meantime, I would suggest citing - Dunn, K. P., and Smyth, G. K. (1996). Randomized quantile residuals. Journal of Computational and Graphical Statistics 5, 1-10. - Gelman, A. & Hill, J. Data analysis using regression and multilevel/hierarchical models Cambridge University Press, 2006 p.s.: DHARMa stands for "Diagnostics for HierArchical Regression Models" - which, strictly speaking, would make DHARM. But in German, Darm means intestines; plus, the meaning of DHARMa in Hinduism makes the current abbreviation so much more suitable for a package that tests whether your model is in harmony with your data: > From Wikipedia, 28/08/16: In Hinduism, dharma signifies behaviours that are considered to be in accord with rta, the order that makes life and universe possible, and includes duties, rights, laws, conduct, virtues and 'right way of living'. 
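
Before moving on to the workflow, the three simulation steps described above can be made concrete with a few lines of plain R. This is a purely didactic sketch, not the DHARMa implementation (in particular, it omits the additional randomization that DHARMa applies to integer responses, see the simulation options below); the fitted mean and the observed value are made-up numbers.

```{r, eval = F}
# didactic sketch of the scaled (quantile) residual for a single observation
set.seed(1)
lambda_i   <- 4                    # assume the fitted model predicts a mean of 4 for observation i
observed_i <- 9                    # the observed count
sims <- rpois(250, lambda_i)       # step 1: simulate new response data for observation i
res_i <- ecdf(sims)(observed_i)    # steps 2 + 3: empirical cumulative distribution, evaluated at the observation
res_i                              # close to 1, i.e. the observation is larger than nearly all simulations
```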
# Workflow in DHARMa

## Installing, loading and citing the package

If you haven't installed the package yet, either run

```{r, eval = F}
install.packages("DHARMa")
```

Or follow the instructions on [the DHARMa GitHub page](https://github.com/florianhartig/DHARMa) to install a development version.

Loading and citation

```{r}
library(DHARMa)
citation("DHARMa")
```

## Calculating scaled residuals

Let's assume we have a fitted model that is supported by DHARMa.

```{r}
testData = createData(sampleSize = 250)
fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) , family = "poisson", data = testData)
```

Most functions in DHARMa can be calculated directly on the fitted model object. For example, if you are only interested in testing for dispersion problems, you could run

```{r, results = "hide", fig.show='hide'}
testDispersion(fittedModel)
```

In this case, the randomized quantile residuals are calculated on the fly inside the function call. If you work in this way, however, residual calculation will be repeated by every test / plot you call, and this can take a while. It is therefore highly recommended to first calculate the residuals once, using the simulateResiduals() function

```{r}
simulationOutput <- simulateResiduals(fittedModel = fittedModel, plot = F)
```

which calculates randomized quantile residuals according to the algorithm discussed above. The function returns an object of class DHARMa, containing the simulations and the scaled residuals, which can later be passed on to all other plot and test functions. When specifying the optional argument plot = T, the standard DHARMa residual plot is displayed directly. The interpretation of the plot will be discussed below.

Using the simulateResiduals function has the added benefit that you can modify the way in which residuals are calculated. For example, you may want to change the number of simulations, or the REs to condition on. See ?simulateResiduals and the section "simulation options" below for details.

The calculated (scaled) residuals can be plotted and tested via a number of DHARMa functions (see below), or accessed directly via

```{r, results = "hide"}
residuals(simulationOutput)
```

To interpret the residuals, remember that a scaled residual value of 0.5 means that half of the simulated data are higher than the observed value, and half of them lower. A value of 0.99 would mean that nearly all simulated data are lower than the observed value. The minimum/maximum values for the residuals are 0 and 1. For a correctly specified model we would expect asymptotically

- a uniform (flat) distribution of the scaled residuals
- uniformity in y direction if we plot against any predictor.

Note: the uniform distribution is the only difference to "conventional" residuals as calculated for a linear regression. If you cannot get used to this, you can transform the uniform distribution to another distribution, for example normal, via

```{r, eval = F}
residuals(simulationOutput, quantileFunction = qnorm, outlierValues = c(-7,7))
```

These normal residuals will behave exactly like the residuals of a linear regression. However, for reasons of a) numeric stability with a low number of simulations, which makes it necessary to decide to which values outliers are transformed, and b) my conviction that it is much easier to visually detect deviations from uniformity than from normality, DHARMa checks all residuals in the uniform space, and I would personally advise against using the transformation.
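
If you do use the transformation nevertheless, the transformed residuals can be inspected with the usual tools for normal residuals. A minimal sketch, continuing with the simulationOutput object created above:

```{r, eval = F}
resNorm <- residuals(simulationOutput, quantileFunction = qnorm, outlierValues = c(-7,7))
qqnorm(resNorm); qqline(resNorm)   # standard normal qq-plot of the transformed residuals
```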
## Plotting the scaled residuals

The main plot function for the calculated DHARMa object produced by simulateResiduals() is the plot.DHARMa() function

```{r}
plot(simulationOutput)
```

The function creates two plots, which can also be called separately, and which provide extended explanations / examples in their help

```{r, eval = F}
plotQQunif(simulationOutput) # left plot in plot.DHARMa()
plotResiduals(simulationOutput) # right plot in plot.DHARMa()
```

- plotQQunif (left panel) creates a qq-plot to detect overall deviations from the expected distribution, by default with added tests for correct distribution (KS test), dispersion and outliers. Note that outliers in DHARMa are by default defined as values outside the simulation envelope, not in terms of a particular quantile. Thus, which values will appear as outliers will depend on the number of simulations. If you want outliers in terms of a particular quantile, you can use the outliers() function.

- plotResiduals (right panel) produces a plot of the residuals against the predicted value (or alternatively, another variable). Simulation outliers (data points that are outside the range of simulated values) are highlighted as red stars. These points should be carefully interpreted, because we actually don't know "how much" these values deviate from the model expectation. Note also that the probability of an outlier depends on the number of simulations, so whether the existence of outliers is a reason for concern depends also on the number of simulations.

To provide a visual aid in detecting deviations from uniformity in y-direction, the plot function calculates an (optional default) quantile regression, which compares the empirical 0.25, 0.5 and 0.75 quantiles in y direction (red solid lines) with the theoretical 0.25, 0.5 and 0.75 quantiles (dashed black line), and provides a p-value for the deviation from the expected quantile. The significance of the deviation from the expected quantiles is tested and displayed visually, and can be additionally extracted with the testQuantiles function.

By default, plotResiduals plots against predicted values. However, you can also use it to plot residuals against specific other predictors (highly recommended).

```{r, eval = F}
plotResiduals(simulationOutput, form = YOURPREDICTOR)
```

If the predictor is a factor, or if there is just a small number of observations on the x axis, plotResiduals will plot a box plot with additional tests instead of a scatter plot.

```{r, eval = F}
plotResiduals(simulationOutput, form = testData$group)
```

See ?plotResiduals for details, but very shortly: under H0 (perfect model), we would expect those boxes to range homogeneously from 0.25-0.75. To see whether there are deviations from this expectation, the plot calculates a test for uniformity per box, and a test for homogeneity of variances between boxes. A positive test will be highlighted in red.

## Goodness-of-fit tests on the scaled residuals

To support the visual inspection of the residuals, the DHARMa package provides a number of specialized goodness-of-fit tests on the simulated residuals:

- testUniformity() - tests if the overall distribution conforms to expectations.
- testOutliers() - tests if there are more simulation outliers than expected.
- testDispersion() - tests if the simulated dispersion is equal to the observed dispersion.
- testQuantiles() - fits a quantile regression of residuals against a predictor (default predicted value), and tests if this conforms to the expected quantiles.
- testCategorical(simulationOutput, catPred = testData\$group) tests residuals against a categorical predictor. - testZeroinflation() - tests if there are more zeros in the data than expected from the simulations. - testGeneric() - test if a generic summary statistics (user-defined) deviates from model expectations. - testTemporalAutocorrelation() - tests for temporal autocorrelation in the residuals. - testSpatialAutocorrelation() - tests for spatial autocorrelation in the residuals. Can also be used with a generic distance function. - testPhylogeneticAutocorrelation() - tests for phylogenetic signal in the residuals. See the help of the functions and further comments below for a more detailed description. ## Simulation options There are a few important technical details regarding how the simulations are performed, in particular regarding the treatments of random effects and integer responses. It is strongly recommended to read the help of ```{r, eval = F} ?simulateResiduals ``` #### Refit ```{r, eval= F} simulationOutput <- simulateResiduals(fittedModel = fittedModel, refit = T) ``` - if refit = F (default), new datasets are simulated from the fitted model, and residuals are calculated by comparing the observed data to the new data - if refit = T, a parametric bootstrap is performed, meaning that the model is refit to all new datasets, and residuals are created by comparing observed residuals against refitted residuals The second option is much much slower, and also seemed to have lower power in some tests I ran. **It is therefore not recommended for standard residual diagnostics!** I only recommend using it if you know what you are doing, and have particular reasons, for example if you estimate that the tested model is biased. A bias could, for example, arise in small data situations, or when estimating models with shrinkage estimators that include a purposeful bias, such as ridge/lasso, random effects or the splines in GAMs. My idea was then that simulated data would not fit to the observations, but that residuals for model fits on simulated data would have the same patterns/bias than model fits on the observed data. Note also that refit = T can sometimes run into numerical problems, if the fitted model does not converge on the newly simulated data. #### Conditinal vs. unconditinal simulations The second option is the treatment of the stochastic hierarchy. In a hierarchical model, several layers of stochasticity are placed on top of each other. Specifically, in a GLMM, we have a lower level stochastic process (random effect), whose result enters into a higher level (e.g. Poisson distribution). For other hierarchical models, such as state-space models, similar considerations apply, but the hierarchy can be more complex. When simulating, we have to decide if we want to re-simulate all stochastic levels, or only a subset of those. For example, in a GLMM, it is common to only simulate the last stochastic level (e.g. Poisson) conditional on the fitted random effects, meaning that the random effects are set on the fitted values. For controlling how many levels should be re-simulated, the simulateResidual function allows to pass on parameters to the simulate function of the fitted model object. Please refer to the help of the different simulate functions (e.g. ?simulate.merMod) for details. 
For merMod (lme4) model objects, the relevant parameters are "use.u", and "re.form", as, e.g., in ```{r, eval= F} simulationOutput <- simulateResiduals(fittedModel = fittedModel, n = 250, use.u = T) ``` If the model is correctly specified and the fitting procedure is unbiased (disclaimer: GLMM estimators are not always unbiased), the simulated residuals should be flat regardless how many hierarchical levels we re-simulate. The most thorough procedure would be therefore to test all possible options. If testing only one option, I would recommend to re-simulate all levels, because this essentially tests the model structure as a whole. This is the default setting in the DHARMa package. A potential drawback is that re-simulating the random effects creates more variability, which may reduce power for detecting problems in the upper-level stochastic processes, in particular overdispersion (see section on dispersion tests below). *Note:* Although unconditional residuals implicitly also test the normal distribution of the REs, it is probably not a bad idea to look addditionally check for normality of the RE distribution. As this is not based on quantile residuals, there is no special DHARMa function for this, so you should just extract the REs, and then run e.g. a Shapiro test. #### Integer treatment / randomization A third option is the treatment of integer responses. The background of this option is that, for integer-valued variables, some additional steps are necessary to make sure that the residual distribution becomes flat (essentially, we have to smoothen away the integer nature of the data). The idea is explained in - Dunn, K. P., and Smyth, G. K. (1996). Randomized quantile residuals. Journal of Computational and Graphical Statistics 5, 1-10. DHARMa currently implements two procedures for randomization. The default procedure will randomize automatically. The second option requires knowledge about whether the model is integer-valued, which is usually implemented automatically. See ?simulateResiduals for details. Usually, these options should simply be kept at their defaults. #### Calculating residuals for groups or subsets In many situations, it can be useful to look at residuals per group, e.g. to see how much the model over / underpredicts per plot, year or subject. To do this, use the recalculateResiduals() function, together with a grouping variable (group) or a subsetting variable (sel), which can also be used in combination. ```{r, eval= F} simulationOutput = recalculateResiduals(simulationOutput, group = testData$group) ``` Note, however, that you will have to change the selection of variables that you provide to plots and tests (e.g. in plotResiduals or testSpatialAutocorrelation) accordingly when you group or subset residuals. #### Reproducibility notes, random seed and random state As DHARMa uses simulations to calculate the residuals, a naive implementation of the algorithm would mean that residuals would look slightly different each time a DHARMa calculation is executed. This might both be confusing and bear the danger that a user would run the simulation several times and take the result that looks better (which would amount to multiple testing / p-hacking). By default, DHARMa therefore fixes the random seed to the same value every time a simulation is run, and afterwards restores the random state to the old value. This means that you will get exactly the same residual plot each time. 
If you want to avoid this behavior, for example for simulation experiments on DHARMa, use seed = NULL -\> no seed set, but random state will be restored, or seed = F -\> no seed set, and random state will not be restored. Whether or not you fix the seed, the setting for the random seed and the random state are stored in ```{r, eval = F} simulationOutput$randomState ``` If you want to reproduce simulations for such a run, set the variable .Random.seed by hand, and simulate with seed = NULL. Moreover (general advice), to ensure reproducibility, it's advisable to add a set.seed() at the beginning, and a session.info() at the end of your script. The latter will list the version number of R and all loaded packages. # Interpreting residuals and recognizing misspecification problems ## General remarks on interperting residual patterns and tests So far, all shown DHARMa results were calculated for a correctly specified model, resulting in "perfect" residual plots and diagnostics. In this section, we discuss how to recognize and interpret diagnostics that indicate a misspecified model. Before going into the details, note, however, that 1. **No residual pattern does not "prove" that the model is correct**: The fact that none of the DHARMa tests indicate a problem does not "prove" that the model is correctly specified. For any model, there are likely a large number of structural problems that do not create a pattern in the DHARMa diagnostics. In good old Popper fashion, you should interpret no residual problems as your working hypothesis not being rejected in that particular test, which increases confidence in the model, but does not constitute a conclusive proof. So, keep your skepticism alive, and if you find the results fishy, keep searching and testing. 2. **Once a residual effect is statistically significant, look at the magnitude to decide if there is a problem**: It is crucial to note that significance is NOT a measure of the strength of the residual pattern, it is a measure of the signal/noise ratio, i.e. whether you are sure there is a pattern at all. Significance in hypothesis tests depends on at least 2 ingredients: the strength of the signal and the number of data points. If you have a lot of data points, residual diagnostics will nearly inevitably become significant, because having a perfectly fitting model is very unlikely. That, however, doesn't necessarily mean that you need to change your model. The p-values confirm that there is a deviation from your null hypothesis. It is, however, in your discretion to decide whether this deviation is worth worrying about. For example, if you see a dispersion parameter of 1.01, I would not worry, even if the dispersion test is significant. A significant value of 5, however, is clearly a reason to move to a model that accounts for overdispersion. 3. **A residual pattern does not indicate that the model is unusable**: While a significant pattern in the residuals indicates with good reliability that the observed data did likely not originate from the fitted model, this doesn't necessarily imply that the inferential results from this wrong model are not usable. There are many situations in statistics where it is common practice to work with "wrong models". For example, many statistical models use shrinkage estimators, which purposefully bias parameter estimates to certain values. Random effects are a special case of this. 
If DHARMa residuals for these estimators are calculated, they will often show a slight pattern in the residuals even if the model is correctly specified, and tests for this can get significant for large sample sizes. For this reason, DHARMa excludes RE estimates from the predictions when plotting res \~ pred.

Another example is data that is missing at random (MAR). Since it is known that this phenomenon does not create a bias on the fixed effects estimates, it is common practice to fit these data with standard mixed models. Nevertheless, DHARMa recognizes that the observed data look different from what would be expected under the model assumptions, and flags the model as problematic (see [here](https://github.com/florianhartig/DHARMa/issues/101)).

**Important conclusion: DHARMa only flags a difference between the observed and expected data - the user has to decide whether this difference is actually a problem for the analysis!**

## Recognizing over/underdispersion

GL(M)Ms often display over/underdispersion, which means that residual variance is larger/smaller than expected under the fitted model. This phenomenon is most common for GLM families with constant (fixed) dispersion, in particular for Poisson and binomial models, but it can also occur in GLM families that adjust the variance (such as the beta or negative binomial) when distribution assumptions are violated.

A few general rules of thumb about dealing with dispersion problems:

- Dispersion is a property of the residuals, i.e. you can detect dispersion problems only AFTER fitting the model. It doesn't make sense to look at the dispersion of your response variable.
- Overdispersion is more common than underdispersion.
- If overdispersion is present, the main effect is that confidence intervals tend to be too narrow, and p-values too small, leading to inflated type I error. The opposite is true for underdispersion, i.e. the main issue of underdispersion is that you lose power.
- A common reason for overdispersion is a misspecified model. When overdispersion is detected, one should therefore first search for problems in the model specification (e.g. by plotting residuals against predictors with DHARMa), and only if this doesn't lead to success should overdispersion corrections such as individual-level random effects or changes in the distribution be applied.

### Residual patterns of over/underdispersion

This is how **overdispersion** looks in the DHARMa residuals. Note that we get more residuals around 0 and 1, which means that more residuals are in the tails of the distribution than would be expected under the fitted model.

```{r}
testData = createData(sampleSize = 200, overdispersion = 1.5, family = poisson())
fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData)

simulationOutput <- simulateResiduals(fittedModel = fittedModel)
plot(simulationOutput)
```

If you see this pattern, note that overdispersion is often caused by model misfit. Thus, before moving to a GLM with variable dispersion (for count data this would typically be a negative binomial), you should check your model for misfit, e.g. by plotting residuals against all predictors using plotResiduals().

Next, this is an example of underdispersion. Here, we get too many residuals around 0.5, which means that we get fewer residuals in the tails of the distribution than expected under the fitted model.
```{r} testData = createData(sampleSize = 500, intercept=0, fixedEffects = 2, overdispersion = 0, family = poisson(), roundPoissonVariance = 0.001, randomEffectVariance = 0) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ``` If you see this pattern, note that a common reason for underdispersion is overfitting, i.e. your model is too complex. Other possible explanations to check for include zero-inflation (best to check by comparing to a ZIP model, but see also DHARMa::testZeroInflation), non-independence of the data (e.g. temporal autocorrelation, check via DHARMa:: testTemporalAutocorrelation) that your predictors can use to overfit, or that your data-generating process is simply not a Poisson process. From a technical side, underdispersion is not as concerning as over dispersion, as it will usually bias p-values to the conservative side, but if your goal is to get a good power, you may want to consider a simpler model. If that is not helping, you can move to a distribution for underdispersed count data (e.g. Conway-Maxwell-Poisson, generalized Poisson). ### Formal tests for over/underdispersion Although, as discussed above, over/underdispersion will show up in the residuals, and it's possible to detect it with the testUniformity function, simulations show that this test is less powerful than more targeted tests. DHARMa contains several overdispersion tests that compare the dispersion of simulated residuals to the observed residuals. 1. *default:* a non-parametric test that compares the variance of the simulated residuals to the observed residuals (default), which has some analogy to the variance test implemented in aer::dispersiontest 2. *PearsonChisq:* alternatively, DHARMa implements the Pearson-chi2 test that is popular in the literature, suggested in the glmm Wiki, and implemented in some other R packages such as performance::check_overdispersion 3. *refit* if residual simulations are done via refit, DHARMa will compare the the Pearson residuals of the re-fitted simulations to the original Pearson residuals. This is essentially a nonparametric version of test 2. All of these tests are included in the testDispersion function, see ?testDispersion for details. ```{r overDispersionTest, echo = T, fig.width=4, fig.height=4} testDispersion(simulationOutput) ``` IMPORTANT INFO: we have made extensive simulations, which have shown that the various tests have certain advantages and disadvantages. The basic results are that: - The most powerful and reliable test is option 3, but this costs a lot of time and is not available for all regression packages, as it requires that Pearson residuals are available - Option 2, the parametric Pearson-chi2 is fast if Pearson residuals are available, but based on a naive expectation of df (counts RE as 1 df) and the test statistic is thus biased towards underdispersion for mixed models. Similar to the df approximation, Bias increasing with the number of RE levels. When testing only for overdispersion (alternative = "greater"), this makes the test more conservative, but it also costs power. - The DHARMa default option 1 is fast, nearly unbiased (i.e. you can test under and overdispersion), and only slightly less powerful as test 3, PROVIDED that simulations are made conditional on the fitted REs. Note that the latter is not the DHARMa default, so you have to actively request conditional simulations, e.g. 
for lme4 by specifying re.form = NULL. Power compared to the parametric Pearson-chi2 test depends on the number of RE levels, it will be more powerful for typical number of RE levels. As support for these statements, here results of the simulation, which compares the uniform (KS) test with the standard simuation-based test (conditional and unconditional) and the Pearson-chi2 test (two-sided and greater) for an n=200 Poisson GLMM with 30 RE levels. Thus, my current recommendation is: for most users, use the default DHARMa test, but create simulations conditionally. ## Zero-inflation / k-inflation or deficits A common special case of overdispersion is zero-inflation, which is the situation when more zeros appear in the observation than expected under the fitted model. Zero-inflation requires special correction steps. More generally, we can also have too few zeros, or too much or too few of any other values. We'll discuss that at the end of this section. ### Residual patterns Here an example of a typical zero-inflated count dataset, plotted against the environmental predictor ```{r} testData = createData(sampleSize = 500, intercept = 2, fixedEffects = c(1), overdispersion = 0, family = poisson(), quadraticFixedEffects = c(-3), randomEffectVariance = 0, pZeroInflation = 0.6) par(mfrow = c(1,2)) plot(testData$Environment1, testData$observedResponse, xlab = "Envrionmental Predictor", ylab = "Response") hist(testData$observedResponse, xlab = "Response", main = "") ``` We see a hump-shaped dependence of the environment, but with too many zeros. In the normal DHARMa residual plots, zero-inflation will look pretty much like overdispersion ```{r, fig.height=5.5} fittedModel <- glmer(observedResponse ~ Environment1 + I(Environment1^2) + (1|group) , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ``` The reason is that the model will usually try to find a compromise between the zeros, and the other values, which will lead to excess variance in the residuals. ### Formal tests for zero-inflation DHARMa also has a special test for zero-inflation, which compares the distribution of expected zeros in the data against the observed zeros ```{r, fig.width=4, fig.height=4} testZeroInflation(simulationOutput) ``` This test is likely better suited for detecting zero-inflation than the standard plot, but note that also overdispersion will lead to excess zeros, so only seeing too many zeros is not a reliable diagnostics for moving towards a zero-inflated model. A reliable differentiation between overdispersion and zero-inflation will usually only be possible when directly comparing alternative models, e.g. through residual comparison / model selection of a model with / without zero-inflation, or by simply fitting a model with zero-inflation and looking at the parameter estimate for the zero-inflation. A good option is the R package glmmTMB, which is also supported by DHARMa. We can use this to fit ```{r, eval= F} # requires glmmTMB fittedModel <- glmmTMB(observedResponse ~ Environment1 + I(Environment1^2) + (1|group), ziformula = ~1 , family = "poisson", data = testData) summary(fittedModel) simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ``` ### Testing generic summary statistics, e.g. 
### Testing generic summary statistics, e.g. for k-inflation or deficits

To test for generic excesses / deficits of particular values, we have the function testGeneric, which compares the values of a generic, user-provided summary statistic between the simulated and the observed data. Choose one of alternative = c("greater", "two.sided", "less") to test for inflation / deficit or both. Default is "greater" = inflation.

```{r, fig.width=4.5, fig.height=4.5}
countOnes <- function(x) sum(x == 1) # testing for number of 1s
testGeneric(simulationOutput, summary = countOnes, alternative = "greater") # 1-inflation
```

## Heteroscedasticity

So far, most of the things that we have tested could also have been detected with parametric tests. Here, we come to the first issue that is difficult to detect with current tests, and that is usually neglected. Heteroscedasticity means that there is a systematic dependency of the dispersion / variance on another variable in the model. It is not sufficiently appreciated that binomial or Poisson models can also show heteroscedasticity. Basically, it means that the level of over/underdispersion depends on another parameter. Here is an example where we create such data:

```{r}
testData = createData(sampleSize = 500, intercept = -1.5, overdispersion = function(x){return(rnorm(length(x), sd = 1 * abs(x)))}, family = poisson(), randomEffectVariance = 0)
fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData)
simulationOutput <- simulateResiduals(fittedModel = fittedModel)
plot(simulationOutput)
```

The exact p-values for the quantile lines in the plot can be displayed via

```{r, eval = F}
testQuantiles(simulationOutput)
```

As mentioned above, the equivalent test for categorical predictors (the plot function will switch automatically) would be

```{r, eval = F}
testCategorical(simulationOutput, catPred = testData$group)
```

Adding a simple overdispersion correction will try to find a compromise between the different levels of dispersion in the model. The QQ plot looks better now, but there is still a pattern in the residuals:

```{r}
testData = createData(sampleSize = 500, intercept = 0, overdispersion = function(x){return(rnorm(length(x), sd = 2*abs(x)))}, family = poisson(), randomEffectVariance = 0)
fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) + (1|ID), family = "poisson", data = testData)
# plotConventionalResiduals(fittedModel)
simulationOutput <- simulateResiduals(fittedModel = fittedModel)
plot(simulationOutput)
```

To remove this pattern, you would need to make the dispersion parameter dependent on a predictor (e.g. in JAGS), or apply a transformation to the data.
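Within R, one option for the former is a dispersion model. The following is only a hedged sketch (not run), assuming glmmTMB is available; it switches to a family with an explicit dispersion parameter and lets that parameter depend on the predictor:

```{r, eval = F}
# sketch: model the dispersion as a function of the predictor with glmmTMB
fittedDisp <- glmmTMB(observedResponse ~ Environment1 + (1|group),
                      dispformula = ~ Environment1,
                      family = nbinom1, data = testData)
plot(simulateResiduals(fittedDisp))
```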
## Detecting missing predictors or wrong functional assumptions

A second test that is typically run for LMs, but not for GL(M)Ms, is to plot residuals against the predictors in the model (or potentially predictors that were not in the model) to detect possible misspecifications. Doing this is *highly recommended*. For that purpose, you can retrieve the residuals via

```{r, eval = F}
simulationOutput$scaledResiduals
```

Note again that the residual values are scaled between 0 and 1. If you plot the residuals against predictors, space or time, the resulting plots should not only show no systematic dependency of those residuals on the covariates, but they should also again be flat for each fixed situation. That means that if you have, for example, a categorical predictor treatment / control, the distribution of residuals for each level alone should be flat as well.

Here is an example with a missing quadratic effect in the model and 2 predictors:

```{r}
testData = createData(sampleSize = 200, intercept = 1, fixedEffects = c(1,2), overdispersion = 0, family = poisson(), quadraticFixedEffects = c(-3,0))
fittedModel <- glmer(observedResponse ~ Environment1 + Environment2 + (1|group), family = "poisson", data = testData)
simulationOutput <- simulateResiduals(fittedModel = fittedModel)
# plotConventionalResiduals(fittedModel)
plot(simulationOutput, quantreg = T)
# testUniformity(simulationOutput = simulationOutput)
```

It is difficult to see that there is a problem at all in the general plot, but it becomes clear if we plot the residuals against the environmental predictors:

```{r}
par(mfrow = c(1,2))
plotResiduals(simulationOutput, testData$Environment1)
plotResiduals(simulationOutput, testData$Environment2)
```

## Residual correlation structures (temporal, spatial, phylogenetic)

If a distance between residuals can be defined (temporal, spatial, phylogenetic), you should check if there is a distance-dependence in the residuals, which would suggest moving to a GLS (generalized least squares) structure for the analysis. The three functions to test for this in DHARMa are:

- testTemporalAutocorrelation, based on the Durbin-Watson test.
- testSpatialAutocorrelation, based on Moran's I; can also be used for generic distance functions.
- testPhylogeneticAutocorrelation, based on the Moran's I test from the `Moran.I` function in the package `ape`.

Here is a short example for the spatial case; see the help of the functions for extended examples.

```{r, fig.width=4, fig.height=4}
testData = createData(sampleSize = 100, family = poisson(), spatialAutocorrelation = 3, numGroups = 1, randomEffectVariance = 0)
fittedModel <- glm(observedResponse ~ Environment1 , data = testData, family = poisson() )
simulationOutput <- simulateResiduals(fittedModel = fittedModel)
testSpatialAutocorrelation(simulationOutput = simulationOutput, x = testData$x, y= testData$y)
# plot(simulationOutput)
```

Note that all these tests are most sensitive to homogeneous residual structures, and might miss local and heterogeneous (non-stationary) residual structures. Additional visual checks can be useful.

However, standard DHARMa simulations from models with temporal / spatial / phylogenetic conditional autoregressive terms will still have the respective correlation in the DHARMa residuals, unless the package you are using models the autoregressive terms as explicit REs and is able to simulate conditional on the fitted REs. This means that the residuals will still show significant autocorrelation, even if the model fully accounts for this structure, and other tests, such as dispersion and uniformity, may have inflated type I error. See the example below with a `glmmTMB` model with a spatial autocorrelation structure:

```{r, fig.width=4, fig.height=4}
library(glmmTMB)
testData$pos <- numFactor(testData$x, testData$y)
fittedModel2 <- glmmTMB(observedResponse ~ Environment1 + exp(pos + 0|group), data = testData, family = poisson())
simulationOutput2 <- simulateResiduals(fittedModel = fittedModel2)
testSpatialAutocorrelation(simulationOutput = simulationOutput2, x = testData$x, y= testData$y)
# plot(simulationOutput2)
```

One of the options to solve this and get correct tests and no residual pattern (if the model is correct) is to rotate the residual space according to the covariance structure of the fitted model, such that the rotated residuals are conditionally independent.
The argument rotation in `simulateResiduals` does this (see also `?getQuantile` for details about the rotation options):

```{r, fig.width=4, fig.height=4}
# rotation of the residuals
simulationOutput3 <- simulateResiduals(fittedModel = fittedModel2, rotation = "estimated")
testSpatialAutocorrelation(simulationOutput = simulationOutput3, x = testData$x, y= testData$y)
# plot(simulationOutput3)
```

# Case studies and examples

**Note:** More real-world examples can be found on the DHARMa GitHub repository.

## Budworm example (count-proportion n/k binomial)

This example comes from [Jochen Fruend](https://jochenfruend.wordpress.com/). Measured are the numbers of parasitized observations, with population density as a covariate:

```{r, echo = F}
data = structure(list(N_parasitized = c(226, 689, 481, 960, 1177, 266, 46, 4, 884, 310, 19, 4, 7, 1, 3, 0, 365, 388, 369, 829, 532, 5), N_adult = c(1415, 2227, 2854, 3699, 2094, 376, 8, 1, 1379, 323, 2, 2, 11, 2, 0, 1, 1394, 1392, 1138, 719, 685, 3), density.attack = c(216.461273226486, 214.662143448767, 251.881252132684, 400.993643475831, 207.897856251888, 57.0335141562012, 6.1642552100285, 0.503930659141302, 124.673812637575, 27.3764667492035, 0.923453215863429, 0.399890030241684, 0.829818131526174, 0.146640466903247, 0.216795117773948, 0.215498663908284, 110.635445098884, 91.3766566822467, 126.157080458047, 82.9699108890686, 61.0476207779938, 0.574539291305784), Plot = structure(c(1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L ), .Label = c("1", "2", "3", "4"), class = "factor"), PY = c("p1y82", "p1y83", "p1y84", "p1y85", "p1y86", "p1y87", "p1y88", "p1y89", "p2y86", "p2y87", "p2y88", "p2y89", "p2y90", "p2y91", "p2y92", "p2y93", "p3y88", "p3y89", "p3y90", "p3y91", "p3y92", "p3y93" ), Year = c(82, 83, 84, 85, 86, 87, 88, 89, 86, 87, 88, 89, 90, 91, 92, 93, 88, 89, 90, 91, 92, 93), ID = 1:22), .Names = c("N_parasitized", "N_adult", "density.attack", "Plot", "PY", "Year", "ID"), row.names = c("p1y82", "p1y83", "p1y84", "p1y85", "p1y86", "p1y87", "p1y88", "p1y89", "p2y86", "p2y87", "p2y88", "p2y89", "p2y90", "p2y91", "p2y92", "p2y93", "p3y88", "p3y89", "p3y90", "p3y91", "p3y92", "p3y93" ), class = "data.frame")
data$logDensity = log10(data$density.attack+1)
```

```{r, fig.height=4, fig.width=4}
plot(N_parasitized / (N_adult + N_parasitized ) ~ logDensity, xlab = "Density", ylab = "Proportion infected", data = data)
```

Let's fit the data with a regular binomial n/k glm:

```{r}
mod1 <- glm(cbind(N_parasitized, N_adult) ~ logDensity, data = data, family=binomial)
simulationOutput <- simulateResiduals(fittedModel = mod1)
plot(simulationOutput)
```

We see various signals of overdispersion:

- QQ: s-shaped QQ plot, distribution test (KS) significant
- QQ: Dispersion test is significant
- QQ: Outlier test significant
- Res \~ predicted: Quantile fits are spread out too far

OK, so let's add overdispersion through an individual-level random effect:

```{r}
mod2 <- glmer(cbind(N_parasitized, N_adult) ~ logDensity + (1|ID), data = data, family=binomial)
simulationOutput <- simulateResiduals(fittedModel = mod2)
plot(simulationOutput)
```

The overdispersion looks better, but you can see that the residuals still look a bit irregular (although the tests are n.s.). The raw data look a bit hump-shaped, so we might be tempted to add a quadratic effect.
```{r}
mod3 <- glmer(cbind(N_parasitized, N_adult) ~ logDensity + I(logDensity^2) + (1|ID), data = data, family=binomial)
simulationOutput <- simulateResiduals(fittedModel = mod3)
plot(simulationOutput)
```

The residuals look perfect now. That being said, we don't have a lot of data, and we have to be sure we're not overfitting. A likelihood ratio test tells us that the quadratic effect is not significantly supported.

```{r}
anova(mod2, mod3)
```

The AIC differences are also small, although slightly in favor of model 3.

```{r}
AIC(mod2)
AIC(mod3)
```

I guess you could use either model 2 or 3 - the broader point is: increasing model complexity will nearly always improve the residuals, but according to standard statistical arguments (power, bias-variance trade-off) it's not always advisable to get them perfect, just good enough!

## Owl example (count data)

The next example uses the fairly well-known Owls dataset which is provided in glmmTMB (see ?Owls for more info about the data). The following shows a sequence of models, all checked with DHARMa. The example is discussed in a talk at ISEC 2018, see slides [here](https://www.slideshare.net/florianhartig/mon-c5hartig2493).

```{r, error=TRUE}
library(glmmTMB)
m1 <- glm(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)), data=Owls , family = poisson)
res <- simulateResiduals(m1)
plot(res)
```

OK, this is highly overdispersed. Let's add a RE on nest:

```{r, error=TRUE}
m2 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), data=Owls , family = poisson)
res <- simulateResiduals(m2)
plot(res)
```

Somewhat better, but not good. Moving to a negative binomial to adjust the dispersion, and checking dispersion and residuals against the FoodTreatment predictor:

```{r, error=TRUE}
m3 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), data=Owls , family = nbinom1)
res <- simulateResiduals(m3, plot = T)
par(mfrow = c(1,2))
testDispersion(res)
plotResiduals(res, Owls$FoodTreatment)
```

We see underdispersion now. In a model with variable dispersion, this is often the signal that some other distributional assumptions are violated. Let's check for zero-inflation:

```{r, fig.height=4, fig.width=4}
testZeroInflation(res)
```

It looks as if there is some zero-inflation, although it is non-significant. Fitting a zero-inflated model:

```{r, error=TRUE}
m4 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), ziformula = ~ FoodTreatment + SexParent, data=Owls , family = nbinom1)
res <- simulateResiduals(m4, plot = T)
```

```{r, error=TRUE, fig.width=7}
par(mfrow = c(1,3))
plotResiduals(res, Owls$FoodTreatment)
testDispersion(res)
testZeroInflation(res)
```

This looks a lot better. Trying a slightly different model specification, adding a dispersion model as well:

```{r, error=TRUE}
m5 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), dispformula = ~ FoodTreatment + SexParent , ziformula = ~ FoodTreatment + SexParent, data=Owls , family = nbinom1)
res <- simulateResiduals(m5, plot = T)
```

```{r, error=TRUE, fig.width=7}
par(mfrow = c(1,3))
plotResiduals(res, Owls$FoodTreatment)
testDispersion(res)
testZeroInflation(res)
```

But that does not seem to make things better. Both models would be acceptable in terms of their fit to the data. Which one should you prefer? This is not a question for residual checks. Residual checks tell you which models can be rejected with the data. Which of the typically many acceptable models you should fit must be decided by your scientific question, and/or possibly by model selection methods. If in doubt, I would tend towards the simpler model though.
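As a sketch of what such a model selection step could look like for the two owl models (not run; it assumes m4 and m5 as fitted above, with m4 nested in m5):

```{r, eval = F}
# sketch: compare the two acceptable models with standard model selection tools
AIC(m4, m5)
anova(m4, m5) # LRT; m4 is m5 without the dispersion model
```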
# Notes on particular data types

## Poisson data

The main concern with Poisson data is dispersion. See the comments in the section on the dispersion test, in particular regarding the advantage of conditional simulations in this case. To address overdispersion, I would recommend preferring the negative binomial model over observation-level random effects, because this model will be easier to test in DHARMa and its dispersion can be modeled more easily, e.g. with glmmTMB. The third option would be quasi models, but they have few advantages except runtime. Note also that quasi models cannot be tested with DHARMa. Once the dispersion is adjusted, you should check for heteroscedasticity (via the standard plot, and also against all predictors) and for zero-inflation. As noted, zero-inflation tests are often negative, and zero-inflation rather shows up as underdispersion. Work through the owl example above.

## Proportional data

Proportional data expressed as percentages or fractions of a whole, i.e. non-count-based data, are often modeled with beta regressions, the standard technique for analysing continuous (also called non-count-based or non-binomial) proportions (e.g. percent cover, fraction of time spent on an activity). Those models can be tested with DHARMa. Note that beta regressions are often 0- or 1-inflated. Both should be tested with testZeroInflation or testGeneric. Note: discrete proportions, i.e. count-based proportions of the type k/n, should NOT be modeled with the beta regression. Use the binomial (see below).

## Binomial data

Binomial data behave slightly differently depending on whether we have a 0/1 response (Bernoulli) or a k/n response (true binomial). A k/n response, in particular when n is large, will behave similarly to the Poisson, in that it approaches a normal distribution with fixed dispersion for large n and becomes more asymmetric at its borders (for k = 0 or n). You should check for dispersion and consider a beta-binomial in cases of overdispersion.

Things are a bit different for the 0/1 response. Let's look at the residuals of a clearly misspecified binomial model (missing predictor) with 0/1 response data.

```{r}
testData = createData(sampleSize = 500, overdispersion = 0, fixedEffects = 5, family = binomial(), randomEffectVariance = 3, numGroups = 25)
fittedModel <- glm(observedResponse ~ 1, family = "binomial", data = testData)
simulationOutput <- simulateResiduals(fittedModel = fittedModel)
```

If you did the same with a binomial k/n response or count data, such a misspecification would produce overdispersion (try it out). For the 0/1 response we see neither dispersion problems nor a misfit in the general res \~ predicted plot.

```{r}
plot(simulationOutput, asFactor = T)
```

The misfit is, however, clearly visible if we plot the residuals against the missing predictor.

```{r, fig.width=4, fig.height=4}
plotResiduals(simulationOutput, testData$Environment1, quantreg = T)
```

However, we can see the overdispersion arising from the misfit if we group our residuals, which basically transforms the 0/1 response into a k/n response. To show this, let's look at the dispersion test for the same model, once ungrouped (left), and grouped according to the variable group which was in the data (right).
```{r}
par(mfrow = c(1,2))
testDispersion(simulationOutput)
simulationOutput2 = recalculateResiduals(simulationOutput, group = testData$group)
testDispersion(simulationOutput2)
```

In general, you can group according to any variable that you like, including continuous variables or space. However, some variables make more sense than others. To understand this, consider a simulation where we create binomial data with true probabilities p, drawn from a uniform distribution:

```{r}
n = 1000
p = runif(n, 0.1, 0.9) # true probabilities
obs = rbinom(n, 1, p)
```

The important thing to note here is that `rbinom(n, 1, p)` will generate essentially the same overall distribution of responses as `rbinom(1, n, mean(p))`. What that means: a misfit of the predictions will not show up unless they are wrong in the overall mean. Consequently, if we fit:

```{r}
fit <- glm(obs ~ 1, family = "binomial") # wrong model, assumes equal probabilities
library(DHARMa)
res <- simulateResiduals(fit, plot = T) # nothing to see
```

We see no misfit, and neither do we see a misfit when we group the data points randomly, as the mean of a random group is still fitted well by the overall mean.

```{r}
res2 <- recalculateResiduals(res, group = rep(1:20, each = 10))
plot(res2)
```

However, we do see the misfit / overdispersion if we aggregate according to something that is correlated with the misfit. For our example, let's just group according to the true probabilities:

```{r}
grouping = cut(p, breaks = quantile(p, seq(0,1,0.02)))
res3 <- recalculateResiduals(res, group = grouping)
plot(res3)
```

In this case, the groups have a different mean than the fitted grand mean, and the misfit shows up in the residuals. Thus, what we are looking for in the grouping are variables that may correlate with the misfit. If you don't have a natural grouping variable, you can introduce arbitrary grouping variables, e.g. via discretising a predictor or the response (usually preferable), and grouping according to that, or via discretising space (e.g. grouping observations in spatial blocks).

The pattern appears only, however, if the grouping variable correlates with the model error. Consider the following example: We create data and fit a model with a missing predictor (Environment2):

```{r}
set.seed(123)
testData = createData(sampleSize = 500, overdispersion = 0, fixedEffects = c(0,3), family = binomial(), randomEffectVariance = 3, numGroups = 50)
```

Apparently, there is no problem with the residuals for the misfitted model:

```{r}
fittedModel <- glm(observedResponse ~ Environment1, family = "binomial", data = testData)
res <- simulateResiduals(fittedModel = fittedModel)
plot(res)
```

1. Grouping according to the RE produces overdispersion, because the RE is missing in the fitted model:

```{r}
res2 = recalculateResiduals(res , group = testData$group)
plot(res2)
```

2. Grouping according to a random factor does not produce an effect. In this respect, the model has correct dispersion:

```{r}
grouping = as.factor(sample.int(50, 500, replace = T))
res2 = recalculateResiduals(res , group = grouping)
plot(res2)
```

3. Grouping according to the (predicted) response doesn't create a pattern:

```{r}
x = predict(fittedModel)
grouping = cut(x, breaks = quantile(x, seq(0,1,0.02)))
res2 = recalculateResiduals(res , group = grouping)
plot(res2)
```
4. Grouping according to the missing variable creates a pattern, because in relation to this variable, the model is overdispersed:

```{r}
x = testData$Environment2
grouping = cut(x, breaks = quantile(x, seq(0,1,0.02)))
res2 = recalculateResiduals(res , group = grouping)
plot(res2)
```

5. Grouping according to space does not create a pattern, because there is no missing spatial predictor:

```{r}
x = testData$x
grouping = cut(x, breaks = quantile(x, seq(0,1,0.02)))
res3 = recalculateResiduals(res , group = grouping)
plot(res3)
```

**Conclusions:** if you see overdispersion or a pattern after grouping, it highlights a model error that is structured by group. As the pattern usually highlights a model misfit, rather than a dispersion problem akin to what happens in an overdispersed binomial (which has major impacts on p-values and CIs), I view this binomial grouping pattern as less critical. Likely, most conclusions will not change if you ignore the problem. Nevertheless, you should try to understand the reason for it. When the group is spatial, it could be the sign of residual spatial autocorrelation, which could be addressed by a spatial RE or a spatial model. When grouped by a continuous variable, it could be the sign of a nonlinear effect.

# Supported packages and frameworks

## lm and glm

lm, glm and MASS::glm.nb are fully supported.

## lme4

lme4 model classes are fully supported. It is possible to condition on REs via re.form; see the help of predict.merMod.

## mgcv

When using mgcv with DHARMa, it is highly recommended to also install mgcViz. Since version 0.4.5, this will allow DHARMa to fall back on the simulate.gam function in mgcViz, which is more general than the default simulate function. For example, without mgcViz, it will not be possible to simulate from mgcv::gam objects fitted with extended families. If you absolutely want to use DHARMa without mgcViz, you should make sure that simulate(model) works correctly for the model object for which you want to calculate DHARMa residuals.

## gamm4

Models fitted with gamm4 return a list that contains a lme4 object under the name "mer". You can test this object like an lme4 model, so e.g. simulateResiduals(myGamm4Model\$mer). All remarks regarding lme4 objects apply.

## glmmTMB

glmmTMB is nearly fully supported since DHARMa 0.2.7 and glmmTMB 1.0.0. A remaining limitation is that you can't adjust whether simulations are conditional or not, so simulateResiduals(model, re.form = NULL) will have no effect; simulations will always be done from the full model.

## spaMM

spaMM is supported by DHARMa since 0.2.1.

## GLMMadaptive

GLMMadaptive is supported by DHARMa since 0.3.4.

## phylolm

phylolm (version \>= 2.6.5) is supported by DHARMa since 0.4.7 for both model classes phylolm and phyloglm.

## phyr

DHARMa residuals work with phyr, but the correct implementation is not fully tested as of DHARMa 0.4.2. See also

## brms

brms can be made to work together with DHARMa, see

## Unsupported packages

If confronted with an unsupported package, DHARMa will try to use standard S3 functions such as coef(), simulate() etc. to perform simulations. If no error occurs, a residual object will be calculated, and a warning will be provided that this package has not been checked for full functionality. In many cases, the results can be used though (but no guarantee, maybe check with null simulations if the results are OK).
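What such a null-simulation check could look like is sketched below. This is only an assumption-laden illustration: it presumes that the unsupported model class supports simulate() and update(), was fitted with a data argument, and uses observedResponse as the response, as in the examples above.

```{r, eval = F}
# sketch of a "null check" for an unsupported model class:
# simulate a response under the fitted model, refit, and verify that the
# DHARMa residuals of this null fit look inconspicuous
testDataNull <- testData
testDataNull$observedResponse <- simulate(fittedModel, nsim = 1)[[1]]
fittedModelNull <- update(fittedModel, data = testDataNull)
plot(simulateResiduals(fittedModelNull)) # should show no patterns if the class is handled correctly
```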
Other than that, see my general comments about [adding new R packages to DHARMa](https://github.com/florianhartig/DHARMa/wiki/Adding-new-R-packages-to-DHARMA).

## Importing external simulations (e.g. from Bayesian software or unsupported packages)

DHARMa can also import external simulations from a fitted model via createDHARMa(), which will be interesting for unsupported packages and for Bayesians. **Bayesians should note the extra Vignette on "DHARMa for Bayesians" regarding the interpretation of these residuals.**

Here is an example of how to create simulations for a Poisson glm. Of course, it doesn't make sense to do this here, as glm is a supported model class, but you could do the same in case you want to check a model class that is currently not supported by DHARMa.

```{r, eval = T}
testData = createData(sampleSize = 200, overdispersion = 0.5, family = poisson())
fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData)

simulatePoissonGLM <- function(fittedModel, n){
  pred = predict(fittedModel, type = "response")
  nObs = length(pred)
  sim = matrix(nrow = nObs, ncol = n)
  for(i in 1:n) sim[,i] = rpois(nObs, pred)
  return(sim)
}

sim = simulatePoissonGLM(fittedModel, 100)

DHARMaRes = createDHARMa(simulatedResponse = sim, observedResponse = testData$observedResponse, fittedPredictedResponse = predict(fittedModel), integerResponse = T)
plot(DHARMaRes, quantreg = F)
```
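The same mechanism can be used for Bayesian models via posterior predictive simulations. The following is only a hedged sketch (not run): it assumes a hypothetical brms fit `brmsModel` of the same Poisson model, and that brms::posterior_predict() returns a draws x observations matrix.

```{r, eval = F}
# sketch: DHARMa residuals from posterior predictive simulations of a brms model
postSim <- brms::posterior_predict(brmsModel) # draws x observations
DHARMaResBayes <- createDHARMa(simulatedResponse = t(postSim), # observations in rows
                               observedResponse = testData$observedResponse,
                               fittedPredictedResponse = apply(postSim, 2, median),
                               integerResponse = T)
plot(DHARMaResBayes)
```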
DHARMa/vignettes/dispersion.png0000644000176200001440000013131714665273541016166 0ustar liggesusers
DHARMa/data/0000755000176200001440000000000014665273541012174 5ustar liggesusers
DHARMa/data/hurricanes.rda0000644000176200001440000000704414665273541015034 0ustar liggesusers
DHARMa/NAMESPACE0000644000176200001440000000433514703461527012501 0ustar liggesusers# Generated by roxygen2: do not edit by hand S3method(getFamily,default) S3method(getFamily,phyloglm) S3method(getFamily,phylolm) S3method(getFitted,HLfit) S3method(getFitted,MixMod) S3method(getFitted,default) S3method(getFitted,gam) S3method(getFitted,phyloglm) S3method(getFitted,phylolm) S3method(getFixedEffects,MixMod) S3method(getFixedEffects,default) S3method(getObservedResponse,HLfit) S3method(getObservedResponse,default) S3method(getObservedResponse,phyloglm) S3method(getObservedResponse,phylolm) S3method(getPearsonResiduals,default) S3method(getPearsonResiduals,gam) S3method(getRefit,HLfit) S3method(getRefit,MixMod) S3method(getRefit,default) S3method(getRefit,glmmTMB) S3method(getRefit,lm) S3method(getRefit,phyloglm) S3method(getRefit,phylolm) S3method(getResiduals,MixMod) S3method(getResiduals,default) S3method(getSimulations,HLfit) S3method(getSimulations,MixMod) S3method(getSimulations,default) S3method(getSimulations,gam) S3method(getSimulations,glmmTMB) S3method(getSimulations,lmerMod) S3method(getSimulations,negbin) S3method(getSimulations,phyloglm) S3method(getSimulations,phylolm) S3method(hist,DHARMa) S3method(plot,DHARMa) S3method(plot,DHARMaBenchmark) S3method(print,DHARMa) S3method(residuals,DHARMa) export(benchmarkRuntime) export(createDHARMa) export(createData) export(getFamily) export(getFitted) export(getFixedEffects) export(getObservedResponse) export(getPearsonResiduals) export(getQuantile) export(getRandomState) export(getRefit) export(getResiduals) export(getSimulations) export(outliers) export(plotConventionalResiduals) export(plotQQunif) export(plotResiduals) export(plotSimulatedResiduals) export(recalculateResiduals) export(runBenchmarks) export(simulateLRT) export(simulateResiduals) export(testCategorical) export(testDispersion) export(testGeneric) export(testOutliers) export(testOverdispersion) export(testOverdispersionParametric) export(testPhylogeneticAutocorrelation) export(testQuantiles) export(testResiduals) export(testSimulatedResiduals) export(testSpatialAutocorrelation) export(testTemporalAutocorrelation) export(testUniformity) export(testZeroInflation) export(transformQuantiles) import(grDevices) import(graphics) import(stats) import(utils) importFrom(lme4,fixef) importFrom(lme4,refit)
DHARMa/NEWS.md0000644000176200001440000002337314704245735012363 0ustar liggesusersNOTE: for more news about the package, see https://github.com/florianhartig/DHARMa/releases
# DHARMa 0.4.7 ## New Features * Includes support for the package phylolm. * New function for testing residual phylogenetic autocorrelation, testPhylogeneticAutocorrelation(). ## Major change * New optional argument 'rotation' in simulateResiduals() for the rotation of the residual space prior to calculating the quantile residuals, to account for residual covariance as created by temporal, spatial or phylogenetic autocorrelation. ## Minor changes * Residual plots: Option to change the red color for significant results to any color of preference through options(DHARMaSignalColor = "red"). This is a good option for color-blind friendly plots. * Documentation: improved help files and vignette. # DHARMa 0.4.6 ## New Features * new function benchmarkRuntime() for checking runtime of functions * new function simulateLRT() to generate a simulated likelihood ratio test ## Bugfixes * fixed issue with parallelization in runBenchmarks * various minor bugfixes and help improvements # DHARMa 0.4.5 ## Minor changes * included option to simulate mgcv models using the functions implemented in mgcViz, which should improve mgcv compatibility with DHARMa * Added option to include plot title in plot() #320 ## Bugfixes * fixed issues with plotting, see #313 and #274 # DHARMa 0.4.4 ## Major changes * remodelled tests so that all tested packages can be used conditionally ## Minor changes * re-introduced glmmTMB to suggests * phyr moved to enhances * re-modelled package unit tests * added RStan, CmdStanR, rjags, BayesianTools to enhances, as they could be used with DHARMa * moved parallel calculations in runBenchmarks to R native parallel functions ## New features * Added rotation option to all functions that create residuals (simulateResiduals, recalculateResiduals, createDHARMa) ## Documentation * Added help comments about autocorrelation structures, in particular in simulateResiduals, testSpatialAutocorrelation, testTemporalAutocorrelation * Added example about the use of rotation in testTemporalAutocorrelation * Improved help of dispersion test # DHARMa 0.4.3 ## Bugfixes * Removed glmmTMB completely, see #289 # DHARMa 0.4.2 ## Bugfixes * Moved glmmTMB from suggests to imports because this package is used in the vignette, see #289 ## Minor changes * Added hurricane dataset * Help and vignette updates * phyr added in suggests # DHARMa 0.4.1 ## Bugfixes * Force method = traditional for refit = T, as it turns out that the PIT method is not a good idea on the residuals, see #272 # DHARMa 0.4.0 This is actually a bugfix release for 0.3.4, but on reflection I decided that 0.4.0 should have been a minor release, so I pushed the version number up to 0.4.0 ## Bugfixes * bugfix in getResiduals, which had consequences for quantile residual calculations with refit = T # DHARMa 0.3.4 0.3.4 is a relatively important release with various minor improvements and smaller new features, most notably the support of GLMMadaptive ## New features * added parametric dispersion test in testDispersion (0.3.3.2) * support for GLMMadaptive (0.3.3.1) * new plot for categorical predictors * new plots for result of runBenchmarks ## Major changes ## Minor changes * changed test statistics in standard dispersion test to standardized variance, to be more in line with standard dispersion parameters * defaults for plot function unified * removed option to provide no x,y / time in the correlation tests * recalculateResiduals now allows subsetting #246 * better input checking in correlation tests #190 ## Bugfixes * bugfix in runBenchmarks included in
https://github.com/florianhartig/DHARMa/pull/247 * bugfix in testQuantiles https://github.com/florianhartig/DHARMa/pull/261 * bugfix in getRandomState https://github.com/florianhartig/DHARMa/issues/254 # DHARMa 0.3.3 ## Bugfixes * bugfix in testOutliers, see https://github.com/florianhartig/DHARMa/issues/197 * bugfix in the ecdf / PIT residual function, see https://github.com/florianhartig/DHARMa/issues/195 # DHARMa 0.3.2 ## Bugfixes * bugfix in testOutliers, see https://github.com/florianhartig/DHARMa/issues/182 # DHARMa 0.3.1 ## Major changes * added PIT quantile calculations based on suggestion in #168. For details see ?getQuantiles ## Bugfixes * bugfix for passing on parameters to plot.default through plotR.DHARMa and plotResiduals ## Minor changes * added checks / warnings for models fit with weights # DHARMa 0.3.0 ## New features * quantile regressions switched to qgam, which also calculates p-values on the quantile estimates * new testQuantiles function, based on the qgam quantile regressions ## Changes * syntax change for plotResiduals, see ?plotResiduals * transformQuantiles is deprecated, functionality included in residuals() * nearly all functions can now also be called directly with a fitted model (for computational efficiency, however, it is still recommended to calculate the residuals first) * qqPlot now shows disribution, dispersion and outlier test ## Bugfixes * bugfix #158 for fitting glmmTMB binomial with proportions # DHARMa 0.2.7 ## New features * added smooth scatter in plotResiduals https://github.com/florianhartig/DHARMa/commit/da01d8c7a9a74558817e4a73fe826084164cf05d * glmmTMB now fully supported through new compulsory version 1.0 of glmmTMB, which includes the re.form argument in the simulations required by DHARMa https://github.com/florianhartig/DHARMa/pull/140 # DHARMa 0.2.6 ## Bugfixes * return of plotResiduals set to invisible # DHARMa 0.2.5 ## New features * transformQuantiles function added to transform uniform DHARMa residuals to normal or other residuals ## Minor changes * several smaller corrections to help # DHARMa 0.2.4 ## Bugfixes * corrected issues in the vignette * small corrections to help # DHARMa 0.2.3 ## Bugfixes * added missing distributions https://github.com/florianhartig/DHARMa/pull/104 * bugfix in simulate residuals https://github.com/florianhartig/DHARMa/issues/107 # DHARMa 0.2.2 ## Bugfixes * fixes bug in Vignette (header lost) # DHARMa 0.2.1 ## New features * Outlier highlighting (in plots) and formal outlier test, implemented in https://github.com/florianhartig/DHARMa/pull/99 * Supporting now also models fit with the spaMM package ## Major changes * Remodelled createDHARMa function * option to directly provide scaled residuals was removed * Rewrote ecdf function for DHARMa to get fully balanced scale, in the course of https://github.com/florianhartig/DHARMa/pull/99 ## Minor changes * a number of smaller updates, mostly to help files ## Bugfixes * fixes #82 / Bug in recalculateResiduals # DHARMa 0.2.0 ## New features * support for glmmTMB https://github.com/florianhartig/DHARMa/issues/16, implemented since https://github.com/florianhartig/DHARMa/releases/tag/v0.1.6.2 * support for grouping of residuals, see https://github.com/florianhartig/DHARMa/issues/22 * residual function for DHARMa ## Major changes * remodeled benchmarks functions in https://github.com/florianhartig/DHARMa/releases/tag/v0.1.6.3 * remodeled dispersion testsin https://github.com/florianhartig/DHARMa/releases/tag/v0.1.6.4, adresses 
https://github.com/florianhartig/DHARMa/issues/62 ## Minor changes * changed plot function names in https://github.com/florianhartig/DHARMa/releases/tag/v0.1.6.1 ## Bugfixes * fixed bug with zeroinflation test for k/n binomial data https://github.com/florianhartig/DHARMa/issues/55 * fixed bug with p-value calculation via ecdf https://github.com/florianhartig/DHARMa/issues/55 # DHARMa 0.1.6 ## New features * option to apply rank tranformation of x values in plotResiduals, see https://github.com/florianhartig/DHARMa/issues/44 * option to convert predictor to factor * random seed is fixed, random state is recorded ## Minor changes * changed syntax in tests for sptial / temporal autocorrlation * null provides now random. Also, custom distance matrices can be provided to testSpatialAutocorrelation * slight changes to plot layout ## Bugfixes * error catching for crashes in plot function https://github.com/florianhartig/DHARMa/issues/42 * bugfix for glmer.nb parametricOverdispersinTest https://github.com/florianhartig/DHARMa/issues/47 # DHARMa 0.1.5 ## Minor changes * fixes a bug in version 0.1.4 that occurred when running simulateResiduals with refit = T. Apologies for any inconvenience. # DHARMa 0.1.4 ## Major changes * new experimental non-parametric dispersion test on simulated residuals. Extended simulations to compare dispersion tests ## Minor changes * supports for binomial with response coded as factor * error catching for refit procedure https://github.com/florianhartig/DHARMa/issues/18 * warnings in case the refit procedure fails or produces identical parameter values https://github.com/florianhartig/DHARMa/issues/20 # DHARMa 0.1.3 ## Major changes * includes support for model class 'gam' from package 'mgcv'. Required overwriting the 'fitted' function for gam, see https://github.com/florianhartig/DHARMa/issues/12 ## Minor changes * plotResiduals includes support for factors * updates to the help # DHARMa 0.1.2 * This bugfix release fixes an issue with backwards compatibility introduced in the 0.1.1 release, which used the 'startsWith' function that is only available in R base since 3.3.0. In 0.1.2, all occurences of 'startsWith' were replaced with 'grepl', which restores the compatibility with older R versions. 
# DHARMa 0.1.1 * including now the negative binomial models from MASS and lme4, as well as the possibility to create synthetic data from the negative binomial family * includes a createDHARMa function that allows using the plot functions of DHARMa also with externally created simualtions, for example for Bayesian predictive simulations # DHARMA 0.1.0 * initial release, with support for lm, glm, lme4 DHARMa/inst/0000755000176200001440000000000014704246643012235 5ustar liggesusersDHARMa/inst/examples/0000755000176200001440000000000014703461527014052 5ustar liggesusersDHARMa/inst/examples/simulateLRTHelp.R0000644000176200001440000000135514665273541017223 0ustar liggesusers library(DHARMa) library(lme4) # create test data set.seed(123) dat <- createData(sampleSize = 200, randomEffectVariance = 1) # define Null and alternative model (should be nested) m1 = glmer(observedResponse ~ Environment1 + (1|group), data = dat, family = "poisson") m0 = glm(observedResponse ~ Environment1 , data = dat, family = "poisson") \dontrun{ # run LRT - n should be increased to at least 250 for a real study out = simulateLRT(m0, m1, n = 10) # To inspect warnings thrown during the refits: out = simulateLRT(m0, m1, saveModels = TRUE, suppressWarnings = FALSE, n = 10) summary(out$saveModels[[2]]$refittedM1) # RE SD = 0, no problem # If there are warnings that seem problematic, # could try changing the optimizer or iterations } DHARMa/inst/examples/testDispersionHelp.R0000644000176200001440000000323414665273541020033 0ustar liggesuserslibrary(lme4) set.seed(123) testData = createData(sampleSize = 100, overdispersion = 0.5, randomEffectVariance = 1) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # default DHARMa dispersion test - simulation-based testDispersion(simulationOutput) testDispersion(simulationOutput, alternative = "less", plot = FALSE) # only underdispersion testDispersion(simulationOutput, alternative = "greater", plot = FALSE) # only oversispersion # for mixed models, the test is usually more powerful if residuals are calculated # conditional on fitted REs simulationOutput <- simulateResiduals(fittedModel = fittedModel, re.form = NULL) testDispersion(simulationOutput) # DHARMa also implements the popular Pearson-chisq test that is also on the glmmWiki by Ben Bolker # The issue with this test is that it requires the df of the model, which are not well defined # for GLMMs. It is biased towards underdispersion, with bias getting larger with the number # of RE groups. 
In doubt, only test for overdispersion testDispersion(simulationOutput, type = "PearsonChisq", alternative = "greater") # if refit = TRUE, a different test on simulated Pearson residuals will calculated (see help) simulationOutput2 <- simulateResiduals(fittedModel = fittedModel, refit = TRUE, seed = 12, n = 20) testDispersion(simulationOutput2) # often useful to test dispersion per group (in particular for binomial data, see vignette) simulationOutputAggregated = recalculateResiduals(simulationOutput2, group = testData$group) testDispersion(simulationOutputAggregated) DHARMa/inst/examples/testQuantilesHelp.R0000644000176200001440000000174114665273541017662 0ustar liggesuserstestData = createData(sampleSize = 200, overdispersion = 0.0, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # run the quantile test x = testQuantiles(simulationOutput) x # the test shows a combined p-value, corrected for multiple testing \dontrun{ # accessing results of the test x$pvals # pvalues for the individual quantiles x$qgamFits # access the fitted quantile regression summary(x$qgamFits[[1]]) # summary of the first fitted quantile # possible to test user-defined quantiles testQuantiles(simulationOutput, quantiles = c(0.7)) # example with missing environmental predictor fittedModel <- glm(observedResponse ~ 1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) testQuantiles(simulationOutput, predictor = testData$Environment1) plot(simulationOutput) plotResiduals(simulationOutput) } DHARMa/inst/examples/createDataHelp.R0000644000176200001440000000237014665273541017051 0ustar liggesuserstestData = createData(sampleSize = 500, intercept = 2, fixedEffects = c(1), overdispersion = 0, family = poisson(), quadraticFixedEffects = c(-3), randomEffectVariance = 0) par(mfrow = c(1,2)) plot(testData$Environment1, testData$observedResponse) hist(testData$observedResponse) # with zero-inflation testData = createData(sampleSize = 500, intercept = 2, fixedEffects = c(1), overdispersion = 0, family = poisson(), quadraticFixedEffects = c(-3), randomEffectVariance = 0, pZeroInflation = 0.6) par(mfrow = c(1,2)) plot(testData$Environment1, testData$observedResponse) hist(testData$observedResponse) # binomial with multiple trials testData = createData(sampleSize = 40, intercept = 2, fixedEffects = c(1), overdispersion = 0, family = binomial(), quadraticFixedEffects = c(-3), randomEffectVariance = 0, binomialTrials = 20) plot(observedResponse1 / observedResponse0 ~ Environment1, data = testData, ylab = "Proportion 1") # spatial / temporal correlation testData = createData(sampleSize = 100, family = poisson(), spatialAutocorrelation = 3, temporalAutocorrelation = 3) plot(log(observedResponse) ~ time, data = testData) plot(log(observedResponse) ~ x, data = testData) DHARMa/inst/examples/wrappersHelp.R0000644000176200001440000000112014665273541016647 0ustar liggesuserstestData = createData(sampleSize = 400, family = gaussian()) fittedModel <- lm(observedResponse ~ Environment1 , data = testData) # response that was used to fit the model getObservedResponse(fittedModel) # predictions of the model for these points getFitted(fittedModel) # extract simulations from the model as matrix getSimulations(fittedModel, nsim = 2) # extract simulations from the model for refit (often requires different structure) x = getSimulations(fittedModel, nsim = 2, type = "refit") 
getRefit(fittedModel, x[[1]]) getRefit(fittedModel, getObservedResponse(fittedModel)) DHARMa/inst/examples/checkModelHelp.R0000644000176200001440000000074414665273541017055 0ustar liggesusers testData = createData(sampleSize = 200, overdispersion = 0.5, randomEffectVariance = 0.5, family = gaussian(), hasNA = TRUE) fittedModel <- lm(observedResponse ~ Environment1 , data = testData) res <- simulateResiduals(fittedModel) # throws NA message # get the indices of the rows that were used to fit the model sel = as.numeric(rownames(model.frame(fittedModel))) # now use the indices when plotting against other variables plotResiduals(res, form = testData$Environment1[sel]) DHARMa/inst/examples/testsHelp.R0000644000176200001440000000300614677165224016154 0ustar liggesuserstestData = createData(sampleSize = 100, overdispersion = 0.5, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # the plot function shows 2 plots and runs 4 tests # i) KS test i) Dispersion test iii) Outlier test iv) quantile test plot(simulationOutput, quantreg = TRUE) # testResiduals tests distribution, dispersion and outliers testResiduals(simulationOutput) ####### Individual tests ####### # KS test for correct distribution of residuals testUniformity(simulationOutput) # KS test for correct distribution within and between groups testCategorical(simulationOutput, testData$group) # Dispersion test - for details see ?testDispersion testDispersion(simulationOutput) # tests under and overdispersion # Outlier test (number of observations outside simulation envelope) # Use type = "boostrap" for exact values, see ?testOutliers testOutliers(simulationOutput, type = "binomial") # testing zero inflation testZeroInflation(simulationOutput) # testing generic summaries countOnes <- function(x) sum(x == 1) # testing for number of 1s testGeneric(simulationOutput, summary = countOnes) # 1-inflation testGeneric(simulationOutput, summary = countOnes, alternative = "less") # 1-deficit means <- function(x) mean(x) # testing if mean prediction fits testGeneric(simulationOutput, summary = means) spread <- function(x) sd(x) # testing if mean sd fits testGeneric(simulationOutput, summary = spread) DHARMa/inst/examples/testSpatialAutocorrelationHelp.R0000644000176200001440000000625314703461527022404 0ustar liggesuserstestData = createData(sampleSize = 40, family = gaussian()) fittedModel <- lm(observedResponse ~ Environment1, data = testData) res = simulateResiduals(fittedModel) # Standard use testSpatialAutocorrelation(res, x = testData$x, y = testData$y) # Alternatively, one can provide a distance matrix dM = as.matrix(dist(cbind(testData$x, testData$y))) testSpatialAutocorrelation(res, distMat = dM) # You could add a spatial variogram via # library(gstat) # dat = data.frame(res = residuals(res), x = testData$x, y = testData$y) # coordinates(dat) = ~x+y # vario = variogram(res~1, data = dat, alpha=c(0,45,90,135)) # plot(vario, ylim = c(-1,1)) # if there are multiple observations with the same x values, # create first ar group with unique values for each location # then aggregate the residuals per location, and calculate # spatial autocorrelation on the new group # modifying x, y, so that we have the same location per group # just for completeness testData$x = as.numeric(testData$group) testData$y = as.numeric(testData$group) # calculating x, y positions per group groupLocations = aggregate(testData[, 6:7], 
list(testData$group), mean) # calculating residuals per group res2 = recalculateResiduals(res, group = testData$group) # running the spatial test on grouped residuals testSpatialAutocorrelation(res2, groupLocations$x, groupLocations$y) # careful when using REs to account for spatially clustered (but not grouped) # data. this originates from https://github.com/florianhartig/DHARMa/issues/81 # Assume our data is divided into clusters, where observations are close together # but not at the same point, and we suspect that observations in clusters are # autocorrelated clusters = 100 subsamples = 10 size = clusters * subsamples testData = createData(sampleSize = size, family = gaussian(), numGroups = clusters ) testData$x = rnorm(clusters)[testData$group] + rnorm(size, sd = 0.01) testData$y = rnorm(clusters)[testData$group] + rnorm(size, sd = 0.01) # It's a good idea to use a RE to take out the cluster effects. This accounts # for the autocorrelation within clusters library(lme4) fittedModel <- lmer(observedResponse ~ Environment1 + (1|group), data = testData) # DHARMa default is to re-simulate REs - this means spatial pattern remains # because residuals are still clustered res = simulateResiduals(fittedModel) testSpatialAutocorrelation(res, x = testData$x, y = testData$y) # However, it should disappear if you just calculate an aggregate residuals per cluster # Because at least how the data are simulated, cluster are spatially independent res2 = recalculateResiduals(res, group = testData$group) testSpatialAutocorrelation(res2, x = aggregate(testData$x, list(testData$group), mean)$x, y = aggregate(testData$y, list(testData$group), mean)$x) # For lme4, it's also possible to simulated residuals conditional on fitted # REs (re.form). Conditional on the fitted REs (i.e. accounting for the clusters) # the residuals should now be indepdendent. The remaining RSA we see here is # probably due to the RE shrinkage res = simulateResiduals(fittedModel, re.form = NULL) testSpatialAutocorrelation(res, x = testData$x, y = testData$y) DHARMa/inst/examples/hurricanes.R0000644000176200001440000000155614665273541016353 0ustar liggesusers\dontrun{ # Loading hurricanes dataset library(DHARMa) data(hurricanes) str(hurricanes) # this is the model fit by Jung et al. library(glmmTMB) originalModelGAM = glmmTMB(alldeaths ~ scale(MasFem) * (scale(Minpressure_Updated_2014) + scale(NDAM)), data = hurricanes, family = nbinom2) # no significant deviation in the general DHARMa plot res <- simulateResiduals(originalModelGAM) plot(res) # but residuals ~ NDAM looks funny, which was pointed # out by Bob O'Hara in a blog post after publication of the paper plotResiduals(res, hurricanes$NDAM) # we also find temporal autocorrelation res2 = recalculateResiduals(res, group = hurricanes$Year) testTemporalAutocorrelation(res2, time = unique(hurricanes$Year)) # task: try to address these issues - in many instances, this will # make the MasFem predictor n.s. 
}DHARMa/inst/examples/testPhylogeneticAutocorrelationHelp.R0000644000176200001440000000140514703461527023433 0ustar liggesusers\dontrun{ library(DHARMa) library(phylolm) set.seed(123) tre = rcoal(60) b0 = 0; b1 = 1; x <- runif(length(tre$tip.label), 0, 1) y <- b0 + b1*x + rTrait(n = 1, phy = tre,model="BM", parameters = list(ancestral.state = 0, sigma2 = 10)) dat = data.frame(trait = y, pred = x) fit = lm(trait ~ pred, data = dat) res = simulateResiduals(fit, plot = T) testPhylogeneticAutocorrelation(res, tree = tre) fit = phylolm(trait ~ pred, data = dat, phy = tre, model = "BM") summary(fit) # phylogenetic autocorrelation still present in residuals res = simulateResiduals(fit, plot = T) # with "rotation" the residual autcorrelation is gone, see ?simulateResiduals. res = simulateResiduals(fit, plot = T, rotation = "estimated") } DHARMa/inst/examples/plotsHelp.R0000644000176200001440000000342014665273541016152 0ustar liggesuserstestData = createData(sampleSize = 200, family = poisson(), randomEffectVariance = 1, numGroups = 10) fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) ######### main plotting function ############# # for all functions, quantreg = T will be more # informative, but slower plot(simulationOutput, quantreg = FALSE) ############# Distribution ###################### plotQQunif(simulationOutput = simulationOutput, testDispersion = FALSE, testUniformity = FALSE, testOutliers = FALSE) hist(simulationOutput ) ############# residual plots ############### # rank transformation, using a simulationOutput plotResiduals(simulationOutput, rank = TRUE, quantreg = FALSE) # smooth scatter plot - usually used for large datasets, default for n > 10000 plotResiduals(simulationOutput, rank = TRUE, quantreg = FALSE, smoothScatter = TRUE) # residual vs predictors, using explicit values for pred, residual plotResiduals(simulationOutput, form = testData$Environment1, quantreg = FALSE) # if pred is a factor, or if asFactor = TRUE, will produce a boxplot plotResiduals(simulationOutput, form = testData$group) # to diagnose overdispersion and heteroskedasticity it can be useful to # display residuals as absolute deviation from the expected mean 0.5 plotResiduals(simulationOutput, absoluteDeviation = TRUE, quantreg = FALSE) # All these options can also be provided to the main plotting function # If you want to plot summaries per group, use simulationOutput = recalculateResiduals(simulationOutput, group = testData$group) plot(simulationOutput, quantreg = FALSE) # we see one residual point per RE DHARMa/inst/examples/createDharmaHelp.R0000644000176200001440000000173514665273541017400 0ustar liggesusers## READING IN HAND-CODED SIMULATIONS testData = createData(sampleSize = 50, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1, data = testData, family = "poisson") # in DHARMA, using the simulate.glm function of glm sims = simulateResiduals(fittedModel) plot(sims, quantreg = FALSE) # Doing the same with a handcoded simulate function. 
# of course this code will only work with a 1-par glm model simulateMyfit <- function(n=10, fittedModel){ int = coef(fittedModel)[1] slo = coef(fittedModel)[2] pred = exp(int + slo * testData$Environment1) predSim = replicate(n, rpois(length(pred), pred)) return(predSim) } sims = simulateMyfit(250, fittedModel) dharmaRes <- createDHARMa(simulatedResponse = sims, observedResponse = testData$observedResponse, fittedPredictedResponse = predict(fittedModel, type = "response"), integer = TRUE) plot(dharmaRes, quantreg = FALSE) DHARMa/inst/examples/testOutliersHelp.R0000644000176200001440000000275714665273541017533 0ustar liggesusersset.seed(123) testData = createData(sampleSize = 200, overdispersion = 1, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # default outlier test (with plot) testOutliers(simulationOutput) # default test uses "bootstrap" for nObs <= 500, or else binomial # binomial is faster, but not exact for integer-valued distributions, see help testOutliers(simulationOutput, type = "binomial") testOutliers(simulationOutput, type = "bootstrap") # note that default is to test outliers at BOTH margins for both an excess AND a lack # of outliers. In the case above, the test reported an excess of outliers (you) # can see this because expected frequency < observed. Moreover, if we plot the residuals plotResiduals(simulationOutput, quantreg = FALSE) # we see that we mostly have an excess of outliers at the upper margin. # Let's see what would have happened if we would just have checked the lower margin # (lower margin means residuals with value 0, i.e. lower tail of the simualtion # envelope) testOutliers(simulationOutput, margin = "lower", plot = FALSE) # OK, now the frequency of outliers is 0, so we have too few, but this is n.s. against # the expectation # just for completeness, what would have happened if we would have checked both # margins, but just for a lack of outliers (sign of underdispersion) testOutliers(simulationOutput, alternative = "less", plot = FALSE) DHARMa/inst/examples/testTemporalAutocorrelationHelp.R0000644000176200001440000001425114665273541022573 0ustar liggesuserstestData = createData(sampleSize = 40, family = gaussian(), randomEffectVariance = 0) fittedModel <- lm(observedResponse ~ Environment1, data = testData) res = simulateResiduals(fittedModel) # Standard use testTemporalAutocorrelation(res, time = testData$time) # If you have several observations per time step, e.g. 
# because you have several locations, you will have to # aggregate timeSeries1 = createData(sampleSize = 40, family = gaussian(), randomEffectVariance = 0) timeSeries1$location = 1 timeSeries2 = createData(sampleSize = 40, family = gaussian(), randomEffectVariance = 0) timeSeries2$location = 2 testData = rbind(timeSeries1, timeSeries2) fittedModel <- lm(observedResponse ~ Environment1, data = testData) res = simulateResiduals(fittedModel) # Will not work because several residuals per time # testTemporalAutocorrelation(res, time = testData$time) # aggregating residuals by time res = recalculateResiduals(res, group = testData$time) testTemporalAutocorrelation(res, time = unique(testData$time)) # testing only subgroup location 1, could do same with loc 2 res = recalculateResiduals(res, sel = testData$location == 1) testTemporalAutocorrelation(res, time = unique(testData$time)) # example to demonstrate problems with strong temporal correlations and # how to possibly remove them by rotating residuals # note that if your model allows to condition on estimated REs, this may # be preferable! \dontrun{ set.seed(123) # Gaussian error # Create AR data with 5 observations per time point n <- 100 x <- MASS::mvrnorm(mu = rep(0,n), Sigma = .9 ^ as.matrix(dist(1:n)) ) y <- rep(x, each = 5) + 0.2 * rnorm(5*n) times <- factor(rep(1:n, each = 5), levels=1:n) levels(times) group <- factor(rep(1,n*5)) dat0 <- data.frame(y,times,group) # fit model / would be similar for nlme::gls and similar models model = glmmTMB(y ~ ar1(times + 0 | group), data=dat0) # Note that standard residuals still show problems because of autocorrelation res <- simulateResiduals(model) plot(res) # The reason is that most (if not all) autoregressive models treat the # autocorrelated error as random, i.e. the autocorrelated error structure # is not used for making predictions. If you then make predictions based # on the fixed effects and calculate residuals, the autocorrelation in the # residuals remains. We can see this if we again calculate the auto- # correlation test res2 <- recalculateResiduals(res, group=dat0$times) testTemporalAutocorrelation(res2, time = 1:length(res2$scaledResiduals)) # so, how can we check then if the current model correctly removed the # autocorrelation? # Option 1: rotate the residuals in the direction of the autocorrelation # to make the independent. Note that this only works perfectly for gls # type models as nonlinear link function make the residuals covariance # different from a multivariate normal distribution # this can be either done by extracting estimated AR1 covariance cov <- VarCorr(model) cov <- cov$cond$group # extract covariance matrix of REs # grouped according to times, rotated with estimated Cov - how all fine! res3 <- recalculateResiduals(res, group=dat0$times, rotation=cov) plot(res3) testTemporalAutocorrelation(res3, time = 1:length(res2$scaledResiduals)) # alternatively, you can let DHARMa estimate the covariance from the # simulations res4 <- recalculateResiduals(res, group=dat0$times, rotation="estimated") plot(res4) testTemporalAutocorrelation(res3, time = 1:length(res2$scaledResiduals)) # Alternatively, in glmmTMB, we can condition on the estimated correlated # residuals. 
Unfortunately, in this case, we will have to do simulations by # hand as glmmTMB does not allow to simulate conditional on a fitted # correlation structure # re.form = NULL creates predictions conditional on the fitted temporally # autocorreated REs pred = predict(model, re.form = NULL) # now we simulate data, conditional on the autocorrelation part, with the # uncorrelated residual error simulations = sapply(1:250, function(i) rnorm(length(pred), pred, summary(model)$sigma)) res5 = createDHARMa(simulations, dat0$y, pred) plot(res5) res5b <- recalculateResiduals(res5, group=dat0$times) testTemporalAutocorrelation(res5b, time = 1:length(res5b$scaledResiduals)) # Poisson error # note that for GLMMs, covariances will be estimated at the scale of the # linear predictor, while residual covariance will be at the responses scale # and thus further distorted by the link. Thus, for GLMMs with a nonlinear # link, there will be no exact rotation for a given covariance structure set.seed(123) # Create AR data with 5 observations per time point n <- 100 x <- MASS::mvrnorm(mu = rep(0,n), Sigma = .9 ^ as.matrix(dist(1:n)) ) y <- rpois(n = n*5, lambda = exp(rep(x, each = 5))) times <- factor(rep(1:n, each = 5), levels=1:n) levels(times) group <- factor(rep(1,n*5)) dat0 <- data.frame(y,times,group) # fit model model = glmmTMB(y ~ ar1(times + 0 | group), data=dat0, family = poisson) res <- simulateResiduals(model) # grouped according to times, unrotated res2 <- recalculateResiduals(res, group=dat0$times) testTemporalAutocorrelation(res2, time = 1:length(res2$scaledResiduals)) # grouped according to times, rotated with estimated Cov - problems remain cov <- VarCorr(model) cov <- cov$cond$group # extract covariance matrix of REs res3 <- recalculateResiduals(res, group=dat0$times, rotation=cov) testTemporalAutocorrelation(res3, time = 1:length(res2$scaledResiduals)) # grouped according to times, rotated with covariance estimated from residual # simulations at the response scale res4 <- recalculateResiduals(res, group=dat0$times, rotation="estimated") testTemporalAutocorrelation(res4, time = 1:length(res2$scaledResiduals)) } DHARMa/inst/examples/runBenchmarksHelp.R0000644000176200001440000000371514665273541017622 0ustar liggesusers# define a function that will run a simulation and return a number of statistics, typically p-values returnStatistics <- function(control = 0){ testData = createData(sampleSize = 20, family = poisson(), overdispersion = control, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1, data = testData, family = poisson()) res <- simulateResiduals(fittedModel = fittedModel, n = 250) out <- c(testUniformity(res, plot = FALSE)$p.value, testDispersion(res, plot = FALSE)$p.value) return(out) } # testing a single return returnStatistics() # running benchmark for a fixed simulation, increase nRep for sensible results out = runBenchmarks(returnStatistics, nRep = 5) # plotting results depend on whether a vector or a single value is provided for control plot(out) \dontrun{ # running benchmark with varying control values out = runBenchmarks(returnStatistics, controlValues = c(0,0.5,1), nRep = 100) plot(out) # running benchmark can be done using parallel cores out = runBenchmarks(returnStatistics, nRep = 100, parallel = TRUE) out = runBenchmarks(returnStatistics, controlValues = c(0,0.5,1), nRep = 10, parallel = TRUE) # Alternative plot function using vioplot, provides nicer pictures plot.DHARMaBenchmark <- function(x, ...){ if(length(x$controlValues)== 1){ 
vioplot::vioplot(x$simulations[,x$nSummaries:1], las = 2, horizontal = TRUE, side = "right", areaEqual = FALSE, main = "p distribution under H0", ylim = c(-0.15,1), ...) abline(v = 1, lty = 2) abline(v = c(0.05, 0), lty = 2, col = "red") text(-0.1, x$nSummaries:1, labels = x$summaries$propSignificant[-1]) }else{ res = x$summaries$propSignificant matplot(res$controlValues, res[,-1], type = "l", main = "Power analysis", ylab = "Power", ...) legend("bottomright", colnames(res[,-1]), col = 1:x$nSummaries, lty = 1:x$nSummaries, lwd = 2) } } } DHARMa/inst/examples/getRandomStateHelp.R0000644000176200001440000000127214665273541017735 0ustar liggesusers set.seed(13) runif(1) # testing the function in standard settings currentSeed = .Random.seed x = getRandomState(123) runif(1) x$restoreCurrent() all(.Random.seed == currentSeed) # if no seed was set in env, this will also be restored rm(.Random.seed) # now, there is no random seed x = getRandomState(123) exists(".Random.seed") # TRUE runif(1) x$restoreCurrent() exists(".Random.seed") # False runif(1) # re-create a seed # with seed = false currentSeed = .Random.seed x = getRandomState(FALSE) runif(1) x$restoreCurrent() all(.Random.seed == currentSeed) # with seed = NULL currentSeed = .Random.seed x = getRandomState(NULL) runif(1) x$restoreCurrent() all(.Random.seed == currentSeed) DHARMa/inst/examples/simulateResidualsHelp.R0000644000176200001440000000212014665273541020504 0ustar liggesuserslibrary(lme4) testData = createData(sampleSize = 100, overdispersion = 0.5, family = poisson()) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # standard plot plot(simulationOutput) # one of the possible test, for other options see ?testResiduals / vignette testDispersion(simulationOutput) # the calculated residuals can be accessed via residuals(simulationOutput) # transform residuals to other pdf, see ?residuals.DHARMa for details residuals(simulationOutput, quantileFunction = qnorm, outlierValues = c(-7,7)) # get residuals that are outside the simulation envelope outliers(simulationOutput) # calculating aggregated residuals per group simulationOutput2 = recalculateResiduals(simulationOutput, group = testData$group) plot(simulationOutput2, quantreg = FALSE) # calculating residuals only for subset of the data simulationOutput3 = recalculateResiduals(simulationOutput, sel = testData$group == 1 ) plot(simulationOutput3, quantreg = FALSE) DHARMa/inst/examples/benchmarkRuntimeHelp.R0000644000176200001440000000105514665273541020311 0ustar liggesusers createModel = function(){ testData = createData(family = poisson(), overdispersion = 1, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1, data = testData, family = poisson()) return(fittedModel) } a = function(m){ testUniformity(m, plot = FALSE)$p.value } b = function(m){ testDispersion(m, plot = FALSE)$p.value } c = function(m){ testDispersion(m, plot = FALSE, type = "PearsonChisq")$p.value } evaluationFunctions = list(a,b, c) benchmarkRuntime(createModel, evaluationFunctions, 2) DHARMa/inst/doc/0000755000176200001440000000000014704246643013002 5ustar liggesusersDHARMa/inst/doc/DHARMaForBayesians.html0000644000176200001440000006405014704246643017177 0ustar liggesusers DHARMa for Bayesians

DHARMa for Bayesians

Florian Hartig, Theoretical Ecology, University of Regensburg website

2024-10-17

Abstract

The 'DHARMa' package uses a simulation-based approach to create readily interpretable scaled (quantile) residuals for fitted (generalized) linear mixed models. This vignette describes how to use DHARMa for checking Bayesian models. It is recommended to read this AFTER the general DHARMa vignette, as all comments made there (in particular regarding the interpretation of the residuals) also apply to Bayesian models.

Basic workflow

In principle, DHARMa residuals can be calculated and interpreted for Bayesian models in very much the same way as for frequentist models. Therefore, all comments regarding tests, residual interpretation etc. from the main vignette are equally valid for Bayesian model checks. There are some minor differences regarding the expected null distribution of the residuals, in particular in the low data limit, but I believe that for most people, these are of less concern.

The main difference for a Bayesian user is that, unlike for users of directly supported regression packages such as lme4 or glmmTMB, most Bayesian users will have to create the simulations for the fitted model themselves and then feed them into DHARMa by hand. The basic workflow for Bayesians that work with BUGS, JAGS, STAN or similar is:

  1. Create posterior predictive simulations for your model
  2. Read these in with the createDHARMa function
  3. Interpret those as described in the main vignette

This is easier than it sounds. For the major Bayesian samplers (e.g. BUGS, JAGS, STAN), it amounts to adding a block with data simulations to the model, and observing those during the MCMC sampling. Then, feed the simulations into DHARMa via createDHARMa, and all else will work pretty much the same as in the main vignette.
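
As a minimal sketch of step 2 (the object names sims and observed are illustrative placeholders, not defined anywhere above): if sims is a matrix of posterior predictive simulations with one row per observation and one column per retained posterior draw, the call would look roughly like this:

simulationOutput = createDHARMa(simulatedResponse = sims,          # nObs x nSim matrix
                                observedResponse = observed,       # vector of observed responses
                                fittedPredictedResponse = apply(sims, 1, median),
                                integerResponse = TRUE)            # set FALSE for continuous responses
plot(simulationOutput)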

Example in Jags

Here is an example, with JAGS

library(rjags)
library(BayesianTools)

set.seed(123)

dat <- DHARMa::createData(200, overdispersion = 0.2)

Data = as.list(dat)
Data$nobs = nrow(dat)
Data$nGroups = length(unique(dat$group))

modelCode = "model{

  for(i in 1:nobs){
    observedResponse[i] ~ dpois(lambda[i])  # poisson error distribution
    lambda[i] <- exp(eta[i]) # inverse link function
    eta[i] <- intercept + env*Environment1[i]  # linear predictor
  }
  
  intercept ~ dnorm(0,0.0001)
  env ~ dnorm(0,0.0001)

  # Posterior predictive simulations 
  for (i in 1:nobs) {
    observedResponseSim[i]~dpois(lambda[i])
  }

}"

jagsModel <- jags.model(file= textConnection(modelCode), data=Data, n.chains = 3)
para.names <- c("intercept","env", "lambda", "observedResponseSim")
Samples <- coda.samples(jagsModel, variable.names = para.names, n.iter = 5000)

x = BayesianTools::getSample(Samples)

colnames(x) # problem: all the variables are in one array - this is better in STAN, where this is a list - have to extract the right columns by hand
posteriorPredDistr = x[,3:202] # this is the uncertainty of the mean prediction (lambda)
posteriorPredSim = x[,203:402] # these are the simulations 

sim = createDHARMa(simulatedResponse = t(posteriorPredSim), observedResponse = dat$observedResponse, fittedPredictedResponse = apply(posteriorPredDistr, 2, median), integerResponse = T)
plot(sim)

In the created plots, you will see overdispersion, which is completely expected, as the simulated data has overdispersion and a RE, which is not accounted for by the Jags model.
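
If you want a formal test to back up the visual impression, the standard DHARMa test functions can be applied to the created object as well, for example:

testDispersion(sim)
testZeroInflation(sim)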

Exercise

As an exercise, you could now:

  • Add a RE
  • Account for overdispersion, e.g. via an OLRE or a negative binomial

And check how the residuals improve.
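
One possible sketch for this exercise (illustrative only, not the unique solution; the priors and the OLRE / tauOLRE names are assumptions, and depending on how the data list was built, group may need to be passed as an integer index, e.g. as.numeric(dat$group)):

modelCode = "model{
  for(i in 1:nobs){
    observedResponse[i] ~ dpois(lambda[i])
    lambda[i] <- exp(intercept + env*Environment1[i] + RE[group[i]] + OLRE[i])
    OLRE[i] ~ dnorm(0, tauOLRE)    # observation-level RE, absorbs overdispersion
  }
  for(j in 1:nGroups){
    RE[j] ~ dnorm(0, tauRE)
  }
  intercept ~ dnorm(0,0.0001)
  env ~ dnorm(0,0.0001)
  tauRE ~ dgamma(0.001,0.001)
  tauOLRE ~ dgamma(0.001,0.001)

  # posterior predictive simulations, here conditional on the fitted REs
  for (i in 1:nobs) {
    observedResponseSim[i] ~ dpois(lambda[i])
  }
}"

After re-running the sampler and monitoring observedResponseSim, the residuals can be created exactly as above; the overdispersion signal should then disappear. Whether to simulate conditional on the fitted REs or to re-simulate them is discussed in the next section.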

Conditional vs. unconditional simulations in hierarchical models

The most important consideration in using DHARMa with Bayesian models is how to create the simulations. You can see in my jags code that the block

  # Posterior predictive simulations 
  for (i in 1:nobs) {
    observedResponseSim[i]~dpois(lambda[i])
  }

performs the posterior predictive simulations. Here, we just take the predicted lambda (mean predictions) during the MCMC simulations and sample from the assumed distribution. This will work for any non-hierarchical model.

When we move to hierarchical or multi-level models, including GLMMs, the issue of simulation becomes a bit more complicated. In a hierarchical model, there are several random processes that sit on top of each other. In the same way as explained in the main vignette at the point conditional / unconditional simulations, we will have to decide which of these random processes should be included in the posterior predictive simulations.

As an example, imagine we add a RE in the likelihood of the previous model, to account for the group structure in the data.

  for(i in 1:nobs){
    observedResponse[i] ~ dpois(lambda[i])  # poisson error distribution
    lambda[i] <- exp(eta[i]) # inverse link function
    eta[i] <- intercept + env*Environment1[i] + RE[group[i]] # linear predictor
  }
  
  for(j in 1:nGroups){
   RE[j] ~ dnorm(0,tauRE)  
  }

The predictions lambda[i] now depend on a lower-level stochastic effect, which is described by RE[j] ~ dnorm(0,tauRE). We can now decide to create posterior predictive simulations conditional on posterior estimates RE[j] (conditional simulations), in which case we would have to change nothing in the block for the posterior predictive simulations. Alternatively, we can decide that we want to re-simulate the RE (unconditional simulations), in which case we have to copy the entire structure of the likelihood in the predictions

  for(j in 1:nGroups){
   RESim[j] ~ dnorm(0,tauRE)  
  }

  for (i in 1:nobs) {
    observedResponseSim[i] ~ dpois(lambdaSim[i]) 
    lambdaSim[i] <- exp(etaSim[i]) 
    etaSim[i] <- intercept + env*Environment1[i] + RESim[group[i]] 
  }

Essentially, you can remember that if you want full (unconditional) simulations, you basically have to copy the entire likelihood of the hierarchical model, minus the priors, and sample along the hierarchical model structure. If you want to condition on a part of this structure, just cut the DAG at the point on which you want to condition.
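
Whichever variant you monitor, the downstream DHARMa step is identical. A hypothetical sketch (the regular expression assumes the simulations were monitored under the name observedResponseSim; adjust to whatever name you used):

x = BayesianTools::getSample(Samples)
posteriorPredSim = x[, grep("observedResponseSim\\[", colnames(x))]  # columns of the monitored simulations
sim = createDHARMa(simulatedResponse = t(posteriorPredSim),
                   observedResponse = dat$observedResponse,
                   integerResponse = TRUE)
plot(sim)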

Statistical differences between Bayesian and MLE quantile residuals

A common question is if there are differences between Bayesian and MLE quantile residuals.

First of all, note that MLE and Bayesian quantile residuals are not exactly identical. The main difference is in how the simulation of the data under the fitted model is performed:

  • For models fitted by MLE, simulations in DHARMa are performed with the MLE (point estimate).

  • For models fitted with Bayes, simulations are practically always performed while also drawing from the posterior parameter uncertainty (as a point estimate is not available).

Thus, Bayesian posterior predictive simulations include the parametric uncertainty of the model, in addition to the sampling uncertainty. From this we can directly conclude that Bayesian and MLE quantile residuals are asymptotically identical (and via the usual arguments uniformly distributed), but become more different the smaller n becomes.
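
The difference is easy to see in code. An illustrative sketch (an addition to the text, reusing dat and posteriorPredDistr from the JAGS example above): for an MLE fit, every simulated data set uses the same parameter values, whereas the Bayesian simulations use a different posterior draw each time.

fitMLE = glm(observedResponse ~ Environment1, data = dat, family = poisson)
lambdaMLE = predict(fitMLE, type = "response")
simsMLE = replicate(250, rpois(length(lambdaMLE), lambdaMLE))   # MLE: parameters fixed at the point estimate

simsBayes = apply(posteriorPredDistr[1:250, ], 1,               # Bayes: one simulation per posterior draw of lambda
                  function(l) rpois(length(l), l))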

To examine what those differences are, let's imagine that we start with a situation of infinite data. In this case, we have a "sharp" posterior that can be viewed as identical to the MLE.

If we reduce the number of data, there are two things happening

  1. The posterior gets wider, with the likelihood component being normally distributed, at least initially

  2. The influence of the prior increases, and it does so faster the stronger the prior is.

Thus, if we reduce the data, for weak / uninformative priors, we will simulate data while sampling parameters from a normal distribution around the MLE, while for strong priors, we will effectively sample data while drawing parameters of the model from the prior.

In particular in the latter case (prior dominates, which can be checked via prior sensitivity analysis), you may see residual patterns that are caused by the prior, even though the model structure is correct. In some sense, you could say that the residuals check if the combination of prior + structure is compatible with the data. It's a philosophical debate how to react to such a deviation, as the prior is not really negotiable in a Bayesian analysis.

Of course, the MLE distribution might also get problems in low data situations, but I would argue that MLE is usually only used anyway if the MLE is reasonably sharp. In practice, I have seldom experienced problems with MLE estimates. It's a bit different in the Bayesian case, where it is possible and often done to fit very complex models with limited data. In this case, many of the general issues in defining null distributions for Bayesian p-values (as, e.g., reviewed in Conn et al., 2018) apply.

I would add though that while I find it important that users are aware of those differences, I have found that in practice these issues are small, and usually overruled by the much stronger effects of model error.

DHARMa/inst/doc/DHARMa.html0000644000176200001440001777524214704246642014714 0ustar liggesusers DHARMa: residual diagnostics for hierarchical (multi-level/mixed) regression models

DHARMa: residual diagnostics for hierarchical (multi-level/mixed) regression models

Florian Hartig, Theoretical Ecology, University of Regensburg website

2024-10-17

Abstract

The 'DHARMa' package uses a simulation-based approach to create readily interpretable scaled (quantile) residuals for fitted generalized linear (mixed) models. Currently supported are linear and generalized linear (mixed) models from 'lme4' (classes 'lmerMod', 'glmerMod'), 'glmmTMB', 'GLMMadaptive' and 'spaMM'; phylogenetic linear models from 'phylolm' (classes 'phylolm' and 'phyloglm'); generalized additive models ('gam' from 'mgcv'); 'glm' (including 'negbin' from 'MASS', but excluding quasi-distributions) and 'lm' model classes. Moreover, externally created simulations, e.g. posterior predictive simulations from Bayesian software such as 'JAGS', 'STAN', or 'BUGS' can be processed as well. The resulting residuals are standardized to values between 0 and 1 and can be interpreted as intuitively as residuals from a linear regression. The package also provides a number of plot and test functions for typical model misspecification problems, such as over/underdispersion, zero-inflation, and residual spatial, temporal and phylogenetic autocorrelation.

Motivation

The interpretation of conventional residuals for generalized linear (mixed) and other hierarchical statistical models is often problematic. As an example, here is the result of conventional deviance, Pearson and raw residuals for two Poisson GLMMs, one that is lacking a quadratic effect, and one that fits the data perfectly. Could you tell which is the correct model?

Just for completeness - it was the second one. But don't get too excited if you got it right. Probably you were just lucky - I can't really tell a difference. But even so, would you have added a quadratic effect, instead of adding an overdispersion correction? The point here is that misspecifications in GL(M)Ms cannot reliably be diagnosed with standard residual plots, and thus GLMMs are often not as thoroughly checked as they should be.

One reason why GL(M)M residuals are harder to interpret is that the expected distribution of the data (aka predictive distribution) changes with the fitted values. Reweighting with the expected dispersion, as done in Pearson residuals, or using deviance residuals, helps to some extent, but it does not lead to visually homogeneous residuals, even if the model is correctly specified. As a result, standard residual plots, when interpreted in the same way as for linear models, seem to show all kinds of problems, such as non-normality or heteroscedasticity, even if the model is correctly specified. Questions on the R mailing lists and forums show that practitioners are regularly confused about whether such patterns in GL(M)M residuals are a problem or not.
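
A quick way to convince yourself of this (a small addition, not part of the original text) is to simulate data that follow a Poisson GLM exactly and look at the conventional residuals of the correctly specified fit:

set.seed(1)
x = runif(500)
y = rpois(500, exp(-1 + 2 * x))                        # data generated exactly as assumed by the model
fit = glm(y ~ x, family = poisson)
plot(predict(fit), residuals(fit, type = "deviance"))  # shows bands and apparent heteroscedasticity despite a correct model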

But even experienced statistical analysts currently have few options to diagnose misspecification problems in GLMMs. In my experience, the current standard practice is to eyeball the residual plots for major misspecifications, potentially have a look at the random effect distribution, and then run a test for overdispersion, which is usually positive, after which the model is modified towards an overdispersed / zero-inflated distribution. This approach, however, has a number of drawbacks, notably:

  • Overdispersion is often the result of missing predictors or a misspecified model structure. Standard residual plots make it difficult to identify these problems by examining residual correlations or patterns of residuals against predictors.

  • Not all overdispersion is the same. For count data, the negative binomial creates a different distribution than adding observation-level random effects to the Poisson. Once overdispersion is corrected for, such violations of distributional assumptions are not detectable with standard overdispersion tests (because the tests only look at total dispersion), and nearly impossible to see visually from standard residual plots.

  • Dispersion frequently varies with predictors (heteroscedasticity). This can have a significant effect on the inference. While it is standard to test for heteroscedasticity in linear regressions, heteroscedasticity is currently hardly ever tested for in GLMMs, although it is likely just as frequent and influential.

  • Moreover, if residuals are checked, they are usually checked conditional on the fitted random effect estimates. Thus, standard checks only check the final level of the random structure in a GLMM. One can perform extra checks on the random effects, but it is somewhat unsatisfactory that there is no check on the entire model structure.

DHARMa aims at solving these problems by creating readily interpretable residuals for generalized linear (mixed) models that are standardized to values between 0 and 1, and that can be interpreted as intuitively as residuals for the linear model. This is achieved by a simulation-based approach, similar to the Bayesian p-value or the parametric bootstrap, that transforms the residuals to a standardized scale. The basic steps are:

  1. Simulate new response data from the fitted model for each observation.

  2. For each observation, calculate the empirical cumulative distribution function of the simulated values, which describes the possible values (and their probabilities) at the predictor combination of the observed value, assuming the fitted model is correct.

  3. The residual is then defined as the value of the empirical cumulative distribution function at the value of the observed data, so a residual of 0 means that all simulated values are larger than the observed value, and a residual of 0.5 means that half of the simulated values are larger than the observed value.

These steps are visualized in the following figure

The key advantage of this definition is that the so-defined residuals always have the same, known distribution, independent of the model that is fit, if the model is correctly specified. To see this, note that, if the observed data was created from the same data-generating process that we simulate from, all values of the cumulative distribution should appear with equal probability. That means we expect the distribution of the residuals to be flat, regardless of the model structure (Poisson, binomial, random effects and so on).
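To make the three steps above concrete, here is a minimal sketch of how such a scaled residual could be computed by hand for a single observation. The object names (sims, obs) are made up for this illustration, and DHARMa additionally randomizes integer responses (see the section on integer treatment below), so this is not the exact internal implementation.

# sims = simulated response values for one observation, obs = the observed value
sims <- rpois(250, lambda = 4)   # stand-in for simulations from a fitted model
obs <- 7

# empirical cumulative distribution of the simulations, evaluated at the
# observed value: 0 = all simulations larger, 1 = all simulations smaller
scaledResidual <- ecdf(sims)(obs)
scaledResidual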

I currently prepare a more exact statistical justification for the approach in an accompanying paper, but if you must provide a reference in the meantime, I would suggest citing

  • Dunn, P. K., and Smyth, G. K. (1996). Randomized quantile residuals. Journal of Computational and Graphical Statistics 5, 1-10.

  • Gelman, A. & Hill, J. (2006). Data Analysis Using Regression and Multilevel/Hierarchical Models. Cambridge University Press.

p.s.: DHARMa stands for “Diagnostics for HierArchical Regression Models” - which, strictly speaking, would make DHARM. But in German, Darm means intestines; plus, the meaning of DHARMa in Hinduism makes the current abbreviation so much more suitable for a package that tests whether your model is in harmony with your data:

From Wikipedia, 28/08/16: In Hinduism, dharma signifies behaviours that are considered to be in accord with rta, the order that makes life and universe possible, and includes duties, rights, laws, conduct, virtues and ‘right way of living’.

Workflow in DHARMa

Installing, loading and citing the package

If you haven’t installed the package yet, either run

install.packages("DHARMa")

Or follow the instructions on https://github.com/florianhartig/DHARMa to install a development version.

Loading and citation

library(DHARMa)
citation("DHARMa")
## To cite package 'DHARMa' in publications use:
## 
##   Hartig F (2024). _DHARMa: Residual Diagnostics for Hierarchical
##   (Multi-Level / Mixed) Regression Models_. R package version 0.4.7,
##   <http://florianhartig.github.io/DHARMa/>.
## 
## A BibTeX entry for LaTeX users is
## 
##   @Manual{,
##     title = {DHARMa: Residual Diagnostics for Hierarchical (Multi-Level / Mixed) Regression Models},
##     author = {Florian Hartig},
##     year = {2024},
##     note = {R package version 0.4.7},
##     url = {http://florianhartig.github.io/DHARMa/},
##   }

Calculating scaled residuals

Let’s assume we have a fitted model that is supported by DHARMa.

library(lme4) # glmer is provided by lme4
testData = createData(sampleSize = 250)
fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) , 
                     family = "poisson", data = testData)

Most functions in DHARMa can be calculated directly on the fitted model object. For example, if you are only interested in testing for dispersion problems, you could run

testDispersion(fittedModel)

In this case, the randomized quantile residuals are calculated on the fly inside the function call. If you work in this way, however, residual calculation will be repeated by every test / plot you call, and this can take a while. It is therefore highly recommended to first calculate the residuals once, using the simulateResiduals() function

simulationOutput <- simulateResiduals(fittedModel = fittedModel, plot = F)

which calculates randomized quantile residuals according to the algorithm discussed above. The function returns an object of class DHARMa, containing the simulations and the scaled residuals, which can later be passed on to all other plots and test functions. When specifying the optional argument plot = T, the standard DHARMa residual plot is displayed directly. The interpretation of the plot will be discussed below. Using the simulateResiduals function has the added benefit that you can modify the way in which residuals are calculated. For example, you may want to change the number of simulations, or the REs to condition on. See ?simulateResiduals and section “simulation options” below for details.

The calculated (scaled) residuals can be plotted and tested via a number of DHARMa functions (see below), or accessed directly via

residuals(simulationOutput)

To interpret the residuals, remember that a scaled residual value of 0.5 means that half of the simulated data are higher than the observed value, and half of them lower. A value of 0.99 would mean that nearly all simulated data are lower than the observed value. The minimum/maximum values for the residuals are 0 and 1. For a correctly specified model we would expect asymptotically

  • a uniform (flat) distribution of the scaled residuals

  • uniformity in y direction if we plot against any predictor.

Note: the uniform distribution is the only difference compared to “conventional” residuals as calculated for a linear regression. If you cannot get used to this, you can transform the uniform distribution to another distribution, for example normal, via

residuals(simulationOutput, quantileFunction = qnorm, outlierValues = c(-7,7))

These normal residuals will behave exactly like the residuals of a linear regression. However, for reasons of a) numerical stability with a low number of simulations, which makes it necessary to decide to which values outliers are transformed, and b) my conviction that it is much easier to visually detect deviations from uniformity than from normality, DHARMa checks all residuals in the uniform space, and I would personally advise against using the transformation.

Plotting the scaled residuals

The main plot function for the calculated DHARMa object produced by simulateResiduals() is the plot.DHARMa() function

plot(simulationOutput)

The function creates two plots, which can also be called separately; their help pages provide extended explanations / examples:

plotQQunif(simulationOutput) # left plot in plot.DHARMa()
plotResiduals(simulationOutput) # right plot in plot.DHARMa()

  • plotQQunif (left panel) creates a qq-plot to detect overall deviations from the expected distribution, by default with added tests for correct distribution (KS test), dispersion and outliers. Note that outliers in DHARMa are by default defined as values outside the simulation envelope, not in terms of a particular quantile. Thus, which values will appear as outliers will depend on the number of simulations. If you want outliers in terms of a particular quantile, you can use the outliers() function.

  • plotResiduals (right panel) produces a plot of the residuals against the predicted value (or alternatively, another variable). Simulation outliers (data points that are outside the range of simulated values) are highlighted as red stars. These points should be carefully interpreted, because we actually don’t know “how much” these values deviate from the model expectation. Note also that the probability of an outlier depends on the number of simulations, so whether the existence of outliers is a reason for concern depends also on the number of simulations.

To provide a visual aid in detecting deviations from uniformity in y-direction, the plot function calculates an (optional, on by default) quantile regression, which compares the empirical 0.25, 0.5 and 0.75 quantiles in y direction (red solid lines) with the theoretical 0.25, 0.5 and 0.75 quantiles (dashed black line). The significance of the deviation from the expected quantiles is tested and displayed visually, and can additionally be extracted with the testQuantiles function.

By default, plotResiduals plots against predicted values. However, you can also use it to plot residuals against specific other predictors (highly recommended).

plotResiduals(simulationOutput, form = YOURPREDICTOR)

If the predictor is a factor, or if there is just a small number of observations on the x axis, plotResiduals will plot a box plot with additional tests instead of a scatter plot.

plotResiduals(simulationOutput, form = testData$group)

See ?plotResiduals for details, but very shortly: under H0 (perfect model), we would expect those boxes to range homogeneously from 0.25-0.75. To see whether there are deviations from this expectation, the plot calculates a test for uniformity per box, and a test for homogeneity of variances between boxes. A positive test will be highlighted in red.

Goodness-of-fit tests on the scaled residuals

To support the visual inspection of the residuals, the DHARMa package provides a number of specialized goodness-of-fit tests on the simulated residuals:

  • testUniformity() - tests if the overall distribution conforms to expectations.
  • testOutliers() - tests if there are more simulation outliers than expected.
  • testDispersion() - tests if the simulated dispersion is equal to the observed dispersion.
  • testQuantiles() - fits a quantile regression of residuals against a predictor (default: predicted values), and tests if this conforms to the expected quantiles.
  • testCategorical() - tests residuals against a categorical predictor, e.g. testCategorical(simulationOutput, catPred = testData$group).
  • testZeroInflation() - tests if there are more zeros in the data than expected from the simulations.
  • testGeneric() - tests if a generic (user-defined) summary statistic deviates from model expectations.
  • testTemporalAutocorrelation() - tests for temporal autocorrelation in the residuals.
  • testSpatialAutocorrelation() - tests for spatial autocorrelation in the residuals. Can also be used with a generic distance function.
  • testPhylogeneticAutocorrelation() - tests for phylogenetic signal in the residuals.

See the help of the functions and further comments below for a more detailed description.

Simulation options

There are a few important technical details regarding how the simulations are performed, in particular regarding the treatments of random effects and integer responses. It is strongly recommended to read the help of

?simulateResiduals

Refit

simulationOutput <- simulateResiduals(fittedModel = fittedModel, refit = T)
  • if refit = F (default), new datasets are simulated from the fitted model, and residuals are calculated by comparing the observed data to the new data

  • if refit = T, a parametric bootstrap is performed, meaning that the model is refit to all new datasets, and residuals are created by comparing observed residuals against refitted residuals

The second option is much, much slower, and also seemed to have lower power in some tests I ran. It is therefore not recommended for standard residual diagnostics! I only recommend using it if you know what you are doing, and have particular reasons, for example if you estimate that the tested model is biased. A bias could, for example, arise in small data situations, or when estimating models with shrinkage estimators that include a purposeful bias, such as ridge/lasso, random effects or the splines in GAMs. My idea was then that simulated data would not fit the observations, but that residuals for model fits on simulated data would have the same patterns/bias as model fits on the observed data.

Note also that refit = T can sometimes run into numerical problems, if the fitted model does not converge on the newly simulated data.

Conditional vs. unconditional simulations

The second option is the treatment of the stochastic hierarchy. In a hierarchical model, several layers of stochasticity are placed on top of each other. Specifically, in a GLMM, we have a lower-level stochastic process (random effect), whose result enters into a higher level (e.g. a Poisson distribution). For other hierarchical models, such as state-space models, similar considerations apply, but the hierarchy can be more complex. When simulating, we have to decide if we want to re-simulate all stochastic levels, or only a subset of those. For example, in a GLMM, it is common to only simulate the last stochastic level (e.g. Poisson) conditional on the fitted random effects, meaning that the random effects are set to their fitted values.

For controlling how many levels should be re-simulated, the simulateResiduals function allows passing parameters on to the simulate function of the fitted model object. Please refer to the help of the different simulate functions (e.g. ?simulate.merMod) for details. For merMod (lme4) model objects, the relevant parameters are “use.u” and “re.form”, as, e.g., in

simulationOutput <- simulateResiduals(fittedModel = fittedModel, n = 250, use.u = T)

If the model is correctly specified and the fitting procedure is unbiased (disclaimer: GLMM estimators are not always unbiased), the simulated residuals should be flat regardless how many hierarchical levels we re-simulate. The most thorough procedure would be therefore to test all possible options. If testing only one option, I would recommend to re-simulate all levels, because this essentially tests the model structure as a whole. This is the default setting in the DHARMa package. A potential drawback is that re-simulating the random effects creates more variability, which may reduce power for detecting problems in the upper-level stochastic processes, in particular overdispersion (see section on dispersion tests below).

Note: Although unconditional residuals implicitly also test the normal distribution of the REs, it is probably not a bad idea to additionally check the normality of the RE distribution. As this is not based on quantile residuals, there is no special DHARMa function for this, so you should just extract the REs, and then run e.g. a Shapiro test.
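As a minimal sketch of such a check (assuming the lme4 model with a random intercept on group used above; these are standard R tools, not DHARMa functions):

# extract the estimated random intercepts for "group" and check them
# for approximate normality
re <- ranef(fittedModel)$group[, 1]
hist(re)
shapiro.test(re)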

Integer treatment / randomization

A third option is the treatment of integer responses. The background of this option is that, for integer-valued variables, some additional steps are necessary to make sure that the residual distribution becomes flat (essentially, we have to smoothen away the integer nature of the data). The idea is explained in

  • Dunn, K. P., and Smyth, G. K. (1996). Randomized quantile residuals. Journal of Computational and Graphical Statistics 5, 1-10.

DHARMa currently implements two procedures for randomization. The default procedure will randomize automatically. The second option requires knowledge about whether the model is integer-valued, which is usually implemented automatically. See ?simulateResiduals for details. Usually, these options should simply be kept at their defaults.
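For example, should the automatic detection ever fail (e.g. for an exotic model class), the integer randomization can be requested explicitly; a sketch (see ?simulateResiduals for the exact arguments):

# explicitly declare the response as integer-valued, so that DHARMa applies
# the randomization needed to obtain a smooth residual distribution
simulationOutput <- simulateResiduals(fittedModel = fittedModel,
                                      integerResponse = TRUE)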

Calculating residuals for groups or subsets

In many situations, it can be useful to look at residuals per group, e.g. to see how much the model over / underpredicts per plot, year or subject. To do this, use the recalculateResiduals() function, together with a grouping variable (group) or a subsetting variable (sel), which can also be used in combination.

simulationOutput = recalculateResiduals(simulationOutput, group = testData$group)

Note, however, that you will have to change the selection of variables that you provide to plots and tests (e.g. in plotResiduals or testSpatialAutocorrelation) accordingly when you group or subset residuals.
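For example, after aggregating residuals per group as above, spatial coordinates have to be aggregated to the group level as well before being passed to the spatial test; a sketch, assuming the simulated testData from above with columns x, y and group:

# one (mean) coordinate per group, matching the grouped residuals
groupLocations = aggregate(testData[, c("x", "y")],
                           by = list(testData$group), FUN = mean)
testSpatialAutocorrelation(simulationOutput,
                           x = groupLocations$x, y = groupLocations$y)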

Reproducibility notes, random seed and random state

As DHARMa uses simulations to calculate the residuals, a naive implementation of the algorithm would mean that residuals would look slightly different each time a DHARMa calculation is executed. This might both be confusing and bear the danger that a user would run the simulation several times and take the result that looks better (which would amount to multiple testing / p-hacking). By default, DHARMa therefore fixes the random seed to the same value every time a simulation is run, and afterwards restores the random state to the old value. This means that you will get exactly the same residual plot each time. If you want to avoid this behavior, for example for simulation experiments on DHARMa, use seed = NULL -> no seed set, but random state will be restored, or seed = F -> no seed set, and random state will not be restored. Whether or not you fix the seed, the setting for the random seed and the random state are stored in

simulationOutput$randomState

If you want to reproduce simulations for such a run, set the variable .Random.seed by hand, and simulate with seed = NULL.

Moreover (general advice), to ensure reproducibility, it’s advisable to add a set.seed() at the beginning, and a sessionInfo() at the end of your script. The latter will list the version number of R and all loaded packages.
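In script form, this general advice amounts to something like:

set.seed(123)   # at the top of the script: fix the random seed

# ... your analysis ...

sessionInfo()   # at the end: record R and package versions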

Interpreting residuals and recognizing misspecification problems

General remarks on interpreting residual patterns and tests

So far, all shown DHARMa results were calculated for a correctly specified model, resulting in “perfect” residual plots and diagnostics. In this section, we discuss how to recognize and interpret diagnostics that indicate a misspecified model. Before going into the details, note, however, that

  1. No residual pattern does not “prove” that the model is correct: The fact that none of the DHARMa tests indicate a problem does not “prove” that the model is correctly specified. For any model, there are likely a large number of structural problems that do not create a pattern in the DHARMa diagnostics. In good old Popper fashion, you should interpret no residual problems as your working hypothesis not being rejected in that particular test, which increases confidence in the model, but does not constitute a conclusive proof. So, keep your skepticism alive, and if you find the results fishy, keep searching and testing.

  2. Once a residual effect is statistically significant, look at the magnitude to decide if there is a problem: It is crucial to note that significance is NOT a measure of the strength of the residual pattern, it is a measure of the signal/noise ratio, i.e.Ā whether you are sure there is a pattern at all. Significance in hypothesis tests depends on at least 2 ingredients: the strength of the signal and the number of data points. If you have a lot of data points, residual diagnostics will nearly inevitably become significant, because having a perfectly fitting model is very unlikely. That, however, doesn’t necessarily mean that you need to change your model. The p-values confirm that there is a deviation from your null hypothesis. It is, however, in your discretion to decide whether this deviation is worth worrying about. For example, if you see a dispersion parameter of 1.01, I would not worry, even if the dispersion test is significant. A significant value of 5, however, is clearly a reason to move to a model that accounts for overdispersion.

  3. A residual pattern does not indicate that the model is unusable: While a significant pattern in the residuals indicates with good reliability that the observed data did likely not originate from the fitted model, this doesn’t necessarily imply that the inferential results from this wrong model are not usable. There are many situations in statistics where it is common practice to work with “wrong models”. For example, many statistical models use shrinkage estimators, which purposefully bias parameter estimates to certain values. Random effects are a special case of this. If DHARMa residuals for these estimators are calculated, they will often show a slight pattern in the residuals even if the model is correctly specified, and tests for this can get significant for large sample sizes. For this reason, DHARMa excludes RE estimates from the predictions when plotting res ~ pred. Another example is data that is missing at random (MAR). Since it is known that this phenomenon does not create a bias on the fixed effects estimates, it is common practice to fit these data with standard mixed models. Nevertheless, DHARMa recognizes that the observed data looks different from what would be expected from the model assumptions, and flags the model as problematic (see here).

Important conclusion: DHARMa only flags a difference between the observed and expected data - the user has to decide whether this difference is actually a problem for the analysis!

Recognizing over/underdispersion

GL(M)Ms often display over/underdispersion, which means that residual variance is larger/smaller than expected under the fitted model. This phenomenon is most common for GLM families with constant (fixed) dispersion, in particular for Poisson and binomial models, but it can also occur in GLM families that adjust the variance (such as the beta or negative binomial) when distribution assumptions are violated. A few general rules of thumb about dealing with dispersion problems:

  • Dispersion is a property of the residuals, i.e. you can detect dispersion problems only AFTER fitting the model. It doesn’t make sense to look at the dispersion of your response variable
  • Overdispersion is more common than underdispersion
  • If overdispersion is present, the main effect is that confidence intervals tend to be too narrow, and p-values too small, leading to inflated type I error. The opposite is true for underdispersion, i.e. the main issue of underdispersion is that you lose power.
  • A common reason for overdispersion is a misspecified model. When overdispersion is detected, one should therefore first search for problems in the model specification (e.g. by plotting residuals against predictors with DHARMa), and only if this doesn’t lead to success, overdispersion corrections such as individual-level random effects or changes in the distribution should be applied

Residual patterns of over/underdispersion

This is how overdispersion looks in the DHARMa residuals. Note that we get more residuals around 0 and 1, which means that more residuals are in the tails of the distribution than would be expected under the fitted model.

testData = createData(sampleSize = 200, overdispersion = 1.5, family = poisson())
fittedModel <- glm(observedResponse ~  Environment1 , family = "poisson", 
                   data = testData)

simulationOutput <- simulateResiduals(fittedModel = fittedModel)
plot(simulationOutput)

If you see this pattern, note that overdispersion is often caused by model misfit. Thus, before moving to a GLM with variable dispersion (for count data this would typically be a negative binomial), you should check your model for misfit, e.g. by plotting residuals against all predictors using plotResiduals().

Next, this is an example of underdispersion. Here, we get too many residuals around 0.5, which means that we get fewer residuals in the tails of the distribution than expected under the fitted model.

testData = createData(sampleSize = 500, intercept=0, fixedEffects = 2,
                      overdispersion = 0, family = poisson(),
                      roundPoissonVariance = 0.001, randomEffectVariance = 0)
fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) , 
                     family = "poisson", data = testData)

simulationOutput <- simulateResiduals(fittedModel = fittedModel)
plot(simulationOutput)

If you see this pattern, note that a common reason for underdispersion is overfitting, i.e. your model is too complex. Other possible explanations to check for include zero-inflation (best checked by comparing to a ZIP model, but see also DHARMa::testZeroInflation), non-independence of the data that the model can exploit to overfit (e.g. temporal autocorrelation, check via DHARMa::testTemporalAutocorrelation), or that your data-generating process is simply not a Poisson process.

From a technical side, underdispersion is not as concerning as overdispersion, as it will usually bias p-values to the conservative side, but if your goal is to get good power, you may want to consider a simpler model. If that is not helping, you can move to a distribution for underdispersed count data (e.g. Conway-Maxwell-Poisson, generalized Poisson).
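As a sketch of such a move, the underdispersed example above could be refit with the Conway-Maxwell-Poisson family in glmmTMB (assuming your glmmTMB version supports simulation from this family; fittedModelCMP is just an illustrative name):

library(glmmTMB)

# Conway-Maxwell-Poisson can accommodate underdispersed counts
fittedModelCMP <- glmmTMB(observedResponse ~ Environment1 + (1|group),
                          family = compois, data = testData)
simulationOutput <- simulateResiduals(fittedModel = fittedModelCMP)
plot(simulationOutput)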

Formal tests for over/underdispersion

Although, as discussed above, over/underdispersion will show up in the residuals, and it’s possible to detect it with the testUniformity function, simulations show that this test is less powerful than more targeted tests. DHARMa contains several overdispersion tests that compare the dispersion of simulated residuals to the observed residuals.

  1. default: a non-parametric test that compares the variance of the simulated residuals to the observed residuals, which has some analogy to the variance test implemented in AER::dispersiontest
  2. PearsonChisq: alternatively, DHARMa implements the Pearson-chi2 test that is popular in the literature, suggested in the glmm Wiki, and implemented in some other R packages such as performance::check_overdispersion
  3. refit: if residual simulations are done via refit, DHARMa will compare the Pearson residuals of the re-fitted simulations to the original Pearson residuals. This is essentially a nonparametric version of test 2.

All of these tests are included in the testDispersion function, see ?testDispersion for details.

testDispersion(simulationOutput)

## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.067211, p-value < 2.2e-16
## alternative hypothesis: two.sided

IMPORTANT INFO: we have made extensive simulations, which have shown that the various tests have certain advantages and disadvantages. The basic results are that:

  • The most powerful and reliable test is option 3, but this costs a lot of time and is not available for all regression packages, as it requires that Pearson residuals are available
  • Option 2, the parametric Pearson-chi2, is fast if Pearson residuals are available, but it is based on a naive expectation of the df (each RE is counted as 1 df), and the test statistic is thus biased towards underdispersion for mixed models, with the bias increasing with the number of RE levels. When testing only for overdispersion (alternative = “greater”), this makes the test more conservative, but it also costs power.
  • The DHARMa default option 1 is fast, nearly unbiased (i.e. you can test for under- and overdispersion), and only slightly less powerful than test 3, PROVIDED that simulations are made conditional on the fitted REs. Note that the latter is not the DHARMa default, so you have to actively request conditional simulations, e.g. for lme4 by specifying re.form = NULL. Its power compared to the parametric Pearson-chi2 test depends on the number of RE levels; it will be more powerful for typical numbers of RE levels.

As support for these statements, here are the results of a simulation that compares the uniform (KS) test with the standard simulation-based test (conditional and unconditional) and the Pearson-chi2 test (two-sided and greater) for an n = 200 Poisson GLMM with 30 RE levels.

Thus, my current recommendation is: for most users, use the default DHARMa test, but create simulations conditionally.
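For an lme4 model such as the one above, this recommendation amounts to the following sketch (re.form = NULL is passed through to simulate.merMod and conditions the simulations on the fitted REs):

# conditional simulations: REs are kept at their fitted values, only the
# observation-level stochasticity is re-simulated
simulationOutput <- simulateResiduals(fittedModel = fittedModel, re.form = NULL)
testDispersion(simulationOutput)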

Zero-inflation / k-inflation or deficits

A common special case of overdispersion is zero-inflation, which is the situation when more zeros appear in the observations than expected under the fitted model. Zero-inflation requires special correction steps. More generally, we can also have too few zeros, or too many or too few of any other value. We’ll discuss that at the end of this section.

Residual patterns

Here an example of a typical zero-inflated count dataset, plotted against the environmental predictor

testData = createData(sampleSize = 500, intercept = 2, fixedEffects = c(1),
                      overdispersion = 0, family = poisson(), 
                      quadraticFixedEffects = c(-3), randomEffectVariance = 0,
                      pZeroInflation = 0.6)

par(mfrow = c(1,2))
plot(testData$Environment1, testData$observedResponse, xlab = "Environmental Predictor", 
     ylab = "Response")
hist(testData$observedResponse, xlab = "Response", main = "")

We see a hump-shaped dependence of the environment, but with too many zeros. In the normal DHARMa residual plots, zero-inflation will look pretty much like overdispersion

fittedModel <- glmer(observedResponse ~ Environment1 + I(Environment1^2) + (1|group) , 
                     family = "poisson", data = testData)

simulationOutput <- simulateResiduals(fittedModel = fittedModel)
plot(simulationOutput)

The reason is that the model will usually try to find a compromise between the zeros, and the other values, which will lead to excess variance in the residuals.

Formal tests for zero-inflation

DHARMa also has a special test for zero-inflation, which compares the distribution of expected zeros in the data against the observed zeros

testZeroInflation(simulationOutput)

## 
##  DHARMa zero-inflation test via comparison to expected zeros with
##  simulation under H0 = fitted model
## 
## data:  simulationOutput
## ratioObsSim = 1.9533, p-value < 2.2e-16
## alternative hypothesis: two.sided

This test is likely better suited for detecting zero-inflation than the standard plot, but note that overdispersion will also lead to excess zeros, so only seeing too many zeros is not a reliable diagnostic for moving towards a zero-inflated model. A reliable differentiation between overdispersion and zero-inflation will usually only be possible when directly comparing alternative models, e.g. through residual comparison / model selection of a model with / without zero-inflation, or by simply fitting a model with zero-inflation and looking at the parameter estimate for the zero-inflation. A good option is the R package glmmTMB, which is also supported by DHARMa. We can use this to fit

# requires glmmTMB
fittedModel <- glmmTMB(observedResponse ~ Environment1 + I(Environment1^2) + (1|group), ziformula = ~1 , family = "poisson", data = testData)
summary(fittedModel)

simulationOutput <- simulateResiduals(fittedModel = fittedModel)
plot(simulationOutput)

Testing generic summary statistics, e.g. for k-inflation or deficits

To test for generic excesses / deficits of particular values, we have the function testGeneric, which compares the value of a generic, user-provided summary statistic between the observed data and the simulations.

Choose one of alternative = c(“greater”, “two.sided”, “less”) to test for inflation, deficit or both. The default is “greater” = inflation.

countOnes <- function(x) sum(x == 1)  # testing for number of 1s
testGeneric(simulationOutput, summary = countOnes, alternative = "greater") # 1-inflation

## 
##  DHARMa generic simulation test
## 
## data:  simulationOutput
## ratioObsSim = 0.19944, p-value = 1
## alternative hypothesis: greater

Heteroscedasticity

So far, most of the things that we have tested could also have been detected with parametric tests. Here, we come to the first issue that is difficult to detect with current tests, and that is usually neglected.

Heteroscedasticity means that there is a systematic dependency of the dispersion / variance on another variable in the model. It is not sufficiently appreciated that binomial or Poisson models can also show heteroscedasticity: in that case, the level of over/underdispersion depends on another variable. Here an example where we create such data

testData = createData(sampleSize = 500, intercept = -1.5,  
                      overdispersion = function(x){return(rnorm(length(x), sd = 1 * abs(x)))}, 
                      family = poisson(), randomEffectVariance = 0)
fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData)

simulationOutput <- simulateResiduals(fittedModel = fittedModel)
plot(simulationOutput)

The exact p-values for the quantile lines in the plot can be displayed via

testQuantiles(simulationOutput)

As mentioned above, the equivalent test for categorical predictors (plot function will switch automatically) would be

testCategorical(simulationOutput, catPred = testData$group)

Adding a simple overdispersion correction will try to find a compromise between the different levels of dispersion in the model. The qq plot looks better now, but there is still a pattern in the residuals

testData = createData(sampleSize = 500, intercept = 0, overdispersion = function(x){return(rnorm(length(x), sd = 2*abs(x)))}, 
                      family = poisson(), randomEffectVariance = 0)
fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) + (1|ID), 
                     family = "poisson", data = testData)

# plotConventionalResiduals(fittedModel)

simulationOutput <- simulateResiduals(fittedModel = fittedModel)
plot(simulationOutput)

To remove this pattern, you would need to make the dispersion parameter dependent on a predictor (e.g. via a dispersion model such as the dispformula in glmmTMB, or in JAGS), or apply a transformation to the data.

Detecting missing predictors or wrong functional assumptions

A second test that is typically run for LMs, but not for GL(M)Ms is to plot residuals against the predictors in the model (or potentially predictors that were not in the model) to detect possible misspecifications. Doing this is highly recommended. For that purpose, you can retrieve the residuals via

simulationOutput$scaledResiduals

Note again that the residual values are scaled between 0 and 1. If you plot the residuals against predictors, space or time, the resulting plots should not only show no systematic dependency of those residuals on the covariates, but they should also again be flat for each fixed situation. That means that if you have, for example, a categorical predictor: treatment / control, the distribution of residuals for each predictor alone should be flat as well.

Here an example with a missing quadratic effect in the model and 2 predictors

testData = createData(sampleSize = 200, intercept = 1, fixedEffects = c(1,2),
                      overdispersion = 0, family = poisson(), 
                      quadraticFixedEffects = c(-3,0))
fittedModel <- glmer(observedResponse ~ Environment1 + Environment2 + (1|group),
                     family = "poisson", data = testData)
simulationOutput <- simulateResiduals(fittedModel = fittedModel)
# plotConventionalResiduals(fittedModel)
plot(simulationOutput, quantreg = T)

# testUniformity(simulationOutput = simulationOutput)

It is difficult to see that there is a problem at all in the general plot, but it becomes clear if we plot against the environment

par(mfrow = c(1,2))
plotResiduals(simulationOutput, testData$Environment1)
plotResiduals(simulationOutput, testData$Environment2)

Residual correlation structures (temporal, spatial, phylogenetic)

If a distance between residuals can be defined (temporal, spatial, phylogenetic), you should check if there is a distance-dependence in the residuals, which would suggest to move to a GLS (generalized least squares) structure for analysis.

The three functions to test for this in DHARMa are:

  • testTemporalAutocorrelation, based on the Durbin-Watson test.
  • testSpatialAutocorrelation, based on Moran’s I; can also be used with generic distance functions.
  • testPhylogeneticAutocorrelation, based on Moran’s I test as implemented in the Moran.I function of the package ape.

Here a short example for the spatial case, see help of the functions for extended examples.

testData = createData(sampleSize = 100, family = poisson(), 
                      spatialAutocorrelation = 3, numGroups = 1,
                       randomEffectVariance = 0)
fittedModel <- glm(observedResponse ~ Environment1 , data = testData, 
                   family = poisson() )
simulationOutput <- simulateResiduals(fittedModel = fittedModel)
testSpatialAutocorrelation(simulationOutput = simulationOutput, x = testData$x,
                           y= testData$y)

## 
##  DHARMa Moran's I test for distance-based autocorrelation
## 
## data:  simulationOutput
## observed = 0.123622, expected = -0.010101, sd = 0.015421, p-value <
## 2.2e-16
## alternative hypothesis: Distance-based autocorrelation
# plot(simulationOutput)

Note that all these tests are most sensitive against homogeneous residual structure, and might miss local and heterogeneous (non-stationary) residual structures. Additional visual checks can be useful.

However, standard DHARMa simulations from models with temporal / spatial / phylogenetic conditional autoregressive terms will still have the respective correlation in the DHARMa residuals, unless the package you are using is modelling the autoregressive terms as explicit REs and is able to simulate conditional on the fitted REs. This means that the residuals will still show significant autocorrelation, even if the model fully accounts for this structure, and other tests, such as those for dispersion and uniformity, may have inflated type I error. See the example below with a glmmTMB model with a spatial autocorrelation structure:

library(glmmTMB)
testData$pos <- numFactor(testData$x, testData$y)
fittedModel2 <- glmmTMB(observedResponse ~ Environment1 + exp(pos + 0|group), 
                        data = testData, family = poisson())
simulationOutput2 <- simulateResiduals(fittedModel = fittedModel2)
testSpatialAutocorrelation(simulationOutput = simulationOutput2, x = testData$x, 
                           y= testData$y)

## 
##  DHARMa Moran's I test for distance-based autocorrelation
## 
## data:  simulationOutput2
## observed = 0.138255, expected = -0.010101, sd = 0.015421, p-value <
## 2.2e-16
## alternative hypothesis: Distance-based autocorrelation
# plot(simulationOutput2)

One option to solve this and get correct tests and no residual pattern (if the model is correct) is to rotate the residual space according to the covariance structure of the fitted model, such that the rotated residuals are conditionally independent. The argument rotation in simulateResiduals does this (see also ?getQuantile for details about the rotation options):

# rotation of the residuals 
simulationOutput3 <- simulateResiduals(fittedModel = fittedModel2,
                                       rotation = "estimated")
testSpatialAutocorrelation(simulationOutput = simulationOutput3, x = testData$x, 
                           y= testData$y)

## 
##  DHARMa Moran's I test for distance-based autocorrelation
## 
## data:  simulationOutput3
## observed = 0.035283, expected = -0.010101, sd = 0.015451, p-value =
## 0.003311
## alternative hypothesis: Distance-based autocorrelation
# plot(simulationOutput3)

Case studies and examples

Note: More real-world examples can be found on the DHARMa GitHub repository.

Budworm example (count-proportion n/k binomial)

This example comes from Jochen Fruend. Measured are the number of parasitized observations, with population density as a covariate:

plot(N_parasitized / (N_adult + N_parasitized ) ~ logDensity, 
     xlab = "Density", ylab = "Proportion infected", data = data)

Let’s fit the data with a regular binomial n/k glm

mod1 <- glm(cbind(N_parasitized, N_adult) ~ logDensity, data = data, family=binomial)
simulationOutput <- simulateResiduals(fittedModel = mod1)
plot(simulationOutput)

We see various signals of overdispersion

  • QQ: s-shaped QQ plot, distribution test (KS) significant
  • QQ: Dispersion test is significant
  • QQ: Outlier test significant
  • Res ~ predicted: Quantile fits are spread out too far

OK, so let’s add overdispersion through an individual-level random effect

mod2 <- glmer(cbind(N_parasitized, N_adult) ~ logDensity + (1|ID), data = data, family=binomial)
simulationOutput <- simulateResiduals(fittedModel = mod2)
plot(simulationOutput)

The overdispersion looks better, but you can see that the residuals still look a bit irregular (although tests are n.s.). The raw data looks a bit hump-shaped, so we might be tempted to add a quadratic effect.

mod3 <- glmer(cbind(N_parasitized, N_adult) ~ logDensity + I(logDensity^2) + (1|ID), data = data, family=binomial)
simulationOutput <- simulateResiduals(fittedModel = mod3)
plot(simulationOutput)

The residuals look perfect now. That being said, we don’t have a lot of data, and we have to be sure we’re not overfitting. A likelihood ratio test tells us that the quadratic effect is not significantly supported.

anova(mod2, mod3)
## Data: data
## Models:
## mod2: cbind(N_parasitized, N_adult) ~ logDensity + (1 | ID)
## mod3: cbind(N_parasitized, N_adult) ~ logDensity + I(logDensity^2) + (1 | ID)
##      npar    AIC    BIC  logLik deviance  Chisq Df Pr(>Chisq)  
## mod2    3 214.68 217.95 -104.34   208.68                       
## mod3    4 213.54 217.90 -102.77   205.54 3.1401  1    0.07639 .
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

Also AIC differences are small, although slightly in favor of model 3

AIC(mod2) 
## [1] 214.6776
AIC(mod3)
## [1] 213.5375

I guess you could use either Model 2 or 3 - the broader point is: increasing model complexity will nearly always improve the residuals, but according to standard statistical arguments (power, bias-variance trade-off) it’s not always advisable to get them perfect, just good enough!

Owl example (count data)

The next example uses the fairly well-known Owl dataset which is provided in glmmTMB (see ?Owls for more info about the data). The following shows a sequence of models, all checked with DHARMa. The example is discussed in a talk at ISEC 2018, see slides here.

library(glmmTMB)
m1 <- glm(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)), data=Owls , family = poisson)
res <- simulateResiduals(m1)
plot(res)

OK, this is highly overdispersed. Let’s add a RE on nest:

m2 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), data=Owls , family = poisson)
res <- simulateResiduals(m2)
plot(res)

Somewhat better, but not good. Moving to a negative binomial to adjust the dispersion, and checking dispersion and residuals against the FoodTreatment predictor:

m3 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), data=Owls , family = nbinom1)

res <- simulateResiduals(m3, plot = T)

par(mfrow = c(1,2))
testDispersion(res)
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.63438, p-value < 2.2e-16
## alternative hypothesis: two.sided
plotResiduals(res, Owls$FoodTreatment)

We see underdispersion now. In a model with variable dispersion, this is often the signal that some other distributional assumptions are violated. Let’s check for zero-inflation:

testZeroInflation(res)

## 
##  DHARMa zero-inflation test via comparison to expected zeros with
##  simulation under H0 = fitted model
## 
## data:  simulationOutput
## ratioObsSim = 1.2488, p-value = 0.064
## alternative hypothesis: two.sided

It looks as if there is some zero-inflation, although non-significant. Fitting a zero-inflated model:

m4 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) +
                (1|Nest), 
              ziformula = ~ FoodTreatment + SexParent,  data=Owls , family = nbinom1)

res <- simulateResiduals(m4, plot = T)

par(mfrow = c(1,3))
plotResiduals(res, Owls$FoodTreatment)
testDispersion(res)
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.81335, p-value = 0.168
## alternative hypothesis: two.sided
testZeroInflation(res)

## 
##  DHARMa zero-inflation test via comparison to expected zeros with
##  simulation under H0 = fitted model
## 
## data:  simulationOutput
## ratioObsSim = 1.0389, p-value = 0.616
## alternative hypothesis: two.sided

This looks a lot better. Trying a slightly different model specification, adding a dispersion model as well:

m5 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) +
                (1|Nest), 
              dispformula = ~ FoodTreatment + SexParent , 
              ziformula = ~ FoodTreatment + SexParent,  data=Owls , family = nbinom1)

res <- simulateResiduals(m5, plot = T)

par(mfrow = c(1,3))
plotResiduals(res, Owls$FoodTreatment)
testDispersion(res)
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 0.78311, p-value = 0.104
## alternative hypothesis: two.sided
testZeroInflation(res)

## 
##  DHARMa zero-inflation test via comparison to expected zeros with
##  simulation under H0 = fitted model
## 
## data:  simulationOutput
## ratioObsSim = 1.0465, p-value = 0.608
## alternative hypothesis: two.sided

but that does not seem to make things better.

Both models would be acceptable in terms of their fit to the data. Which one should you prefer? This is not a question for residual checks. Residual checks tell you which models can be rejected with the data. Which of the typically many acceptable models you should fit must be decided by your scientific question, and/or possibly by model selection methods. In doubt, I would tend towards the simpler model though.

Notes on particular data types

Poisson data

The main concern in Poisson data is dispersion. See comments in the section on the dispersion test, in particular regarding the advantage of conditional simulations in this case. To address overdispersion, I would recommend preferring the negative binomial model over observation-level random effects, because this model will be easier to test in DHARMa and its dispersion can be modeled more easily, e.g. with glmmTMB. The third option would be quasi models, but these have few advantages, except runtime. Note also that quasi models cannot be tested with DHARMa.

Once dispersion is adjusted, you should check for heteroscedasticity (via the standard plot, also against all predictors), and for zero-inflation. As noted, zero-inflation tests are often negative, and zero-inflation rather shows up as underdispersion. Work through the owl example above.

Proportional data

Proportional data expressed as percentages or fractions of a whole, i.e. non-count-based data, is often modeled with beta regressions. Those can be tested with DHARMa. Note that beta regressions are often 0 or 1 inflated; both should be tested with testZeroInflation or testGeneric.

Typical examples of such continuous (also called non-count-based or non-binomial) proportions are percent cover or the fraction of time spent on an activity.

Note: discrete proportions, i.e.Ā count-based proportions, of the type k/n should NOT be modeled with the beta regression. Use the binomial (see below).
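A sketch of this workflow using the beta family in glmmTMB; the data object propData, with a continuous proportion prop and a predictor Environment1, is hypothetical:

library(glmmTMB)

# hypothetical data: propData$prop is a continuous proportion in (0,1)
fittedBeta <- glmmTMB(prop ~ Environment1, family = beta_family(), data = propData)
res <- simulateResiduals(fittedBeta)
plot(res)

testZeroInflation(res)                  # excess of exact zeros?
countOnes <- function(x) sum(x == 1)
testGeneric(res, summary = countOnes)   # excess of exact ones?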

Binomial data

Binomial data behave slightly differently depending on whether we have a 0/1 response (Bernoulli) or a k/n response (true binomial).

A k/n response, in particular when n is large, will behave similarly to the Poisson, in that it approaches a normal distribution with fixed dispersion for large n and becomes more asymmetric at its borders (for k close to 0 or n). You should check for dispersion and consider a beta-binomial in cases of overdispersion.
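As a sketch of such a check, the budworm data from the case study above could be refit with the beta-binomial family in glmmTMB and compared to the binomial fit (model names are illustrative):

library(glmmTMB)

modBin <- glmmTMB(cbind(N_parasitized, N_adult) ~ logDensity,
                  family = binomial, data = data)
modBetaBin <- glmmTMB(cbind(N_parasitized, N_adult) ~ logDensity,
                      family = betabinomial, data = data)

plot(simulateResiduals(modBin))       # dispersion problems, as seen above
plot(simulateResiduals(modBetaBin))   # dispersion adjusted by the beta-binomial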

Things are a bit different for the 0/1 response. Let’s look at the residuals of a clearly misspecified binomial model (missing predictor) with 0/1 response data.

testData = createData(sampleSize = 500, overdispersion = 0, fixedEffects = 5, 
                      family = binomial(), randomEffectVariance = 3, 
                      numGroups = 25)

fittedModel <- glm(observedResponse ~ 1, family = "binomial", data = testData)

simulationOutput <- simulateResiduals(fittedModel = fittedModel)

If you would do the same with a binomial k/n response or count data, such a misspecification would produce overdispersion (try it out). For the 0/1 response we see neither dispersion problems nor a misfit in the general res ~ predicted plot.

plot(simulationOutput, asFactor = T)

Even though the misfit is clearly visible if we plot the residuals against the missing predictor.

plotResiduals(simulationOutput, testData$Environment1, quantreg = T)

However, we can see the overdispersion arising from the misfit if we group our residuals, which basically transforms the 0/1 response into a k/n response.

To show this, let’s look at the dispersion test for the same model, once ungrouped (left), and grouped according to the variable group which was in the data (right).

par(mfrow = c(1,2))
testDispersion(simulationOutput)
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0023, p-value = 0.824
## alternative hypothesis: two.sided
simulationOutput2 = recalculateResiduals(simulationOutput, group = testData$group)
testDispersion(simulationOutput2)

## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 3.0805, p-value < 2.2e-16
## alternative hypothesis: two.sided

In general, you can group according to any variable that you like, including continuous variables or space. However, some variables make more sense than others. To understand this, consider a simulation where we create binomial data with true probabilities p, drawn from a uniform distribution:

n = 1000
p = runif(n, 0.1, 0.9) # true probabilities
obs = rbinom(n, 1, p)

The important thing to note here is that the overall distribution of 0s and 1s generated by rbinom(n, 1, p) is essentially the same as that generated by rbinom(n, 1, mean(p)): only the mean of p matters for the marginal distribution. What that means: a misfit of the predictions will not show up unless they are wrong in the overall mean. Consequently, if we fit:

fit <- glm(obs ~ 1, family = "binomial") # wrong model, assumes equal probabilities
library(DHARMa)
res <- simulateResiduals(fit, plot = T) # nothing to see

We see no misfit, and neither do we see a misfit when we group the data points randomly, as the mean of a random group is still fit well by the overall mean.

res2 <- recalculateResiduals(res, group = rep(1:20, each = 10))
plot(res2) 

However, we see this misfit / overdispersion if we aggregate according to something that is correlated with the misfit. For our example, let’s just group according to the true means:

grouping = cut(p, breaks = quantile(p, seq(0,1,0.02)))
res3 <- recalculateResiduals(res, group = grouping)
plot(res3)

In this case, the groups have a different mean than the fitted grand mean, and the misfit shows up in the residuals. Thus, what we are looking for in the grouping is variables that may correlate with the misfit.

If you don’t have a natural grouping variable, you can introduce arbitrary grouping variables, e.g. via discretising a predictor or the response (usually preferable), and grouping according to that, or via discretising space (e.g. grouping observations in spatial blocks). The pattern appears only, however, if the grouping variable correlates with the model error. Consider the following example:

We create data and fit a model with a missing predictor (Environment2):

set.seed(123)

testData = createData(sampleSize = 500, overdispersion = 0, fixedEffects = c(0,3), family = binomial(), randomEffectVariance = 3, numGroups = 50)

Apparently, no problem with the residuals for the misfitted model:

fittedModel <- glm(observedResponse ~ Environment1, family = "binomial", 
                   data = testData)
res <- simulateResiduals(fittedModel = fittedModel)
plot(res)

  1. Grouping according to the RE produces overdispersion because RE is missing in the first model:
res2 = recalculateResiduals(res , group = testData$group)
plot(res2)

  2. Grouping according to a random factor does not produce an effect. In this respect, the model has correct dispersion:
grouping = as.factor(sample.int(50, 500, replace = T))

res2 = recalculateResiduals(res , group = grouping)
plot(res2)

  3. Grouping according to the predicted values doesn’t create a pattern:
x = predict(fittedModel)
grouping = cut(x, breaks = quantile(x, seq(0,1,0.02)))
res2 = recalculateResiduals(res , group = grouping)
plot(res2)

  4. Grouping according to the missing variable creates a pattern, because in relation to this variable, the model is overdispersed:
x = testData$Environment2
grouping = cut(x, breaks = quantile(x, seq(0,1,0.02)))
res2 = recalculateResiduals(res , group = grouping)
plot(res2)

  5. Grouping according to space does not create a pattern, because there is no missing spatial predictor:
x = testData$x
grouping = cut(x, breaks = quantile(x, seq(0,1,0.02)))
res3 = recalculateResiduals(res , group = grouping)
plot(res3)

Conclusions: if you see overdispersion or a pattern after grouping, it highlights a model error that is structured by group. As the pattern usually highlights a model misfit, rather than a dispersion problem akin to what happens in an overdispersed binomial (which has major impacts on p-values and CIs), I view this binomial grouping pattern as less critical. Likely, most conclusions will not change if you ignore the problem. Nevertheless, you should try to understand the reason for it. When the group is spatial, it could be the sign of residual spatial autocorrelation which could be addressed by a spatial RE or a spatial model. When grouped by a continuous variable, it could be the sign of a nonlinear effect.

Supported packages and frameworks

lm and glm

lm and glm and MASS::glm.nb are fully supported.

lme4

lme4 model classes are fully supported.

It is possible to condition on REs via re.form; see the help of predict.merMod.

mgcv

When using mgcv with DHARMa, it is highly recommended to also install mgcViz. Since version 0.4.5, this will allow DHARMa to fall back on the simulate.gam function in mgcViz, which is more general than the default simulate function. For example, without mgcViz, it will not be possible to simulate from mgcv::gam objects fitted with extended families.

If you absolutely want to use DHARMa without mgcViz, you should make sure that simulate(model) works correctly for the model object for which you want to calculate DHARMa residuals.

gamm4

Models fitted with gamm4 return a list that contains an lme4 object under the name “mer”. You can test this object like an lme4 model, e.g. simulateResiduals(myGamm4Model$mer). All remarks regarding lme4 objects apply.

glmmTMB

glmmTMB is nearly fully supported since DHARMa 0.2.7 and glmmTMB 1.0.0. A remaining limitation is that you can’t adjust whether simulations are conditional or not, so simulateResiduals(model, re.form = NULL) will have no effect; simulations will always be done from the full model.

spaMM

spaMM is supported by DHARMa since 0.2.1

GLMMadaptive

GLMMadaptive is supported by DHARMa since 0.3.4.

phylolm

phylolm (version >= 2.6.5) is supported by DHARMa since 0.4.7 for both model classes phylolm and phyloglm.

phyr

DHARMa residuals work with phyr, but the correct implementation is not fully tested as of DHARMa 0.4.2. See also https://github.com/florianhartig/DHARMa/issues/235

brms

brms can be made to work together with DHARMa, see https://github.com/florianhartig/DHARMa/issues/33

Unsupported packages

If confronted with an unsupported package, DHARMa will try to use standard S3 functions such as coef(), simulate() etc. to perform simulations. If no error occurs, a residual object will be calculated, and a warning will be provided that this package has not been checked for full functionality. In many cases, the results can be used, though (no guarantee; it may be worth checking with null simulations whether the results are OK). Other than that, see my general comments about adding new R packages to DHARMa.
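The idea of such a null-simulation check is to create data for which the model is correct by construction, fit the model, and confirm that the DHARMa residuals look uniform. A minimal sketch, shown here with glm for illustration (the same recipe applies to an unsupported model class; nullData and nullFit are illustrative names):

nullData = createData(sampleSize = 200, family = poisson(), randomEffectVariance = 0)
nullFit <- glm(observedResponse ~ Environment1, family = "poisson", data = nullData)
plot(simulateResiduals(nullFit))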

Importing external simulations (e.g. from Bayesian software or unsupported packages)

DHARMa can also import external simulations from a fitted model via createDHARMa(), which is particularly useful for unsupported packages and for Bayesians.

Bayesians should note the extra vignette on "DHARMa for Bayesians" regarding the interpretation of these residuals.

Here is an example of how to create simulations for a Poisson glm. Of course, it doesn’t make sense to do this in practice, as glm is a supported model class, but you could do the same for a model class that is currently not supported by DHARMa.

testData = createData(sampleSize = 200, overdispersion = 0.5, family = poisson())
fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData)

# simulate n new response vectors from the fitted Poisson GLM
simulatePoissonGLM <- function(fittedModel, n){
  pred = predict(fittedModel, type = "response") # predicted means (lambda) per observation
  nObs = length(pred)
  sim = matrix(nrow = nObs, ncol = n)            # rows = observations, columns = simulations
  for(i in 1:n) sim[,i] = rpois(nObs, pred)      # one Poisson draw per observation and simulation
  return(sim)
}

sim = simulatePoissonGLM(fittedModel, 100)

DHARMaRes = createDHARMa(simulatedResponse = sim, observedResponse = testData$observedResponse, 
             fittedPredictedResponse = predict(fittedModel), integerResponse = T)
plot(DHARMaRes, quantreg = F)

DHARMa/inst/doc/DHARMaForBayesians.Rmd0000644000176200001440000002276214665273541016764 0ustar liggesusers--- title: "DHARMa for Bayesians" author: "Florian Hartig, Theoretical Ecology, University of Regensburg [website](https://www.uni-regensburg.de/biologie-vorklinische-medizin/theoretische-oekologie/mitarbeiter/hartig/)" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true vignette: > %\VignetteIndexEntry{DHARMa for Bayesians} \usepackage[utf8]{inputenc} %\VignetteEngine{knitr::rmarkdown} abstract: "The 'DHARMa' package uses a simulation-based approach to create readily interpretable scaled (quantile) residuals for fitted (generalized) linear mixed models. This Vignette describes how to user DHARMa vor checking Bayesian models. It is recommended to read this AFTER the general DHARMa vignette, as all comments made there (in pacticular regarding the interpretation of the residuals) also apply to Bayesian models. \n \n \n" editor_options: chunk_output_type: console --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=8.5, fig.height=5.5, fig.align='center', warning=FALSE, message=FALSE) ``` ```{r, echo = F, message = F} library(DHARMa) set.seed(123) ``` # Basic workflow In principle, DHARMa residuals can be calculated and interpreted for Bayesian models in very much the same way as for frequentist models. Therefore, all comments regarding tests, residual interpretation etc. from the main vignette are equally valid for Bayesian model checks. There are some minor differences regarding the expected null distribution of the residuals, in particular in the low data limit, but I believe that for most people, these are of less concern. The main difference for a Bayesian user is that, unlike for users of directly supported regression packages such as lme4 or glmmTMB, most Bayesian users will have to create the simulations for the fitted model themselves and then feed them into DHARMa by hand. The basic workflow for Bayesians that work with BUGS, JAGS, STAN or similar is: 1. Create posterior predictive simulations for your model 2. Read these in with the createDHARMa function 3. Interpret those as described in the main vignette This is more easy as it sounds. For the major Bayesian samplers (e.g. BUGS, JAGS, STAN), it amounts to adding a block with data simulations to the model, and observing those during the MCMC sampling. Then, feed the simulations into DHARMa via createDHARMa, and all else will work pretty much the same as in the main vignette. 
## Example in Jags Here is an example, with JAGS ```{r, eval = F} library(rjags) library(BayesianTools) set.seed(123) dat <- DHARMa::createData(200, overdispersion = 0.2) Data = as.list(dat) Data$nobs = nrow(dat) Data$nGroups = length(unique(dat$group)) modelCode = "model{ for(i in 1:nobs){ observedResponse[i] ~ dpois(lambda[i]) # poisson error distribution lambda[i] <- exp(eta[i]) # inverse link function eta[i] <- intercept + env*Environment1[i] # linear predictor } intercept ~ dnorm(0,0.0001) env ~ dnorm(0,0.0001) # Posterior predictive simulations for (i in 1:nobs) { observedResponseSim[i]~dpois(lambda[i]) } }" jagsModel <- jags.model(file= textConnection(modelCode), data=Data, n.chains = 3) para.names <- c("intercept","env", "lambda", "observedResponseSim") Samples <- coda.samples(jagsModel, variable.names = para.names, n.iter = 5000) x = BayesianTools::getSample(Samples) colnames(x) # problem: all the variables are in one array - this is better in STAN, where this is a list - have to extract the right columns by hand posteriorPredDistr = x[,3:202] # this is the uncertainty of the mean prediction (lambda) posteriorPredSim = x[,203:402] # these are the simulations sim = createDHARMa(simulatedResponse = t(posteriorPredSim), observedResponse = dat$observedResponse, fittedPredictedResponse = apply(posteriorPredDistr, 2, median), integerResponse = T) plot(sim) ``` In the created plots, you will see overdispersion, which is completely expected, as the simulated data has overdispersion and a RE, which is not accounted for by the Jags model. ## Exercise As an exercise, you could now: * Add a RE * Account for overdispersion, e.g. via an OLRE or a negative Binomial And check how the residuals improve. # Conditional vs. unconditional simulations in hierarchical models The most important consideration in using DHARMa with Bayesian models is how to create the simulations. You can see in my jags code that the block ```{r, eval=F} # Posterior predictive simulations for (i in 1:nobs) { observedResponseSim[i]~dpois(lambda[i]) } ``` performs the posterior predictive simulations. Here, we just take the predicted lambda (mean perdictions) during the MCMC simulations and sample from the assumed distribution. This will works for any non-hierarchical model. When we move to hierarchical or multi-level models, including GLMMs, the issue of simulation becomes a bit more complicated. In a hierarchical model, there are several random processes that sit on top of each other. In the same way as explained in the main vignette at the point conditional / unconditional simulations, we will have to decide which of these random processes should be included in the posterior predictive simulations. As an example, imagine we add a RE in the likelihood of the previous model, to account for the group structure in the data. ```{r, eval = F} for(i in 1:nobs){ observedResponse[i] ~ dpois(lambda[i]) # poisson error distribution lambda[i] <- exp(eta[i]) # inverse link function eta[i] <- intercept + env*Environment1[i] + RE[group[i]] # linear predictor } for(j in 1:nGroups){ RE[j] ~ dnorm(0,tauRE) } ``` The predictions lambda[i] now depend on a lower-level stochastic effect, which is described by RE[j] ~ dnorm(0,tauRE). We can now decide to create posterior predictive simulations conditional on posterior estimates RE[j] (conditional simulations), in which case we would have to change nothing in the block for the posterior predictive simulations. 
Alternatively, we can decide that we want to re-simulate the RE (unconditional simulations), in which case we have to copy the entire structure of the likelihood in the predictions ```{r, eval=F} for(j in 1:nGroups){ RESim[j] ~ dnorm(0,tauRE) } for (i in 1:nobs) { observedResponseSim[i] ~ dpois(lambdaSim[i]) lambdaSim[i] <- exp(etaSim[i]) etaSim[i] <- intercept + env*Environment1[i] + RESim[group[i]] } ``` Essentially, you can remember that if you want full (uncoditional) simulations, you basically have to copy the entire likelihood of the hierarchical model, minus the priors, and sample along the hierarchical model structure. If you want to condition on a part of this structure, just cut the DAG at the point on which you want to condition on. # Statistical differences between Bayesian vs. MLE quantile residuals A common question is if there are differences between Bayesian and MLE quantile residuals. First of all, note that MLE and Bayesian quantile residuals are not exactly identical. The main difference is in how the simulation of the data under the fitted model are performed: * For models fitted by MLE, simulations in DHARMa are with the MLE (point estimate) * For models fitted with Bayes, simulations are practically always performed while also drawing from the posterior parameter uncertainty (as a point estimate is not available). Thus, Bayesian posterior predictive simulations include the parametric uncertainty of the model, additionally to the sampling uncertainty. From this we can directly conclude that Bayesian and MLE quantile residuals are asymptotically identical (and via the usual arguments uniformly distributed), but become more different the smaller n becomes. To examine what those differences are, let's imagine that we start with a situation of infinite data. In this case, we have a "sharp" posterior that can be viewed as identical to the MLE. If we reduce the number of data, there are two things happening 1. The posterior gets wider, with the likelihood component being normally distributed, at least initially 2. The influence of the prior increases, the faster the stronger the prior is. Thus, if we reduce the data, for weak / uninformative priors, we will simulate data while sampling parameters from a normal distribution around the MLE, while for strong priors, we will effectively sample data while drawing parameters of the model from the prior. In particular in the latter case (prior dominates, which can be checked via prior sensitivity analysis), you may see residual patterns that are caused by the prior, even though the model structure is correct. In some sense, you could say that the residuals check if the combination of prior + structure is compatible with the data. It's a philosophical debate how to react on such a deviation, as the prior is not really negotiable in a Bayesian analysis. Of course, also the MLE distribution might get problems in low data situations, but I would argue that MLE is usually only used anyway if the MLE is reasonably sharp. In practice, I have self experienced problems with MLE estimates. It's a bit different in the Bayesian case, where it is possible and often done to fit very complex models with limited data. In this case, many of the general issues in defining null distributions for Bayesian p-values (as, e.g., reviewed in [Conn et al., 2018](https://esajournals.onlinelibrary.wiley.com/doi/10.1002/ecm.1314)) apply. 
I would add though that while I find it important that users are aware of those differences, I have found that in practice these issues are small, and usually overruled by the much stronger effects of model error. DHARMa/inst/doc/DHARMa.Rmd0000644000176200001440000017254014704245735014454 0ustar liggesusers--- title: "DHARMa: residual diagnostics for hierarchical (multi-level/mixed) regression models" author: "Florian Hartig, Theoretical Ecology, University of Regensburg [website](https://www.uni-regensburg.de/biologie-vorklinische-medizin/theoretische-oekologie/mitarbeiter/hartig/)" date: "`r Sys.Date()`" output: rmarkdown::html_vignette: toc: true pdf_document: toc: true vignette: > %\VignetteIndexEntry{The import package} %\VignetteEngine{knitr::rmarkdown} %\VignetteEncoding{UTF-8} abstract: "The 'DHARMa' package uses a simulation-based approach to create readily interpretable scaled (quantile) residuals for fitted generalized linear (mixed) models. Currently supported are linear and generalized linear (mixed) models from 'lme4' (classes 'lmerMod', 'glmerMod'), 'glmmTMB', 'GLMMadaptive' and 'spaMM'; phylogenetic linear models from 'phylolm' (classes 'phylolm' and 'phyloglm'); generalized additive models ('gam' from 'mgcv'); 'glm' (including 'negbin' from 'MASS', but excluding quasi-distributions) and 'lm' model classes. Moreover, externally created simulations, e.g. posterior predictive simulations from Bayesian software such as 'JAGS', 'STAN', or 'BUGS' can be processed as well. The resulting residuals are standardized to values between 0 and 1 and can be interpreted as intuitively as residuals from a linear regression. The package also provides a number of plot and test functions for typical model misspecification problems, such as over/underdispersion, zero-inflation, and residual spatial, temporal and phylogenetic autocorrelation. \n \n \n" editor_options: chunk_output_type: console --- ```{r global_options, include=FALSE} knitr::opts_chunk$set(fig.width=6.5, fig.height=4.5, fig.align='center', warning=FALSE, message=FALSE, cache = T) ``` ```{r, echo = F, message = F} library(DHARMa) set.seed(123) ``` # Motivation The interpretation of conventional residuals for generalized linear (mixed) and other hierarchical statistical models is often problematic. As an example, here the result of conventional Deviance, Pearson and raw residuals for two Poisson GLMMs, one that is lacking a quadratic effect, and one that fits the data perfectly. Could you tell which is the correct model? ```{r, echo = F, fig.width=6, fig.height=3} library(lme4) overdispersedData = createData(sampleSize = 250, overdispersion = 0, quadraticFixedEffects = -2, family = poisson()) fittedModelOverdispersed <- glmer(observedResponse ~ Environment1 + (1|group) , family = "poisson", data = overdispersedData) plotConventionalResiduals(fittedModelOverdispersed) testData = createData(sampleSize = 250, intercept = 0, overdispersion = 0, family = poisson(), randomEffectVariance = 0) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) , family = "poisson", data = testData) plotConventionalResiduals(fittedModel) ``` Just for completeness - it was the second one. But don't get too excited if you got it right. Probably you were just lucky - I can't really tell a difference. But even so, would you have added a quadratic effect, instead of adding an overdispersion correction? 
The point here is that misspecifications in GL(M)Ms cannot reliably be diagnosed with standard residual plots, and thus GLMMs are often not as thoroughly checked as they should. One reason why GL(M)Ms residuals are harder to interpret is that the expected distribution of the data (aka predictive distribution) changes with the fitted values. Reweighting with the expected dispersion, as done in Pearson residuals, or using deviance residuals, helps to some extent, but it does not lead to visually homogenous residuals, even if the model is correctly specified. As a result, standard residual plots, when interpreted in the same way as for linear models, seem to show all kind of problems, such as non-normality, heteroscedasticity, even if the model is correctly specified. Questions on the R mailing lists and forums show that practitioners are regularly confused about whether such patterns in GL(M)M residuals are a problem or not. But even experienced statistical analysts currently have few options to diagnose misspecification problems in GLMMs. In my experience, the current standard practice is to eyeball the residual plots for major misspecifications, potentially have a look at the random effect distribution, and then run a test for overdispersion, which is usually positive, after which the model is modified towards an overdispersed / zero-inflated distribution. This approach, however, has a number of drawbacks, notably: - Overdispersion is often the result of missing predictors or a misspecified model structure. Standard residual plots make it difficult to identify these problems by examining residual correlations or patterns of residuals against predictors. - Not all overdispersion is the same. For count data, the negative binomial creates a different distribution than adding observation-level random effects to the Poisson. Once overdispersion is corrected for, such violations of distributional assumptions are not detectable with standard overdispersion tests (because the tests only looks at total dispersion), and nearly impossible to see visually from standard residual plots. - Dispersion frequently varies with predictors (heteroscedasticity). This can have a significant effect on the inference. While it is standard to tests for heteroscedasticity in linear regressions, heteroscedasticity is currently hardly ever tested for in GLMMs, although it is likely as frequent and influential. - Moreover, if residuals are checked, they are usually checked conditional on the fitted random effect estimates. Thus, standard checks only check the final level of the random structure in a GLMM. One can perform extra checks on the random effects, but it is somewhat unsatisfactory that there is no check on the entire model structure. DHARMa aims at solving these problems by creating readily interpretable residuals for generalized linear (mixed) models that are standardized to values between 0 and 1, and that can be interpreted as intuitively as residuals for the linear model. This is achieved by a simulation-based approach, similar to the Bayesian p-value or the parametric bootstrap, that transforms the residuals to a standardized scale. The basic steps are: 1. Simulate new response data from the fitted model for each observation. 2. For each observation, calculate the empirical cumulative density function for the simulated observations, which describes the possible values (and their probability) at the predictor combination of the observed value, assuming the fitted model is correct. 3. 
The residual is then defined as the value of the empirical density function at the value of the observed data, so a residual of 0 means that all simulated values are larger than the observed value, and a residual of 0.5 means half of the simulated values are larger than the observed value. These steps are visualized in the following figure The key advantage of this definition is that the so-defined residuals always have the same, known distribution, independent of the model that is fit, if the model is correctly specified. To see this, note that, if the observed data was created from the same data-generating process that we simulate from, all values of the cumulative distribution should appear with equal probability. That means we expect the distribution of the residuals to be flat, regardless of the model structure (Poisson, binomial, random effects and so on). I currently prepare a more exact statistical justification for the approach in an accompanying paper, but if you must provide a reference in the meantime, I would suggest citing - Dunn, K. P., and Smyth, G. K. (1996). Randomized quantile residuals. Journal of Computational and Graphical Statistics 5, 1-10. - Gelman, A. & Hill, J. Data analysis using regression and multilevel/hierarchical models Cambridge University Press, 2006 p.s.: DHARMa stands for "Diagnostics for HierArchical Regression Models" - which, strictly speaking, would make DHARM. But in German, Darm means intestines; plus, the meaning of DHARMa in Hinduism makes the current abbreviation so much more suitable for a package that tests whether your model is in harmony with your data: > From Wikipedia, 28/08/16: In Hinduism, dharma signifies behaviours that are considered to be in accord with rta, the order that makes life and universe possible, and includes duties, rights, laws, conduct, virtues and 'right way of living'. # Workflow in DHARMa ## Installing, loading and citing the package If you haven't installed the package yet, either run ```{r, eval = F} install.packages("DHARMa") ``` Or follow the instructions on to install a development version. Loading and citation ```{r} library(DHARMa) citation("DHARMa") ``` ## Calculating scaled residuals Let's assume we have a fitted model that is supported by DHARMa. ```{r} testData = createData(sampleSize = 250) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) , family = "poisson", data = testData) ``` Most functions in DHARMa can be calculated directly on the fitted model object. For example, if you are only interested in testing for dispersion problems, you could run ```{r, results = "hide", fig.show='hide'} testDispersion(fittedModel) ``` In this case, the randomized quantile residuals are calculated on the fly inside the function call. If you work in this way, however, residual calculation will be repeated by every test / plot you call, and this can take a while. It is therefore highly recommended to first calculate the residuals once, using the simulateResiduals() function ```{r} simulationOutput <- simulateResiduals(fittedModel = fittedModel, plot = F) ``` which calculates randomized quantile residuals according to the algorithm discussed above. The function returns an object of class DHARMa, containing the simulations and the scaled residuals, which can later be passed on to all other plots and test functions. When specifying the optional argument plot = T, the standard DHARMa residual plot is displayed directly. The interpretation of the plot will be discussed below. 
Using the simulateResiduals function has the added benefit that you can modify the way in which residuals are calculated. For example, you may want to change the number of simulations, or the REs to condition on. See ?simulateResiduals and section "simulation options" below for details. The calculated (scaled) residuals can be plotted and tested via a number of DHARMa functions (see below), or accessed directly via ```{r, results = "hide"} residuals(simulationOutput) ``` To interpret the residuals, remember that a scaled residual value of 0.5 means that half of the simulated data are higher than the observed value, and half of them lower. A value of 0.99 would mean that nearly all simulated data are lower than the observed value. The minimum/maximum values for the residuals are 0 and 1. For a correctly specified model we would expect asymptotically - a uniform (flat) distribution of the scaled residuals - uniformity in y direction if we plot against any predictor. Note: the uniform distribution is the only differences to "conventional" residuals as calculated for a linear regression. If you cannot get used to this, you can transform the uniform distribution to another distribution, for example normal, via ```{r, eval = F} residuals(simulationOutput, quantileFunction = qnorm, outlierValues = c(-7,7)) ``` These normal residuals will behave exactly like the residuals of a linear regression. However, for reasons of a) numeric stability with low number of simulations, which makes it neccessary to descide on which value outliers are to be transformed and b) my conviction that it is much easier to visually detect deviations from uniformity than normality, DHARMa checks all residuals in the uniform space, and I would personally advice against using the transformation. ## Plotting the scaled residuals The main plot function for the calculated DHARMa object produced by simulateResiduals() is the plot.DHARMa() function ```{r} plot(simulationOutput) ``` The function creates two plots, which can also be called separately, and provide extended explanations / examples in the help ```{r, eval = F} plotQQunif(simulationOutput) # left plot in plot.DHARMa() plotResiduals(simulationOutput) # right plot in plot.DHARMa() ``` - plotQQunif (left panel) creates a qq-plot to detect overall deviations from the expected distribution, by default with added tests for correct distribution (KS test), dispersion and outliers. Note that outliers in DHARMa are values that are by default defined as values outside the simulation envelope, not in terms of a particular quantile. Thus, which values will appear as outliers will depend on the number of simulations. If you want outliers in terms of a particuar quantile, you can use the outliers() function. - plotResiduals (right panel) produces a plot of the residuals against the predicted value (or alternatively, other variable). Simulation outliers (data points that are outside the range of simulated values) are highlighted as red stars. These points should be carefully interpreted, because we actually don't know "how much" these values deviate from the model expectation. Note also that the probability of an outlier depends on the number of simulations, so whether the existence of outliers is a reason for concern depends also on the number of simulations. 
To provide a visual aid in detecting deviations from uniformity in y-direction, the plot function calculates an (optional default) quantile regression, which compares the empirical 0.25, 0.5 and 0.75 quantiles in y direction (red solid lines) with the theoretical 0.25, 0.5 and 0.75 quantiles (dashed black line), and provides a p-value for the deviation from the expected quantile. The significance of the deviation to the expected quantiles is tested and displayed visually, and can be additionally extracted with the testQuantiles function. By default, plotResiduals plots against predicted values. However, you can also use it to plot residuals against a specific other predictors (highly recommend). ```{r, eval = F} plotResiduals(simulationOutput, form = YOURPREDICTOR) ``` If the predictor is a factor, or if there is just a small number of observations on the x axis, plotResiduals will plot a box plot with additional tests instead of a scatter plot. ```{r, eval = F} plotResiduals(simulationOutput, form = testData$group) ``` See ?plotResiduas for details, but very shortly: under H0 (perfect model), we would expect those boxes to range homogeneously from 0.25-0.75. To see whether there are deviations from this expecation, the plot calculates a test for uniformity per box, and a test for homogeneity of variances between boxes. A positive test will be highlighted in red. ## Goodness-of-fit tests on the scaled residuals To support the visual inspection of the residuals, the DHARMa package provides a number of specialized goodness-of-fit tests on the simulated residuals: - testUniformity() - tests if the overall distribution conforms to expectations. - testOutliers() - tests if there are more simulation outliers than expected. - testDispersion() - tests if the simulated dispersion is equal to the observed dispersion. - testQuantiles() - fits a quantile regression or residuals against a predictor (default predicted value), and tests of this conforms to the expected quantile. - testCategorical(simulationOutput, catPred = testData\$group) tests residuals against a categorical predictor. - testZeroinflation() - tests if there are more zeros in the data than expected from the simulations. - testGeneric() - test if a generic summary statistics (user-defined) deviates from model expectations. - testTemporalAutocorrelation() - tests for temporal autocorrelation in the residuals. - testSpatialAutocorrelation() - tests for spatial autocorrelation in the residuals. Can also be used with a generic distance function. - testPhylogeneticAutocorrelation() - tests for phylogenetic signal in the residuals. See the help of the functions and further comments below for a more detailed description. ## Simulation options There are a few important technical details regarding how the simulations are performed, in particular regarding the treatments of random effects and integer responses. It is strongly recommended to read the help of ```{r, eval = F} ?simulateResiduals ``` #### Refit ```{r, eval= F} simulationOutput <- simulateResiduals(fittedModel = fittedModel, refit = T) ``` - if refit = F (default), new datasets are simulated from the fitted model, and residuals are calculated by comparing the observed data to the new data - if refit = T, a parametric bootstrap is performed, meaning that the model is refit to all new datasets, and residuals are created by comparing observed residuals against refitted residuals The second option is much much slower, and also seemed to have lower power in some tests I ran. 
**It is therefore not recommended for standard residual diagnostics!** I only recommend using it if you know what you are doing, and have particular reasons, for example if you estimate that the tested model is biased. A bias could, for example, arise in small data situations, or when estimating models with shrinkage estimators that include a purposeful bias, such as ridge/lasso, random effects or the splines in GAMs. My idea was then that simulated data would not fit to the observations, but that residuals for model fits on simulated data would have the same patterns/bias than model fits on the observed data. Note also that refit = T can sometimes run into numerical problems, if the fitted model does not converge on the newly simulated data. #### Conditinal vs. unconditinal simulations The second option is the treatment of the stochastic hierarchy. In a hierarchical model, several layers of stochasticity are placed on top of each other. Specifically, in a GLMM, we have a lower level stochastic process (random effect), whose result enters into a higher level (e.g. Poisson distribution). For other hierarchical models, such as state-space models, similar considerations apply, but the hierarchy can be more complex. When simulating, we have to decide if we want to re-simulate all stochastic levels, or only a subset of those. For example, in a GLMM, it is common to only simulate the last stochastic level (e.g. Poisson) conditional on the fitted random effects, meaning that the random effects are set on the fitted values. For controlling how many levels should be re-simulated, the simulateResidual function allows to pass on parameters to the simulate function of the fitted model object. Please refer to the help of the different simulate functions (e.g. ?simulate.merMod) for details. For merMod (lme4) model objects, the relevant parameters are "use.u", and "re.form", as, e.g., in ```{r, eval= F} simulationOutput <- simulateResiduals(fittedModel = fittedModel, n = 250, use.u = T) ``` If the model is correctly specified and the fitting procedure is unbiased (disclaimer: GLMM estimators are not always unbiased), the simulated residuals should be flat regardless how many hierarchical levels we re-simulate. The most thorough procedure would be therefore to test all possible options. If testing only one option, I would recommend to re-simulate all levels, because this essentially tests the model structure as a whole. This is the default setting in the DHARMa package. A potential drawback is that re-simulating the random effects creates more variability, which may reduce power for detecting problems in the upper-level stochastic processes, in particular overdispersion (see section on dispersion tests below). *Note:* Although unconditional residuals implicitly also test the normal distribution of the REs, it is probably not a bad idea to look addditionally check for normality of the RE distribution. As this is not based on quantile residuals, there is no special DHARMa function for this, so you should just extract the REs, and then run e.g. a Shapiro test. #### Integer treatment / randomization A third option is the treatment of integer responses. The background of this option is that, for integer-valued variables, some additional steps are necessary to make sure that the residual distribution becomes flat (essentially, we have to smoothen away the integer nature of the data). The idea is explained in - Dunn, K. P., and Smyth, G. K. (1996). Randomized quantile residuals. 
Journal of Computational and Graphical Statistics 5, 1-10. DHARMa currently implements two procedures for randomization. The default procedure will randomize automatically. The second option requires knowledge about whether the model is integer-valued, which is usually implemented automatically. See ?simulateResiduals for details. Usually, these options should simply be kept at their defaults. #### Calculating residuals for groups or subsets In many situations, it can be useful to look at residuals per group, e.g. to see how much the model over / underpredicts per plot, year or subject. To do this, use the recalculateResiduals() function, together with a grouping variable (group) or a subsetting variable (sel), which can also be used in combination. ```{r, eval= F} simulationOutput = recalculateResiduals(simulationOutput, group = testData$group) ``` Note, however, that you will have to change the selection of variables that you provide to plots and tests (e.g. in plotResiduals or testSpatialAutocorrelation) accordingly when you group or subset residuals. #### Reproducibility notes, random seed and random state As DHARMa uses simulations to calculate the residuals, a naive implementation of the algorithm would mean that residuals would look slightly different each time a DHARMa calculation is executed. This might both be confusing and bear the danger that a user would run the simulation several times and take the result that looks better (which would amount to multiple testing / p-hacking). By default, DHARMa therefore fixes the random seed to the same value every time a simulation is run, and afterwards restores the random state to the old value. This means that you will get exactly the same residual plot each time. If you want to avoid this behavior, for example for simulation experiments on DHARMa, use seed = NULL -\> no seed set, but random state will be restored, or seed = F -\> no seed set, and random state will not be restored. Whether or not you fix the seed, the setting for the random seed and the random state are stored in ```{r, eval = F} simulationOutput$randomState ``` If you want to reproduce simulations for such a run, set the variable .Random.seed by hand, and simulate with seed = NULL. Moreover (general advice), to ensure reproducibility, it's advisable to add a set.seed() at the beginning, and a session.info() at the end of your script. The latter will list the version number of R and all loaded packages. # Interpreting residuals and recognizing misspecification problems ## General remarks on interperting residual patterns and tests So far, all shown DHARMa results were calculated for a correctly specified model, resulting in "perfect" residual plots and diagnostics. In this section, we discuss how to recognize and interpret diagnostics that indicate a misspecified model. Before going into the details, note, however, that 1. **No residual pattern does not "prove" that the model is correct**: The fact that none of the DHARMa tests indicate a problem does not "prove" that the model is correctly specified. For any model, there are likely a large number of structural problems that do not create a pattern in the DHARMa diagnostics. In good old Popper fashion, you should interpret no residual problems as your working hypothesis not being rejected in that particular test, which increases confidence in the model, but does not constitute a conclusive proof. So, keep your skepticism alive, and if you find the results fishy, keep searching and testing. 2. 
**Once a residual effect is statistically significant, look at the magnitude to decide if there is a problem**: It is crucial to note that significance is NOT a measure of the strength of the residual pattern, it is a measure of the signal/noise ratio, i.e. whether you are sure there is a pattern at all. Significance in hypothesis tests depends on at least 2 ingredients: the strength of the signal and the number of data points. If you have a lot of data points, residual diagnostics will nearly inevitably become significant, because having a perfectly fitting model is very unlikely. That, however, doesn't necessarily mean that you need to change your model. The p-values confirm that there is a deviation from your null hypothesis. It is, however, in your discretion to decide whether this deviation is worth worrying about. For example, if you see a dispersion parameter of 1.01, I would not worry, even if the dispersion test is significant. A significant value of 5, however, is clearly a reason to move to a model that accounts for overdispersion. 3. **A residual pattern does not indicate that the model is unusable**: While a significant pattern in the residuals indicates with good reliability that the observed data did likely not originate from the fitted model, this doesn't necessarily imply that the inferential results from this wrong model are not usable. There are many situations in statistics where it is common practice to work with "wrong models". For example, many statistical models use shrinkage estimators, which purposefully bias parameter estimates to certain values. Random effects are a special case of this. If DHARMa residuals for these estimators are calculated, they will often show a slight pattern in the residuals even if the model is correctly specified, and tests for this can get significant for large sample sizes. For this reason, DHARMa is excluding RE estimates in the predictions when plotting res \~ pred. Another example is data that is missing at random (MAR). Since it is known that this phenomenon does not create a bias on the fixed effects estimates, it is common practice to fit these data with standard mixed models. Nevertheless, DHARMa recognizes that the observed data looks different from what would be expected from the model assumptions, and flags the model as problematic (see [here](https://github.com/florianhartig/DHARMa/issues/101)). **Important conclusion: DHARMa only flags a difference between the observed and expected data - the user has to decide whether this difference is actually a problem for the analysis!** ## Recognizing over/underdispersion GL(M)Ms often display over/underdispersion, which means that residual variance is larger/smaller than expected under the fitted model. This phenomenon is most common for GLM families with constant (fixed) dispersion, in particular for Poisson and binomial models, but it can also occur in GLM families that adjust the variance (such as the beta or negative binomial) when distribution assumptions are violated. A few general rules of thumb about dealing with dispersion problems: - Dispersion is a property of the residuals, i.e. you can detect dispersion problems only AFTER fitting the model. It doesn't make sense to look at the dispersion of your response variable - Overdispersion is more common than underdispersion - If overdispersion is present, the main effect is that confidence intervals tend to be too narrow, and p-values to small, leading to inflated type I error. The opposite is true for underdispersion, i.e. 
the main issue of underdispersion is that you loose power. - A common reason for overdispersion is a misspecified model. When overdispersion is detected, one should therefore first search for problems in the model specification (e.g. by plotting residuals against predictors with DHARMa), and only if this doesn't lead to success, overdispersion corrections such as individual-level random effects or changes in the distribution should be applied ### Residual patterns of over/underdispersion This this is how **overdispersion** looks like in the DHARMa residuals. Note that we get more residuals around 0 and 1, which means that more residuals are in the tail of distribution than would be expected under the fitted model. ```{r} testData = createData(sampleSize = 200, overdispersion = 1.5, family = poisson()) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ``` If you see this pattern, note that overdispersion is often caused by model misfit. Thus, before moving to a GLM with variable dispersion (for count data this would typically be a negative binomial), you should check your model for misfit, e.g. by plotting residuals against all predictors using plotResiduals(). Next, this is an example of underdispersion. Here, we get too many residuals around 0.5, which means that we are not getting as many residuals as we would expect in the tail of the distribution than expected from the fitted model. ```{r} testData = createData(sampleSize = 500, intercept=0, fixedEffects = 2, overdispersion = 0, family = poisson(), roundPoissonVariance = 0.001, randomEffectVariance = 0) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ``` If you see this pattern, note that a common reason for underdispersion is overfitting, i.e. your model is too complex. Other possible explanations to check for include zero-inflation (best to check by comparing to a ZIP model, but see also DHARMa::testZeroInflation), non-independence of the data (e.g. temporal autocorrelation, check via DHARMa:: testTemporalAutocorrelation) that your predictors can use to overfit, or that your data-generating process is simply not a Poisson process. From a technical side, underdispersion is not as concerning as over dispersion, as it will usually bias p-values to the conservative side, but if your goal is to get a good power, you may want to consider a simpler model. If that is not helping, you can move to a distribution for underdispersed count data (e.g. Conway-Maxwell-Poisson, generalized Poisson). ### Formal tests for over/underdispersion Although, as discussed above, over/underdispersion will show up in the residuals, and it's possible to detect it with the testUniformity function, simulations show that this test is less powerful than more targeted tests. DHARMa contains several overdispersion tests that compare the dispersion of simulated residuals to the observed residuals. 1. *default:* a non-parametric test that compares the variance of the simulated residuals to the observed residuals (default), which has some analogy to the variance test implemented in aer::dispersiontest 2. 
*PearsonChisq:* alternatively, DHARMa implements the Pearson-chi2 test that is popular in the literature, suggested in the glmm Wiki, and implemented in some other R packages such as performance::check_overdispersion 3. *refit* if residual simulations are done via refit, DHARMa will compare the the Pearson residuals of the re-fitted simulations to the original Pearson residuals. This is essentially a nonparametric version of test 2. All of these tests are included in the testDispersion function, see ?testDispersion for details. ```{r overDispersionTest, echo = T, fig.width=4, fig.height=4} testDispersion(simulationOutput) ``` IMPORTANT INFO: we have made extensive simulations, which have shown that the various tests have certain advantages and disadvantages. The basic results are that: - The most powerful and reliable test is option 3, but this costs a lot of time and is not available for all regression packages, as it requires that Pearson residuals are available - Option 2, the parametric Pearson-chi2 is fast if Pearson residuals are available, but based on a naive expectation of df (counts RE as 1 df) and the test statistic is thus biased towards underdispersion for mixed models. Similar to the df approximation, Bias increasing with the number of RE levels. When testing only for overdispersion (alternative = "greater"), this makes the test more conservative, but it also costs power. - The DHARMa default option 1 is fast, nearly unbiased (i.e. you can test under and overdispersion), and only slightly less powerful as test 3, PROVIDED that simulations are made conditional on the fitted REs. Note that the latter is not the DHARMa default, so you have to actively request conditional simulations, e.g. for lme4 by specifying re.form = NULL. Power compared to the parametric Pearson-chi2 test depends on the number of RE levels, it will be more powerful for typical number of RE levels. As support for these statements, here results of the simulation, which compares the uniform (KS) test with the standard simuation-based test (conditional and unconditional) and the Pearson-chi2 test (two-sided and greater) for an n=200 Poisson GLMM with 30 RE levels. Thus, my current recommendation is: for most users, use the default DHARMa test, but create simulations conditionally. ## Zero-inflation / k-inflation or deficits A common special case of overdispersion is zero-inflation, which is the situation when more zeros appear in the observation than expected under the fitted model. Zero-inflation requires special correction steps. More generally, we can also have too few zeros, or too much or too few of any other values. We'll discuss that at the end of this section. ### Residual patterns Here an example of a typical zero-inflated count dataset, plotted against the environmental predictor ```{r} testData = createData(sampleSize = 500, intercept = 2, fixedEffects = c(1), overdispersion = 0, family = poisson(), quadraticFixedEffects = c(-3), randomEffectVariance = 0, pZeroInflation = 0.6) par(mfrow = c(1,2)) plot(testData$Environment1, testData$observedResponse, xlab = "Envrionmental Predictor", ylab = "Response") hist(testData$observedResponse, xlab = "Response", main = "") ``` We see a hump-shaped dependence of the environment, but with too many zeros. 
In the normal DHARMa residual plots, zero-inflation will look pretty much like overdispersion ```{r, fig.height=5.5} fittedModel <- glmer(observedResponse ~ Environment1 + I(Environment1^2) + (1|group) , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ``` The reason is that the model will usually try to find a compromise between the zeros, and the other values, which will lead to excess variance in the residuals. ### Formal tests for zero-inflation DHARMa also has a special test for zero-inflation, which compares the distribution of expected zeros in the data against the observed zeros ```{r, fig.width=4, fig.height=4} testZeroInflation(simulationOutput) ``` This test is likely better suited for detecting zero-inflation than the standard plot, but note that also overdispersion will lead to excess zeros, so only seeing too many zeros is not a reliable diagnostics for moving towards a zero-inflated model. A reliable differentiation between overdispersion and zero-inflation will usually only be possible when directly comparing alternative models, e.g. through residual comparison / model selection of a model with / without zero-inflation, or by simply fitting a model with zero-inflation and looking at the parameter estimate for the zero-inflation. A good option is the R package glmmTMB, which is also supported by DHARMa. We can use this to fit ```{r, eval= F} # requires glmmTMB fittedModel <- glmmTMB(observedResponse ~ Environment1 + I(Environment1^2) + (1|group), ziformula = ~1 , family = "poisson", data = testData) summary(fittedModel) simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ``` ### Testing generic summary statistics, e.g. for k-inflation or deficits To test for generic excess / deficits of particular values, we have the function testGeneric, which compares the values of a generic, user-provided summary statistics Choose one of alternative = c("greater", "two.sided", "less") to test for inflation / deficit or both. Default is "greater" = inflation. ```{r, fig.width=4.5, fig.height=4.5} countOnes <- function(x) sum(x == 1) # testing for number of 1s testGeneric(simulationOutput, summary = countOnes, alternative = "greater") # 1-inflation ``` ## Heteroscedasticity So far, most of the things that we have tested could also have been detected with parametric tests. Here, we come to the first issue that is difficult to detect with current tests, and that is usually neglected. Heteroscedasticity means that there is a systematic dependency of the dispersion / variance on another variable in the model. It is not sufficiently appreciated that also binomial or Poisson models can show heteroscedasticity. Basically, it means that the level of over/underdispersion depends on another parameter. 
Here an example where we create such data ```{r} testData = createData(sampleSize = 500, intercept = -1.5, overdispersion = function(x){return(rnorm(length(x), sd = 1 * abs(x)))}, family = poisson(), randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ``` The exact p-values for the quantile lines in the plot can be displayed via ```{r, eval = F} testQuantiles(simulationOutput) ``` As mentioned above, the equivalent test for categorical predictors (plot function will switch automatically) would be ```{r, eval = F} testCategorical(simulationOutput, catPred = testData$group) ``` Adding a simple overdispersion correction will try to find a compromise between the different levels of dispersion in the model. The qq plot looks better now, but there is still a pattern in the residuals ```{r} testData = createData(sampleSize = 500, intercept = 0, overdispersion = function(x){return(rnorm(length(x), sd = 2*abs(x)))}, family = poisson(), randomEffectVariance = 0) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) + (1|ID), family = "poisson", data = testData) # plotConventionalResiduals(fittedModel) simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ``` To remove this pattern, you would need to make the dispersion parameter dependent on a predictor (e.g. in JAGS), or apply a transformation on the data. ## Detecting missing predictors or wrong functional assumptions A second test that is typically run for LMs, but not for GL(M)Ms is to plot residuals against the predictors in the model (or potentially predictors that were not in the model) to detect possible misspecifications. Doing this is *highly recommended*. For that purpose, you can retrieve the residuals via ```{r, eval = F} simulationOutput$scaledResiduals ``` Note again that the residual values are scaled between 0 and 1. If you plot the residuals against predictors, space or time, the resulting plots should not only show no systematic dependency of those residuals on the covariates, but they should also again be flat for each fixed situation. That means that if you have, for example, a categorical predictor: treatment / control, the distribution of residuals for each predictor alone should be flat as well. Here an example with a missing quadratic effect in the model and 2 predictors ```{r} testData = createData(sampleSize = 200, intercept = 1, fixedEffects = c(1,2), overdispersion = 0, family = poisson(), quadraticFixedEffects = c(-3,0)) fittedModel <- glmer(observedResponse ~ Environment1 + Environment2 + (1|group), family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # plotConventionalResiduals(fittedModel) plot(simulationOutput, quantreg = T) # testUniformity(simulationOutput = simulationOutput) ``` It is difficult to see that there is a problem at all in the general plot, but it becomes clear if we plot against the environment ```{r} par(mfrow = c(1,2)) plotResiduals(simulationOutput, testData$Environment1) plotResiduals(simulationOutput, testData$Environment2) ``` ## Residual correlation structures (temporal, spatial, phylogenetic) If a distance between residuals can be defined (temporal, spatial, phylogenetic), you should check if there is a distance-dependence in the residuals, which would suggest to move to a GLS (generalized least squares) structure for analysis. 
The three functions to test for this in DHARMa are: - testTemporalAutocrrelation based on the Durbin-Watson test. - testSpatialAutocorrelation, based on Moran's I, can also be used for generic distance functions. - testPhylogeneticAutocorrelation, based on Moran's I test from `Moran.I` function on package `ape`. Here a short example for the spatial case, see help of the functions for extended examples. ```{r, fig.width=4, fig.height=4} testData = createData(sampleSize = 100, family = poisson(), spatialAutocorrelation = 3, numGroups = 1, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , data = testData, family = poisson() ) simulationOutput <- simulateResiduals(fittedModel = fittedModel) testSpatialAutocorrelation(simulationOutput = simulationOutput, x = testData$x, y= testData$y) # plot(simulationOutput) ``` Note that all these tests are most sensitive against homogeneous residual structure, and might miss local and heterogeneous (non-stationary) residual structures. Additional visual checks can be useful. However, standard DHARMa simulations from models with temporal / spatial / phylogenetic conditional autoregressive terms will still have the respective correlation in the DHARMa residuals, unless the package you are using is modelling the autoregressive terms as explicit REs and is able to simulate conditional on the fitted REs. It means that the residuals will still show significant autocorrelation, even if the model fully accounts for this strucuture, and other tests, such as dispersion, uniformity, may have inflated type I error. See the example below with a `glmmTMB` model with a spatial autocorrelation structure: ```{r, fig.width=4, fig.height=4} library(glmmTMB) testData$pos <- numFactor(testData$x, testData$y) fittedModel2 <- glmmTMB(observedResponse ~ Environment1 + exp(pos + 0|group), data = testData, family = poisson()) simulationOutput2 <- simulateResiduals(fittedModel = fittedModel2) testSpatialAutocorrelation(simulationOutput = simulationOutput2, x = testData$x, y= testData$y) # plot(simulationOutput2) ``` One of the options to solve it and get correct tests and no residual pattern (if the model is correct) is to rotate the residual space according to the coariance structure of the fitted model, such that the rotated residuals are conditionally independent. The argument rotation in `simulateResiduals` does it (see also `?getQuantile` for details about the rotation options): ```{r, fig.width=4, fig.height=4} # rotation of the residuals simulationOutput3 <- simulateResiduals(fittedModel = fittedModel2, rotation = "estimated") testSpatialAutocorrelation(simulationOutput = simulationOutput3, x = testData$x, y= testData$y) # plot(simulationOutput3) ``` # Case studies and examples **Note:** More real-world examples can be found on the DHARMa GitHub repository. ## Budworm example (count-proportion n/k binomial) This example comes from [Jochen Fruend](https://jochenfruend.wordpress.com/). 
Measured are the number of parasitized observations, with population density as a covariate: ```{r, echo = F} data = structure(list(N_parasitized = c(226, 689, 481, 960, 1177, 266, 46, 4, 884, 310, 19, 4, 7, 1, 3, 0, 365, 388, 369, 829, 532, 5), N_adult = c(1415, 2227, 2854, 3699, 2094, 376, 8, 1, 1379, 323, 2, 2, 11, 2, 0, 1, 1394, 1392, 1138, 719, 685, 3), density.attack = c(216.461273226486, 214.662143448767, 251.881252132684, 400.993643475831, 207.897856251888, 57.0335141562012, 6.1642552100285, 0.503930659141302, 124.673812637575, 27.3764667492035, 0.923453215863429, 0.399890030241684, 0.829818131526174, 0.146640466903247, 0.216795117773948, 0.215498663908284, 110.635445098884, 91.3766566822467, 126.157080458047, 82.9699108890686, 61.0476207779938, 0.574539291305784), Plot = structure(c(1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L ), .Label = c("1", "2", "3", "4"), class = "factor"), PY = c("p1y82", "p1y83", "p1y84", "p1y85", "p1y86", "p1y87", "p1y88", "p1y89", "p2y86", "p2y87", "p2y88", "p2y89", "p2y90", "p2y91", "p2y92", "p2y93", "p3y88", "p3y89", "p3y90", "p3y91", "p3y92", "p3y93" ), Year = c(82, 83, 84, 85, 86, 87, 88, 89, 86, 87, 88, 89, 90, 91, 92, 93, 88, 89, 90, 91, 92, 93), ID = 1:22), .Names = c("N_parasitized", "N_adult", "density.attack", "Plot", "PY", "Year", "ID"), row.names = c("p1y82", "p1y83", "p1y84", "p1y85", "p1y86", "p1y87", "p1y88", "p1y89", "p2y86", "p2y87", "p2y88", "p2y89", "p2y90", "p2y91", "p2y92", "p2y93", "p3y88", "p3y89", "p3y90", "p3y91", "p3y92", "p3y93" ), class = "data.frame") data$logDensity = log10(data$density.attack+1) ``` ```{r, fig.height=4, fig.width=4} plot(N_parasitized / (N_adult + N_parasitized ) ~ logDensity, xlab = "Density", ylab = "Proportion infected", data = data) ``` Let's fit the data with a regular binomial n/k glm ```{r} mod1 <- glm(cbind(N_parasitized, N_adult) ~ logDensity, data = data, family=binomial) simulationOutput <- simulateResiduals(fittedModel = mod1) plot(simulationOutput) ``` We see various signals of overdispersion - QQ: s-shaped QQ plot, distribution test (KS) signficant - QQ: Dispersion test is significant - QQ: Outlier test significant - Res \~ predicted: Quantile fits are spread out too far OK, so let's add overdispersion through an individual-level random effect ```{r} mod2 <- glmer(cbind(N_parasitized, N_adult) ~ logDensity + (1|ID), data = data, family=binomial) simulationOutput <- simulateResiduals(fittedModel = mod2) plot(simulationOutput) ``` The overdispersion looks better, but you can see that the residuals still look a bit irregular (although tests are n.s.). The raw data looks a bit humped-shaped, so we might be tempted to add a quadratic effect. ```{r} mod3 <- glmer(cbind(N_parasitized, N_adult) ~ logDensity + I(logDensity^2) + (1|ID), data = data, family=binomial) simulationOutput <- simulateResiduals(fittedModel = mod3) plot(simulationOutput) ``` The residuals look perfect now. That being said, we dont' have a lot of data, and we have to be sure we're not overfitting. A likelihood ratio test tells us that the quadratic effect is not significantly supported. 
```{r}
anova(mod2, mod3)
```

The AIC differences are also small, although slightly in favor of model 3:

```{r}
AIC(mod2)
AIC(mod3)
```

I guess you could use either model 2 or 3 - the broader point is: increasing model complexity will nearly always improve the residuals, but according to standard statistical arguments (power, bias-variance trade-off) it's not always advisable to get them perfect, just good enough!

## Owl example (count data)

The next example uses the fairly well-known Owl dataset which is provided in glmmTMB (see ?Owls for more info about the data). The following shows a sequence of models, all checked with DHARMa. The example is discussed in a talk at ISEC 2018, see slides [here](https://www.slideshare.net/florianhartig/mon-c5hartig2493).

```{r, error=TRUE}
library(glmmTMB)

m1 <- glm(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)), data=Owls , family = poisson)
res <- simulateResiduals(m1)
plot(res)
```

OK, this is highly overdispersed. Let's add a RE on nest:

```{r, error=TRUE}
m2 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), data=Owls , family = poisson)
res <- simulateResiduals(m2)
plot(res)
```

Somewhat better, but not good. Let's move to a negative binomial to adjust the dispersion, and check dispersion and residuals against the FoodTreatment predictor:

```{r, error=TRUE}
m3 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), data=Owls , family = nbinom1)
res <- simulateResiduals(m3, plot = T)

par(mfrow = c(1,2))
testDispersion(res)
plotResiduals(res, Owls$FoodTreatment)
```

We see underdispersion now. In a model with variable dispersion, this is often the signal that some other distributional assumptions are violated. Let's check for zero-inflation:

```{r, fig.height=4, fig.width=4}
testZeroInflation(res)
```

It looks as if there is some zero-inflation, although the test is non-significant. Fitting a zero-inflated model:

```{r, error=TRUE}
m4 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), ziformula = ~ FoodTreatment + SexParent, data=Owls , family = nbinom1)
res <- simulateResiduals(m4, plot = T)
```

```{r, error=TRUE, fig.width=7}
par(mfrow = c(1,3))
plotResiduals(res, Owls$FoodTreatment)
testDispersion(res)
testZeroInflation(res)
```

This looks a lot better. Trying a slightly different model specification, adding a dispersion model as well:

```{r, error=TRUE}
m5 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), dispformula = ~ FoodTreatment + SexParent , ziformula = ~ FoodTreatment + SexParent, data=Owls , family = nbinom1)
res <- simulateResiduals(m5, plot = T)
```

```{r, error=TRUE, fig.width=7}
par(mfrow = c(1,3))
plotResiduals(res, Owls$FoodTreatment)
testDispersion(res)
testZeroInflation(res)
```

But that does not seem to make things better. Both models would be acceptable in terms of their fit to the data. Which one should you prefer? This is not a question for residual checks. Residual checks tell you which models can be rejected with the data. Which of the typically many acceptable models you should fit must be decided by your scientific question, and/or possibly by model selection methods. If in doubt, I would tend towards the simpler model though.

# Notes on particular data types

## Poisson data

The main concern in Poisson data is dispersion. See the comments in the section on the dispersion test, in particular regarding the advantage of conditional simulations in this case; a short sketch follows below.
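The following is a minimal sketch of this point; it is not from the original text, the object names are made up, and it relies on the `use.u = TRUE` option (shown earlier in this vignette for lme4 models) to simulate conditional on the fitted random effects:

```{r, eval = F}
# hypothetical sketch: dispersion check for a Poisson GLMM, with simulations
# conditional on the fitted random effects (use.u = TRUE is passed on to the
# lme4 simulate function); conditional simulations typically increase the
# power of the dispersion test
testDataPois = createData(sampleSize = 200, overdispersion = 0.5, family = poisson())
fittedModelPois <- glmer(observedResponse ~ Environment1 + (1|group),
                         family = "poisson", data = testDataPois)

simCond <- simulateResiduals(fittedModel = fittedModelPois, n = 250, use.u = TRUE)
testDispersion(simCond)
```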
To address overdispersion, I would recommend preferring the negative binomial model over observation-level random effects, because this model is easier to test in DHARMa and its dispersion can be modeled more easily, e.g. with glmmTMB. A third option would be quasi-models, but these have few advantages, except runtime. Note also that quasi-models cannot be tested with DHARMa.

Once dispersion is adjusted, you should check for heteroscedasticity (via the standard plot, also against all predictors), and for zero-inflation. As noted, the zero-inflation test is often negative, and zero-inflation rather shows up as underdispersion. Work through the owl example above.

## Proportional data

Proportional data expressed as percentages or fractions of a whole, i.e. non-count-based (also called non-binomial) proportions such as percent cover or the fraction of time spent on an activity, is often modeled with beta regressions. Those can be tested with DHARMa. Note that beta regressions are often 0- or 1-inflated. Both should be tested with testZeroInflation or testGeneric.

Note: discrete proportions, i.e. count-based proportions of the type k/n, should NOT be modeled with a beta regression. Use the binomial (see below).

## Binomial data

Binomial data behave slightly differently depending on whether we have a 0/1 response (Bernoulli) or a k/n response (true binomial). A k/n response, in particular when n is large, will behave similarly to the Poisson, in that it approaches a normal distribution with fixed dispersion for large n and becomes more asymmetric at its borders (for k close to 0 or n). You should check for dispersion and consider a beta-binomial in cases of overdispersion.

Things are a bit different for the 0/1 response. Let's look at the residuals of a clearly misspecified binomial model (missing predictor) with 0/1 response data.

```{r}
testData = createData(sampleSize = 500, overdispersion = 0, fixedEffects = 5, family = binomial(), randomEffectVariance = 3, numGroups = 25)
fittedModel <- glm(observedResponse ~ 1, family = "binomial", data = testData)
simulationOutput <- simulateResiduals(fittedModel = fittedModel)
```

If you did the same with a binomial k/n response or count data, such a misspecification would produce overdispersion (try it out). For the 0/1 response, we see neither dispersion problems nor a misfit in the general res \~ predicted plot.

```{r}
plot(simulationOutput, asFactor = T)
```

This is although the misfit is clearly visible if we plot the residuals against the missing predictor.

```{r, fig.width=4, fig.height=4}
plotResiduals(simulationOutput, testData$Environment1, quantreg = T)
```

However, we can see the overdispersion arising from the misfit if we group our residuals, which basically transforms the 0/1 response into a k/n response. To show this, let's look at the dispersion test for the same model, once ungrouped (left), and once grouped according to the variable group which was in the data (right).

```{r}
par(mfrow = c(1,2))
testDispersion(simulationOutput)
simulationOutput2 = recalculateResiduals(simulationOutput, group = testData$group)
testDispersion(simulationOutput2)
```

In general, you can group according to any variable that you like, including continuous variables or space. However, some variables make more sense than others.
To understand this, consider a simulation where we create binomial data with true probabilities p, drawn from a uniform distribution:

```{r}
n = 1000
p = runif(n, 0.1, 0.9) # true probabilities
obs = rbinom(n, 1, p)
```

The important thing to note here is that `rbinom(n, 1, p)` will generate the same overall distribution as `rbinom(1, n, mean(p))`. What that means: a misfit of the predictions will not show up unless they are wrong in the overall mean. Consequently, if we fit:

```{r}
fit <- glm(obs ~ 1, family = "binomial") # wrong model, assumes equal probabilities
library(DHARMa)
res <- simulateResiduals(fit, plot = T) # nothing to see
```

We see no misfit, and neither do we see a misfit when we group the data points randomly, as the mean of a random group is still fit well by the overall mean.

```{r}
res2 <- recalculateResiduals(res, group = rep(1:20, each = 50))
plot(res2)
```

However, we do see the misfit / overdispersion if we aggregate according to something that is correlated with the misfit. For our example, let's just group according to the true probabilities:

```{r}
grouping = cut(p, breaks = quantile(p, seq(0,1,0.02)))
res3 <- recalculateResiduals(res, group = grouping)
plot(res3)
```

In this case, the groups have a different mean than the fitted grand mean, and the misfit shows up in the residuals. Thus, what we are looking for in the grouping are variables that may correlate with the misfit. If you don't have a natural grouping variable, you can introduce arbitrary grouping variables, e.g. via discretising a predictor or the response (usually preferable), and grouping according to that, or via discretising space (e.g. group observations in spatial blocks).

The pattern appears only, however, if the grouping variable correlates with the model error. Consider the following example: We create data and fit a model with a missing predictor (Environment2):

```{r}
set.seed(123)
testData = createData(sampleSize = 500, overdispersion = 0, fixedEffects = c(0,3), family = binomial(), randomEffectVariance = 3, numGroups = 50)
```

Apparently, no problem with the residuals for the misfitted model:

```{r}
fittedModel <- glm(observedResponse ~ Environment1, family = "binomial", data = testData)
res <- simulateResiduals(fittedModel = fittedModel)
plot(res)
```

1. Grouping according to the RE produces overdispersion, because the RE is missing in the fitted model:

```{r}
res2 = recalculateResiduals(res , group = testData$group)
plot(res2)
```

2. Grouping according to a random factor does not produce an effect. In this respect, the model has correct dispersion:

```{r}
grouping = as.factor(sample.int(50, 500, replace = T))
res2 = recalculateResiduals(res , group = grouping)
plot(res2)
```

3. Grouping according to the model predictions doesn't create a pattern:

```{r}
x = predict(fittedModel)
grouping = cut(x, breaks = quantile(x, seq(0,1,0.02)))
res2 = recalculateResiduals(res , group = grouping)
plot(res2)
```

4. Grouping according to the missing variable creates a pattern, because in relation to this variable, the model is overdispersed:

```{r}
x = testData$Environment2
grouping = cut(x, breaks = quantile(x, seq(0,1,0.02)))
res2 = recalculateResiduals(res , group = grouping)
plot(res2)
```
5. Grouping according to space does not create a pattern, because there is no missing spatial predictor:

```{r}
x = testData$x
grouping = cut(x, breaks = quantile(x, seq(0,1,0.02)))
res3 = recalculateResiduals(res , group = grouping)
plot(res3)
```

**Conclusions:** if you see overdispersion or a pattern after grouping, it highlights a model error that is structured by the grouping variable. As the pattern usually highlights a model misfit, rather than a dispersion problem akin to what happens in an overdispersed binomial (which has major impacts on p-values and CIs), I view this binomial grouping pattern as less critical. Likely, most conclusions will not change if you ignore the problem. Nevertheless, you should try to understand the reason for it. When the grouping is spatial, it could be a sign of residual spatial autocorrelation, which could be addressed by a spatial RE or a spatial model. When grouping by a continuous variable, it could be a sign of a nonlinear effect.

# Supported packages and frameworks

## lm and glm

lm, glm and MASS::glm.nb are fully supported.

## lme4

lme4 model classes are fully supported. It is possible to condition on REs via re.form; see the help of predict.merMod.

## mgcv

When using mgcv with DHARMa, it is highly recommended to also install mgcViz. Since DHARMa version 0.4.5, this allows DHARMa to fall back on the simulate.gam function in mgcViz, which is more general than the default simulate function. For example, without mgcViz, it will not be possible to simulate from mgcv::gam objects fitted with extended families. If you absolutely want to use DHARMa without mgcViz, you should make sure that simulate(model) works correctly for the model object for which you want to calculate DHARMa residuals.

## gamm4

Models fitted with gamm4 return a list that contains an lme4 object under the name "mer". You can test this object like an lme4 model, so e.g. simulateResiduals(myGamm4Model\$mer). All remarks regarding lme4 objects apply.

## glmmTMB

glmmTMB is nearly fully supported since DHARMa 0.2.7 and glmmTMB 1.0.0. A remaining limitation is that you can't adjust whether simulations are conditional or not, so simulateResiduals(model, re.form = NULL) will have no effect; simulations will always be done from the full model.

## spaMM

spaMM is supported by DHARMa since 0.2.1.

## GLMMadaptive

GLMMadaptive is supported by DHARMa since 0.3.4.

## phylolm

phylolm (version \>= 2.6.5) is supported by DHARMa since 0.4.7 for both model classes phylolm and phyloglm.

## phyr

DHARMa residuals work with phyr, but the correct implementation is not fully tested as of DHARMa 0.4.2. See also the discussions on the DHARMa GitHub repository.

## brms

brms can be made to work together with DHARMa; see the examples and discussions on the DHARMa GitHub repository.

## Unsupported packages

If confronted with an unsupported package, DHARMa will try to use standard S3 functions such as coef(), simulate() etc. to perform simulations. If no error occurs, a residual object will be calculated, and a warning will be provided that this package has not been checked for full functionality. In many cases, the results can be used (but no guarantee; you may want to check with null simulations whether the results are OK). Other than that, see my general comments about [adding new R packages to DHARMa](https://github.com/florianhartig/DHARMa/wiki/Adding-new-R-packages-to-DHARMA).

## Importing external simulations (e.g. from Bayesian software or unsupported packages)

DHARMa can also import external simulations from a fitted model via createDHARMa(), which will be interesting for unsupported packages and for Bayesians.
**Bayesians should note the extra Vignette on "DHARMa for Bayesians" regarding the interpretation of these residuals.** Here, an example for how to create simulations for a Poisson glm. Of course, it doesn't make sense to do this as glm is a supported model class, but you could do the same in case you want to check a model class that is currently not supported by DHARMa. ```{r, eval = T} testData = createData(sampleSize = 200, overdispersion = 0.5, family = poisson()) fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData) simulatePoissonGLM <- function(fittedModel, n){ pred = predict(fittedModel, type = "response") nObs = length(pred) sim = matrix(nrow = nObs, ncol = n) for(i in 1:n) sim[,i] = rpois(nObs, pred) return(sim) } sim = simulatePoissonGLM(fittedModel, 100) DHARMaRes = createDHARMa(simulatedResponse = sim, observedResponse = testData$observedResponse, fittedPredictedResponse = predict(fittedModel), integerResponse = T) plot(DHARMaRes, quantreg = F) ``` DHARMa/inst/doc/DHARMa.R0000644000176200001440000004362514704246641014131 0ustar liggesusers## ----global_options, include=FALSE-------------------------------------------- knitr::opts_chunk$set(fig.width=6.5, fig.height=4.5, fig.align='center', warning=FALSE, message=FALSE, cache = T) ## ----echo = F, message = F---------------------------------------------------- library(DHARMa) set.seed(123) ## ----echo = F, fig.width=6, fig.height=3-------------------------------------- library(lme4) overdispersedData = createData(sampleSize = 250, overdispersion = 0, quadraticFixedEffects = -2, family = poisson()) fittedModelOverdispersed <- glmer(observedResponse ~ Environment1 + (1|group) , family = "poisson", data = overdispersedData) plotConventionalResiduals(fittedModelOverdispersed) testData = createData(sampleSize = 250, intercept = 0, overdispersion = 0, family = poisson(), randomEffectVariance = 0) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) , family = "poisson", data = testData) plotConventionalResiduals(fittedModel) ## ----eval = F----------------------------------------------------------------- # install.packages("DHARMa") ## ----------------------------------------------------------------------------- library(DHARMa) citation("DHARMa") ## ----------------------------------------------------------------------------- testData = createData(sampleSize = 250) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) , family = "poisson", data = testData) ## ----results = "hide", fig.show='hide'---------------------------------------- testDispersion(fittedModel) ## ----------------------------------------------------------------------------- simulationOutput <- simulateResiduals(fittedModel = fittedModel, plot = F) ## ----results = "hide"--------------------------------------------------------- residuals(simulationOutput) ## ----eval = F----------------------------------------------------------------- # residuals(simulationOutput, quantileFunction = qnorm, outlierValues = c(-7,7)) ## ----------------------------------------------------------------------------- plot(simulationOutput) ## ----eval = F----------------------------------------------------------------- # plotQQunif(simulationOutput) # left plot in plot.DHARMa() # plotResiduals(simulationOutput) # right plot in plot.DHARMa() ## ----eval = F----------------------------------------------------------------- # plotResiduals(simulationOutput, form = YOURPREDICTOR) ## ----eval = 
F----------------------------------------------------------------- # plotResiduals(simulationOutput, form = testData$group) ## ----eval = F----------------------------------------------------------------- # ?simulateResiduals ## ----eval= F------------------------------------------------------------------ # simulationOutput <- simulateResiduals(fittedModel = fittedModel, refit = T) ## ----eval= F------------------------------------------------------------------ # simulationOutput <- simulateResiduals(fittedModel = fittedModel, n = 250, use.u = T) ## ----eval= F------------------------------------------------------------------ # simulationOutput = recalculateResiduals(simulationOutput, group = testData$group) ## ----eval = F----------------------------------------------------------------- # simulationOutput$randomState ## ----------------------------------------------------------------------------- testData = createData(sampleSize = 200, overdispersion = 1.5, family = poisson()) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ## ----------------------------------------------------------------------------- testData = createData(sampleSize = 500, intercept=0, fixedEffects = 2, overdispersion = 0, family = poisson(), roundPoissonVariance = 0.001, randomEffectVariance = 0) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ## ----overDispersionTest, echo = T, fig.width=4, fig.height=4------------------ testDispersion(simulationOutput) ## ----------------------------------------------------------------------------- testData = createData(sampleSize = 500, intercept = 2, fixedEffects = c(1), overdispersion = 0, family = poisson(), quadraticFixedEffects = c(-3), randomEffectVariance = 0, pZeroInflation = 0.6) par(mfrow = c(1,2)) plot(testData$Environment1, testData$observedResponse, xlab = "Envrionmental Predictor", ylab = "Response") hist(testData$observedResponse, xlab = "Response", main = "") ## ----fig.height=5.5----------------------------------------------------------- fittedModel <- glmer(observedResponse ~ Environment1 + I(Environment1^2) + (1|group) , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ## ----fig.width=4, fig.height=4------------------------------------------------ testZeroInflation(simulationOutput) ## ----eval= F------------------------------------------------------------------ # # requires glmmTMB # fittedModel <- glmmTMB(observedResponse ~ Environment1 + I(Environment1^2) + (1|group), ziformula = ~1 , family = "poisson", data = testData) # summary(fittedModel) # # simulationOutput <- simulateResiduals(fittedModel = fittedModel) # plot(simulationOutput) ## ----fig.width=4.5, fig.height=4.5-------------------------------------------- countOnes <- function(x) sum(x == 1) # testing for number of 1s testGeneric(simulationOutput, summary = countOnes, alternative = "greater") # 1-inflation ## ----------------------------------------------------------------------------- testData = createData(sampleSize = 500, intercept = -1.5, overdispersion = function(x){return(rnorm(length(x), sd = 1 * abs(x)))}, family = poisson(), randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) 
simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ## ----eval = F----------------------------------------------------------------- # testQuantiles(simulationOutput) ## ----eval = F----------------------------------------------------------------- # testCategorical(simulationOutput, catPred = testData$group) ## ----------------------------------------------------------------------------- testData = createData(sampleSize = 500, intercept = 0, overdispersion = function(x){return(rnorm(length(x), sd = 2*abs(x)))}, family = poisson(), randomEffectVariance = 0) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group) + (1|ID), family = "poisson", data = testData) # plotConventionalResiduals(fittedModel) simulationOutput <- simulateResiduals(fittedModel = fittedModel) plot(simulationOutput) ## ----eval = F----------------------------------------------------------------- # simulationOutput$scaledResiduals ## ----------------------------------------------------------------------------- testData = createData(sampleSize = 200, intercept = 1, fixedEffects = c(1,2), overdispersion = 0, family = poisson(), quadraticFixedEffects = c(-3,0)) fittedModel <- glmer(observedResponse ~ Environment1 + Environment2 + (1|group), family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # plotConventionalResiduals(fittedModel) plot(simulationOutput, quantreg = T) # testUniformity(simulationOutput = simulationOutput) ## ----------------------------------------------------------------------------- par(mfrow = c(1,2)) plotResiduals(simulationOutput, testData$Environment1) plotResiduals(simulationOutput, testData$Environment2) ## ----fig.width=4, fig.height=4------------------------------------------------ testData = createData(sampleSize = 100, family = poisson(), spatialAutocorrelation = 3, numGroups = 1, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , data = testData, family = poisson() ) simulationOutput <- simulateResiduals(fittedModel = fittedModel) testSpatialAutocorrelation(simulationOutput = simulationOutput, x = testData$x, y= testData$y) # plot(simulationOutput) ## ----fig.width=4, fig.height=4------------------------------------------------ library(glmmTMB) testData$pos <- numFactor(testData$x, testData$y) fittedModel2 <- glmmTMB(observedResponse ~ Environment1 + exp(pos + 0|group), data = testData, family = poisson()) simulationOutput2 <- simulateResiduals(fittedModel = fittedModel2) testSpatialAutocorrelation(simulationOutput = simulationOutput2, x = testData$x, y= testData$y) # plot(simulationOutput2) ## ----fig.width=4, fig.height=4------------------------------------------------ # rotation of the residuals simulationOutput3 <- simulateResiduals(fittedModel = fittedModel2, rotation = "estimated") testSpatialAutocorrelation(simulationOutput = simulationOutput3, x = testData$x, y= testData$y) # plot(simulationOutput3) ## ----echo = F----------------------------------------------------------------- data = structure(list(N_parasitized = c(226, 689, 481, 960, 1177, 266, 46, 4, 884, 310, 19, 4, 7, 1, 3, 0, 365, 388, 369, 829, 532, 5), N_adult = c(1415, 2227, 2854, 3699, 2094, 376, 8, 1, 1379, 323, 2, 2, 11, 2, 0, 1, 1394, 1392, 1138, 719, 685, 3), density.attack = c(216.461273226486, 214.662143448767, 251.881252132684, 400.993643475831, 207.897856251888, 57.0335141562012, 6.1642552100285, 0.503930659141302, 124.673812637575, 27.3764667492035, 0.923453215863429, 0.399890030241684, 
0.829818131526174, 0.146640466903247, 0.216795117773948, 0.215498663908284, 110.635445098884, 91.3766566822467, 126.157080458047, 82.9699108890686, 61.0476207779938, 0.574539291305784), Plot = structure(c(1L, 1L, 1L, 1L, 1L, 1L, 1L, 1L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 2L, 3L, 3L, 3L, 3L, 3L, 3L ), .Label = c("1", "2", "3", "4"), class = "factor"), PY = c("p1y82", "p1y83", "p1y84", "p1y85", "p1y86", "p1y87", "p1y88", "p1y89", "p2y86", "p2y87", "p2y88", "p2y89", "p2y90", "p2y91", "p2y92", "p2y93", "p3y88", "p3y89", "p3y90", "p3y91", "p3y92", "p3y93" ), Year = c(82, 83, 84, 85, 86, 87, 88, 89, 86, 87, 88, 89, 90, 91, 92, 93, 88, 89, 90, 91, 92, 93), ID = 1:22), .Names = c("N_parasitized", "N_adult", "density.attack", "Plot", "PY", "Year", "ID"), row.names = c("p1y82", "p1y83", "p1y84", "p1y85", "p1y86", "p1y87", "p1y88", "p1y89", "p2y86", "p2y87", "p2y88", "p2y89", "p2y90", "p2y91", "p2y92", "p2y93", "p3y88", "p3y89", "p3y90", "p3y91", "p3y92", "p3y93" ), class = "data.frame") data$logDensity = log10(data$density.attack+1) ## ----fig.height=4, fig.width=4------------------------------------------------ plot(N_parasitized / (N_adult + N_parasitized ) ~ logDensity, xlab = "Density", ylab = "Proportion infected", data = data) ## ----------------------------------------------------------------------------- mod1 <- glm(cbind(N_parasitized, N_adult) ~ logDensity, data = data, family=binomial) simulationOutput <- simulateResiduals(fittedModel = mod1) plot(simulationOutput) ## ----------------------------------------------------------------------------- mod2 <- glmer(cbind(N_parasitized, N_adult) ~ logDensity + (1|ID), data = data, family=binomial) simulationOutput <- simulateResiduals(fittedModel = mod2) plot(simulationOutput) ## ----------------------------------------------------------------------------- mod3 <- glmer(cbind(N_parasitized, N_adult) ~ logDensity + I(logDensity^2) + (1|ID), data = data, family=binomial) simulationOutput <- simulateResiduals(fittedModel = mod3) plot(simulationOutput) ## ----------------------------------------------------------------------------- anova(mod2, mod3) ## ----------------------------------------------------------------------------- AIC(mod2) AIC(mod3) ## ----error=TRUE--------------------------------------------------------------- library(glmmTMB) m1 <- glm(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)), data=Owls , family = poisson) res <- simulateResiduals(m1) plot(res) ## ----error=TRUE--------------------------------------------------------------- m2 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), data=Owls , family = poisson) res <- simulateResiduals(m2) plot(res) ## ----error=TRUE--------------------------------------------------------------- m3 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), data=Owls , family = nbinom1) res <- simulateResiduals(m3, plot = T) par(mfrow = c(1,2)) testDispersion(res) plotResiduals(res, Owls$FoodTreatment) ## ----fig.height=4, fig.width=4------------------------------------------------ testZeroInflation(res) ## ----error=TRUE--------------------------------------------------------------- m4 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), ziformula = ~ FoodTreatment + SexParent, data=Owls , family = nbinom1) res <- simulateResiduals(m4, plot = T) ## ----error=TRUE, fig.width=7-------------------------------------------------- par(mfrow = c(1,3)) plotResiduals(res, 
Owls$FoodTreatment) testDispersion(res) testZeroInflation(res) ## ----error=TRUE--------------------------------------------------------------- m5 <- glmmTMB(SiblingNegotiation ~ FoodTreatment*SexParent + offset(log(BroodSize)) + (1|Nest), dispformula = ~ FoodTreatment + SexParent , ziformula = ~ FoodTreatment + SexParent, data=Owls , family = nbinom1) res <- simulateResiduals(m5, plot = T) ## ----error=TRUE, fig.width=7-------------------------------------------------- par(mfrow = c(1,3)) plotResiduals(res, Owls$FoodTreatment) testDispersion(res) testZeroInflation(res) ## ----------------------------------------------------------------------------- testData = createData(sampleSize = 500, overdispersion = 0, fixedEffects = 5, family = binomial(), randomEffectVariance = 3, numGroups = 25) fittedModel <- glm(observedResponse ~ 1, family = "binomial", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) ## ----------------------------------------------------------------------------- plot(simulationOutput, asFactor = T) ## ----fig.width=4, fig.height=4------------------------------------------------ plotResiduals(simulationOutput, testData$Environment1, quantreg = T) ## ----------------------------------------------------------------------------- par(mfrow = c(1,2)) testDispersion(simulationOutput) simulationOutput2 = recalculateResiduals(simulationOutput, group = testData$group) testDispersion(simulationOutput2) ## ----------------------------------------------------------------------------- n = 1000 p = runif(n, 0.1, 0.9) # true probabilities obs = rbinom(n, 1, p) ## ----------------------------------------------------------------------------- fit <- glm(obs ~ 1, family = "binomial") # wrong model, assumes equal probabilities library(DHARMa) res <- simulateResiduals(fit, plot = T) # nothing to see ## ----------------------------------------------------------------------------- res2 <- recalculateResiduals(res, group = rep(1:20, each = 10)) plot(res2) ## ----------------------------------------------------------------------------- grouping = cut(p, breaks = quantile(p, seq(0,1,0.02))) res3 <- recalculateResiduals(res, group = grouping) plot(res3) ## ----------------------------------------------------------------------------- set.seed(123) testData = createData(sampleSize = 500, overdispersion = 0, fixedEffects = c(0,3), family = binomial(), randomEffectVariance = 3, numGroups = 50) ## ----------------------------------------------------------------------------- res <- simulateResiduals(fittedModel = fittedModel) fittedModel <- glm(observedResponse ~ Environment1, family = "binomial", data = testData) plot(res) ## ----------------------------------------------------------------------------- res2 = recalculateResiduals(res , group = testData$group) plot(res2) ## ----------------------------------------------------------------------------- grouping = as.factor(sample.int(50, 500, replace = T)) res2 = recalculateResiduals(res , group = grouping) plot(res2) ## ----------------------------------------------------------------------------- x = predict(fittedModel) grouping = cut(x, breaks = quantile(x, seq(0,1,0.02))) res2 = recalculateResiduals(res , group = grouping) plot(res2) ## ----------------------------------------------------------------------------- x = testData$Environment2 grouping = cut(x, breaks = quantile(x, seq(0,1,0.02))) res2 = recalculateResiduals(res , group = grouping) plot(res2) ## 
----------------------------------------------------------------------------- x = testData$x grouping = cut(x, breaks = quantile(x, seq(0,1,0.02))) res3 = recalculateResiduals(res , group = grouping) plot(res3) ## ----eval = T----------------------------------------------------------------- testData = createData(sampleSize = 200, overdispersion = 0.5, family = poisson()) fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData) simulatePoissonGLM <- function(fittedModel, n){ pred = predict(fittedModel, type = "response") nObs = length(pred) sim = matrix(nrow = nObs, ncol = n) for(i in 1:n) sim[,i] = rpois(nObs, pred) return(sim) } sim = simulatePoissonGLM(fittedModel, 100) DHARMaRes = createDHARMa(simulatedResponse = sim, observedResponse = testData$observedResponse, fittedPredictedResponse = predict(fittedModel), integerResponse = T) plot(DHARMaRes, quantreg = F) DHARMa/inst/doc/DHARMaForBayesians.R0000644000176200001440000000535714704246643016441 0ustar liggesusers## ----global_options, include=FALSE-------------------------------------------- knitr::opts_chunk$set(fig.width=8.5, fig.height=5.5, fig.align='center', warning=FALSE, message=FALSE) ## ----echo = F, message = F---------------------------------------------------- library(DHARMa) set.seed(123) ## ----eval = F----------------------------------------------------------------- # library(rjags) # library(BayesianTools) # # set.seed(123) # # dat <- DHARMa::createData(200, overdispersion = 0.2) # # Data = as.list(dat) # Data$nobs = nrow(dat) # Data$nGroups = length(unique(dat$group)) # # modelCode = "model{ # # for(i in 1:nobs){ # observedResponse[i] ~ dpois(lambda[i]) # poisson error distribution # lambda[i] <- exp(eta[i]) # inverse link function # eta[i] <- intercept + env*Environment1[i] # linear predictor # } # # intercept ~ dnorm(0,0.0001) # env ~ dnorm(0,0.0001) # # # Posterior predictive simulations # for (i in 1:nobs) { # observedResponseSim[i]~dpois(lambda[i]) # } # # }" # # jagsModel <- jags.model(file= textConnection(modelCode), data=Data, n.chains = 3) # para.names <- c("intercept","env", "lambda", "observedResponseSim") # Samples <- coda.samples(jagsModel, variable.names = para.names, n.iter = 5000) # # x = BayesianTools::getSample(Samples) # # colnames(x) # problem: all the variables are in one array - this is better in STAN, where this is a list - have to extract the right columns by hand # posteriorPredDistr = x[,3:202] # this is the uncertainty of the mean prediction (lambda) # posteriorPredSim = x[,203:402] # these are the simulations # # sim = createDHARMa(simulatedResponse = t(posteriorPredSim), observedResponse = dat$observedResponse, fittedPredictedResponse = apply(posteriorPredDistr, 2, median), integerResponse = T) # plot(sim) ## ----eval=F------------------------------------------------------------------- # # Posterior predictive simulations # for (i in 1:nobs) { # observedResponseSim[i]~dpois(lambda[i]) # } ## ----eval = F----------------------------------------------------------------- # for(i in 1:nobs){ # observedResponse[i] ~ dpois(lambda[i]) # poisson error distribution # lambda[i] <- exp(eta[i]) # inverse link function # eta[i] <- intercept + env*Environment1[i] + RE[group[i]] # linear predictor # } # # for(j in 1:nGroups){ # RE[j] ~ dnorm(0,tauRE) # } ## ----eval=F------------------------------------------------------------------- # for(j in 1:nGroups){ # RESim[j] ~ dnorm(0,tauRE) # } # # for (i in 1:nobs) { # observedResponseSim[i] ~ dpois(lambdaSim[i]) # lambdaSim[i] <- 
exp(etaSim[i]) #   etaSim[i] <- intercept + env*Environment1[i] + RESim[group[i]] # }

DHARMa/README.md0000644000176200001440000000161014704245735012536 0ustar liggesusers# DHARMa - Residual Diagnostics for HierARchical Models

The DHARMa package creates readily interpretable residuals for generalized linear (mixed) models that are standardized to values between 0 and 1. This is achieved by a simulation-based approach, similar to the Bayesian p-value or the parametric bootstrap:

1) simulate new data from the fitted model
2) from this simulated data, calculate the empirical cumulative density function
3) the residual is the value of the empirical cumulative density function at the value of the observed data.

The package includes various functions that deal with issues such as

* Misfit
* Heteroscedasticity
* Under/Overdispersion
* Zero-inflation
* Residual temporal autocorrelation
* Residual spatial autocorrelation
* Residual phylogenetic autocorrelation

To get more information, install the package and run

```{r}
library(DHARMa)
?DHARMa
vignette("DHARMa", package="DHARMa")
```

DHARMa/build/0000755000176200001440000000000014704246643012357 5ustar liggesusersDHARMa/build/vignette.rds0000644000176200001440000000036014704246643014715 0ustar liggesusers[binary content of vignette.rds omitted]
DHARMa/man/0000755000176200001440000000000014704245735012034 5ustar liggesusersDHARMa/man/createDHARMa.Rd0000644000176200001440000000706014677165224014511 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DHARMa.R
\name{createDHARMa}
\alias{createDHARMa}
\title{Create a DHARMa object from hand-coded simulations or Bayesian posterior predictive simulations.}
\usage{
createDHARMa(simulatedResponse, observedResponse,
  fittedPredictedResponse = NULL, integerResponse = FALSE, seed = 123,
  method = c("PIT", "traditional"), rotation = NULL)
}
\arguments{
\item{simulatedResponse}{matrix of observations simulated from the fitted model - row index for observations and column index for simulations.}

\item{observedResponse}{true observations.}

\item{fittedPredictedResponse}{optional fitted predicted response. For Bayesian posterior predictive simulations, using the median posterior prediction as fittedPredictedResponse is recommended. If not provided, the mean simulatedResponse will be used.}

\item{integerResponse}{if T, noise will be added to the residuals to maintain uniform expectations for integer responses (such as Poisson or Binomial). Unlike in \link{simulateResiduals}, the nature of the data is not automatically detected, so this MUST be set by the user appropriately.}

\item{seed}{the random seed to be used within DHARMa. The default setting, recommended for most users, is to keep the random seed fixed at the value 123. This means that you will always get the same randomization and thus the same result when running the same code. NULL = no new seed is set, but the previous random state will be restored after simulation. FALSE = no seed is set, and the random state will not be restored. The latter two options are only recommended for simulation experiments. See vignette for details.}

\item{method}{the quantile randomization method used. The two options implemented at the moment are probability integral transform (PIT-) residuals (current default), and the "traditional" randomization procedure, that was used in DHARMa until version 0.3.0.
For details, see \link{getQuantile}.} \item{rotation}{optional rotation of the residual space to remove residual autocorrelation. See details in \link{simulateResiduals}, section \emph{residual auto-correlation} for an extended explanation, and \link{getQuantile} for syntax.} } \description{ Create a DHARMa object from hand-coded simulations or Bayesian posterior predictive simulations. } \details{ The use of this function is to convert simulated residuals (e.g. from a point estimate, or Bayesian p-values) to a DHARMa object, to make use of the plotting / test functions in DHARMa. } \note{ Either scaled residuals or (simulatedResponse AND observed response) have to be provided. } \examples{ ## READING IN HAND-CODED SIMULATIONS testData = createData(sampleSize = 50, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1, data = testData, family = "poisson") # in DHARMA, using the simulate.glm function of glm sims = simulateResiduals(fittedModel) plot(sims, quantreg = FALSE) # Doing the same with a handcoded simulate function. # of course this code will only work with a 1-par glm model simulateMyfit <- function(n=10, fittedModel){ int = coef(fittedModel)[1] slo = coef(fittedModel)[2] pred = exp(int + slo * testData$Environment1) predSim = replicate(n, rpois(length(pred), pred)) return(predSim) } sims = simulateMyfit(250, fittedModel) dharmaRes <- createDHARMa(simulatedResponse = sims, observedResponse = testData$observedResponse, fittedPredictedResponse = predict(fittedModel, type = "response"), integer = TRUE) plot(dharmaRes, quantreg = FALSE) } DHARMa/man/getSimulations.Rd0000644000176200001440000000765714704245735015351 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compatibility.R \name{getSimulations} \alias{getSimulations} \alias{getSimulations.default} \alias{getSimulations.negbin} \alias{getSimulations.gam} \alias{getSimulations.lmerMod} \alias{getSimulations.glmmTMB} \alias{getSimulations.HLfit} \alias{getSimulations.MixMod} \alias{getSimulations.phylolm} \alias{getSimulations.phyloglm} \title{Get model simulations} \usage{ getSimulations(object, nsim = 1, type = c("normal", "refit"), ...) \method{getSimulations}{default}(object, nsim = 1, type = c("normal", "refit"), ...) \method{getSimulations}{negbin}(object, nsim = 1, type = c("normal", "refit"), ...) \method{getSimulations}{gam}(object, nsim = 1, type = c("normal", "refit"), mgcViz = TRUE, ...) \method{getSimulations}{lmerMod}(object, nsim = 1, type = c("normal", "refit"), ...) \method{getSimulations}{glmmTMB}(object, nsim = 1, type = c("normal", "refit"), ...) \method{getSimulations}{HLfit}(object, nsim = 1, type = c("normal", "refit"), ...) \method{getSimulations}{MixMod}(object, nsim = 1, type = c("normal", "refit"), ...) \method{getSimulations}{phylolm}(object, nsim = 1, type = c("normal", "refit"), ...) \method{getSimulations}{phyloglm}(object, nsim = 1, type = c("normal", "refit"), ...) } \arguments{ \item{object}{a fitted model.} \item{nsim}{number of simulations.} \item{type}{if simulations should be prepared for getQuantile or for refit.} \item{...}{additional parameters to be passed on, usually to the simulate function of the respective model class.} \item{mgcViz}{whether simulations should be created with mgcViz (if mgcViz is available)} } \value{ A matrix with simulations. } \description{ Wrapper to simulate from a fitted model. 
}
\details{
The purpose of this function is to wrap or implement the simulate function of different model classes and thus return simulations from fitted models in a standardized way.

Note: GLMM and other regression packages often differ in how simulations are produced, and in which parameters can be used to modify this behavior. One important difference is how to modify which hierarchical levels are held constant, and which are re-simulated. In lme4, this is controlled by the re.form argument (see \link[lme4:simulate.merMod]{lme4::simulate.merMod}). In glmmTMB, the package version 1.1.10 has a temporary solution to simulate conditional on all random effects (see \link[glmmTMB:set_simcodes]{glmmTMB::set_simcodes} val = "fix", and issue \href{https://github.com/glmmTMB/glmmTMB/issues/888}{#888} in the glmmTMB GitHub repository). For other packages, please consult the help.

If the model was fit with weights and the respective model class does not include the weights in the simulations, getSimulations will throw a warning. The background is that if weights are used on the likelihood directly, then what is fitted is effectively a pseudo-likelihood, and there is no way to directly simulate from the specified likelihood. Whether or not residuals can be used in this case depends very much on what is tested and how weights are used. I'm sorry to say that it is hard to give a general recommendation; you have to consult someone that understands how weights are processed in the respective model class.
}
\examples{
testData = createData(sampleSize = 400, family = gaussian())

fittedModel <- lm(observedResponse ~ Environment1 , data = testData)

# response that was used to fit the model
getObservedResponse(fittedModel)

# predictions of the model for these points
getFitted(fittedModel)

# extract simulations from the model as matrix
getSimulations(fittedModel, nsim = 2)

# extract simulations from the model for refit (often requires different structure)
x = getSimulations(fittedModel, nsim = 2, type = "refit")
getRefit(fittedModel, x[[1]])

getRefit(fittedModel, getObservedResponse(fittedModel))
}
\seealso{
\link{getObservedResponse}, \link{getRefit}, \link{getFixedEffects}, \link{getFitted}
}
\author{
Florian Hartig
}
DHARMa/man/getRefit.Rd0000644000176200001440000000404714703461527014077 0ustar liggesusers% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/compatibility.R
\name{getRefit}
\alias{getRefit}
\alias{getRefit.default}
\alias{getRefit.lm}
\alias{getRefit.glmmTMB}
\alias{getRefit.HLfit}
\alias{getRefit.MixMod}
\alias{getRefit.phylolm}
\alias{getRefit.phyloglm}
\title{Get model refit}
\usage{
getRefit(object, newresp, ...)

\method{getRefit}{default}(object, newresp, ...)

\method{getRefit}{lm}(object, newresp, ...)

\method{getRefit}{glmmTMB}(object, newresp, ...)

\method{getRefit}{HLfit}(object, newresp, ...)

\method{getRefit}{MixMod}(object, newresp, ...)

\method{getRefit}{phylolm}(object, newresp, ...)

\method{getRefit}{phyloglm}(object, newresp, ...)
}
\arguments{
\item{object}{a fitted model.}

\item{newresp}{the new response that should be used to refit the model.}

\item{...}{additional parameters to be passed on to the refit or update function that is used to refit the model.}
}
\description{
Wrapper to refit a fitted model.
}
\details{
The purpose of this wrapper is to standardize the refit of a model. The behavior of this function depends on the supplied model. When available, it uses the refit method, otherwise it will use update.
For glmmTMB: since version 1.0, glmmTMB has a refit function, but this didn't work, so I switched back to this implementation, which is a hack based on the update function. } \examples{ testData = createData(sampleSize = 400, family = gaussian()) fittedModel <- lm(observedResponse ~ Environment1 , data = testData) # response that was used to fit the model getObservedResponse(fittedModel) # predictions of the model for these points getFitted(fittedModel) # extract simulations from the model as matrix getSimulations(fittedModel, nsim = 2) # extract simulations from the model for refit (often requires different structure) x = getSimulations(fittedModel, nsim = 2, type = "refit") getRefit(fittedModel, x[[1]]) getRefit(fittedModel, getObservedResponse(fittedModel)) } \seealso{ \link{getObservedResponse}, \link{getSimulations}, \link{getFixedEffects} } \author{ Florian Hartig } DHARMa/man/checkDots.Rd0000644000176200001440000000057414665273541014242 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helper.R \name{checkDots} \alias{checkDots} \title{Check dot operator} \usage{ checkDots(name, default, ...) } \arguments{ \item{name}{variable name} \item{default}{variable default} } \description{ Check dot operator } \details{ modified from https://github.com/lcolladotor/dots } \keyword{internal} DHARMa/man/DHARMa.ecdf.Rd0000644000176200001440000000060714703461527014220 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helper.R \name{DHARMa.ecdf} \alias{DHARMa.ecdf} \title{Modified ECDF function.} \usage{ DHARMa.ecdf(x) } \description{ Modified ECDF function. } \details{ Ensures symmetric ECDF (standard ECDF is <), and that 0 / 1 values are only produced if the data is strictly < > than the observed data. } \keyword{internal} DHARMa/man/getFitted.Rd0000644000176200001440000000414714677165224014253 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compatibility.R \name{getFitted} \alias{getFitted} \alias{getFitted.default} \alias{getFitted.gam} \alias{getFitted.HLfit} \alias{getFitted.MixMod} \alias{getFitted.phylolm} \alias{getFitted.phyloglm} \title{Get fitted/predicted values} \usage{ getFitted(object, ...) \method{getFitted}{default}(object, ...) \method{getFitted}{gam}(object, ...) \method{getFitted}{HLfit}(object, ...) \method{getFitted}{MixMod}(object, ...) \method{getFitted}{phylolm}(object, ...) \method{getFitted}{phyloglm}(object, ...) } \arguments{ \item{object}{A fitted model.} \item{...}{Additional parameters to be passed on, usually to the simulate function of the respective model class.} } \description{ Wrapper to get the fitted/predicted response of model at the response scale. } \details{ The purpose of this wrapper is to standardize extract the fitted values, which is implemented via predict(model, type = "response") for most model classes. If you implement this function for a new model class, you should include an option to modifying which random effects (REs) are included in the predictions. If this option is not available, it is essential that predictions are provided marginally/unconditionally, i.e. without the RE estimates (because of https://github.com/florianhartig/DHARMa/issues/43), which corresponds to re-form = ~0 in lme4. 
} \examples{ testData = createData(sampleSize = 400, family = gaussian()) fittedModel <- lm(observedResponse ~ Environment1 , data = testData) # response that was used to fit the model getObservedResponse(fittedModel) # predictions of the model for these points getFitted(fittedModel) # extract simulations from the model as matrix getSimulations(fittedModel, nsim = 2) # extract simulations from the model for refit (often requires different structure) x = getSimulations(fittedModel, nsim = 2, type = "refit") getRefit(fittedModel, x[[1]]) getRefit(fittedModel, getObservedResponse(fittedModel)) } \seealso{ \link{getObservedResponse}, \link{getSimulations}, \link{getRefit}, \link{getFixedEffects} } \author{ Florian Hartig } DHARMa/man/hurricanes.Rd0000644000176200001440000000524214703461527014467 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{hurricanes} \alias{hurricanes} \title{Hurricanes} \format{ A 'data.frame': 92 obs. of 14 variables \describe{ \item{Year}{Year of the hurricane (1950-2012) } \item{Name}{Name of the hurricane } \item{MasFem}{Masculinity-femininity rating of the hurricane's name in the range 1 = very masculine, 11 = very feminine.} \item{MinPressure_before}{Minimum air pressure (909-1002).} \item{Minpressure_Updated_2014}{Updated minimum air pressure (909-1003).} \item{Gender_MF}{Binary gender categorization based on MasFem (male = 0, female = 1).} \item{Category}{Strength of the hurricane in categories (1:7). (1 = not at all, 7 = very intense).} \item{alldeaths}{Human deaths occured (1:256).} \item{NDAM}{Normalized damage in millions (1:75.000). The raw (dollar) amounts of property damage caused by hurricanes were obtained, and the unadjusted dollar amounts were normalized to 2013 monetary values by adjusting them to inflation, wealth and population density.} \item{Elapsed_Yrs}{Elapsed years since the occurrence of hurricanes (1:63).} \item{Source}{MWR/wikipedia ()} \item{ZMasFem}{Scaled (MasFem)} \item{ZMinPressure_A}{Scaled (Minpressure_Updated_2014)} \item{ZNDAM}{Scaled (NDAM)} ... } } \description{ A data set on hurricane strength and fatalities in the US between 1950 and 2012. The data originates from the study by Jung et al., PNAS, 2014, who claim that the masculinity / femininity of a hurricane name has a causal effect on fatalities, presumably through a different perception of danger caused by the names. } \examples{ \dontrun{ # Loading hurricanes dataset library(DHARMa) data(hurricanes) str(hurricanes) # this is the model fit by Jung et al. library(glmmTMB) originalModelGAM = glmmTMB(alldeaths ~ scale(MasFem) * (scale(Minpressure_Updated_2014) + scale(NDAM)), data = hurricanes, family = nbinom2) # no significant deviation in the general DHARMa plot res <- simulateResiduals(originalModelGAM) plot(res) # but residuals ~ NDAM looks funny, which was pointed # out by Bob O'Hara in a blog post after publication of the paper plotResiduals(res, hurricanes$NDAM) # we also find temporal autocorrelation res2 = recalculateResiduals(res, group = hurricanes$Year) testTemporalAutocorrelation(res2, time = unique(hurricanes$Year)) # task: try to address these issues - in many instances, this will # make the MasFem predictor n.s. } } \references{ Jung, K., Shavitt, S., Viswanathan, M., & Hilbe, J. M. (2014). Female hurricanes are deadlier than male hurricanes. Proceedings of the National Academy of Sciences, 111(24), 8782-8787. 
} DHARMa/man/getRandomState.Rd0000644000176200001440000000323514677165224015252 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/random.R \name{getRandomState} \alias{getRandomState} \title{Record and restore a random state} \usage{ getRandomState(seed = NULL) } \arguments{ \item{seed}{seed argument to set.seed(), typically a number. Additional options: NULL = no seed is set, but return includes function for restoring random seed. F = function does nothing, i.e. neither seed is changed, nor does the returned function do anything.} } \value{ A list with various infos about the random state that after function execution, as well as a function to restore the previous state before the function execution. } \description{ The aim of this function is to record, manipulate and restore a random state. } \details{ This function is intended for two (not mutually exclusive tasks): a) record the current random state. b) change the current random state in a way that the previous state can be restored. } \examples{ set.seed(13) runif(1) # testing the function in standard settings currentSeed = .Random.seed x = getRandomState(123) runif(1) x$restoreCurrent() all(.Random.seed == currentSeed) # if no seed was set in env, this will also be restored rm(.Random.seed) # now, there is no random seed x = getRandomState(123) exists(".Random.seed") # TRUE runif(1) x$restoreCurrent() exists(".Random.seed") # False runif(1) # re-create a seed # with seed = false currentSeed = .Random.seed x = getRandomState(FALSE) runif(1) x$restoreCurrent() all(.Random.seed == currentSeed) # with seed = NULL currentSeed = .Random.seed x = getRandomState(NULL) runif(1) x$restoreCurrent() all(.Random.seed == currentSeed) } \author{ Florian Hartig } DHARMa/man/getPossibleModels.Rd0000644000176200001440000000043014665273541015746 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compatibility.R \name{getPossibleModels} \alias{getPossibleModels} \title{get possible models} \usage{ getPossibleModels() } \description{ returns a list of supported model classes } \keyword{internal} DHARMa/man/simulateLRT.Rd0000644000176200001440000001040414677165224014532 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulateLRT.R \name{simulateLRT} \alias{simulateLRT} \title{Simulated likelihood ratio tests for (generalized) linear mixed models} \usage{ simulateLRT(m0, m1, n = 250, seed = 123, plot = TRUE, suppressWarnings = TRUE, saveModels = FALSE, ...) } \arguments{ \item{m0}{null Model.} \item{m1}{alternative Model.} \item{n}{number of simulations.} \item{seed}{random seed.} \item{plot}{whether null distribution should be plotted.} \item{suppressWarnings}{whether to suppress warnings that occur during refitting the models to simulated data. See details for explanations.} \item{saveModels}{Whether to save refitted models.} \item{...}{additional parameters to pass on to the simulate function of the model object. See \link{getSimulations} for details.} } \description{ This function uses the DHARMa model wrappers to generate simulated likelihood ratio tests (LRTs) for (generalized) linear mixed models based on a parametric bootstrap. 
The motivation for using a simulated LRT rather than a standard ANOVA or AIC for model selection in mixed models is that df for mixed models are not clearly defined, thus standard ANOVA based on Chi2 statistics or AIC are unreliable, in particular for models with large contributions of REs to the likelihood.

Interpretation of the results as in a normal LRT: the null hypothesis is that m0 is correct, and the test checks if the increase in likelihood of m1 is higher than expected, using data simulated from m0.
}
\details{
The function performs a simulated LRT, which works as follows:

\enumerate{
\item H0: M0 is correct.
\item Our test statistic is the log likelihood ratio of M1/M0. The empirical value will always be >= 0 because, in a nested setting, the more complex model cannot have a worse likelihood.
\item To generate an expected distribution of the test statistic under H0, we simulate new response data under M0, refit M0 and M1 on this data, and calculate the LRs.
\item Based on this, calculate p-values etc. in the usual way.
}

About warnings: warnings such as "boundary (singular) fit: see ?isSingular" will likely occur in this function and are not necessarily the sign of a problem. lme4 warns if RE variances are fit to zero. This is desired / likely in this case, however, because we are simulating data with zero RE variances. Therefore, warnings are turned off per default. For diagnostic reasons, you can turn warnings on, and possibly also inspect fitted models via the parameter saveModels to see if there are any other problems in the re-fitted models.

Data simulations are performed by \link{getSimulations}, which is a wrapper for the respective model functions. The default for all packages, wherever possible, is to generate marginal simulations (meaning that REs are re-simulated as well). I see no sensible reason to change this, but if you want to, and if supported by the respective regression package, you could do so by supplying the necessary arguments via ...
}
\note{
The logic of an LRT assumes that m0 is nested in m1, which guarantees that L(M1) >= L(M0). The function does not explicitly check if models are nested and will work as long as data can be simulated from M0 that can be refit with M0 and M1; however, I would strongly advise against using this for non-nested models unless you have a good statistical reason for doing so.

Also, note that LRTs may be unreliable when models are fit with REML or some other kind of penalized / restricted ML. Therefore, you should fit your models with ML for use in this function.
} \examples{ library(DHARMa) library(lme4) # create test data set.seed(123) dat <- createData(sampleSize = 200, randomEffectVariance = 1) # define null and alternative model (should be nested) m1 = glmer(observedResponse ~ Environment1 + (1|group), data = dat, family = "poisson") m0 = glm(observedResponse ~ Environment1 , data = dat, family = "poisson") \dontrun{ # run LRT - n should be increased to at least 250 for a real study out = simulateLRT(m0, m1, n = 10) # To inspect warnings thrown during the refits: out = simulateLRT(m0, m1, saveModels = TRUE, suppressWarnings = FALSE, n = 10) summary(out$saveModels[[2]]$refittedM1) # RE SD = 0, no problem # If there are warnings that seem problematic, # could try changing the optimizer or iterations } } \author{ Florian Hartig } DHARMa/man/testPDistribution.Rd0000644000176200001440000000101414677165224016021 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/runBenchmarks.R \name{testPDistribution} \alias{testPDistribution} \title{Plot distribution of p-values.} \usage{ testPDistribution(x, plot = TRUE, main = "p distribution \\n expected is flat at 1", ...) } \arguments{ \item{x}{vector of p values.} \item{plot}{should the values be plotted.} \item{main}{title for the plot.} \item{...}{additional arguments to hist.} } \description{ Plot distribution of p-values. } \author{ Florian Hartig } DHARMa/man/testOverdispersion.Rd0000644000176200001440000000117014677165224016240 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testOverdispersion} \alias{testOverdispersion} \title{Simulated overdispersion tests} \usage{ testOverdispersion(simulationOutput, ...) } \arguments{ \item{simulationOutput}{an object of class DHARMa with simulated quantile residuals, either created via \link{simulateResiduals} or by \link{createDHARMa} for simulations created outside DHARMa} \item{...}{additional arguments to \link{testDispersion}} } \description{ Simulated overdispersion tests } \details{ Deprecated, switch your code to using the \link{testDispersion} function. } DHARMa/man/testQuantiles.Rd0000644000176200001440000000523114677165224015174 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testQuantiles} \alias{testQuantiles} \title{Test for quantiles} \usage{ testQuantiles(simulationOutput, predictor = NULL, quantiles = c(0.25, 0.5, 0.75), plot = TRUE) } \arguments{ \item{simulationOutput}{an object of class DHARMa, either created via \link{simulateResiduals} for supported models or by \link{createDHARMa} for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case.} \item{predictor}{an optional predictor variable to be used, instead of the predicted response (default)} \item{quantiles}{the quantiles to be tested} \item{plot}{if TRUE, the function will create an additional plot} } \description{ This function tests whether the observed residual quantiles deviate from their expected (uniform) values. } \details{ The function fits quantile regressions (via package qgam) on the residuals, and compares their location to the expected location (because of the uniform distribution, the expected location is 0.5 for the 0.5 quantile).
A significant p-value for the splines means the fitted spline deviates from a flat line at the expected location (p-values of intercept and spline are combined via Benjamini & Hochberg adjustment to control the FDR) The p-values of the splines are combined into a total p-value via Benjamini & Hochberg adjustment to control the FDR. } \examples{ testData = createData(sampleSize = 200, overdispersion = 0.0, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # run the quantile test x = testQuantiles(simulationOutput) x # the test shows a combined p-value, corrected for multiple testing \dontrun{ # accessing results of the test x$pvals # pvalues for the individual quantiles x$qgamFits # access the fitted quantile regression summary(x$qgamFits[[1]]) # summary of the first fitted quantile # possible to test user-defined quantiles testQuantiles(simulationOutput, quantiles = c(0.7)) # example with missing environmental predictor fittedModel <- glm(observedResponse ~ 1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) testQuantiles(simulationOutput, predictor = testData$Environment1) plot(simulationOutput) plotResiduals(simulationOutput) } } \seealso{ \link{testResiduals}, \link{testUniformity}, \link{testOutliers}, \link{testDispersion}, \link{testZeroInflation}, \link{testGeneric}, \link{testTemporalAutocorrelation}, \link{testSpatialAutocorrelation}, \link{testQuantiles}, \link{testCategorical} } \author{ Florian Hartig } DHARMa/man/print.DHARMa.Rd0000644000176200001440000000067314677165224014463 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DHARMa.R \name{print.DHARMa} \alias{print.DHARMa} \title{Print simulated residuals} \usage{ \method{print}{DHARMa}(x, ...) } \arguments{ \item{x}{an object with simulated residuals created by \link{simulateResiduals}.} \item{...}{optional arguments for compatibility with the generic function, no function implemented.} } \description{ Print simulated residuals } DHARMa/man/testDispersion.Rd0000644000176200001440000001521014677165224015344 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testDispersion} \alias{testDispersion} \title{DHARMa dispersion tests} \usage{ testDispersion(simulationOutput, alternative = c("two.sided", "greater", "less"), plot = T, type = c("DHARMa", "PearsonChisq"), ...) } \arguments{ \item{simulationOutput}{an object of class DHARMa, either created via \link{simulateResiduals} for supported models or by \link{createDHARMa} for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case.} \item{alternative}{a character string specifying whether the test should test if observations are "greater", "less" or "two.sided" compared to the simulated null hypothesis. Greater corresponds to testing only for overdispersion. It is recommended to keep the default setting (testing for both over and underdispersion)} \item{plot}{whether to provide a plot for the results} \item{type}{which test to run. Default is DHARMa, other options are PearsonChisq (see details)} \item{...}{arguments to pass on to \link{testGeneric}} } \description{ This function performs simulation-based tests for over/underdispersion. 
If type = "DHARMa" (default and recommended), simulation-based dispersion tests are performed. Their behavior differs depending on whether simulations are done with refit = F, or refit = T, and whether data is simulated conditional (e.g. re.form ~0 in lme4) (see below). If type = "PearsonChisq", a chi2 test on Pearson residuals is performed. } \details{ Over / underdispersion means that the observed data is more / less dispersed than expected under the fitted model. There is no unique way to test for dispersion problems, and there are a number of different dispersion tests implemented in various R packages. The testDispersion function implements several dispersion tests: \strong{Simulation-based dispersion tests (type == "DHARMa")} If type = "DHARMa" (default and recommended), simulation-based dispersion tests are performed. Their behavior differs depending on whether simulations are done with refit = F, or refit = T #' \strong{Important:} for either refit = T or F, the results of type = "DHARMa" dispersion test will differ depending on whether simulations are done conditional (= conditional on fitted random effects) or unconditional (= REs are re-simulated). How to change between conditional or unconditional simulations is discussed in \link{simulateResiduals}. The general default in DHARMa is to use unconditional simulations, because this has advantages in other situations, but dispersion tests for models with strong REs specifically may increase substantially in power / sensitivity when switching to conditional simulations. I therefore recommend checking dispersion with conditional simulations if supported by the used regression package. If refit = F, the function uses \link{testGeneric} to compare the variance of the observed raw residuals (i.e. var(observed - predicted), displayed as a red line) against the variance of the simulated residuals (i.e. var(simulated - predicted), histogram). The variances are scaled to the mean simulated variance. A significant ratio > 1 indicates overdispersion, a significant ratio < 1 underdispersion. If refit = T, the function compares the approximate deviance (via squared pearson residuals) with the same quantity from the models refitted with simulated data. Applying this is much slower than the previous alternative. Given the computational cost, I would suggest that most users will be satisfied with the standard dispersion test. ** Analytical dispersion tests (type == "PearsonChisq")** This is the test described in https://bbolker.github.io/mixedmodels-misc/glmmFAQ.html#overdispersion, identical to performance::check_overdispersion. Works only if the fitted model provides df.residual and Pearson residuals. The test statistics is biased to lower values under quite general conditions, and will therefore tend to test significant for underdispersion. It is recommended to use this test only for overdispersion, i.e. use alternative == "greater". Also, obviously, it requires that Pearson residuals are available for the chosen model, which will not be the case for all models / packages. } \note{ For particular model classes / situations, there may be more powerful and thus preferable over the DHARMa test. The advantage of the DHARMa test is that it directly targets the spread of the data (unless other tests such as dispersion/df, which essentially measure fit and may thus be triggered by problems other than dispersion as well), and it makes practically no assumptions about the fitted model, other than the availability of simulations. 
} \examples{ library(lme4) set.seed(123) testData = createData(sampleSize = 100, overdispersion = 0.5, randomEffectVariance = 1) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # default DHARMa dispersion test - simulation-based testDispersion(simulationOutput) testDispersion(simulationOutput, alternative = "less", plot = FALSE) # only underdispersion testDispersion(simulationOutput, alternative = "greater", plot = FALSE) # only oversispersion # for mixed models, the test is usually more powerful if residuals are calculated # conditional on fitted REs simulationOutput <- simulateResiduals(fittedModel = fittedModel, re.form = NULL) testDispersion(simulationOutput) # DHARMa also implements the popular Pearson-chisq test that is also on the glmmWiki by Ben Bolker # The issue with this test is that it requires the df of the model, which are not well defined # for GLMMs. It is biased towards underdispersion, with bias getting larger with the number # of RE groups. In doubt, only test for overdispersion testDispersion(simulationOutput, type = "PearsonChisq", alternative = "greater") # if refit = TRUE, a different test on simulated Pearson residuals will calculated (see help) simulationOutput2 <- simulateResiduals(fittedModel = fittedModel, refit = TRUE, seed = 12, n = 20) testDispersion(simulationOutput2) # often useful to test dispersion per group (in particular for binomial data, see vignette) simulationOutputAggregated = recalculateResiduals(simulationOutput2, group = testData$group) testDispersion(simulationOutputAggregated) } \seealso{ \link{testResiduals}, \link{testUniformity}, \link{testOutliers}, \link{testDispersion}, \link{testZeroInflation}, \link{testGeneric}, \link{testTemporalAutocorrelation}, \link{testSpatialAutocorrelation}, \link{testQuantiles}, \link{testCategorical} } \author{ Florian Hartig } DHARMa/man/plotConventionalResiduals.Rd0000644000176200001440000000055614665273541017545 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plots.R \name{plotConventionalResiduals} \alias{plotConventionalResiduals} \title{Conventional residual plot} \usage{ plotConventionalResiduals(fittedModel) } \arguments{ \item{fittedModel}{a fitted model object} } \description{ Convenience function to draw conventional residual plots } DHARMa/man/testTemporalAutocorrelation.Rd0000644000176200001440000002313014677165224020103 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testTemporalAutocorrelation} \alias{testTemporalAutocorrelation} \title{Test for temporal autocorrelation} \usage{ testTemporalAutocorrelation(simulationOutput, time, alternative = c("two.sided", "greater", "less"), plot = TRUE) } \arguments{ \item{simulationOutput}{an object of class DHARMa, either created via \link{simulateResiduals} for supported models or by \link{createDHARMa} for simulations created outside DHARMa, or a supported model. 
Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case.} \item{time}{the time, in the same order as the data points.} \item{alternative}{a character string specifying whether the test should test if observations are "greater", "less" or "two.sided" compared to the simulated null hypothesis} \item{plot}{whether to plot output} } \description{ This function performs a standard test for temporal autocorrelation on the simulated residuals } \details{ The function performs a Durbin-Watson test on the uniformly scaled residuals, and plots the residuals against time. The DB test was originally be designed for normal residuals. In simulations, I didn't see a problem with this setting though. The alternative is to transform the uniform residuals to normal residuals and perform the DB test on those. Testing for temporal autocorrelation requires unique time values - if you have several observations per time value, either use \link{recalculateResiduals} function to aggregate residuals per time step, or extract the residuals from the fitted object, and plot / test each of them independently for temporally repeated subgroups (typical choices would be location / subject etc.). Note that the latter must be done by hand, outside testTemporalAutocorrelation. } \note{ Standard DHARMa simulations from models with (temporal / spatial / phylogenetic) conditional autoregressive terms will still have the respective temporal / spatial / phylogenetic correlation in the DHARMa residuals, unless the package you are using is modelling the autoregressive terms as explicit REs and is able to simulate conditional on the fitted REs. This has two consequences \enumerate{ \item If you check the residuals for such a model, they will still show significant autocorrelation, even if the model fully accounts for this structure. \item Because the DHARMa residuals for such a model are not statistically independent any more, other tests (e.g. dispersion, uniformity) may have inflated type I error, i.e. you will have a higher likelihood of spurious residual problems. } There are three (non-exclusive) routes to address these issues when working with spatial / temporal / other autoregressive models: \enumerate{ \item Simulate conditional on the fitted CAR structures (see conditional simulations in the help of \link{simulateResiduals}) \item Rotate simulations prior to residual calculations (see parameter rotation in \link{simulateResiduals}) \item Use custom tests / plots that explicitly compare the correlation structure in the simulated data to the correlation structure in the observed data. } } \examples{ testData = createData(sampleSize = 40, family = gaussian(), randomEffectVariance = 0) fittedModel <- lm(observedResponse ~ Environment1, data = testData) res = simulateResiduals(fittedModel) # Standard use testTemporalAutocorrelation(res, time = testData$time) # If you have several observations per time step, e.g. 
# because you have several locations, you will have to # aggregate timeSeries1 = createData(sampleSize = 40, family = gaussian(), randomEffectVariance = 0) timeSeries1$location = 1 timeSeries2 = createData(sampleSize = 40, family = gaussian(), randomEffectVariance = 0) timeSeries2$location = 2 testData = rbind(timeSeries1, timeSeries2) fittedModel <- lm(observedResponse ~ Environment1, data = testData) res = simulateResiduals(fittedModel) # Will not work because several residuals per time # testTemporalAutocorrelation(res, time = testData$time) # aggregating residuals by time res = recalculateResiduals(res, group = testData$time) testTemporalAutocorrelation(res, time = unique(testData$time)) # testing only subgroup location 1, could do same with loc 2 res = recalculateResiduals(res, sel = testData$location == 1) testTemporalAutocorrelation(res, time = unique(testData$time)) # example to demonstrate problems with strong temporal correlations and # how to possibly remove them by rotating residuals # note that if your model allows to condition on estimated REs, this may # be preferable! \dontrun{ set.seed(123) # Gaussian error # Create AR data with 5 observations per time point n <- 100 x <- MASS::mvrnorm(mu = rep(0,n), Sigma = .9 ^ as.matrix(dist(1:n)) ) y <- rep(x, each = 5) + 0.2 * rnorm(5*n) times <- factor(rep(1:n, each = 5), levels=1:n) levels(times) group <- factor(rep(1,n*5)) dat0 <- data.frame(y,times,group) # fit model / would be similar for nlme::gls and similar models model = glmmTMB(y ~ ar1(times + 0 | group), data=dat0) # Note that standard residuals still show problems because of autocorrelation res <- simulateResiduals(model) plot(res) # The reason is that most (if not all) autoregressive models treat the # autocorrelated error as random, i.e. the autocorrelated error structure # is not used for making predictions. If you then make predictions based # on the fixed effects and calculate residuals, the autocorrelation in the # residuals remains. We can see this if we again calculate the auto- # correlation test res2 <- recalculateResiduals(res, group=dat0$times) testTemporalAutocorrelation(res2, time = 1:length(res2$scaledResiduals)) # so, how can we check then if the current model correctly removed the # autocorrelation? # Option 1: rotate the residuals in the direction of the autocorrelation # to make the independent. Note that this only works perfectly for gls # type models as nonlinear link function make the residuals covariance # different from a multivariate normal distribution # this can be either done by extracting estimated AR1 covariance cov <- VarCorr(model) cov <- cov$cond$group # extract covariance matrix of REs # grouped according to times, rotated with estimated Cov - how all fine! res3 <- recalculateResiduals(res, group=dat0$times, rotation=cov) plot(res3) testTemporalAutocorrelation(res3, time = 1:length(res2$scaledResiduals)) # alternatively, you can let DHARMa estimate the covariance from the # simulations res4 <- recalculateResiduals(res, group=dat0$times, rotation="estimated") plot(res4) testTemporalAutocorrelation(res3, time = 1:length(res2$scaledResiduals)) # Alternatively, in glmmTMB, we can condition on the estimated correlated # residuals. 
Unfortunately, in this case, we will have to do simulations by # hand as glmmTMB does not allow to simulate conditional on a fitted # correlation structure # re.form = NULL creates predictions conditional on the fitted temporally # autocorreated REs pred = predict(model, re.form = NULL) # now we simulate data, conditional on the autocorrelation part, with the # uncorrelated residual error simulations = sapply(1:250, function(i) rnorm(length(pred), pred, summary(model)$sigma)) res5 = createDHARMa(simulations, dat0$y, pred) plot(res5) res5b <- recalculateResiduals(res5, group=dat0$times) testTemporalAutocorrelation(res5b, time = 1:length(res5b$scaledResiduals)) # Poisson error # note that for GLMMs, covariances will be estimated at the scale of the # linear predictor, while residual covariance will be at the responses scale # and thus further distorted by the link. Thus, for GLMMs with a nonlinear # link, there will be no exact rotation for a given covariance structure set.seed(123) # Create AR data with 5 observations per time point n <- 100 x <- MASS::mvrnorm(mu = rep(0,n), Sigma = .9 ^ as.matrix(dist(1:n)) ) y <- rpois(n = n*5, lambda = exp(rep(x, each = 5))) times <- factor(rep(1:n, each = 5), levels=1:n) levels(times) group <- factor(rep(1,n*5)) dat0 <- data.frame(y,times,group) # fit model model = glmmTMB(y ~ ar1(times + 0 | group), data=dat0, family = poisson) res <- simulateResiduals(model) # grouped according to times, unrotated res2 <- recalculateResiduals(res, group=dat0$times) testTemporalAutocorrelation(res2, time = 1:length(res2$scaledResiduals)) # grouped according to times, rotated with estimated Cov - problems remain cov <- VarCorr(model) cov <- cov$cond$group # extract covariance matrix of REs res3 <- recalculateResiduals(res, group=dat0$times, rotation=cov) testTemporalAutocorrelation(res3, time = 1:length(res2$scaledResiduals)) # grouped according to times, rotated with covariance estimated from residual # simulations at the response scale res4 <- recalculateResiduals(res, group=dat0$times, rotation="estimated") testTemporalAutocorrelation(res4, time = 1:length(res2$scaledResiduals)) } } \seealso{ \link{testResiduals}, \link{testUniformity}, \link{testOutliers}, \link{testDispersion}, \link{testZeroInflation}, \link{testGeneric}, \link{testTemporalAutocorrelation}, \link{testSpatialAutocorrelation}, \link{testQuantiles}, \link{testCategorical} } \author{ Florian Hartig } DHARMa/man/residuals.DHARMa.Rd0000644000176200001440000000517114677165224015320 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DHARMa.R \name{residuals.DHARMa} \alias{residuals.DHARMa} \title{Return residuals of a DHARMa simulation} \usage{ \method{residuals}{DHARMa}(object, quantileFunction = NULL, outlierValues = NULL, ...) } \arguments{ \item{object}{an object with simulated residuals created by \link{simulateResiduals}} \item{quantileFunction}{optional - a quantile function to transform the uniform 0/1 scaling of DHARMa to another distribution} \item{outlierValues}{if a quantile function with infinite support (such as dnorm) is used, residuals that are 0/1 are mapped to -Inf / Inf. 
outlierValues allows to convert -Inf / Inf values to an optional min / max value.} \item{...}{optional arguments for compatibility with the generic function, no function implemented} } \description{ Return residuals of a DHARMa simulation } \details{ The function accesses the slot $scaledResiduals in a fitted DHARMa object, and optionally transforms the standard DHARMa quantile residuals (which have a uniform distribution) to a particular pdf. } \note{ Some of the papers on simulated quantile residuals transform the residuals (which are natively uniform) back to a normal distribution. I presume this is because of the larger familiarity of most users with normal residuals. Personally, I never considered this desirable, for the reasons explained in https://github.com/florianhartig/DHARMa/issues/39, but with this function, I wanted to give users the option to plot normal residuals if they so wish. } \examples{ library(lme4) testData = createData(sampleSize = 100, overdispersion = 0.5, family = poisson()) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # standard plot plot(simulationOutput) # one of the possible tests, for other options see ?testResiduals / vignette testDispersion(simulationOutput) # the calculated residuals can be accessed via residuals(simulationOutput) # transform residuals to other pdf, see ?residuals.DHARMa for details residuals(simulationOutput, quantileFunction = qnorm, outlierValues = c(-7,7)) # get residuals that are outside the simulation envelope outliers(simulationOutput) # calculating aggregated residuals per group simulationOutput2 = recalculateResiduals(simulationOutput, group = testData$group) plot(simulationOutput2, quantreg = FALSE) # calculating residuals only for a subset of the data simulationOutput3 = recalculateResiduals(simulationOutput, sel = testData$group == 1 ) plot(simulationOutput3, quantreg = FALSE) } DHARMa/man/testZeroInflation.Rd0000644000176200001440000001112014703461527015777 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testZeroInflation} \alias{testZeroInflation} \title{Tests for zero-inflation} \usage{ testZeroInflation(simulationOutput, ...) } \arguments{ \item{simulationOutput}{an object of class DHARMa, either created via \link{simulateResiduals} for supported models or by \link{createDHARMa} for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case.} \item{...}{further arguments to \link{testGeneric}} } \description{ This function compares the observed number of zeros with the zeros expected from simulations. } \details{ Zero-inflation means that the observed data contain more zeros than would be expected under the fitted model. Zero-inflation must always be assessed with respect to a particular model, so the mere fact that there are many zeros in the observed data is not an indication of zero-inflation, see Warton, D. I. (2005). Many zeros does not mean zero inflation: comparing the goodness-of-fit of parametric models to multivariate abundance data. Environmetrics 16(3), 275-289. The testZeroInflation function simulates new datasets from the fitted model and compares this null distribution (gray histogram in the plot) with the observed values (red line in the plot).
Technically, it is a wrapper for \link{testGeneric}, with the summary argument set to function(x) sum(x == 0). The test statistic is the ratio of observed to simulated zeros. A value < 1 means that the observed data have fewer zeros than expected, a value > 1 means that they have more zeros than expected (aka zero inflation). By default, the function tests both sides, so it would also test for fewer zeros than expected. } \note{ Zero-inflation can occur for a number of reasons other than an underlying data generating process corresponding to a ZIP model. Vice versa, it is very well possible that no zero-inflation will be observed when fitting models to data derived from a ZIP process. The latter is due to the fact that excess zeros can often be explained by other model parameters, such as the theta parameter in the negative binomial. For this reason, results of the zero-inflation test should be interpreted as a residual pattern that can have many reasons, not as a decision criterion for whether or not to fit a ZIP model. To decide whether to add a ZIP term, I would advise relying on appropriate model selection techniques such as AIC, BIC, WAIC, Bayes factor, or LRT. Note that these tests are often not reliable in GLMMs because it is difficult to determine the df spent by the different models. The \link{simulateLRT} function in DHARMa provides a nonparametric alternative to obtain p-values for LRT is nested models with unknown df. } \examples{ testData = createData(sampleSize = 100, overdispersion = 0.5, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # the plot function shows 2 plots and runs 4 tests # i) KS test i) Dispersion test iii) Outlier test iv) quantile test plot(simulationOutput, quantreg = TRUE) # testResiduals tests distribution, dispersion and outliers testResiduals(simulationOutput) ####### Individual tests ####### # KS test for correct distribution of residuals testUniformity(simulationOutput) # KS test for correct distribution within and between groups testCategorical(simulationOutput, testData$group) # Dispersion test - for details see ?testDispersion testDispersion(simulationOutput) # tests under and overdispersion # Outlier test (number of observations outside simulation envelope) # Use type = "boostrap" for exact values, see ?testOutliers testOutliers(simulationOutput, type = "binomial") # testing zero inflation testZeroInflation(simulationOutput) # testing generic summaries countOnes <- function(x) sum(x == 1) # testing for number of 1s testGeneric(simulationOutput, summary = countOnes) # 1-inflation testGeneric(simulationOutput, summary = countOnes, alternative = "less") # 1-deficit means <- function(x) mean(x) # testing if mean prediction fits testGeneric(simulationOutput, summary = means) spread <- function(x) sd(x) # testing if mean sd fits testGeneric(simulationOutput, summary = spread) } \seealso{ \link{testResiduals}, \link{testUniformity}, \link{testOutliers}, \link{testDispersion}, \link{testZeroInflation}, \link{testGeneric}, \link{testTemporalAutocorrelation}, \link{testSpatialAutocorrelation}, \link{testQuantiles}, \link{testCategorical} } \author{ Florian Hartig } DHARMa/man/runBenchmarks.Rd0000644000176200001440000000765314677165224015143 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/runBenchmarks.R \name{runBenchmarks} \alias{runBenchmarks} \title{Benchmark calculations} 
\usage{ runBenchmarks(calculateStatistics, controlValues = NULL, nRep = 10, alpha = 0.05, parallel = FALSE, exportGlobal = FALSE, ...) } \arguments{ \item{calculateStatistics}{the statistics to be benchmarked. Should return one value, or a vector of values. If controlValues are given, must accept a parameter control} \item{controlValues}{optionally, a vector with a control parameter (e.g. to vary the strength of a problem the test should be specific to). See help for an example} \item{nRep}{number of replicates per level of the controlValues} \item{alpha}{significance level} \item{parallel}{whether to use parallel computations. Possible values are F, T (sets the cores automatically to number of available cores -1), or an integer number for the number of cores that should be used for the cluster} \item{exportGlobal}{whether the global environment should be exported to the parallel nodes. This will use more memory. Set to true only if you function calculate statistics depends on other functions or global variables.} \item{...}{additional parameters to calculateStatistics} } \value{ A object with list structure of class DHARMaBenchmark. Contains an entry simulations with a matrix of simulations, and an entry summaries with an list of summaries (significant (T/F), mean, p-value for KS-test uniformity). Can be plotted with \link{plot.DHARMaBenchmark} } \description{ This function runs statistical benchmarks, including Power / Type I error simulations for an arbitrary test with a control parameter } \note{ The benchmark function in DHARMa are intended for development purposes, and for users that want to test / confirm the properties of functions in DHARMa. If you are running an applied data analysis, they are probably of little use. } \examples{ # define a function that will run a simulation and return a number of statistics, typically p-values returnStatistics <- function(control = 0){ testData = createData(sampleSize = 20, family = poisson(), overdispersion = control, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1, data = testData, family = poisson()) res <- simulateResiduals(fittedModel = fittedModel, n = 250) out <- c(testUniformity(res, plot = FALSE)$p.value, testDispersion(res, plot = FALSE)$p.value) return(out) } # testing a single return returnStatistics() # running benchmark for a fixed simulation, increase nRep for sensible results out = runBenchmarks(returnStatistics, nRep = 5) # plotting results depend on whether a vector or a single value is provided for control plot(out) \dontrun{ # running benchmark with varying control values out = runBenchmarks(returnStatistics, controlValues = c(0,0.5,1), nRep = 100) plot(out) # running benchmark can be done using parallel cores out = runBenchmarks(returnStatistics, nRep = 100, parallel = TRUE) out = runBenchmarks(returnStatistics, controlValues = c(0,0.5,1), nRep = 10, parallel = TRUE) # Alternative plot function using vioplot, provides nicer pictures plot.DHARMaBenchmark <- function(x, ...){ if(length(x$controlValues)== 1){ vioplot::vioplot(x$simulations[,x$nSummaries:1], las = 2, horizontal = TRUE, side = "right", areaEqual = FALSE, main = "p distribution under H0", ylim = c(-0.15,1), ...) abline(v = 1, lty = 2) abline(v = c(0.05, 0), lty = 2, col = "red") text(-0.1, x$nSummaries:1, labels = x$summaries$propSignificant[-1]) }else{ res = x$summaries$propSignificant matplot(res$controlValues, res[,-1], type = "l", main = "Power analysis", ylab = "Power", ...) 
legend("bottomright", colnames(res[,-1]), col = 1:x$nSummaries, lty = 1:x$nSummaries, lwd = 2) } } } } \seealso{ \link{plot.DHARMaBenchmark} } \author{ Florian Hartig } DHARMa/man/getQuantile.Rd0000644000176200001440000001017214703461527014604 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/helper.R \name{getQuantile} \alias{getQuantile} \title{Calculate Residual Quantiles} \usage{ getQuantile(simulations, observed, integerResponse, method = c("PIT", "traditional"), rotation = NULL) } \arguments{ \item{simulations}{A matrix with simulations from a fitted model. Rows = observations, columns = replicate simulations.} \item{observed}{A vector with the observed data.} \item{integerResponse}{Is the response integer-valued? Only has an effect for method = "traditional".} \item{method}{The quantile randomization method used. See details.} \item{rotation}{Optional rotation of the residuals. You can either provide as a known or estimated covariance matrix (e.g. when fitting an AR1 model), or use the argument "estimated", in which case the residual covariance will be approximated by simulations. See comments in details.} } \description{ Calculates residual quantiles from a given simulation. } \details{ The function calculates residual quantiles from the simulated data. For continuous distributions, this will simply be the value of the ecdf. \strong{Randomization procedure for discrete data} For discrete data, there are two options implemented. The current default (available since DHARMa 0.3.1) are probability integral transform (PIT-) residuals (Smith, 1985; Dunn & Smyth, 1996; see also Warton, et al., 2017). Before DHARMa 0.3.1, a different randomization procedure was used, in which the a U(-0.5, 0.5) distribution was added on observations and simulations for discrete distributions. For a completely discrete distribution, the two procedures should deliver equivalent results, but the second method has the disadvantage that (a) one has to know if the distribution is discrete (DHARMa tries to recognize this automatically), and (b) that it leads to inefficiencies for some distributions such as the Tweedie, which are partly continuous, partly discrete (see e.g. \href{https://github.com/florianhartig/DHARMa/issues/168}{issue #168} on DHARMa GitHub page). \strong{Rotation (optional)} The getQuantile function includes an additional option to rotate residuals prior to calculating the quantile residuals. This option should ONLY be used when the fitted model includes a particular residuals covariance structure, such as an AR1 or a spatial or phylogenetic CAR model. For these models, residuals calculated from unconditional simulations will include the specified covariance structure, which will trigger e.g. temporal autocorrelation tests and can inflate type I errors of other tests. The idea of the rotation is to rotate the residual space according to the covariance structure of the fitted model, such that the rotated residuals are conditional independent (provided the fitted model is correct). If the residual covariance of the fitted model at the response scale can be extracted (e.g. when fitting gls type models), it would be best to extract it and provide this covariance matrix to the rotation option. If that is not the case, providing the argument "estimated" to rotation will estimate the covariance from the data simulated by the model. 
This is probably without alternative for GLMMs, where the covariance at the response scale is likely not known / provided, but note, that this approximation will tend to have considerable error and may be slow to compute for high-dimensional data. If you try to estimate the rotation from simulations, you should set n as high as possible! See \link{testTemporalAutocorrelation} for a practical example. The rotation of residuals implemented here is similar to the Variogram.lme() and Variongram.gls() functions in nlme package using the argument resType = "normalized". } \references{ Smith, J. Q. "Diagnostic checks of non-standard time series models." Journal of Forecasting 4.3 (1985): 283-291. Dunn, P.K., & Smyth, G.K. (1996). Randomized quantile residuals. Journal of Computational and Graphical Statistics 5, 236-244. Warton, David I., LoĆÆc Thibaut, and Yi Alice Wang. "The PIT-trap—A ā€œmodel-freeā€ bootstrap procedure for inference about regression models with discrete, multivariate responses." PloS one 12.7 (2017). } DHARMa/man/plot.DHARMa.Rd0000644000176200001440000000776714701671620014306 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plots.R \name{plot.DHARMa} \alias{plot.DHARMa} \title{DHARMa standard residual plots} \usage{ \method{plot}{DHARMa}(x, title = "DHARMa residual", ...) } \arguments{ \item{x}{An object of class DHARMa with simulated residuals created by \link{simulateResiduals}.} \item{title}{The title for both panels (plotted via mtext, outer = TRUE).} \item{...}{Further options for \link{plotResiduals}. Consider in particular parameters quantreg, rank and asFactor. xlab, ylab and main cannot be changed when using plot.DHARMa, but can be changed when using \link{plotResiduals}.} } \description{ This S3 function creates standard plots for the simulated residuals contained in an object of class DHARMa, using \link{plotQQunif} (left panel) and \link{plotResiduals} (right panel) } \details{ The function creates a plot with two panels. The left panel is a uniform qq plot (calling \link{plotQQunif}), and the right panel shows residuals against predicted values (calling \link{plotResiduals}), with outliers highlighted in red (default color but see Note). Very briefly, we would expect that a correctly specified model shows: a) a straight 1-1 line, as well as non-significance of the displayed tests in the qq-plot (left) -> evidence for an the correct overall residual distribution (for more details on the interpretation of this plot, see \link{plotQQunif}) b) visual homogeneity of residuals in both vertical and horizontal direction, as well as n.s. of quantile tests in the res ~ predictor plot (for more details on the interpretation of this plot, see \link{plotResiduals}) Deviations from these expectations can be interpreted similar to a linear regression. See the vignette for detailed examples. Note that, unlike \link{plotResiduals}, plot.DHARMa command uses the default rank = T. } \note{ The color for highlighting outliers and significant tests can be changed by setting \code{options(DHARMaSignalColor = "red")} to a different color. See \code{getOption("DHARMaSignalColor")} for the current setting. This is convenient for a color-blind friendly display, since red and black are difficult for some people to separate. 
} \examples{ testData = createData(sampleSize = 200, family = poisson(), randomEffectVariance = 1, numGroups = 10) fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) ######### main plotting function ############# # for all functions, quantreg = T will be more # informative, but slower plot(simulationOutput, quantreg = FALSE) ############# Distribution ###################### plotQQunif(simulationOutput = simulationOutput, testDispersion = FALSE, testUniformity = FALSE, testOutliers = FALSE) hist(simulationOutput ) ############# residual plots ############### # rank transformation, using a simulationOutput plotResiduals(simulationOutput, rank = TRUE, quantreg = FALSE) # smooth scatter plot - usually used for large datasets, default for n > 10000 plotResiduals(simulationOutput, rank = TRUE, quantreg = FALSE, smoothScatter = TRUE) # residual vs predictors, using explicit values for pred, residual plotResiduals(simulationOutput, form = testData$Environment1, quantreg = FALSE) # if pred is a factor, or if asFactor = TRUE, will produce a boxplot plotResiduals(simulationOutput, form = testData$group) # to diagnose overdispersion and heteroskedasticity it can be useful to # display residuals as absolute deviation from the expected mean 0.5 plotResiduals(simulationOutput, absoluteDeviation = TRUE, quantreg = FALSE) # All these options can also be provided to the main plotting function # If you want to plot summaries per group, use simulationOutput = recalculateResiduals(simulationOutput, group = testData$group) plot(simulationOutput, quantreg = FALSE) # we see one residual point per RE } \seealso{ \link{plotResiduals}, \link{plotQQunif} } DHARMa/man/transformQuantiles.Rd0000644000176200001440000000175714677165224016241 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/transformQuantiles.R \name{transformQuantiles} \alias{transformQuantiles} \title{Transform quantiles to pdf (deprecated)} \usage{ transformQuantiles(res, quantileFunction = qnorm, outlierValue = 7) } \arguments{ \item{res}{an object with simulated residuals created by \link{simulateResiduals}} \item{quantileFunction}{optional - a quantile function to transform the uniform 0/1 scaling of DHARMa to another distribution} \item{outlierValue}{if a quantile function with infinite support (such as dnorm) is used, residuals that are 0/1 are mapped to -Inf / Inf. outlierValues allows to convert -Inf / Inf values to an optional min / max value.} } \description{ The purpose of this function was to transform the DHARMa quantile residuals (which have a uniform distribution) to a particular pdf. Since DHARMa 0.3.0, this functionality is integrated in the \link{residuals.DHARMa} function. Please switch to using this function. } DHARMa/man/testCategorical.Rd0000644000176200001440000000667414677165224015460 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testCategorical} \alias{testCategorical} \title{Test for categorical dependencies} \usage{ testCategorical(simulationOutput, catPred, quantiles = c(0.25, 0.5, 0.75), plot = TRUE) } \arguments{ \item{simulationOutput}{an object of class DHARMa, either created via \link{simulateResiduals} for supported models or by \link{createDHARMa} for simulations created outside DHARMa, or a supported model. 
Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case.} \item{catPred}{a categorical predictor with the same dimensions as the residuals in simulationOutput} \item{quantiles}{whether to draw the quantile lines.} \item{plot}{if TRUE, the function will create an additional plot} } \description{ This function tests if there are probles in a res ~ group structure. It performs two tests: test for within-group uniformity, and test for between-group homogeneity of variances } \details{ The function tests for two common problems: are residuals within each group distributed according to model assumptions, and is the variance between group heterogeneous. The test for within-group uniformity is performed via multipe KS-tests, with adjustment of p-values for multiple testing. If the plot is drawn, problematic groups are highlighted in red, and a corresponding message is displayed in the plot. The test for homogeneity of variances is done with a Levene test. A significant p-value means that group variances are not constant. In this case, you should consider modelling variances, e.g. via ~dispformula in glmmTMB. } \examples{ testData = createData(sampleSize = 100, overdispersion = 0.5, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # the plot function shows 2 plots and runs 4 tests # i) KS test i) Dispersion test iii) Outlier test iv) quantile test plot(simulationOutput, quantreg = TRUE) # testResiduals tests distribution, dispersion and outliers testResiduals(simulationOutput) ####### Individual tests ####### # KS test for correct distribution of residuals testUniformity(simulationOutput) # KS test for correct distribution within and between groups testCategorical(simulationOutput, testData$group) # Dispersion test - for details see ?testDispersion testDispersion(simulationOutput) # tests under and overdispersion # Outlier test (number of observations outside simulation envelope) # Use type = "boostrap" for exact values, see ?testOutliers testOutliers(simulationOutput, type = "binomial") # testing zero inflation testZeroInflation(simulationOutput) # testing generic summaries countOnes <- function(x) sum(x == 1) # testing for number of 1s testGeneric(simulationOutput, summary = countOnes) # 1-inflation testGeneric(simulationOutput, summary = countOnes, alternative = "less") # 1-deficit means <- function(x) mean(x) # testing if mean prediction fits testGeneric(simulationOutput, summary = means) spread <- function(x) sd(x) # testing if mean sd fits testGeneric(simulationOutput, summary = spread) } \seealso{ \link{testResiduals}, \link{testUniformity}, \link{testOutliers}, \link{testDispersion}, \link{testZeroInflation}, \link{testGeneric}, \link{testTemporalAutocorrelation}, \link{testSpatialAutocorrelation}, \link{testQuantiles}, \link{testCategorical} } \author{ Florian Hartig } DHARMa/man/benchmarkRuntime.Rd0000644000176200001440000000251014677165224015622 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/runBenchmarks.R \name{benchmarkRuntime} \alias{benchmarkRuntime} \title{Benchmark runtimes of several functions} \usage{ benchmarkRuntime(createModel, evaluationFunctions, n) } \arguments{ \item{createModel}{a function that creates and returns a fitted model.} \item{evaluationFunctions}{a list of functions that are to be evaluated on the fitted models.} 
\item{n}{number of replicates.} } \description{ Benchmark runtimes of several functions } \details{ This is a small helper function designed to benchmark runtimes of several operations that are to be performed on a list of fitted models. In the example, this is used to benchmark the runtimes of several DHARMa tests. } \examples{ createModel = function(){ testData = createData(family = poisson(), overdispersion = 1, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1, data = testData, family = poisson()) return(fittedModel) } a = function(m){ testUniformity(m, plot = FALSE)$p.value } b = function(m){ testDispersion(m, plot = FALSE)$p.value } c = function(m){ testDispersion(m, plot = FALSE, type = "PearsonChisq")$p.value } evaluationFunctions = list(a,b, c) benchmarkRuntime(createModel, evaluationFunctions, 2) } \author{ Florian Hartig } DHARMa/man/getResiduals.Rd0000644000176200001440000000302014677165224014754 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compatibility.R \name{getResiduals} \alias{getResiduals} \alias{getResiduals.default} \alias{getResiduals.MixMod} \title{Get model residuals} \usage{ getResiduals(object, ...) \method{getResiduals}{default}(object, ...) \method{getResiduals}{MixMod}(object, ...) } \arguments{ \item{object}{a fitted model.} \item{...}{additional parameters to be passed on, usually to the residual function of the respective model class.} } \description{ Wrapper to get the residuals of a fitted model. } \details{ The purpose of this wrapper is to standardize the extraction of model residuals. Similar to some other functions, a key question is whether to calculate those conditional or unconditional on the fitted Random Effects. } \examples{ testData = createData(sampleSize = 400, family = gaussian()) fittedModel <- lm(observedResponse ~ Environment1 , data = testData) # response that was used to fit the model getObservedResponse(fittedModel) # predictions of the model for these points getFitted(fittedModel) # extract simulations from the model as matrix getSimulations(fittedModel, nsim = 2) # extract simulations from the model for refit (often requires different structure) x = getSimulations(fittedModel, nsim = 2, type = "refit") getRefit(fittedModel, x[[1]]) getRefit(fittedModel, getObservedResponse(fittedModel)) } \seealso{ \link{getObservedResponse}, \link{getSimulations}, \link{getRefit}, \link{getFixedEffects}, \link{getFitted} } \author{ Florian Hartig } DHARMa/man/testGeneric.Rd0000644000176200001440000000760014677165224014605 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testGeneric} \alias{testGeneric} \title{Test for a generic summary statistic based on simulated data} \usage{ testGeneric(simulationOutput, summary, alternative = c("two.sided", "greater", "less"), plot = T, methodName = "DHARMa generic simulation test") } \arguments{ \item{simulationOutput}{an object of class DHARMa, either created via \link{simulateResiduals} for supported models or by \link{createDHARMa} for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case.} \item{summary}{a function that can be applied to simulated / observed data. 
See examples below} \item{alternative}{a character string specifying whether the test should test if observations are "greater", "less" or "two.sided" compared to the simulated null hypothesis} \item{plot}{whether to plot the simulated summary} \item{methodName}{name of the test (will be used in plot)} } \description{ This function tests if a user-defined summary differs when applied to simulated / observed data. } \details{ This function applies a user-defined summary to the simulated/observed data of a DHARMa object and then performs a hypothesis test using the ratio Obs / Sim as the test statistic. The summary is applied directly to the data and not to the residuals, but it can easily be remodeled to apply summaries to the residuals by simply defining something like f = function(x) summary (x - predictions), as done in \link{testDispersion} } \note{ The summary function you specify will be applied to the data as it appears in your fitted model, which may not always be what you want. As an example, consider the case where we want to test for n-inflation in k/n data. If you provide your data via cbind (k, n-k), you have to test for n-inflation, but if you provide your data via k/n and weights = n, you should test for 1-inflation. When in doubt, check how the data is represented internally in model.frame(model) or via simulate(model). } \examples{ testData = createData(sampleSize = 100, overdispersion = 0.5, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # the plot function shows 2 plots and runs 4 tests # i) KS test i) Dispersion test iii) Outlier test iv) quantile test plot(simulationOutput, quantreg = TRUE) # testResiduals tests distribution, dispersion and outliers testResiduals(simulationOutput) ####### Individual tests ####### # KS test for correct distribution of residuals testUniformity(simulationOutput) # KS test for correct distribution within and between groups testCategorical(simulationOutput, testData$group) # Dispersion test - for details see ?testDispersion testDispersion(simulationOutput) # tests under and overdispersion # Outlier test (number of observations outside simulation envelope) # Use type = "boostrap" for exact values, see ?testOutliers testOutliers(simulationOutput, type = "binomial") # testing zero inflation testZeroInflation(simulationOutput) # testing generic summaries countOnes <- function(x) sum(x == 1) # testing for number of 1s testGeneric(simulationOutput, summary = countOnes) # 1-inflation testGeneric(simulationOutput, summary = countOnes, alternative = "less") # 1-deficit means <- function(x) mean(x) # testing if mean prediction fits testGeneric(simulationOutput, summary = means) spread <- function(x) sd(x) # testing if mean sd fits testGeneric(simulationOutput, summary = spread) } \seealso{ \link{testResiduals}, \link{testUniformity}, \link{testOutliers}, \link{testDispersion}, \link{testZeroInflation}, \link{testGeneric}, \link{testTemporalAutocorrelation}, \link{testSpatialAutocorrelation}, \link{testQuantiles}, \link{testCategorical} } \author{ Florian Hartig } DHARMa/man/outliers.Rd0000644000176200001440000000426014677165224014176 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DHARMa.R \name{outliers} \alias{outliers} \title{Return outliers} \usage{ outliers(object, lowerQuantile = 0, upperQuantile = 1, return = c("index", "logical")) } \arguments{ \item{object}{an 
object with simulated residuals created by \link{simulateResiduals}.} \item{lowerQuantile}{lower threshold for outliers. Default is zero = outside simulation envelope.} \item{upperQuantile}{upper threshold for outliers. Default is 1 = outside simulation envelope.} \item{return}{whether to return the indices of outliers or a logical vector.} } \description{ Returns the outliers of a DHARMa object. } \details{ First of all, note that the standard definition of outlier in the DHARMa plots and outlier tests is an observation that is outside the simulation envelope. How far outside that is depends a lot on how many simulations you do. If you have 100 data points and do 100 simulations, you would expect to have one "outlier" on average, even with a perfectly fitting model. This is in fact what the outlier test tests. Thus, keep in mind that for a small number of simulations, outliers are mostly a technical term: these are points that are outside our simulations, but we don't know how far away they are. If you are seriously interested in HOW FAR outside the expected distribution a data point is, you should increase the number of simulations in \link{simulateResiduals} to be sure to get the tail of the data distribution correctly. In this case, it may make sense to adjust lowerQuantile and upperQuantile, e.g. to 0.025, 0.975, which would define outliers as values outside the central 95\% of the distribution. Also, note that outliers are particularly concerning if they have a strong influence on the model fit. One could test the influence, for example, by removing them from the data, or by some measures of leverage, e.g. generalisations of Cook's distance as in Pinho, L. G. B., Nobre, J. S., & Singer, J. M. (2015). Cook’s distance for generalized linear mixed models. Computational Statistics & Data Analysis, 82, 126–136. doi:10.1016/j.csda.2014.08.008. At the moment, however, no such function is provided in DHARMa. } DHARMa/man/testSpatialAutocorrelation.Rd0000644000176200001440000001671714703461527017715 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testSpatialAutocorrelation} \alias{testSpatialAutocorrelation} \title{Test for distance-based spatial (or similar type) autocorrelation} \usage{ testSpatialAutocorrelation(simulationOutput, x = NULL, y = NULL, distMat = NULL, alternative = c("two.sided", "greater", "less"), plot = TRUE) } \arguments{ \item{simulationOutput}{An object of class DHARMa, either created via \link{simulateResiduals} for supported models or via \link{createDHARMa} for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case.} \item{x}{The x coordinate, in the same order as the data points. Must be specified unless distMat is provided.} \item{y}{The y coordinate, in the same order as the data points. Must be specified unless distMat is provided.} \item{distMat}{Optional distance matrix. If not provided, Euclidean distances based on x and y will be calculated. See details for explanation.} \item{alternative}{A character string specifying whether the test should test if observations are "greater", "less" or "two.sided" compared to the simulated null hypothesis.} \item{plot}{If T, and if x and y are provided, plot the output (see Details).} } \description{ This function performs a Moran's I test for distance-based spatial (or similar type) autocorrelation on the calculated quantile residuals.
} \details{ The function performs Moran.I test from the package ape on the DHARMa residuals. If a distance matrix (distMat) is provided, calculations will be based on this distance matrix, and x,y coordinates will only be used for the plotting (if provided). If distMat is not provided, the function will calculate the euclidean distances between x,y coordinates, and test Moran.I based on these distances. If plot = T, a plot will be produced showing each residual at its x,y position, colored according to the residual value. Residuals with a value of 0.5 are colored white, everything below 0.5 is colored increasingly red, everything above 0.5 is colored increasingly blue. Testing for spatial autocorrelation requires unique x,y values - if you have several observations per location, either use the \link{recalculateResiduals} function to aggregate residuals per location, or extract the residuals from the fitted object, and plot / test each of them independently for spatially repeated subgroups (a typical scenario would be repeated spatial observations, in which case one could plot / test each time step separately for temporal autocorrelation). Note that the latter must be done by hand, outside \link{testSpatialAutocorrelation}. } \note{ Standard DHARMa simulations from models with (temporal / spatial / phylogenetic) conditional autoregressive terms will still have the respective temporal / spatial / phylogenetic correlation in the DHARMa residuals, unless the package you are using is modelling the autoregressive terms as explicit REs and is able to simulate conditional on the fitted REs. This has two consequences: \enumerate{ \item If you check the residuals for such a model, they will still show significant autocorrelation, even if the model fully accounts for this structure. \item Because the DHARMa residuals for such a model are not statistically independent any more, other tests (e.g. dispersion, uniformity) may have inflated type I error, i.e. you will have a higher likelihood of spurious residual problems. } There are three (non-exclusive) routes to address these issues when working with spatial / temporal / phylogenetic / other autoregressive models: \enumerate{ \item Simulate conditional on the fitted CAR structures (see conditional simulations in the help of \link{simulateResiduals}). \item Rotate simulations prior to residual calculations (see parameter rotation in \link{simulateResiduals}). \item Use custom tests / plots that explicitly compare the correlation structure in the simulated data to the correlation structure in the observed data. 
} } \examples{ testData = createData(sampleSize = 40, family = gaussian()) fittedModel <- lm(observedResponse ~ Environment1, data = testData) res = simulateResiduals(fittedModel) # Standard use testSpatialAutocorrelation(res, x = testData$x, y = testData$y) # Alternatively, one can provide a distance matrix dM = as.matrix(dist(cbind(testData$x, testData$y))) testSpatialAutocorrelation(res, distMat = dM) # You could add a spatial variogram via # library(gstat) # dat = data.frame(res = residuals(res), x = testData$x, y = testData$y) # coordinates(dat) = ~x+y # vario = variogram(res~1, data = dat, alpha=c(0,45,90,135)) # plot(vario, ylim = c(-1,1)) # if there are multiple observations with the same x values, # create first ar group with unique values for each location # then aggregate the residuals per location, and calculate # spatial autocorrelation on the new group # modifying x, y, so that we have the same location per group # just for completeness testData$x = as.numeric(testData$group) testData$y = as.numeric(testData$group) # calculating x, y positions per group groupLocations = aggregate(testData[, 6:7], list(testData$group), mean) # calculating residuals per group res2 = recalculateResiduals(res, group = testData$group) # running the spatial test on grouped residuals testSpatialAutocorrelation(res2, groupLocations$x, groupLocations$y) # careful when using REs to account for spatially clustered (but not grouped) # data. this originates from https://github.com/florianhartig/DHARMa/issues/81 # Assume our data is divided into clusters, where observations are close together # but not at the same point, and we suspect that observations in clusters are # autocorrelated clusters = 100 subsamples = 10 size = clusters * subsamples testData = createData(sampleSize = size, family = gaussian(), numGroups = clusters ) testData$x = rnorm(clusters)[testData$group] + rnorm(size, sd = 0.01) testData$y = rnorm(clusters)[testData$group] + rnorm(size, sd = 0.01) # It's a good idea to use a RE to take out the cluster effects. This accounts # for the autocorrelation within clusters library(lme4) fittedModel <- lmer(observedResponse ~ Environment1 + (1|group), data = testData) # DHARMa default is to re-simulate REs - this means spatial pattern remains # because residuals are still clustered res = simulateResiduals(fittedModel) testSpatialAutocorrelation(res, x = testData$x, y = testData$y) # However, it should disappear if you just calculate an aggregate residuals per cluster # Because at least how the data are simulated, cluster are spatially independent res2 = recalculateResiduals(res, group = testData$group) testSpatialAutocorrelation(res2, x = aggregate(testData$x, list(testData$group), mean)$x, y = aggregate(testData$y, list(testData$group), mean)$x) # For lme4, it's also possible to simulated residuals conditional on fitted # REs (re.form). Conditional on the fitted REs (i.e. accounting for the clusters) # the residuals should now be indepdendent. 
The remaining RSA we see here is # probably due to the RE shrinkage res = simulateResiduals(fittedModel, re.form = NULL) testSpatialAutocorrelation(res, x = testData$x, y = testData$y) } \seealso{ \link{testResiduals}, \link{testUniformity}, \link{testOutliers}, \link{testDispersion}, \link{testZeroInflation}, \link{testGeneric}, \link{testTemporalAutocorrelation}, \link{testSpatialAutocorrelation}, \link{testQuantiles}, \link{testCategorical} } \author{ Florian Hartig } DHARMa/man/testResiduals.Rd0000644000176200001440000000535314703461527015162 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testResiduals} \alias{testResiduals} \title{DHARMa general residual test} \usage{ testResiduals(simulationOutput, plot = TRUE) } \arguments{ \item{simulationOutput}{an object of class DHARMa, either created via \link{simulateResiduals} for supported models or by \link{createDHARMa} for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case.} \item{plot}{if TRUE, plots functions of the tests are called.} } \description{ Calls uniformity, dispersion and outliers tests. } \details{ This function is a wrapper for the various test functions implemented in DHARMa. Currently, this function calls the functions \link{testUniformity}, \link{testDispersion}, and \link{testOutliers}. All other tests (see list below) have to be called by hand. } \examples{ testData = createData(sampleSize = 100, overdispersion = 0.5, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # the plot function shows 2 plots and runs 4 tests # i) KS test i) Dispersion test iii) Outlier test iv) quantile test plot(simulationOutput, quantreg = TRUE) # testResiduals tests distribution, dispersion and outliers testResiduals(simulationOutput) ####### Individual tests ####### # KS test for correct distribution of residuals testUniformity(simulationOutput) # KS test for correct distribution within and between groups testCategorical(simulationOutput, testData$group) # Dispersion test - for details see ?testDispersion testDispersion(simulationOutput) # tests under and overdispersion # Outlier test (number of observations outside simulation envelope) # Use type = "boostrap" for exact values, see ?testOutliers testOutliers(simulationOutput, type = "binomial") # testing zero inflation testZeroInflation(simulationOutput) # testing generic summaries countOnes <- function(x) sum(x == 1) # testing for number of 1s testGeneric(simulationOutput, summary = countOnes) # 1-inflation testGeneric(simulationOutput, summary = countOnes, alternative = "less") # 1-deficit means <- function(x) mean(x) # testing if mean prediction fits testGeneric(simulationOutput, summary = means) spread <- function(x) sd(x) # testing if mean sd fits testGeneric(simulationOutput, summary = spread) } \seealso{ \link{testResiduals}, \link{testUniformity}, \link{testOutliers}, \link{testDispersion}, \link{testZeroInflation}, \link{testGeneric}, \link{testTemporalAutocorrelation}, \link{testSpatialAutocorrelation}, \link{testQuantiles}, \link{testCategorical} } \author{ Florian Hartig } DHARMa/man/checkSimulations.Rd0000644000176200001440000000071314677165224015634 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulateResiduals.R 
\name{checkSimulations} \alias{checkSimulations} \title{Check simulated data} \usage{ checkSimulations(simulatedResponse, nObs, nSim) } \arguments{ \item{simulatedResponse}{the simulated response} \item{nObs}{number of observations} \item{nSim}{number of simulations} } \description{ The function checks if the simulated data seems fine. } \keyword{internal} DHARMa/man/getPearsonResiduals.Rd0000644000176200001440000000314514677165224016314 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compatibility.R \name{getPearsonResiduals} \alias{getPearsonResiduals} \alias{getPearsonResiduals.default} \alias{getPearsonResiduals.gam} \title{Get Pearson residuals} \usage{ getPearsonResiduals(object, ...) \method{getPearsonResiduals}{default}(object, ...) \method{getPearsonResiduals}{gam}(object, ...) } \arguments{ \item{object}{a fitted model.} \item{...}{additional parameters to be passed on, usually to the residual function of the respective model class.} } \description{ Wrapper to get the Pearson residuals of a fitted model. } \details{ The purpose of this wrapper is to extract the Pearson residuals of a fitted model. This needed to be adapted because, for some reason, mgcv uses the argument "scaled.pearson" for what most packages define as "pearson". See comments in ?residuals.gam. } \examples{ testData = createData(sampleSize = 400, family = gaussian()) fittedModel <- lm(observedResponse ~ Environment1 , data = testData) # response that was used to fit the model getObservedResponse(fittedModel) # predictions of the model for these points getFitted(fittedModel) # extract simulations from the model as matrix getSimulations(fittedModel, nsim = 2) # extract simulations from the model for refit (often requires different structure) x = getSimulations(fittedModel, nsim = 2, type = "refit") getRefit(fittedModel, x[[1]]) getRefit(fittedModel, getObservedResponse(fittedModel)) } \seealso{ \link{getObservedResponse}, \link{getSimulations}, \link{getRefit}, \link{getFixedEffects}, \link{getFitted} } \author{ Florian Hartig } DHARMa/man/plotSimulatedResiduals.Rd0000644000176200001440000000135414677165224017033 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plots.R \name{plotSimulatedResiduals} \alias{plotSimulatedResiduals} \title{DHARMa standard residual plots} \usage{ plotSimulatedResiduals(simulationOutput, ...) } \arguments{ \item{simulationOutput}{an object with simulated residuals created by \link{simulateResiduals}} \item{...}{further options for \link{plotResiduals}. Consider in particular parameters quantreg, rank and asFactor. xlab, ylab and main cannot be changed when using plotSimulatedResiduals, but can be changed when using plotResiduals.} } \description{ DEPRECATED, use plot() instead } \note{ This function is deprecated. Use \link{plot.DHARMa} } \seealso{ \link{plotResiduals}, \link{plotQQunif} } DHARMa/man/testOverdispersionParametric.Rd0000644000176200001440000000073414677165224020253 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testOverdispersionParametric} \alias{testOverdispersionParametric} \title{Parametric overdispersion tests} \usage{ testOverdispersionParametric(...) } \arguments{ \item{...}{arguments will be ignored, the parametric test is no longer recommended} } \description{ Parametric overdispersion tests } \details{ Deprecated, switch your code to using the \link{testDispersion} function. 
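As a minimal sketch of the replacement workflow (fittedModel stands for any model supported by DHARMa): \preformatted{
# simulation-based dispersion test that replaces the deprecated parametric test
res = simulateResiduals(fittedModel)
testDispersion(res)
}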
} DHARMa/man/testUniformity.Rd0000644000176200001440000000562014703461527015371 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testUniformity} \alias{testUniformity} \title{Test for overall uniformity} \usage{ testUniformity(simulationOutput, alternative = c("two.sided", "less", "greater"), plot = TRUE) } \arguments{ \item{simulationOutput}{an object of class DHARMa, either created via \link{simulateResiduals} for supported models or by \link{createDHARMa} for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case.} \item{alternative}{a character string specifying whether the test should test if observations are "greater", "less" or "two.sided" compared to the simulated null hypothesis. See \link[stats:ks.test]{stats::ks.test} for details} \item{plot}{if TRUE, plots calls \link{plotQQunif} as well} } \description{ This function tests the overall uniformity of the simulated residuals in a DHARMa object } \details{ The function applies a \link[stats:ks.test]{stats::ks.test} for uniformity on the simulated residuals. } \examples{ testData = createData(sampleSize = 100, overdispersion = 0.5, randomEffectVariance = 0) fittedModel <- glm(observedResponse ~ Environment1 , family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # the plot function shows 2 plots and runs 4 tests # i) KS test i) Dispersion test iii) Outlier test iv) quantile test plot(simulationOutput, quantreg = TRUE) # testResiduals tests distribution, dispersion and outliers testResiduals(simulationOutput) ####### Individual tests ####### # KS test for correct distribution of residuals testUniformity(simulationOutput) # KS test for correct distribution within and between groups testCategorical(simulationOutput, testData$group) # Dispersion test - for details see ?testDispersion testDispersion(simulationOutput) # tests under and overdispersion # Outlier test (number of observations outside simulation envelope) # Use type = "boostrap" for exact values, see ?testOutliers testOutliers(simulationOutput, type = "binomial") # testing zero inflation testZeroInflation(simulationOutput) # testing generic summaries countOnes <- function(x) sum(x == 1) # testing for number of 1s testGeneric(simulationOutput, summary = countOnes) # 1-inflation testGeneric(simulationOutput, summary = countOnes, alternative = "less") # 1-deficit means <- function(x) mean(x) # testing if mean prediction fits testGeneric(simulationOutput, summary = means) spread <- function(x) sd(x) # testing if mean sd fits testGeneric(simulationOutput, summary = spread) } \seealso{ \link{testResiduals}, \link{testUniformity}, \link{testOutliers}, \link{testDispersion}, \link{testZeroInflation}, \link{testGeneric}, \link{testTemporalAutocorrelation}, \link{testSpatialAutocorrelation}, \link{testQuantiles}, \link{testCategorical} } \author{ Florian Hartig } DHARMa/man/hist.DHARMa.Rd0000644000176200001440000000565714701671620014273 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plots.R \name{hist.DHARMa} \alias{hist.DHARMa} \title{Histogram of DHARMa residuals} \usage{ \method{hist}{DHARMa}(x, breaks = seq(-0.02, 1.02, len = 53), col = c(.Options$DHARMaSignalColor, rep("lightgrey", 50), .Options$DHARMaSignalColor), main = "Hist of DHARMa residuals", xlab = "Residuals (outliers are marked red)", cex.main = 1, ...) 
} \arguments{ \item{x}{A DHARMa simulation output (class DHARMa)} \item{breaks}{Breaks for hist() function.} \item{col}{Color for histogram bars.} \item{main}{Plot title.} \item{xlab}{Plot x-axis label.} \item{cex.main}{Plot cex.main.} \item{...}{Other arguments to be passed on to hist().} } \description{ The function produces a histogram from a DHARMa output. Outliers are marked red. } \details{ The function calls hist() to create a histogram of the scaled residuals. Outliers are marked red as default but it can be changed by setting \code{options(DHARMaSignalColor = "red")} to a different color. See \code{getOption("DHARMaSignalColor")} for the current setting. } \examples{ testData = createData(sampleSize = 200, family = poisson(), randomEffectVariance = 1, numGroups = 10) fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) ######### main plotting function ############# # for all functions, quantreg = T will be more # informative, but slower plot(simulationOutput, quantreg = FALSE) ############# Distribution ###################### plotQQunif(simulationOutput = simulationOutput, testDispersion = FALSE, testUniformity = FALSE, testOutliers = FALSE) hist(simulationOutput ) ############# residual plots ############### # rank transformation, using a simulationOutput plotResiduals(simulationOutput, rank = TRUE, quantreg = FALSE) # smooth scatter plot - usually used for large datasets, default for n > 10000 plotResiduals(simulationOutput, rank = TRUE, quantreg = FALSE, smoothScatter = TRUE) # residual vs predictors, using explicit values for pred, residual plotResiduals(simulationOutput, form = testData$Environment1, quantreg = FALSE) # if pred is a factor, or if asFactor = TRUE, will produce a boxplot plotResiduals(simulationOutput, form = testData$group) # to diagnose overdispersion and heteroskedasticity it can be useful to # display residuals as absolute deviation from the expected mean 0.5 plotResiduals(simulationOutput, absoluteDeviation = TRUE, quantreg = FALSE) # All these options can also be provided to the main plotting function # If you want to plot summaries per group, use simulationOutput = recalculateResiduals(simulationOutput, group = testData$group) plot(simulationOutput, quantreg = FALSE) # we see one residual point per RE } \seealso{ \link{plotSimulatedResiduals}, \link{plotResiduals} } DHARMa/man/recalculateResiduals.Rd0000644000176200001440000000751414677165224016475 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulateResiduals.R \name{recalculateResiduals} \alias{recalculateResiduals} \title{Recalculate residuals with grouping} \usage{ recalculateResiduals(simulationOutput, group = NULL, aggregateBy = sum, sel = NULL, seed = 123, method = c("PIT", "traditional"), rotation = NULL) } \arguments{ \item{simulationOutput}{an object with simulated residuals created by \link{simulateResiduals}.} \item{group}{group of each data point.} \item{aggregateBy}{function for the aggregation. Default is sum. This should only be changed if you know what you are doing. Note in particular that the expected residual distribution might not be flat any more if you choose general functions, such as sd etc.} \item{sel}{an optional vector for selecting the data to be aggregated.} \item{seed}{the random seed to be used within DHARMa. The default setting, recommended for most users, is keep the random seed on a fixed value 123. 
This means that you will always get the same randomization and thus teh same result when running the same code. NULL = no new seed is set, but previous random state will be restored after simulation. FALSE = no seed is set, and random state will not be restored. The latter two options are only recommended for simulation experiments. See vignette for details.} \item{method}{the quantile randomization method used. The two options implemented at the moment are probability integral transform (PIT-) residuals (current default), and the "traditional" randomization procedure, that was used in DHARMa until version 0.3.0. For details, see \link{getQuantile}.} \item{rotation}{optional rotation of the residual space to remove residual autocorrelation. See details in \link{simulateResiduals}, section \emph{residual auto-correlation} for an extended explanation, and \link{getQuantile} for syntax.} } \value{ an object of class DHARMa, similar to what is returned by \link{simulateResiduals}, but with additional outputs for the new grouped calculations. Note that the relevant outputs are 2x in the object, the first is the grouped calculations (which is returned by $name access), and later another time, under identical name, the original output. Moreover, there is a function 'aggregateByGroup', which can be used to aggregate predictor variables in the same way as the variables calculated here. } \description{ The purpose of this function is to recalculate scaled residuals per group, based on the simulations done by \link{simulateResiduals}. } \details{ The function aggregates the observed and simulated data per group according to the function provided by the aggregateBy option. DHARMa residuals are then calculated exactly as for a single data point (see \link{getQuantile} for details). } \examples{ library(lme4) testData = createData(sampleSize = 100, overdispersion = 0.5, family = poisson()) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # standard plot plot(simulationOutput) # one of the possible test, for other options see ?testResiduals / vignette testDispersion(simulationOutput) # the calculated residuals can be accessed via residuals(simulationOutput) # transform residuals to other pdf, see ?residuals.DHARMa for details residuals(simulationOutput, quantileFunction = qnorm, outlierValues = c(-7,7)) # get residuals that are outside the simulation envelope outliers(simulationOutput) # calculating aggregated residuals per group simulationOutput2 = recalculateResiduals(simulationOutput, group = testData$group) plot(simulationOutput2, quantreg = FALSE) # calculating residuals only for subset of the data simulationOutput3 = recalculateResiduals(simulationOutput, sel = testData$group == 1 ) plot(simulationOutput3, quantreg = FALSE) } DHARMa/man/plotQQunif.Rd0000644000176200001440000000636214701671620014425 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plots.R \name{plotQQunif} \alias{plotQQunif} \title{Quantile-quantile plot for a uniform distribution} \usage{ plotQQunif(simulationOutput, testUniformity = TRUE, testOutliers = TRUE, testDispersion = TRUE, ...) 
} \arguments{ \item{simulationOutput}{A DHARMa simulation output (class DHARMa).} \item{testUniformity}{If T, the function \link{testUniformity} will be called and the result will be added to the plot.} \item{testOutliers}{If T, the function \link{testOutliers} will be called and the result will be added to the plot.} \item{testDispersion}{If T, the function \link{testDispersion} will be called and the result will be added to the plot.} \item{...}{Arguments to be passed on to \link[gap:qqunif]{gap::qqunif}.} } \description{ The function produces a uniform quantile-quantile plot from a DHARMa output. Optionally, tests for uniformity, outliers and dispersion can be added. } \details{ The function calls qqunif() from the R package gap to create a quantile-quantile plot for a uniform distribution, and overlays tests for particular distributional problems as specified. When tests are displayed, significant p-values are highlighted in the color red by default. This can be changed by setting \code{options(DHARMaSignalColor = "red")} to a different color. See \code{getOption("DHARMaSignalColor")} for the current setting. } \examples{ testData = createData(sampleSize = 200, family = poisson(), randomEffectVariance = 1, numGroups = 10) fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) ######### main plotting function ############# # for all functions, quantreg = T will be more # informative, but slower plot(simulationOutput, quantreg = FALSE) ############# Distribution ###################### plotQQunif(simulationOutput = simulationOutput, testDispersion = FALSE, testUniformity = FALSE, testOutliers = FALSE) hist(simulationOutput ) ############# residual plots ############### # rank transformation, using a simulationOutput plotResiduals(simulationOutput, rank = TRUE, quantreg = FALSE) # smooth scatter plot - usually used for large datasets, default for n > 10000 plotResiduals(simulationOutput, rank = TRUE, quantreg = FALSE, smoothScatter = TRUE) # residual vs predictors, using explicit values for pred, residual plotResiduals(simulationOutput, form = testData$Environment1, quantreg = FALSE) # if pred is a factor, or if asFactor = TRUE, will produce a boxplot plotResiduals(simulationOutput, form = testData$group) # to diagnose overdispersion and heteroskedasticity it can be useful to # display residuals as absolute deviation from the expected mean 0.5 plotResiduals(simulationOutput, absoluteDeviation = TRUE, quantreg = FALSE) # All these options can also be provided to the main plotting function # If you want to plot summaries per group, use simulationOutput = recalculateResiduals(simulationOutput, group = testData$group) plot(simulationOutput, quantreg = FALSE) # we see one residual point per RE } \seealso{ \link{plotSimulatedResiduals}, \link{plotResiduals} } DHARMa/man/DHARMa-package.Rd0000644000176200001440000000363314704245735014715 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DHARMa.R \docType{package} \name{DHARMa-package} \alias{DHARMa} \alias{DHARMa-package} \title{DHARMa: Residual Diagnostics for Hierarchical (Multi-Level / Mixed) Regression Models} \description{ The 'DHARMa' package uses a simulation-based approach to create readily interpretable scaled (quantile) residuals for fitted (generalized) linear mixed models. 
Currently supported are linear and generalized linear (mixed) models from 'lme4' (classes 'lmerMod', 'glmerMod'), 'glmmTMB', 'GLMMadaptive', and 'spaMM'; phylogenetic linear models from 'phylolm' (classes 'phylolm' and 'phyloglm'); generalized additive models ('gam' from 'mgcv'); 'glm' (including 'negbin' from 'MASS', but excluding quasi-distributions) and 'lm' model classes. Moreover, externally created simulations, e.g. posterior predictive simulations from Bayesian software such as 'JAGS', 'STAN', or 'BUGS' can be processed as well. The resulting residuals are standardized to values between 0 and 1 and can be interpreted as intuitively as residuals from a linear regression. The package also provides a number of plot and test functions for typical model misspecification problems, such as over/underdispersion, zero-inflation, and residual spatial, phylogenetic and temporal autocorrelation. } \details{ To get started with the package, look at the vignette and start with \link{simulateResiduals} } \references{ vignette("DHARMa", package="DHARMa") } \seealso{ Useful links: \itemize{ \item \url{http://florianhartig.github.io/DHARMa/} \item Report bugs at \url{https://github.com/florianhartig/DHARMa/issues} } } \author{ \strong{Maintainer}: Florian Hartig \email{florian.hartig@biologie.uni-regensburg.de} (\href{https://orcid.org/0000-0002-6255-9059}{ORCID}) Other contributors: \itemize{ \item Lukas Lohse [contributor] \item Melina de Souza leite [contributor] } } \keyword{internal} DHARMa/man/plotResiduals.Rd0000644000176200001440000001576114701671620015160 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plots.R \name{plotResiduals} \alias{plotResiduals} \title{Generic res ~ pred scatter plot with spline or quantile regression on top} \usage{ plotResiduals(simulationOutput, form = NULL, quantreg = NULL, rank = TRUE, asFactor = NULL, smoothScatter = NULL, quantiles = c(0.25, 0.5, 0.75), absoluteDeviation = FALSE, ...) } \arguments{ \item{simulationOutput}{An object, usually a DHARMa object, from which residual values can be extracted. Alternatively, a vector with residuals or a fitted model can be provided, which will then be transformed into a DHARMa object.} \item{form}{Optional predictor against which the residuals should be plotted. Default is to used the predicted(simulationOutput).} \item{quantreg}{Whether to perform a quantile regression based on \link{testQuantiles} or a smooth spline around the mean. Default NULL chooses T for nObs < 2000, and F otherwise.} \item{rank}{If T, the values provided in form will be rank transformed. This will usually make patterns easier to spot visually, especially if the distribution of the predictor is skewed. If form is a factor, this has no effect.} \item{asFactor}{Should a numeric predictor provided in form be treated as a factor. Default is to choose this for < 10 unique values, as long as enough predictions are available to draw a boxplot.} \item{smoothScatter}{if T, a smooth scatter plot will plotted instead of a normal scatter plot. This makes sense when the number of residuals is very large. Default NULL chooses T for nObs > 10000, and F otherwise.} \item{quantiles}{For a quantile regression, which quantiles should be plotted. Default is 0.25, 0.5, 0.75.} \item{absoluteDeviation}{If T, switch from displaying normal quantile residuals to absolute deviation from the mean expectation of 0.5 (calculated as 2 * abs(res - 0.5)). 
The purpose of this is to test explicitly for heteroskedasticity, see details.} \item{...}{Additional arguments to plot / boxplot.} } \value{ If quantile tests are performed, the function returns them invisibly. } \description{ The function creates a generic residual plot with either spline or quantile regression to highlight patterns in the residuals. Outliers are highlighted in red by default (but see Details). } \details{ The function plots residuals against a predictor (by default against the fitted value, extracted from the DHARMa object, or any other predictor). Outliers are highlighted in red as default (for information on definition and interpretation of outliers, see \link{testOutliers}). This can be changed by setting \code{options(DHARMaSignalColor = "red")} to a different color. See \code{getOption("DHARMaSignalColor")} for the current setting. To provide a visual aid for detecting deviations from uniformity in the y-direction, the plot function calculates an (optional) quantile regression of the residuals, by default for the 0.25, 0.5 and 0.75 quantiles. Since the residuals should be uniformly distributed for a correctly specified model, the theoretical expectations for these regressions are straight lines at 0.25, 0.5 and 0.75, shown as dashed black lines on the plot. However, even for a perfect model, some deviation from these expectations is to be expected by chance, especially if the sample size is small. The function therefore tests whether the deviation of the fitted quantile regression from the expectation is significant, using \link{testQuantiles}. If so, the significant quantile regression is highlighted in red (as default) and a warning is displayed in the plot. Overdispersion typically manifests itself as Q1 (0.25) deviating towards 0 and Q3 (0.75) deviating towards 1. Heteroskedasticity manifests itself as non-parallel quantile lines. To diagnose heteroskedasticity and overdispersion, it can be helpful to additionally plot the absolute deviation of the residuals from the mean expectation of 0.5, using the option absoluteDeviation = T. In this case, we would again expect Q1-Q3 quantile lines at 0.25, 0.5, 0.75, but greater dispersion (also locally in the case of heteroskedasticity) always manifests itself in deviations towards 1. The quantile regression can take some time to calculate, especially for larger data sets. For this reason, quantreg = F can be set to generate a smooth spline instead. This is the default for n > 2000. If form is a factor, a boxplot will be plotted instead of a scatter plot. The residuals for each factor level should be uniformly distributed, so the box should go from 0.25 to 0.75, with the median line at 0.5 (within-group). To test if deviations from those expectations are significant, KS-tests per group and a Levene test for homogeneity of variances are performed. See \link{testCategorical} for details. } \note{ If nObs > 10000, the scatter plot is replaced by graphics::smoothScatter. The color for highlighting outliers and quantile lines/splines with significant tests can be changed by setting \code{options(DHARMaSignalColor = "red")} to a different color. See \code{getOption("DHARMaSignalColor")} for the current setting. This is convenient for a color-blind friendly display, since red and black are difficult for some people to separate. 
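As an illustration (the color value below is an arbitrary choice, not a package default): \preformatted{
# switch the highlighting color globally, then plot as usual
options(DHARMaSignalColor = "blue")
plotResiduals(simulationOutput)
getOption("DHARMaSignalColor")  # check the current setting
}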
} \examples{ testData = createData(sampleSize = 200, family = poisson(), randomEffectVariance = 1, numGroups = 10) fittedModel <- glm(observedResponse ~ Environment1, family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) ######### main plotting function ############# # for all functions, quantreg = T will be more # informative, but slower plot(simulationOutput, quantreg = FALSE) ############# Distribution ###################### plotQQunif(simulationOutput = simulationOutput, testDispersion = FALSE, testUniformity = FALSE, testOutliers = FALSE) hist(simulationOutput ) ############# residual plots ############### # rank transformation, using a simulationOutput plotResiduals(simulationOutput, rank = TRUE, quantreg = FALSE) # smooth scatter plot - usually used for large datasets, default for n > 10000 plotResiduals(simulationOutput, rank = TRUE, quantreg = FALSE, smoothScatter = TRUE) # residual vs predictors, using explicit values for pred, residual plotResiduals(simulationOutput, form = testData$Environment1, quantreg = FALSE) # if pred is a factor, or if asFactor = TRUE, will produce a boxplot plotResiduals(simulationOutput, form = testData$group) # to diagnose overdispersion and heteroskedasticity it can be useful to # display residuals as absolute deviation from the expected mean 0.5 plotResiduals(simulationOutput, absoluteDeviation = TRUE, quantreg = FALSE) # All these options can also be provided to the main plotting function # If you want to plot summaries per group, use simulationOutput = recalculateResiduals(simulationOutput, group = testData$group) plot(simulationOutput, quantreg = FALSE) # we see one residual point per RE } \seealso{ \link{plotQQunif}, \link{testQuantiles}, \link{testOutliers} } DHARMa/man/testOutliers.Rd0000644000176200001440000001132614701671620015025 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testOutliers} \alias{testOutliers} \title{Test for outliers} \usage{ testOutliers(simulationOutput, alternative = c("two.sided", "greater", "less"), margin = c("both", "upper", "lower"), type = c("default", "bootstrap", "binomial"), nBoot = 100, plot = TRUE, plotBoostrap = FALSE) } \arguments{ \item{simulationOutput}{an object of class DHARMa, either created via \link{simulateResiduals} for supported models or by \link{createDHARMa} for simulations created outside DHARMa, or a supported model. Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case.} \item{alternative}{a character string specifying whether the test should test if observations are "greater", "less" or "two.sided" (default) compared to the simulated null hypothesis} \item{margin}{whether to test for outliers only at the lower, only at the upper, or both sides (default) of the simulated data distribution} \item{type}{either default, bootstrap or binomial. See details} \item{nBoot}{number of boostrap replicates. Only used ot type = "bootstrap"} \item{plot}{if TRUE, the function will create an additional plot} \item{plotBoostrap}{if plot should be produced of outlier frequencies calculated under the bootstrap} } \description{ This function tests if the number of observations outside the simulatio envelope are larger or smaller than expected } \details{ DHARMa residuals are created by simulating from the fitted model, and comparing the simulated values to the observed data. 
It can occur that all simulated values are higher or smaller than the observed data, in which case they get the residual value of 0 and 1, respectively. I refer to these values as simulation outliers, or simply outliers. Because no data was simulated in the range of the observed value, we don't know "how strongly" these values deviate from the model expectation, so the term "outlier" should be used with a grain of salt. It is not a judgment about the magnitude of the residual deviation, but simply a dichotomous sign that we are outside the simulated range. Moreover, the number of outliers will decrease as we increase the number of simulations. To test if the outliers are a concern, testOutliers implements 2 options (bootstrap, binomial), which can be chosen via the parameter "type". The third option (default) chooses bootstrap for integer-valued distributions with nObs < 500, and else binomial. The binomial test considers that under the null hypothesis that the model is correct, and for continuous distributions (i.e. data and the model distribution are identical and continuous), the probability that a given observation is higher than all simulations is 1/(nSim +1), and binomial distributed. The testOutlier function can test this null hypothesis via type = "binomial". In principle, it would be nice if we could extend this idea to integer-valued distributions; however, for these distributions, which are randomized via the PIT procedure (see \link{simulateResiduals}), the rate of "true" outliers is more difficult to calculate, and in general not 1/(nSim +1). The testOutlier function implements a small tweak that calculates the rate of residuals that are closer than 1/(nSim+1) to the 0/1 border, which roughly occur at a rate of nData /(nSim +1). This approximate value, however, is generally not exact, and may be particularly off for non-bounded integer-valued distributions (such as Poisson or Negative Binomial). For this reason, the testOutlier function implements an alternative procedure that uses the bootstrap to generate a simulation-based expectation for the outliers. It is recommended to use the bootstrap for integer-valued distributions (and integer-valued only, because it has no advantage for continuous distributions), ideally with reasonably high values of nSim and nBoot (I recommend at least 1000 for both). Because of the high runtime, however, this option is switched off for type = default when nObs > 500. Both binomial and bootstrap generate a null expectation, and then test for an excess or lack of outliers. Per default, testOutliers() looks for both, so if you get a significant p-value, you have to check if you have too many or too few outliers. An excess of outliers is to be interpreted as too many values outside the simulation envelope. This could be caused by overdispersion, or by what we classically call outliers. A lack of outliers would be caused, for example, by underdispersion. 
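To make the binomial null expectation described above concrete, here is a minimal numerical sketch (the numbers are illustrative and not taken from a fitted model): \preformatted{
# expected outlier frequency under the binomial argument (continuous case)
nObs = 100; nSim = 250
p0 = 2 / (nSim + 1)      # probability of falling outside the envelope at either margin
nObs * p0                # expected number of outliers, here ca. 0.8
binom.test(x = 3, n = nObs, p = p0)   # 3 observed outliers vs. this expectation
}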
} \seealso{ \link{testResiduals}, \link{testUniformity}, \link{testOutliers}, \link{testDispersion}, \link{testZeroInflation}, \link{testGeneric}, \link{testTemporalAutocorrelation}, \link{testSpatialAutocorrelation}, \link{testQuantiles}, \link{testCategorical} } \author{ Florian Hartig } DHARMa/man/ensurePredictor.Rd0000644000176200001440000000115314701671620015471 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/plots.R \name{ensurePredictor} \alias{ensurePredictor} \title{Ensures the existence of a valid predictor to plot residuals against} \usage{ ensurePredictor(simulationOutput, predictor = NULL) } \arguments{ \item{simulationOutput}{A DHARMa simulation output or an object that can be converted into a DHARMa simulation output.} \item{predictor}{An optional predictor. If no predictor is provided, will try to extract the fitted value.} } \description{ Ensures the existence of a valid predictor to plot residuals against } \keyword{internal} DHARMa/man/createData.Rd0000644000176200001440000000652114677165224014367 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/createData.R \name{createData} \alias{createData} \title{Simulate test data} \usage{ createData(sampleSize = 100, intercept = 0, fixedEffects = 1, quadraticFixedEffects = NULL, numGroups = 10, randomEffectVariance = 1, overdispersion = 0, family = poisson(), scale = 1, cor = 0, roundPoissonVariance = NULL, pZeroInflation = 0, binomialTrials = 1, temporalAutocorrelation = 0, spatialAutocorrelation = 0, factorResponse = FALSE, replicates = 1, hasNA = FALSE) } \arguments{ \item{sampleSize}{sample size of the dataset.} \item{intercept}{intercept (linear scale).} \item{fixedEffects}{vector of fixed effects (linear scale).} \item{quadraticFixedEffects}{vector of quadratic fixed effects (linear scale).} \item{numGroups}{number of groups for the random effect.} \item{randomEffectVariance}{variance of the random effect (intercept).} \item{overdispersion}{if this is a numeric value, it will be used as the sd of a random normal variate that is added to the linear predictor. Alternatively, a random function can be provided that takes as input the linear predictor.} \item{family}{family.} \item{scale}{scale if the distribution has a scale (e.g. sd for the Gaussian)} \item{cor}{correlation between predictors.} \item{roundPoissonVariance}{if set, this creates a uniform noise on the possion response. The aim of this is to create heteroscedasticity.} \item{pZeroInflation}{probability to set any data point to zero.} \item{binomialTrials}{Number of trials for the binomial. Only active if family == binomial.} \item{temporalAutocorrelation}{strength of temporalAutocorrelation.} \item{spatialAutocorrelation}{strength of spatial Autocorrelation.} \item{factorResponse}{should the response be transformed to a factor (inteded to be used for 0/1 data).} \item{replicates}{number of datasets to create.} \item{hasNA}{should an NA be added to the environmental predictor (for test purposes).} } \description{ This function creates synthetic dataset with various problems such as overdispersion, zero-inflation, etc. 
} \examples{ testData = createData(sampleSize = 500, intercept = 2, fixedEffects = c(1), overdispersion = 0, family = poisson(), quadraticFixedEffects = c(-3), randomEffectVariance = 0) par(mfrow = c(1,2)) plot(testData$Environment1, testData$observedResponse) hist(testData$observedResponse) # with zero-inflation testData = createData(sampleSize = 500, intercept = 2, fixedEffects = c(1), overdispersion = 0, family = poisson(), quadraticFixedEffects = c(-3), randomEffectVariance = 0, pZeroInflation = 0.6) par(mfrow = c(1,2)) plot(testData$Environment1, testData$observedResponse) hist(testData$observedResponse) # binomial with multiple trials testData = createData(sampleSize = 40, intercept = 2, fixedEffects = c(1), overdispersion = 0, family = binomial(), quadraticFixedEffects = c(-3), randomEffectVariance = 0, binomialTrials = 20) plot(observedResponse1 / observedResponse0 ~ Environment1, data = testData, ylab = "Proportion 1") # spatial / temporal correlation testData = createData(sampleSize = 100, family = poisson(), spatialAutocorrelation = 3, temporalAutocorrelation = 3) plot(log(observedResponse) ~ time, data = testData) plot(log(observedResponse) ~ x, data = testData) } DHARMa/man/checkModel.Rd0000644000176200001440000000142314677165224014364 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compatibility.R \name{checkModel} \alias{checkModel} \title{Check if the fitted model is supported by DHARMa} \usage{ checkModel(fittedModel, stop = F) } \arguments{ \item{fittedModel}{a fitted model.} \item{stop}{whether to throw an error if the model is not supported by DHARMa.} } \description{ The function checks if the fitted model is supported by DHARMa, and if there are other issues that could create problems. } \details{ The main purpose of this function os to check if the fitted model class is supported by DHARMa. The function additionally checks for properties of the fitted model that could create problems for calculating residuals or working with the resuls in DHARMa. } \keyword{internal} DHARMa/man/testSimulatedResiduals.Rd0000644000176200001440000000126614677165224017036 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testSimulatedResiduals} \alias{testSimulatedResiduals} \title{Residual tests} \usage{ testSimulatedResiduals(simulationOutput) } \arguments{ \item{simulationOutput}{an object of class DHARMa, either created via \link{simulateResiduals} for supported models or by \link{createDHARMa} for simulations created outside DHARMa, or a supported model. 
Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case.} } \description{ Residual tests } \details{ Deprecated, switch your code to using the \link{testResiduals} function } \author{ Florian Hartig } DHARMa/man/ensureDHARMa.Rd0000644000176200001440000000116014665273541014541 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/DHARMa.R \name{ensureDHARMa} \alias{ensureDHARMa} \title{Ensures that an object is of class DHARMa} \usage{ ensureDHARMa(simulationOutput, convert = FALSE) } \arguments{ \item{simulationOutput}{a DHARMa simulation output or an object that can be converted into a DHARMa simulation output} \item{convert}{if TRUE, attempts to convert model + numeric to DHARMa, if "Model", converts only supported models to DHARMa} } \value{ an object of class DHARMa } \description{ Ensures that an object is of class DHARMa } \details{ The } \keyword{internal} DHARMa/man/getFamily.Rd0000644000176200001440000000127214703461527014244 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compatibility.R \name{getFamily} \alias{getFamily} \alias{getFamily.default} \alias{getFamily.phylolm} \alias{getFamily.phyloglm} \title{Get model family} \usage{ getFamily(object, ...) \method{getFamily}{default}(object, ...) \method{getFamily}{phylolm}(object, ...) \method{getFamily}{phyloglm}(object, ...) } \arguments{ \item{object}{a fitted model.} \item{...}{additional parameters to be passed on.} } \description{ Wrapper to get the family of a fitted model. } \seealso{ \link{getObservedResponse}, \link{getSimulations}, \link{getRefit}, \link{getFixedEffects}, \link{getFitted} } \author{ Florian Hartig } DHARMa/man/simulateResiduals.Rd0000644000176200001440000002302014704245735016017 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/simulateResiduals.R \name{simulateResiduals} \alias{simulateResiduals} \title{Create simulated residuals} \usage{ simulateResiduals(fittedModel, n = 250, refit = FALSE, integerResponse = NULL, plot = FALSE, seed = 123, method = c("PIT", "traditional"), rotation = NULL, ...) } \arguments{ \item{fittedModel}{A fitted model of a class supported by DHARMa.} \item{n}{Number of simulations. The smaller the number, the higher the stochastic error on the residuals. Also, for very small n, discretization artefacts can influence the tests. Default is 250, which is a relatively safe value. You can consider increasing to 1000 to stabilize the simulated values.} \item{refit}{If FALSE, new data will be simulated and scaled residuals will be created by comparing observed data with new data. If TRUE, the model will be refitted on the simulated data (parametric bootstrap), and scaled residuals will be created by comparing observed with refitted residuals.} \item{integerResponse}{If TRUE, noise will be added at to the residuals to maintain a uniform expectations for integer responses (such as Poisson or Binomial). Usually, the model will automatically detect the appropriate setting, so there is no need to adjust this setting.} \item{plot}{If TRUE, \link{plotResiduals} will be directly run after the residuals have been calculated.} \item{seed}{The random seed to be used within DHARMa. The default setting, recommended for most users, is keep the random seed on a fixed value 123. This means that you will always get the same randomization and thus the same result when running the same code. 
If NULL, no new seed is set, but previous random state will be restored after simulation. If FALSE, no seed is set, and random state will not be restored. The latter two options are only recommended for simulation experiments. See vignette for details.} \item{method}{For refit = FALSE, the quantile randomization method to be used. The two options implemented at the moment are probability integral transform (PIT-) residuals (current default), and the "traditional" randomization procedure that was used in DHARMa until version 0.3.0. refit = T will always use "traditional", irrespective of the value of method. For details, see \link{getQuantile}.} \item{rotation}{Optional rotation of the residual space prior to calculating the quantile residuals. The main purpose of this is to account for residual covariance as created by temporal, spatial or phylogenetic autocorrelation. See details below, section \emph{residual autocorrelation} as well as the help of \link{getQuantile} and, for a practical example, \link{testTemporalAutocorrelation}.} \item{...}{Further parameters to pass on to the simulate function of the model object. An important use of this is to specify whether simulations should be conditional on the current random effect estimates, e.g. via re.form. Note that not all models support syntax to specify conditional or unconditional simulations. See details and \link{getSimulations}.} } \value{ An S3 class of type "DHARMa". Implemented S3 functions include \link{plot.DHARMa}, \link{print.DHARMa} and \link{residuals.DHARMa}. For other functions that can be used on a DHARMa object, see section "See Also" below. } \description{ The function creates scaled residuals by simulating from the fitted model. Residuals can be extracted with \link{residuals.DHARMa}. See \link{testResiduals} for an overview of residual tests, \link{plot.DHARMa} for an overview of available plots. } \details{ There are a number of important considerations when simulating from a more complex (hierarchical) model: \strong{Re-simulating random effects / hierarchical structure}: in a hierarchical model, we have several stochastic processes aligned on top of each other. Specifically, in a GLMM, we have a lower level stochastic process (random effect), whose result enters into a higher level (e.g. Poisson distribution). For other hierarchical models such as state-space models, similar considerations apply. In such a situation, we have to decide if we want to re-simulate all stochastic levels, or only a subset of those. For example, in a GLMM, it is common to only simulate the last stochastic level (e.g. Poisson) conditional on the fitted random effects. This is often referred to as a conditional simulation. For controlling how many levels should be re-simulated, the simulateResiduals function allows passing on parameters to the simulate function of the fitted model object. Please refer to the help of the different simulate functions (e.g. ?simulate.merMod) for details. For merMod (lme4) model objects, the relevant parameters are use.u and re.form. For glmmTMB model objects, the package version 1.1.10 has a temporary solution to simulate conditional to all random effects (see \link[glmmTMB:set_simcodes]{glmmTMB::set_simcodes} val = "fix", and issue \href{https://github.com/glmmTMB/glmmTMB/issues/888}{#888} in the glmmTMB GitHub repository). If the model is correctly specified, the simulated residuals should be flat regardless of how many hierarchical levels we re-simulate. 
The most thorough procedure would therefore be to test all possible options. If testing only one option, I would recommend to re-simulate all levels, because this essentially tests the model structure as a whole. This is the default setting in the DHARMa package. A potential drawback is that re-simulating the lower-level random effects creates more variability, which may reduce power for detecting problems in the upper-level stochastic processes. In particular dispersion tests may produce different results when switching from conditional to unconditional simulations, and often the conditional simulation is more sensitive. \strong{Refitting or not}: a third issue is how residuals are calculated. simulateResiduals has two options that are controlled by the refit parameter: \enumerate{ \item if refit = FALSE (default), new data is simulated from the fitted model, and residuals are calculated by comparing the observed data to the new data. \item if refit = TRUE, a parametric bootstrap is performed, meaning that the model is refit on the new data, and residuals are created by comparing observed residuals against refitted residuals. I advise against using this method per default (see more comments in the vignette), unless you are really sure that you need it. } \strong{Residuals per group}: In many situations, it can be useful to look at residuals per group, e.g. to see how much the model over / underpredicts per plot, year or subject. To do this, use \link{recalculateResiduals}, together with a grouping variable (see also help). \strong{Transformation to other distributions}: DHARMa calculates residuals for which the theoretical expectation (assuming a correctly specified model) is uniform. To transform this residuals to another distribution (e.g. so that a correctly specified model will have normal residuals) see \link{residuals.DHARMa}. \strong{Integer responses}: this is only relevant if method = "traditional", in which case it activates the randomization of the residuals. Usually, this does not need to be changed, as DHARMa will try to automatically check if the fitted model has an integer or discrete distribution via the family argument. However, in some cases the family does not allow to uniquely identify the distribution type. For example, a tweedie distribution can be interger or continuous. Therefore, DHARMa will additionally check the simulation results for repeated values, and will change the distribution type if repeated values are found (a message is displayed in this case). \strong{Residual autocorrelation}: a common problem is residual autocorrelation. Spatial, temporal and phylogenetic autocorrelation can be tested with \link{testSpatialAutocorrelation}, \link{testTemporalAutocorrelation} and \link{testPhylogeneticAutocorrelation}. If simulations are unconditional, residual correlations will be maintained, even if the autocorrelation is addressed by an appropriate CAR structure. This may be a problem, because autocorrelation may create apparently systematic patterns in plots or tests such as \link{testUniformity}. To reduce this problem, either simulate conditional on fitted correlated REs, or rotate residuals via the rotation parameter (the latter will likely only work in approximately linear models). See \link{getQuantile} for details on the rotation. 
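As a minimal sketch of the two options mentioned above (the model objects are placeholders; whether conditional simulation is supported, and with which syntax, depends on the model class): \preformatted{
# (1) conditional simulation, here for an lme4 fit via re.form
res = simulateResiduals(fittedModel, re.form = NULL)

# (2) rotation of the residual space, here as used for a phylolm fit
res = simulateResiduals(fittedModel, rotation = "estimated")
}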
} \examples{ library(lme4) testData = createData(sampleSize = 100, overdispersion = 0.5, family = poisson()) fittedModel <- glmer(observedResponse ~ Environment1 + (1|group), family = "poisson", data = testData) simulationOutput <- simulateResiduals(fittedModel = fittedModel) # standard plot plot(simulationOutput) # one of the possible test, for other options see ?testResiduals / vignette testDispersion(simulationOutput) # the calculated residuals can be accessed via residuals(simulationOutput) # transform residuals to other pdf, see ?residuals.DHARMa for details residuals(simulationOutput, quantileFunction = qnorm, outlierValues = c(-7,7)) # get residuals that are outside the simulation envelope outliers(simulationOutput) # calculating aggregated residuals per group simulationOutput2 = recalculateResiduals(simulationOutput, group = testData$group) plot(simulationOutput2, quantreg = FALSE) # calculating residuals only for subset of the data simulationOutput3 = recalculateResiduals(simulationOutput, sel = testData$group == 1 ) plot(simulationOutput3, quantreg = FALSE) } \seealso{ \link{testResiduals}, \link{plotResiduals}, \link{recalculateResiduals}, \link{outliers} } DHARMa/man/getObservedResponse.Rd0000644000176200001440000000303414677165224016316 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compatibility.R \name{getObservedResponse} \alias{getObservedResponse} \alias{getObservedResponse.default} \alias{getObservedResponse.HLfit} \alias{getObservedResponse.phylolm} \alias{getObservedResponse.phyloglm} \title{Get model response} \usage{ getObservedResponse(object, ...) \method{getObservedResponse}{default}(object, ...) \method{getObservedResponse}{HLfit}(object, ...) \method{getObservedResponse}{phylolm}(object, ...) \method{getObservedResponse}{phyloglm}(object, ...) } \arguments{ \item{object}{a fitted model.} \item{...}{additional parameters.} } \description{ Extract the response of a fitted model. } \details{ The purpose of this function is to safely extract the observed response (dependent variable) of the fitted model classes. } \examples{ testData = createData(sampleSize = 400, family = gaussian()) fittedModel <- lm(observedResponse ~ Environment1 , data = testData) # response that was used to fit the model getObservedResponse(fittedModel) # predictions of the model for these points getFitted(fittedModel) # extract simulations from the model as matrix getSimulations(fittedModel, nsim = 2) # extract simulations from the model for refit (often requires different structure) x = getSimulations(fittedModel, nsim = 2, type = "refit") getRefit(fittedModel, x[[1]]) getRefit(fittedModel, getObservedResponse(fittedModel)) } \seealso{ \link{getRefit}, \link{getSimulations}, \link{getFixedEffects}, \link{getFitted} } \author{ Florian Hartig } DHARMa/man/testPhylogeneticAutocorrelation.Rd0000644000176200001440000000720114703461527020746 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/tests.R \name{testPhylogeneticAutocorrelation} \alias{testPhylogeneticAutocorrelation} \title{Test for phylogenetic autocorrelation} \usage{ testPhylogeneticAutocorrelation(simulationOutput, tree, alternative = c("two.sided", "greater", "less")) } \arguments{ \item{simulationOutput}{an object of class DHARMa, either created via \link{simulateResiduals} for supported models or via \link{createDHARMa} for simulations created outside DHARMa, or a supported model. 
Providing a supported model directly is discouraged, because simulation settings cannot be changed in this case.} \item{tree}{A phylogenetic tree object.} \item{alternative}{A character string specifying whether the test should test if observations are "greater", "less" or "two.sided" compared to the simulated null hypothesis of no phylogenetic correlation.} } \description{ This function performs a Moran's I test for phylogenetic autocorrelation on the calculated quantile residuals. } \details{ The function performs the Moran.I test from the package ape on the DHARMa residuals, based on the phylogenetic distance matrix internally created from the provided tree. For custom distance matrices, you can use \link{testSpatialAutocorrelation}. } \note{ Standard DHARMa simulations from models with (temporal / spatial / phylogenetic) conditional autoregressive terms will still have the respective temporal / spatial / phylogenetic correlation in the DHARMa residuals, unless the package you are using is modelling the autoregressive terms as explicit REs and is able to simulate conditional on the fitted REs. This has two consequences: \enumerate{ \item If you check the residuals for such a model, they will still show significant autocorrelation, even if the model fully accounts for this structure. \item Because the DHARMa residuals for such a model are not statistically independent any more, other tests (e.g. dispersion, uniformity) may have inflated type I error, i.e. you will have a higher likelihood of spurious residual problems. } There are three (non-exclusive) routes to address these issues when working with spatial / temporal / phylogenetic autoregressive models: \enumerate{ \item Simulate conditional on the fitted CAR structures (see conditional simulations in the help of \link{simulateResiduals}). \item Rotate simulations prior to residual calculations (see parameter rotation in \link{simulateResiduals}). \item Use custom tests / plots that explicitly compare the correlation structure in the simulated data to the correlation structure in the observed data. } } \examples{ \dontrun{ library(DHARMa) library(phylolm) set.seed(123) tre = rcoal(60) b0 = 0; b1 = 1; x <- runif(length(tre$tip.label), 0, 1) y <- b0 + b1*x + rTrait(n = 1, phy = tre, model = "BM", parameters = list(ancestral.state = 0, sigma2 = 10)) dat = data.frame(trait = y, pred = x) fit = lm(trait ~ pred, data = dat) res = simulateResiduals(fit, plot = T) testPhylogeneticAutocorrelation(res, tree = tre) fit = phylolm(trait ~ pred, data = dat, phy = tre, model = "BM") summary(fit) # phylogenetic autocorrelation still present in residuals res = simulateResiduals(fit, plot = T) # with "rotation" the residual autocorrelation is gone, see ?simulateResiduals. res = simulateResiduals(fit, plot = T, rotation = "estimated") } } \seealso{ \link{testResiduals}, \link{testUniformity}, \link{testOutliers}, \link{testDispersion}, \link{testZeroInflation}, \link{testGeneric}, \link{testTemporalAutocorrelation}, \link{testSpatialAutocorrelation}, \link{testQuantiles}, \link{testCategorical} } \author{ Florian Hartig } DHARMa/man/plot.DHARMaBenchmark.Rd0000644000176200001440000000215614677165224016116 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/runBenchmarks.R \name{plot.DHARMaBenchmark} \alias{plot.DHARMaBenchmark} \title{Plots DHARMa benchmarks} \usage{ \method{plot}{DHARMaBenchmark}(x, ...)
} \arguments{ \item{x}{object of class DHARMaBenchmark, created by \link{runBenchmarks}.} \item{...}{parameters to pass to the plot function.} } \description{ The function plots the result of an object of class DHARMaBenchmark, created by \link{runBenchmarks}. } \details{ The function will create two types of plots, depending on whether the run contains only a single value (or no value) of the control parameter, or whether a vector of control values is provided: If a single or no value of the control parameter is provided, the function will create box plots of the estimated p-values, with the number of significant p-values plotted to the left. If a control parameter is provided, the function will plot the proportion of significant p-values against the control parameter, with 95\% CIs based on the performed replicates displayed as confidence bands. } \seealso{ \link{runBenchmarks} } DHARMa/man/getFixedEffects.Rd0000644000176200001440000000233514677165224015370 0ustar liggesusers% Generated by roxygen2: do not edit by hand % Please edit documentation in R/compatibility.R \name{getFixedEffects} \alias{getFixedEffects} \alias{getFixedEffects.default} \alias{getFixedEffects.MixMod} \title{Extract fixed effects of a supported model} \usage{ getFixedEffects(object, ...) \method{getFixedEffects}{default}(object, ...) \method{getFixedEffects}{MixMod}(object, ...) } \arguments{ \item{object}{a fitted model.} \item{...}{additional parameters.} } \description{ A wrapper to extract fixed effects of a supported model. } \examples{ testData = createData(sampleSize = 400, family = gaussian()) fittedModel <- lm(observedResponse ~ Environment1, data = testData) # response that was used to fit the model getObservedResponse(fittedModel) # predictions of the model for these points getFitted(fittedModel) # extract simulations from the model as matrix getSimulations(fittedModel, nsim = 2) # extract simulations from the model for refit (often requires different structure) x = getSimulations(fittedModel, nsim = 2, type = "refit") getRefit(fittedModel, x[[1]]) getRefit(fittedModel, getObservedResponse(fittedModel)) } \seealso{ \link{getObservedResponse}, \link{getSimulations}, \link{getRefit}, \link{getFitted} } DHARMa/DESCRIPTION0000644000176200001440000000457414704441036012761 0ustar liggesusersPackage: DHARMa Title: Residual Diagnostics for Hierarchical (Multi-Level / Mixed) Regression Models Version: 0.4.7 Date: 2024-10-16 Authors@R: c(person("Florian", "Hartig", email = "florian.hartig@biologie.uni-regensburg.de", role = c("aut", "cre"), comment=c(ORCID="0000-0002-6255-9059")), person("Lukas", "Lohse", role = "ctb"), person("Melina", "de Souza leite", role = "ctb")) Description: The 'DHARMa' package uses a simulation-based approach to create readily interpretable scaled (quantile) residuals for fitted (generalized) linear mixed models. Currently supported are linear and generalized linear (mixed) models from 'lme4' (classes 'lmerMod', 'glmerMod'), 'glmmTMB', 'GLMMadaptive', and 'spaMM'; phylogenetic linear models from 'phylolm' (classes 'phylolm' and 'phyloglm'); generalized additive models ('gam' from 'mgcv'); 'glm' (including 'negbin' from 'MASS', but excluding quasi-distributions) and 'lm' model classes. Moreover, externally created simulations, e.g. posterior predictive simulations from Bayesian software such as 'JAGS', 'STAN', or 'BUGS' can be processed as well.
The resulting residuals are standardized to values between 0 and 1 and can be interpreted as intuitively as residuals from a linear regression. The package also provides a number of plot and test functions for typical model misspecification problems, such as over/underdispersion, zero-inflation, and residual spatial, phylogenetic and temporal autocorrelation. Depends: R (>= 3.0.2) Imports: stats, graphics, utils, grDevices, Matrix, parallel, gap, lmtest, ape, qgam (>= 1.3.2), lme4 Suggests: knitr, testthat (>= 3.0.0), rmarkdown, KernSmooth, sfsmisc, MASS, mgcv, mgcViz (>= 0.1.9), spaMM (>= 3.2.0), GLMMadaptive, glmmTMB (>= 1.1.2.3), phylolm (>= 2.6.5) Enhances: phyr, rstan, rjags, BayesianTools License: GPL (>= 3) URL: http://florianhartig.github.io/DHARMa/ LazyData: TRUE BugReports: https://github.com/florianhartig/DHARMa/issues RoxygenNote: 7.3.2 VignetteBuilder: knitr Encoding: UTF-8 Config/testthat/edition: 3 NeedsCompilation: no Packaged: 2024-10-17 17:47:16 UTC; melinaleite Author: Florian Hartig [aut, cre] (<https://orcid.org/0000-0002-6255-9059>), Lukas Lohse [ctb], Melina de Souza leite [ctb] Maintainer: Florian Hartig <florian.hartig@biologie.uni-regensburg.de> Repository: CRAN Date/Publication: 2024-10-18 11:10:22 UTC